code stringlengths 1 1.72M | language stringclasses 1
value |
|---|---|
# Copyright 2011 Tom SF Haines
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from utils.start_cpp import start_cpp
from utils.numpy_help_cpp import numpy_util_code
from dp_utils.sampling_cpp import sampling_code
smp_code = numpy_util_code + sampling_code + start_cpp() + """
#ifndef SMP_CODE
#define SMP_CODE
class SMP
{
public:
// Basic constructor - after construction before anything else the Init method must be called...
SMP()
:fia(0),priorMN(0),sam(0),samPos(0),samTemp(0),power(0),temp(0)
{}
// Constructor that calls the init method...
SMP(int flagSize, int fliSize)
:fia(0),priorMN(0),sam(0),samPos(0),samTemp(0),power(0),temp(0)
{
Init(flagSize, fliSize);
}
// Cleans up...
~SMP()
{
delete[] temp;
delete[] power;
delete[] samTemp;
delete[] samPos;
delete[] sam;
delete[] priorMN;
delete[] fia;
}
// Initialises the Sparse Multinomial Posterior object with the length of each flag sequence as flagSize and the number of such flag sequences in the system with fliSize. Note that the flag list must be provided by a flag index array that has had its addSingles method correctly called...
void Init(int flagSize, int fliSize)
{
// Clean up...
delete[] temp;
delete[] power;
delete[] samTemp;
delete[] samPos;
delete[] sam;
delete[] priorMN;
delete[] fia;
// Store sizes...
flagLen = flagSize;
fliLen = fliSize;
// Initialise the flag index array - its filled in later...
fia = new unsigned char[flagLen*fliLen];
// Setup the prior - by default a uniform...
priorMN = new float[flagLen];
for (int f=0;f<flagLen;f++) priorMN[f] = 1.0/flagLen;
priorConc = flagLen;
// Zero out the sampling set - user has to add some samples before use...
samLen = 0;
sam = 0;
samPos = 0;
samTemp = 0;
// The power counting array - stores the exponent term for each flag list...
power = new int[fliLen];
for (int s=0;s<fliLen;s++) power[s] = 0;
// The temporary vector, which gets so many uses...
temp = new float[flagLen];
}
// Fills in the flag index array - must be called in practise immediatly after the constructor. Input is the output of calling getFlagMatrix on a FlagIndexArray.
void SetFIA(PyArrayObject * arr)
{
for (int s=0;s<fliLen;s++)
{
for (int f=0;f<flagLen;f++)
{
fia[s*flagLen + f] = Byte2D(arr,s,f);
}
}
}
// For if you have the fia as an array of unsigned char's instead...
void SetFIA(unsigned char * arr)
{
for (int s=0;s<fliLen;s++)
{
for (int f=0;f<flagLen;f++)
{
fia[s*flagLen + f] = arr[s*flagLen + f];
}
}
}
// Sets the number of samples to use for the estimation - basically draws a large number of positions from a uniform Dirichlet distribution and then pre-calculates the values required such that the calculation of the mean given samples is trivial. Must be called before sampling occurs.
void SetSampleCount(int count = 1024)
{
// Handle memory...
samLen = count;
delete[] sam;
sam = new float[samLen*fliLen];
delete[] samPos;
samPos = new float[samLen*flagLen];
delete[] samTemp;
samTemp = new float[samLen];
// Generate the samples...
for (int a=0;a<samLen;a++)
{
// Draw a distribution from the uniform Dirichlet - we are going to integrate by the classic summing of lots of uniform samples approach...
float sum = 0.0;
for (int f=0;f<flagLen;f++)
{
temp[f] = -log(1.0 - sample_uniform()); // Identical to sample_gamma(1), but without the code to deal with values other than 1!
sum += temp[f];
}
for (int f=0;f<flagLen;f++) temp[f] /= sum;
// Calculate and store the log of each of the sums implied by the flag array - this makes the integration sampling nice and efficient...
for (int s=0;s<fliLen;s++)
{
float * out = &sam[a*fliLen + s];
*out = 0.0;
for (int f=0;f<flagLen;f++)
{
if (fia[s*flagLen + f]!=0) *out += temp[f];
}
*out = log(*out);
}
// Also fill in the samPos array...
for (int f=0;f<flagLen;f++)
{
samPos[a*flagLen + f] = temp[f];
}
}
}
// Sets the Dirichlet prior, using a vector that sums to unity and a concentration...
void SetPrior(float * mn, float conc)
{
for (int f=0;f<flagLen;f++) priorMN[f] = mn[f];
priorConc = conc;
}
// Sets the Dirichlet prior, using a vector that sums to the concentration...
void SetPrior(float * dir)
{
priorConc = 0.0;
for (int f=0;f<flagLen;f++)
{
priorMN[f] = dir[f];
priorConc += priorMN[f];
}
for (int f=0;f<flagLen;f++) priorMN[f] /= priorConc;
}
// This version takes python objects - a numpy array of floats and a python float for the concentration...
void SetPrior(PyArrayObject * mn, PyObject * conc)
{
for (int f=0;f<flagLen;f++) priorMN[f] = Float1D(mn,f);
priorConc = PyFloat_AsDouble(conc);
}
// Resets the counts, ready to add a bunch of new samples for a new estimate...
void Reset()
{
for (int s=0;s<fliLen;s++) power[s] = 0;
}
// Given a flag list index indicating which counts are valid and a set of counts indicating the sample counts drawn from the unknown multinomial. Updates the model accordingly...
void Add(int fli, const int * counts)
{
int total = 0;
for (int f=0;f<flagLen;f++)
{
if (fia[fli*flagLen + f]!=0)
{
power[f] += counts[f];
total += counts[f];
}
}
power[fli] -= total + 1;
}
// An alternate add method - adds the return value of Power(), allowing the combining of samples stored in seperate SMP objects...
void Add(const int * pow)
{
for (int s=0;s<fliLen;s++) power[s] += pow[s];
}
// For incase you have a power vector as a numpy array...
void Add(PyArrayObject * pow)
{
for (int s=0;s<fliLen;s++) power[s] += Int1D(pow,s);
}
// These return the dimensions of the entities...
int FlagSize() {return flagLen;}
int FlagIndexSize() {return fliLen;}
// These return info about the prior...
float * GetPriorMN() {return priorMN;}
float GetPriorConc() {return priorConc;}
// Returns the power vector - can be used to combine SMP objects, assuming the same fia...
const int * Power() {return power;}
// Calculates the mean of the multinomial drawn from the prior - has lots of sexy optimisations to make it fast. out must be of flagSize() and will have the estimate of the mean written into it...
void Mean(float * out)
{
// First calculate the log probability of each sample, including the prior, storing into samTemp...
float maxVal = -1e100;
for (int a=0;a<samLen;a++)
{
samTemp[a] = 0.0;
// Prior...
for (int f=0;f<flagLen;f++)
{
samTemp[a] += priorConc * priorMN[f] * sam[a*fliLen + f];
}
// Informaiton provided by samples...
for (int s=0;s<fliLen;s++)
{
samTemp[a] += power[s] * sam[a*fliLen + s];
}
// Keep the maximum, for the next bit...
if (samTemp[a]>maxVal) maxVal = samTemp[a];
}
// Convert samTemp into an array of weights that sum to one - done in a numerically stable way, as the logs will represent extremelly small probabilities...
float sum = 0.0;
for (int a=0;a<samLen;a++)
{
samTemp[a] = exp(samTemp[a] - maxVal);
sum += samTemp[a];
}
for (int a=0;a<samLen;a++) samTemp[a] /= sum;
// Calculate the mean by suming the weights multiplied by the sample draws into the output...
for (int f=0;f<flagLen;f++) out[f] = 0.0;
for (int a=0;a<samLen;a++)
{
for (int f=0;f<flagLen;f++)
{
out[f] += samTemp[a] * samPos[a*flagLen + f];
}
}
}
private:
int flagLen; // Length of each flag list.
int fliLen; // Number of flag lists in system.
unsigned char * fia; // Array indexed [fli * flagLen + flagIndex] of {0,1} indicating inclusion in each flag list.
float * priorMN; // Multinomial of prior.
float priorConc; // Concentration of prior.
int samLen; // Number of samples used when sampling the mean.
float * sam; // Array indexed by [sam * fliLen + fli], giving the log of each sum of terms of a draw from Dirichlet(1,...,1).
float * samPos; // Array indexed by [sam * flagLen + flag], giving the not-log of the above for the single flag entries.
float * samTemp; // Temporary of length samLen.
int * power; // Count array, indexed by fli, of power terms for current distribution.
float * temp; // Temporary array of length flagLen.
};
#endif
"""
| Python |
# Copyright 2011 Tom SF Haines
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy
class FlagIndexArray:
  """Provides a register for flag lists - given a list of true/false flags gives a unique number for each combination. Requesting the number associated with a combination that has already been entered will always return the same number. All flag lists should be the same length and you can obtain a numpy matrix of {0,1} valued unsigned chars where each row corresponds to the flag list with that index. Also has a function to add the flags for each case of only one flag being on, which if called before anything else puts them so the index of the flag and the index of the flag list correspond - a trick required by the rest of the system."""

  def __init__(self, length, addSingles = False):
    """Requires the length of the flag lists. Alternatively it can clone another FlagIndexArray. Will call the addSingles method for you if the flag is set."""
    if isinstance(length, FlagIndexArray):
      # Clone construction - copy the length and the flag register...
      self.length = length.length
      self.flags = dict(length.flags)
    else:
      self.length = length
      self.flags = dict() # Dictionary from flag lists to integers. Flag lists are represented with tuples of {0,1}.
    if addSingles: self.addSingles()

  def getLength(self):
    """Return the length that all flag lists should be."""
    return self.length

  def addSingles(self):
    """Adds the entries where only a single flag is set, with the index of the flag list set to match the index of the flag that is set. Must be called first, before flagIndex is ever called."""
    # range rather than xrange so this also runs under Python 3; iteration is identical...
    for i in range(self.length):
      t = tuple([0]*i + [1] + [0]*(self.length-(i+1)))
      self.flags[t] = i

  def flagIndex(self, flags, create = True):
    """Given a flag list returns its index - if it has been previously supplied then it will be the same index, otherwise a new one. Can be passed any entity that can be indexed via [] to get the integers {0,1}. Returns a natural. If the create flag is set to False in the event of a previously unseen flag list it will raise an exception instead of assigning it a new natural."""
    # Normalise the input to a tuple of {0,1}, so arbitrary truthy values work as keys...
    f = [0]*self.length
    for i in range(self.length):
      if flags[i]!=0: f[i] = 1
    f = tuple(f)

    if f in self.flags: return self.flags[f]
    if not create: raise Exception('Unrecognised flag list')

    # New flag list - the next unused natural becomes its index...
    index = len(self.flags)
    self.flags[f] = index
    return index

  def addFlagIndexArray(self, fia, remap = None):
    """Given a flag index array this merges its flags into the new flags, returning a dictionary indexed by fia's indices that converts them to the new indices in self. remap is optionally a dictionary converting flag indices in fia to flag indexes in self - remap[fia index] = self index."""
    def adjust(fi):
      # Rearranges a flag tuple from fia's flag ordering into self's flag ordering...
      fo = [0]*self.length
      for i in range(fia.length):
        fo[remap[i]] = fi[i]
      return tuple(fo)

    ret = dict()
    for f, index in fia.flags.items():
      if remap: f = adjust(f)
      ret[index] = self.flagIndex(f)
    return ret

  def flagCount(self):
    """Returns the number of flag lists that are in the system."""
    return len(self.flags)

  def getFlagMatrix(self):
    """Returns a 2D numpy array of type numpy.uint8 containing {0,1}, indexed by [flag index,flag entry] - basically all the flags stacked into a single matrix and indexed by the entries returned by flagIndex. Often referred to as a 'flag index array' (fia)."""
    ret = numpy.zeros((len(self.flags),self.length), dtype=numpy.uint8)
    for flags,row in self.flags.items():
      for col in range(self.length):
        if flags[col]!=0: ret[row,col] = 1
    return ret
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Some basic matrix operations that come in use...
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE
template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
while(count!=0)
{
T t = *lhs;
*lhs = *rhs;
*rhs = t;
++lhs;
++rhs;
--count;
}
}
// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
if (stride==-1) stride = size;
if (size==1) return pos[0];
else
{
if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
else
{
T ret = 0.0;
for (int i=0; i<size; i++)
{
if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
if ((i+size)%2) ret += sub;
else ret -= sub;
}
for (int i=1; i<size; i++)
{
MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
}
return ret;
}
}
}
// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
if (stride==-1) stride = size;
for (int r=0; r<size; r++)
{
for (int c=0; c<size; c++)
{
out[r*stride + c] = (c==r)?1.0:0.0;
}
}
for (int r=0; r<size; r++)
{
// Find largest pivot and swap in, fail if best we can get is 0...
T max = in[r*stride + r];
int index = r;
for (int i=r+1; i<size; i++)
{
if (fabs(in[i*stride + r])>fabs(max))
{
max = in[i*stride + r];
index = i;
}
}
if (index!=r)
{
MemSwap(&in[index*stride], &in[r*stride], size);
MemSwap(&out[index*stride], &out[r*stride], size);
}
if (fabs(max-0.0)<1e-6) return false;
// Divide through the entire row...
max = 1.0/max;
in[r*stride + r] = 1.0;
for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
for (int i=0; i<size; i++) out[r*stride + i] *= max;
// Row subtract to generate 0's in the current column, so it matches an identity matrix...
for (int i=0; i<size; i++)
{
if (i==r) continue;
T factor = in[i*stride + r];
in[i*stride + r] = 0.0;
for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
}
}
return true;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
class ProgBar:
  """Console progress bar. The object's lifetime brackets the work being measured - construction draws the bar outline and records the start time, destruction completes the bar and prints how long everything took."""

  def __init__(self, width = 60, onCallback = None):
    """width is how many characters wide the bar is; onCallback, when given, is invoked (with no arguments) on every call to callback."""
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback

    # Draw the outline that the bar then fills in underneath...
    sys.stdout.write(('_'*self.width)+'\n')
    sys.stdout.flush()

  def __del__(self):
    # Complete the bar and report the total elapsed time...
    self.end = time.time()
    self.__grow(self.width)
    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Number of bar characters the current progress corresponds to, capped at the bar width...
    target = int(float(self.width)*float(nDone)/float(nToDo))
    target = min((target, self.width))

    # Only ever extend the bar, never shrink it...
    if target>self.fill:
      self.__grow(target)

  def __grow(self, n):
    # Emit just the bar characters that are new since the last update...
    sys.stdout.write('|'*(n-self.fill))
    sys.stdout.flush()
    self.fill = n
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pydoc
import inspect
class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""

  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # pydoc does the heavy lifting for the html rendering...
    self.doc = pydoc.HTMLDoc()

    # Open the html output file and emit its header...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')

    # Sections are accumulated into these strings and only written out on destruction...
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # Same again for the wiki output...
    # NOTE(review): '= %s= \n\n' has odd spacing - looks like it was meant to
    # be '= %s =\n\n'; confirm against the wiki heading syntax before changing.
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)
    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # On destruction the accumulated sections are flushed out, the footers
    # written and both files closed. Relies on garbage collection running the
    # destructor - fragile if an exception keeps the object alive.
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works. fls ('first line special') bolds the first line and italicises any 'name.py - ' / 'name.txt - ' style prefix in it."""
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line - strip the newline, pydoc joins with <br/>...
      hl = line.replace('\n', '')
      if i==0 and fls:
        hl = '<strong>' + hl + '</strong>'
        for ext in ['py','txt']:
          if '.%s - '%ext in hl:
            s = hl.split('.%s - '%ext, 1)
            hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      # wiki version of the same line...
      wl = line.strip()
      if i==0 and fls:
        wl = '*%s*'%wl
        for ext in ['py','txt']:
          if '.%s - '%ext in wl:
            s = wl.split('.%s - '%ext, 1)
            wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    # Files are written out immediately, unlike variables/functions/classes...
    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))
    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    # html - pydoc does the work.
    # NOTE(review): .replace(' ',' ') is a no-op as written - presumably the
    # original source read .replace('&nbsp;',' ') and was mangled; confirm.
    self.html_functions += self.doc.docroutine(func).replace(' ',' ')
    self.html_functions += '\n'

    # wiki - rebuild the signature by hand via introspection...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Left-pad defaults with None so they align with the argument names...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None:
      arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None:
      arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    # html - pydoc does the work. (Same presumed-mangled no-op replace as in addFunction.)
    self.html_classes += self.doc.docclass(cls).replace(' ',' ')
    self.html_classes += '\n'

    # wiki - class header with its parent classes...
    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Document the methods, sorted with __init__ first...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        # NOTE(review): the guard above requires 'not inspect.ismethod(method)',
        # so the ismethod branch below can never be taken and args is always
        # ['?'] for real methods - this looks like a bug (perhaps the guard was
        # meant to use 'or'); confirm intent before altering behaviour.
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        # Same default-alignment dance as addFunction...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None:
          arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None:
          arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk up the mro to find a docstring if this class does not provide one...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None: return inspect.getdoc(method)
          except: pass
          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None
        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Document simple class-level variables/descriptors...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import random
import math
from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp
# Provides various gamma-related functions...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE
#include <cmath>
// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
if (z<0.5)
{
// Use reflection formula, as approximation doesn't work down here...
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
}
else
{
double x = coeff[0];
for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
double t = z + 6.5;
return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
}
}
// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.
double ret = 0.0;
while (z<highVal)
{
ret -= 1.0/z;
z += 1.0;
}
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz4 = iz2*iz2;
double iz6 = iz4*iz2;
ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
return ret;
}
// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
static const double highVal = 8.0;
double ret = 0.0;
while (z<highVal)
{
ret += 1.0/(z*z);
z += 1.0;
}
z -= 1.0;
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz3 = iz1*iz2;
double iz5 = iz3*iz2;
double iz7 = iz5*iz2;
double iz9 = iz7*iz2;
ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
return ret;
}
#endif
"""
def lnGamma(z):
  """Evaluates the C++ lnGamma implementation via weave. Redundant given that scipy provides gammaln, but needed so the C++ version can be unit tested."""
  src = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(src, ['z'], support_code=gamma_code)
def digamma(z):
  """Evaluates the C++ digamma implementation via weave. Redundant given that scipy provides psi, but needed so the C++ version can be unit tested."""
  src = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(src, ['z'], support_code=gamma_code)
def trigamma(z):
  """Evaluates the C++ trigamma implementation via weave. Redundant given that scipy provides polygamma, but needed so the C++ version can be unit tested."""
  src = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(src, ['z'], support_code=gamma_code)
class TestFuncs(unittest.TestCase):
  """Unit tests comparing the weave implementations of the gamma-related functions against their scipy equivalents at randomly chosen points."""

  def test_compile(self):
    # Just verify that the support code compiles...
    src = start_cpp(gamma_code) + """
    """
    weave.inline(src, support_code=gamma_code)

  def test_error_lngamma(self):
    # lnGamma versus scipy's gammaln...
    for _ in xrange(1000):
      x = random.uniform(0.01, 100.0)
      assert(math.fabs(lnGamma(x) - gammaln(x))<1e-12)

  def test_error_digamma(self):
    # digamma versus scipy's psi...
    for _ in xrange(1000):
      x = random.uniform(0.01, 100.0)
      assert(math.fabs(digamma(x) - psi(x))<1e-9)

  def test_error_trigamma(self):
    # trigamma versus scipy's polygamma of order 1...
    for _ in xrange(1000):
      x = random.uniform(0.01, 100.0)
      assert(math.fabs(trigamma(x) - polygamma(1,x))<1e-9)
# If this file is run directly do the unit tests (requires scipy with weave,
# so Python 2 only); when imported only the code strings and wrappers load...
if __name__ == '__main__':
  unittest.main()
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import hashlib
def start_cpp(hash_str = None):
    """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the code. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
    # Inspect the caller's frame so the #line directive points at the python
    # source file/line that contains the C++ snippet...
    frame = inspect.currentframe().f_back
    info = inspect.getframeinfo(frame)
    if hash_str is None:
        return '#line %i "%s"\n' % (info[1], info[0])
    # md5 needs bytes - encode transparently so both str and bytes work...
    data = hash_str if isinstance(hash_str, bytes) else hash_str.encode('utf8')
    hash_val = hashlib.md5(data).hexdigest()
    # The hash rides along in a comment, changing the inline code's own hash
    # whenever the support code changes, which forces weave to recompile...
    return '#line %i "%s" // %s\n' % (info[1], info[0], hash_val)
| Python |
# -*- coding: utf-8 -*-
# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).
import cv
import numpy as np
def cv2array(im):
    """Converts a cv image to a numpy array, shaped (height, width, channels)
    with a dtype matching the image's bit depth."""
    # Map OpenCV bit depths to numpy dtype names...
    depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }
    # frombuffer yields a read-only view of the pixel bytes - copy so the
    # caller gets a normal writable array (matching the old fromstring
    # behaviour, which is deprecated in modern numpy)...
    a = np.frombuffer(im.tostring(),
                      dtype=depth2dtype[im.depth],
                      count=im.width*im.height*im.nChannels).copy()
    a.shape = (im.height, im.width, im.nChannels)
    return a
def array2cv(a):
    """Converts a numpy array to a cv image, if possible - the array must be 2D
    (single channel) or 3D (height, width, channels) with a supported dtype."""
    # Map numpy dtype names to OpenCV bit depths...
    dtype2depth = {
        'uint8': cv.IPL_DEPTH_8U,
        'int8': cv.IPL_DEPTH_8S,
        'uint16': cv.IPL_DEPTH_16U,
        'int16': cv.IPL_DEPTH_16S,
        'int32': cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }
    # A 2D array is a single channel image; 3D arrays carry channels in axis 2.
    # (Explicit check replaces a bare except that silently swallowed every
    # error, not just the missing-dimension case.)
    nChannels = a.shape[2] if len(a.shape) > 2 else 1
    cv_im = cv.CreateImageHeader((a.shape[1], a.shape[0]),
                                 dtype2depth[str(a.dtype)],
                                 nChannels)
    # The final argument is the row stride, in bytes...
    cv.SetData(cv_im, a.tostring(),
               a.dtype.itemsize*nChannels*a.shape[1])
    return cv_im
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal
import unittest
def repeat(x):
    """An endless generator that yields its input forever - handy with the
    mp_map function for feeding a constant argument to every call."""
    while True:
        yield x
def run_code(code, args):
    """Internal use function that does the work in each process - rebuilds a
    function from its marshalled code object and applies it to args."""
    rebuilt = types.FunctionType(marshal.loads(code), globals(), '_')
    return rebuilt(*args)
def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant arguments easier as they can just repeat for infinity. A Pool to use may be passed in via the keyword argument 'pool'; otherwise a temporary one is created and shut down before returning."""
    pool = keywords.get('pool')
    own_pool = pool is None
    if own_pool:
        pool = mp.Pool()
    try:
        # __code__ (rather than the old func_code attribute) exists on
        # python 2.6+ as well as python 3...
        code = marshal.dumps(func.__code__)
        # Dispatch every job asynchronously, then collect results in order...
        jobs = [pool.apply_async(run_code, (code, args)) for args in zip(*iters)]
        return [job.get() for job in jobs]
    finally:
        # Only shut down a pool we created - a caller-supplied pool belongs to
        # the caller. The original leaked the worker processes of its own pool.
        if own_pool:
            pool.close()
            pool.join()
class TestMpMap(unittest.TestCase):
    """Unit tests for mp_map - each case compares its output against the
    expected sequential result."""
    def test_simple1(self):
        # An identity function over a small list should survive the round trip...
        data = ['a','b','c','d']
        def noop(data):
            return data
        data_noop = mp_map(noop, data)
        self.assertEqual(data, data_noop)
    def test_simple2(self):
        # A lambda doubling 1000 integers, checked against builtin map...
        data = [x for x in xrange(1000)]
        data_double = mp_map(lambda a: a*2, data)
        self.assertEqual(map(lambda a: a*2,data), data_double)
    def test_gen(self):
        # Input supplied by a generator rather than a list...
        def gen():
            for i in xrange(100): yield i
        data_double = mp_map(lambda a: a*2, gen())
        self.assertEqual(map(lambda a: a*2,gen()), data_double)
    def test_repeat(self):
        # The repeat() helper feeds a constant second argument to every call...
        def mult(a,b):
            return a*b
        data = [x for x in xrange(50,5000,5)]
        data_triple = mp_map(mult, data, repeat(3))
        self.assertEqual(map(lambda a: a*3,data),data_triple)
    def test_none(self):
        # Empty input must give an empty result, not an error...
        data = []
        data_sqr = mp_map(lambda x: x*x, data)
        self.assertEqual([],data_sqr)
# Run the TestMpMap suite when this file is executed directly...
if __name__ == '__main__':
    unittest.main()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import tempfile
import shutil
from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util
try:
__default_compiler = distutils.ccompiler.new_compiler()
except:
__default_compiler = None
def make_mod(name, base, source, openCL = False):
    """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediately import the just compiled module. Note that on failure errors can be thrown - its your choice to catch them or not. name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternatively a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necessary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
    if __default_compiler is None: raise Exception('No compiler!')

    # Work out the various file names - check if we actually need to do anything...
    if not isinstance(source, list): source = [source]
    source_path = [os.path.join(base, s) for s in source]
    library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

    # any() replaces the old reduce(or, map(...)) - same result, and it also
    # copes with an empty source list instead of throwing...
    if any(distutils.dep_util.newer(s, library_path) for s in source_path):
        # Backup argv before entering the try so the finally block can always
        # restore it; temp_dir starts as None so cleanup knows whether mkdtemp
        # actually ran (previously a mkdtemp failure caused a NameError here)...
        old_argv = sys.argv[:]
        temp_dir = None
        try:
            temp_dir = tempfile.mkdtemp()

            # Prepare the extension - distutils is driven via a fake argv...
            sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]
            comp_path = [s for s in source_path if not s.endswith('.h')]
            depends = [s for s in source_path if s.endswith('.h')]
            if openCL:
                ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
            else:
                ext = Extension(name, comp_path, depends=depends)

            # Compile... (stray debug print removed)
            setup(name=name, version='1.0.0', ext_modules=[ext])
        finally:
            # Cleanup the argv variable and the temporary directory...
            sys.argv = old_argv
            if temp_dir is not None:
                shutil.rmtree(temp_dir, True)
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code
# Provides various functions to assist with manipulating python objects from c++ code.
# The array getters below return new[]'d buffers the caller must delete[]; they
# use Byte1D/Float1D from numpy_util_code, which is prepended here so this
# string is self-contained as weave support code.
# NOTE(review): PyObject_GetAttrString can return NULL when the attribute is
# missing - these helpers do not check for that, so only request attributes
# known to exist on the object.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE
// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
PyObject * b = PyObject_GetAttrString(obj, name);
bool ret = b!=Py_False;
Py_DECREF(b);
return ret;
}
// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
PyObject * i = PyObject_GetAttrString(obj, name);
int ret = PyInt_AsLong(i);
Py_DECREF(i);
return ret;
}
// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
PyObject * f = PyObject_GetAttrString(obj, name);
float ret = PyFloat_AsDouble(f);
Py_DECREF(f);
return ret;
}
// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
unsigned char * ret = new unsigned char[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
Py_DECREF(nao);
return ret;
}
// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
float * ret = new float[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
Py_DECREF(nao);
return ret;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Defines helper functions for accessing numpy arrays...
# Each accessor returns a reference computed from the array's data pointer plus
# the (byte) strides, so non-contiguous arrays work correctly. The array's
# dtype must match the accessor used (float32 / uint8 / C int respectively) -
# this is not checked, a mismatch silently reads garbage.
numpy_util_code = start_cpp() + """
#ifndef NUMPY_UTIL_CODE
#define NUMPY_UTIL_CODE
float & Float1D(PyArrayObject * arr, int index = 0)
{
return *(float*)(arr->data + index*arr->strides[0]);
}
float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
unsigned char & Byte1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index*arr->strides[0]);
}
unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
int & Int1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index*arr->strides[0]);
}
int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
#endif
"""
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen
# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions... (make.make_mod was previously registered twice - once suffices.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
def setProcName(name):
    """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
    # Call the process control function...
    # prctl option 15 is PR_SET_NAME - renames the kernel task name (comm),
    # which the kernel truncates to 15 characters plus NUL.
    libc = cdll.LoadLibrary('libc.so.6')
    libc.prctl(15, c_char_p(name), 0, 0, 0)
    # Update argv, so tools that read the command line (e.g. ps -f) also see
    # the new name. _dl_argv is a glibc-internal symbol holding the original
    # argv pointer - NOTE(review): this relies on a private glibc detail and
    # may not exist on other libcs; confirm on the target platform.
    charPP = POINTER(POINTER(c_char))
    argv = charPP.in_dll(libc,'_dl_argv')
    # Overwrite argv[0] in place, never writing past its original length...
    size = libc.strlen(argv[0])
    libc.strncpy(argv[0],c_char_p(name),size)
if __name__=='__main__':
    # Quick test that it works...
    import os
    ps1 = 'ps'
    ps2 = 'ps -f'
    # Show the process list before the rename...
    os.system(ps1)
    os.system(ps2)
    setProcName('wibble_wobble')
    # ...and after, so the change is visible in both ps outputs.
    os.system(ps1)
    os.system(ps2)
| Python |
#! /usr/bin/env python
# Copyright 2011 Tom SF Haines
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import smp
from utils import doc_gen
# Setup the documentation generator for the smp module...
doc = doc_gen.DocGen('smp', 'Sparse Multinomial Posterior', 'Estimate a multinomial distribution, given sparse draws')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('smp_code', 'String containing the C++ code that does the actual work for the system.')

# Classes...
doc.addClass(smp.SMP)
doc.addClass(smp.FlagIndexArray)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.start_cpp import start_cpp
# Code for sampling from various distributions, including some very specific situations involving Dirichlet processes...
# Provides: sample_nat (uniform integer in [0,n)), sample_uniform,
# sample_standard_normal / sample_normal (Box-Muller), sample_gamma (normal
# approximation above gamma_approx, Cheng/Feast for alpha>1, rejection
# sampling for alpha<1) and sample_beta (ratio of two gamma draws). Randomness
# comes from lrand48/drand48.
# NOTE(review): sample_nat uses lrand48()%n, which carries a slight modulo
# bias when n does not divide the generator's range - negligible for small n.
sampling_code = start_cpp() + """
#ifndef SAMPLING_CODE
#define SAMPLING_CODE
#include <stdlib.h>
#include <math.h>
const double gamma_approx = 32.0; // Threshold between the two methods of doing a gamma draw.
// Returns a sample from the natural numbers [0,n)...
int sample_nat(int n)
{
return lrand48()%n;
}
// Returns a sample from [0.0,1.0)...
double sample_uniform()
{
return drand48();
//return double(random())/(double(RAND_MAX)+1.0);
}
// Samples from a normal distribution with a mean of 0 and a standard deviation of 1...
double sample_standard_normal()
{
double u = 1.0-sample_uniform();
double v = 1.0-sample_uniform();
return sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
}
// Samples from a normal distribution with the given mean and standard deviation...
double sample_normal(double mean, double sd)
{
return mean + sd*sample_standard_normal();
}
// Samples from the Gamma distribution, base version that has no scaling parameter...
/*double sample_gamma(double alpha)
{
// Check if the alpha value is high enough to approximate via a normal distribution...
if (alpha>gamma_approx)
{
while (true)
{
double ret = sample_normal(alpha, sqrt(alpha));
if (ret<0.0) continue;
return ret;
}
}
// First do the integer part of gamma(alpha)...
double ret = 0.0; // 1.0
while (alpha>=1.0)
{
alpha -= 1.0;
//ret /= 1.0 - sample_uniform();
ret -= log(1.0-sample_uniform());
}
//ret = log(ret);
// Now do the remaining fractional part and sum it in - uses rejection sampling...
if (alpha>1e-4)
{
while (true)
{
double u1 = 1.0 - sample_uniform();
double u2 = 1.0 - sample_uniform();
double u3 = 1.0 - sample_uniform();
double frac, point;
if (u1<=(M_E/(M_E+alpha)))
{
frac = pow(u2,1.0/alpha);
point = u3*pow(frac,alpha-1.0);
}
else
{
frac = 1.0 - log(u2);
point = u3*exp(-frac);
}
if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
{
ret += frac;
break;
}
}
}
// Finally return...
return ret;
}*/
// As above, but faster...
double sample_gamma(double alpha)
{
// Check if the alpha value is high enough to approximate via a normal distribution...
if (alpha>gamma_approx)
{
while (true)
{
double ret = sample_normal(alpha, sqrt(alpha));
if (ret<0.0) continue;
return ret;
}
}
// If alpha is one, within tolerance, just use an exponential distribution...
if (fabs(alpha-1.0)<1e-4)
{
return -log(1.0-sample_uniform());
}
if (alpha>1.0)
{
// If alpha is 1 or greater use the Cheng/Feast method...
while (true)
{
double u1 = sample_uniform();
double u2 = sample_uniform();
double v = ((alpha - 1.0/(6.0*alpha))*u1) / ((alpha-1.0)*u2);
double lt2 = 2.0*(u2-1.0)/(alpha-1) + v + 1.0/v;
if (lt2<=2.0)
{
return (alpha-1.0)*v;
}
double lt1 = 2.0*log(u2)/(alpha-1.0) - log(v) + v;
if (lt1<=1.0)
{
return (alpha-1.0)*v;
}
}
}
else
{
// If alpha is less than 1 use a rejection sampling method...
while (true)
{
double u1 = 1.0 - sample_uniform();
double u2 = 1.0 - sample_uniform();
double u3 = 1.0 - sample_uniform();
double frac, point;
if (u1<=(M_E/(M_E+alpha)))
{
frac = pow(u2,1.0/alpha);
point = u3*pow(frac,alpha-1.0);
}
else
{
frac = 1.0 - log(u2);
point = u3*exp(-frac);
}
if (point<=(pow(frac,alpha-1.0)*exp(-frac)))
{
return frac;
break;
}
}
}
}
// Samples from the Gamma distribution, version that has a scaling parameter...
double sample_gamma(double alpha, double beta)
{
return sample_gamma(alpha)/beta;
}
// Samples from the Beta distribution...
double sample_beta(double alpha, double beta)
{
double g1 = sample_gamma(alpha);
double g2 = sample_gamma(beta);
return g1 / (g1 + g2);
}
#endif
"""
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.python_obj_cpp import python_obj_code
from linked_list_cpp import linked_list_gc_code
from utils.gamma_cpp import gamma_code
from sampling_cpp import sampling_code
from conc_cpp import conc_code
from dir_est_cpp import dir_est_code
# Put all the supplied code together into one easy-to-use include - the
# snippets are plain strings, so concatenating them yields a single
# support_code block covering every dependency (each is include-guarded, so
# repeated inclusion is harmless).
dp_utils_code = python_obj_code + linked_list_gc_code + gamma_code + sampling_code + conc_code + dir_est_code
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.start_cpp import start_cpp
# C++ support code for Gibbs resampling of Dirichlet process concentration
# parameters under a Gamma prior - the auxiliary-variable scheme (appears to
# follow Escobar & West 1995 - TODO confirm). sample_dirichlet_proc_conc
# handles a single DP, Conc bundles that with its prior and current value, and
# SampleConcDP accumulates statistics from several DPs sharing one
# concentration parameter before drawing the new value.
# NOTE(review): SampleConcDP::SetPrior's comment claims it "resets the entire
# class", but it does not touch prev_conc - callers should also use
# SetPrevConc before AddDP.
conc_code = start_cpp() + """
// This funky little function is used to resample the concentration parameter of a Dirichlet process, using the previous parameter - allows this parameter to be Gibbs sampled. Also works for any level of a HDP, due to the limited interactions.
// Parameters are:
// pcp - previous concentration parameter.
// n - number of samples taken from the Dirichlet process
// k - number of discretly different samples, i.e. table count in the Chinese restaurant process.
// prior_alpha - alpha value of the Gamma prior on the concentration parameter.
// prior_beta - beta value of the Gamma prior on the concentration parameter.
double sample_dirichlet_proc_conc(double pcp, double n, double k, double prior_alpha = 1.01, double prior_beta = 0.01)
{
if ((n<(1.0-1e-6))||(k<(2.0-1e-6)))
{
return pcp; // Doesn't work in this case, so just repeat.
}
double nn = sample_beta(pcp+1.0, n);
double log_nn = log(nn);
double f_alpha = prior_alpha + k;
double f_beta = prior_beta - log_nn;
double pi_n_mod = (f_alpha - 1.0) / (n * f_beta);
double r = sample_uniform();
double r_mod = r / (1.0 - r);
if (r_mod>=pi_n_mod) f_alpha -= 1.0;
double ret = sample_gamma(f_alpha, f_beta);
if (ret<1e-3) ret = 1e-3;
return ret;
}
// Class to represent the concentration parameter associated with a DP - consists of the prior and the previous/current value...
struct Conc
{
float alpha; // Parameter for Gamma prior.
float beta; // "
float conc; // Previously sampled concentration value - needed for next sample, and for output/use.
// Resamples the concentration value, assuming only a single DP is using it. n = number of samples from DP, k = number of unique samples, i.e. respectivly RefTotal() and Size() for a ListRef.
void ResampleConc(int n, int k)
{
conc = sample_dirichlet_proc_conc(conc, n, k, alpha, beta);
if (conc<1e-3) conc = 1e-3;
}
};
// This class is the generalisation of the above for when multiple Dirichlet processes share a single concentration parameter - again allows a new concentration parameter to be drawn given the previous one and a Gamma prior, but takes multiple pairs of sample count/discrete sample counts, hence the class interface to allow it to accumilate the relevant information.
class SampleConcDP
{
public:
SampleConcDP():f_alpha(1.0),f_beta(1.0),prev_conc(1.0) {}
~SampleConcDP() {}
// Sets the prior and resets the entire class....
void SetPrior(double alpha, double beta)
{
f_alpha = alpha;
f_beta = beta;
}
// Set the previous concetration parameter - must be called before any DP stats are added...
void SetPrevConc(double prev)
{
prev_conc = prev;
}
// Call once for each DP that is using the concentration parameter...
// (n is the number of samples drawn, k the number of discretly different samples.)
void AddDP(double n, double k)
{
if (k>1.0)
{
double s = 0.0;
if (sample_uniform()>(1.0/(1.0+n/prev_conc))) s = 1.0;
double w = sample_beta(prev_conc+1.0,n);
f_alpha += k - s;
f_beta -= log(w);
}
}
// Once all DP have been added call this to draw a new concentration value...
double Sample()
{
double ret = sample_gamma(f_alpha, f_beta);
if (ret<1e-3) ret = 1e-3;
return ret;
}
private:
double f_alpha;
double f_beta;
double prev_conc;
};
"""
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from scipy import weave
import unittest
from utils.start_cpp import start_cpp
# Defines code for a doubly linked list - simple but works as expected... (Includes its data via templated inheritance - a little strange, but neat and saves on memory thrashing.)
linked_list_code = start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class Item;
template <typename ITEM, typename BODY> class List;
// Useful default...
struct Empty {};
// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class Item : public ITEM
{
public:
Item(List<ITEM,BODY> * head):head(head),next(this),prev(this) {}
~Item() {}
Item<ITEM,BODY> * Next() {return next;}
Item<ITEM,BODY> * Prev() {return prev;}
List<ITEM,BODY> * GetList() {return head;}
bool Valid() {return static_cast< Item<ITEM,BODY>* >(head)!=this;}
bool IsDummy() {return static_cast< Item<ITEM,BODY>* >(head)==this;}
Item<ITEM,BODY> * PreNew() // Adds a new item before this one.
{
Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
head->size += 1;
ret->prev = this->prev;
ret->next = this;
ret->prev->next = ret;
ret->next->prev = ret;
return ret;
}
Item<ITEM,BODY> * PostNew() // Adds a new item after this one.
{
Item<ITEM,BODY> * ret = new Item<ITEM,BODY>(head);
head->size += 1;
ret->prev = this;
ret->next = this->next;
ret->prev->next = ret;
ret->next->prev = ret;
return ret;
}
void Suicide() // Removes this node from its list and makes it delete itself.
{
head->size -= 1;
next->prev = prev;
prev->next = next;
delete this;
}
protected:
List<ITEM,BODY> * head;
Item<ITEM,BODY> * next;
Item<ITEM,BODY> * prev;
};
// Simple totally inline doubly linked list structure, where
template <typename ITEM = Empty, typename BODY = Empty>
class List : protected Item<ITEM,BODY>
{
public:
List():Item<ITEM,BODY>(this),size(0) {}
~List()
{
while(this->size!=0)
{
this->next->Suicide();
}
}
Item<ITEM,BODY> * Append() {return this->PreNew();}
Item<ITEM,BODY> * Prepend() {return this->PostNew();}
Item<ITEM,BODY> * First() {return this->next;}
Item<ITEM,BODY> * Last() {return this->prev;}
int Size() {return this->size;}
BODY & Body() {return body;}
Item<ITEM,BODY> * Index(int i)
{
Item<ITEM,BODY> * ret = this->next;
while(i>0)
{
ret = ret->next;
i -= 1;
}
return ret;
}
protected:
friend class Item<ITEM,BODY>;
int size;
BODY body;
};
"""
class TestLinkedList(unittest.TestCase):
    """Test code for the linked list."""

    def test_compile(self):
        # Just checks the support code compiles - the inline body is empty.
        # Fixed: previously referenced the undefined name `linked_list`; the
        # module variable holding the support code is `linked_list_code`, so
        # this test raised a NameError before it could run.
        code = start_cpp(linked_list_code) + """
        """
        weave.inline(code, support_code=linked_list_code)

    def test_size(self):
        # Verifies Size() tracks Append() and Suicide().
        # Fixed: `linked_list` -> `linked_list_code` (NameError otherwise).
        code = start_cpp(linked_list_code) + """
        int errors = 0;

        List<> wibble;
        if (wibble.Size()!=0) errors += 1;

        Item<> * it = wibble.Append();
        if (wibble.Size()!=1) errors += 1;

        it->Suicide();
        if (wibble.Size()!=0) errors += 1;

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_code)
        self.assertEqual(errors,0)

    def test_loop(self):
        # Appends 0..9 and verifies iteration order via First()/Valid()/Next().
        extra = """
        struct Number
        {
         int num;
        };
        """
        code = start_cpp(linked_list_code+extra) + """
        int errors = 0;

        List<Number> wibble;
        for (int i=0;i<10;i++)
        {
         Item<Number> * it = wibble.Append();
         it->num = i;
        }
        if (wibble.Size()!=10) errors += 1;

        int i = 0;
        for (Item<Number> * targ = wibble.First(); targ->Valid(); targ = targ->Next())
        {
         if (i!=targ->num) errors += 1;
         i += 1;
        }

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_code+extra)
        self.assertEqual(errors,0)
# Code for a linked list with garbage collection - each entry has a reference count, and it also allows access of the reference counts and the total number of reference counts for all entrys. This structure is very useful for modelling a Dirichlet process as a direct consequence, as it has all its properties...
linked_list_gc_code = linked_list_code + start_cpp() + """
// Predefinitions...
template <typename ITEM, typename BODY> class ItemRef;
template <typename ITEM, typename BODY> class ListRef;
// Item for the linked list data structure - simply inherits extra data stuff...
template <typename ITEM = Empty, typename BODY = Empty>
class ItemRef : public ITEM
{
public:
ItemRef(ListRef<ITEM,BODY> * head):head(head),next(this),prev(this),refCount(0) {}
~ItemRef() {}
ItemRef<ITEM,BODY> * Next() {return next;}
ItemRef<ITEM,BODY> * Prev() {return prev;}
ListRef<ITEM,BODY> * GetList() {return head;}
bool Valid() {return static_cast< ItemRef<ITEM,BODY>* >(head)!=this;}
bool IsDummy() {return static_cast< ItemRef<ITEM,BODY>* >(head)==this;}
ItemRef<ITEM,BODY> * PreNew() // Adds a new item before this one.
{
ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
head->size += 1;
ret->prev = this->prev;
ret->next = this;
ret->prev->next = ret;
ret->next->prev = ret;
return ret;
}
ItemRef<ITEM,BODY> * PostNew() // Adds a new item after this one.
{
ItemRef<ITEM,BODY> * ret = new ItemRef<ITEM,BODY>(head);
head->size += 1;
ret->prev = this;
ret->next = this->next;
ret->prev->next = ret;
ret->next->prev = ret;
return ret;
}
void Suicide() // Removes this node from its list and makes it delete itself.
{
head->size -= 1;
head->refTotal -= refCount;
next->prev = prev;
prev->next = next;
delete this;
}
void IncRef(int amount = 1)
{
this->refCount += amount;
head->refTotal += amount;
}
void DecRef(int amount = 1) // If the ref count reaches zero the object will delete itself.
{
this->refCount -= amount;
head->refTotal -= amount;
if (refCount<=0) this->Suicide();
}
int RefCount() {return refCount;}
protected:
ListRef<ITEM,BODY> * head;
ItemRef<ITEM,BODY> * next;
ItemRef<ITEM,BODY> * prev;
int refCount;
};
// Simple totally inline doubly linked list structure...
template <typename ITEM = Empty, typename BODY = Empty>
class ListRef : protected ItemRef<ITEM,BODY>
{
public:
ListRef():ItemRef<ITEM,BODY>(this),size(0),refTotal(0) {}
~ListRef()
{
while(this->size!=0)
{
this->next->Suicide();
}
}
ItemRef<ITEM,BODY> * Append() {return this->PreNew();}
ItemRef<ITEM,BODY> * Prepend() {return this->PostNew();}
ItemRef<ITEM,BODY> * First() {return this->next;}
ItemRef<ITEM,BODY> * Last() {return this->prev;}
int Size() {return this->size;}
int RefTotal() {return this->refTotal;}
BODY & Body() {return body;}
ItemRef<ITEM,BODY> * Index(int i)
{
ItemRef<ITEM,BODY> * ret = this->next;
while(i>0)
{
ret = ret->Next();
i -= 1;
}
return ret;
}
protected:
friend class ItemRef<ITEM,BODY>;
int size;
int refTotal;
BODY body;
};
"""
class TestLinkedListGC(unittest.TestCase):
    """Test code for the linked list with garbage collection."""

    def test_compile(self):
        # Just checks the support code compiles - the inline body is empty.
        # Fixed: previously referenced the undefined name `linked_list_gc`; the
        # module variable is `linked_list_gc_code`, so this raised a NameError.
        code = start_cpp(linked_list_gc_code) + """
        """
        weave.inline(code, support_code=linked_list_gc_code)

    def test_size_gc(self):
        # Verifies RefCount()/RefTotal() bookkeeping and that an item deletes
        # itself (shrinking the list) when its reference count reaches zero.
        code = start_cpp(linked_list_gc_code) + """
        int errors = 0;

        ListRef<> wibble;
        if (wibble.Size()!=0) errors += 1;

        ItemRef<> * it = wibble.Append();
        if (wibble.Size()!=1) errors += 1;
        if (wibble.RefTotal()!=0) errors += 1;

        it->IncRef();
        it->IncRef();
        if (it->RefCount()!=2) errors += 1;
        if (wibble.RefTotal()!=2) errors += 1;

        it->DecRef();
        it->DecRef();
        if (wibble.RefTotal()!=0) errors += 1;
        if (wibble.Size()!=0) errors += 1;

        return_val = errors;
        """
        errors = weave.inline(code, support_code=linked_list_gc_code)
        self.assertEqual(errors,0)
# If this file is run directly, run the unit tests defined above...
if __name__ == '__main__':
    unittest.main()
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from utils.start_cpp import start_cpp
# Provides code for estimating the Dirichlet distribution from which a number of multinomial distributions were drawn from, given those multinomials...
# Review fixes applied to the C++ below:
#  * The NaN/inf guard in both Update() overloads tested (dirSum==dirSum), which is
#    true for every non-NaN value - so the Dirichlet estimate was reset to uniform on
#    EVERY Newton iteration, making the iteration useless. It now correctly tests
#    (dirSum!=dirSum), which is the standard IEEE-754 NaN self-comparison.
#  * The double-precision Update() now clamps components at 1e-3 after each step,
#    matching the float version - keeps the parameters strictly positive, as required
#    by digamma/trigamma.
dir_est_code = start_cpp() + """
// Defined as a class - you then add each multinomial before requesting a maximum likelihood update of the Dirichlet distribution. It uses Newton-Raphson iterations, and so needs a starting point - you provide a vector to be updated, which can of course save time if it is already close...
class EstimateDir
{
 public:
  EstimateDir(int vecSize):size(vecSize), samples(0), meanLog(new double[vecSize]), grad(new double[vecSize]), qq(new double[vecSize])
  {
   for (int i=0;i<vecSize;i++) meanLog[i] = 0.0;
  }

  ~EstimateDir() {delete[] meanLog; delete[] grad; delete[] qq;}

  void Add(float * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Add(double * mn)
  {
   samples += 1;
   for (int i=0;i<size;i++)
   {
    meanLog[i] += (log(mn[i]) - meanLog[i]) / double(samples);
   }
  }

  void Update(float * dir, int maxIter = 64, float epsilon = 1e-3, float cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++)
    {
     dirSum += dir[i];
    }

    // Check for Nan/inf - if so reset to basic value...
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++)
     {
      dir[i] *= mult;
     }
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

  void Update(double * dir, int maxIter = 64, double epsilon = 1e-6, double cap = 1e6)
  {
   for (int iter=0;iter<maxIter;iter++)
   {
    // We will need the sum of the dir vector...
    double dirSum = 0.0;
    for (int i=0;i<size;i++)
    {
     dirSum += dir[i];
    }

    // Check for Nan/inf - if so reset to basic value...
    if ((dirSum!=dirSum) || (dirSum>1e100))
    {
     for (int i=0;i<size;i++) dir[i] = 1.0;
     dirSum = size;
    }

    // Safety - don't let it get too precise, that probably means its being crazy (Can happen with too few samples.)...
    if (dirSum>cap)
    {
     float mult = cap / dirSum;
     for (int i=0;i<size;i++)
     {
      dir[i] *= mult;
     }
     dirSum = cap;
    }

    // Calculate the gradiant and the Hessian 'matrix', except its actually diagonal...
    double digDirSum = digamma(dirSum);
    for (int i=0;i<size;i++)
    {
     grad[i] = samples * (digDirSum - digamma(dir[i]) + meanLog[i]);
     qq[i] = -samples * trigamma(dir[i]);
    }

    // Calculate b...
    double b = 0.0;
    double bDiv = 1.0 / (samples*trigamma(dirSum));
    for (int i=0;i<size;i++)
    {
     b += grad[i]/qq[i];
     bDiv += 1.0/qq[i];
    }
    b /= bDiv;

    // Do the update, sum the change...
    double change = 0.0;
    for (int i=0;i<size;i++)
    {
     double delta = (grad[i] - b) / qq[i];
     dir[i] -= delta;
     if (dir[i]<1e-3) dir[i] = 1e-3;
     change += fabs(delta);
    }

    // Break if no change...
    if (change<epsilon) break;
   }
  }

 private:
  int size;
  int samples;
  double * meanLog; // Vector of length size, contains the component-wise mean of the log of each of the samples - consititutes the sufficient statistics required to do the update.
  double * grad; // Temporary during update.
  double * qq; // Temporary during update.
};
"""
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Some basic matrix operations that come in use...
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE
template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
while(count!=0)
{
T t = *lhs;
*lhs = *rhs;
*rhs = t;
++lhs;
++rhs;
--count;
}
}
// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
if (stride==-1) stride = size;
if (size==1) return pos[0];
else
{
if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
else
{
T ret = 0.0;
for (int i=0; i<size; i++)
{
if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
if ((i+size)%2) ret += sub;
else ret -= sub;
}
for (int i=1; i<size; i++)
{
MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
}
return ret;
}
}
}
// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
if (stride==-1) stride = size;
for (int r=0; r<size; r++)
{
for (int c=0; c<size; c++)
{
out[r*stride + c] = (c==r)?1.0:0.0;
}
}
for (int r=0; r<size; r++)
{
// Find largest pivot and swap in, fail if best we can get is 0...
T max = in[r*stride + r];
int index = r;
for (int i=r+1; i<size; i++)
{
if (fabs(in[i*stride + r])>fabs(max))
{
max = in[i*stride + r];
index = i;
}
}
if (index!=r)
{
MemSwap(&in[index*stride], &in[r*stride], size);
MemSwap(&out[index*stride], &out[r*stride], size);
}
if (fabs(max-0.0)<1e-6) return false;
// Divide through the entire row...
max = 1.0/max;
in[r*stride + r] = 1.0;
for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
for (int i=0; i<size; i++) out[r*stride + i] *= max;
// Row subtract to generate 0's in the current column, so it matches an identity matrix...
for (int i=0; i<size; i++)
{
if (i==r) continue;
T factor = in[i*stride + r];
in[i*stride + r] = 0.0;
for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
}
}
return true;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
class ProgBar:
    """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""

    def __init__(self, width = 60, onCallback = None):
        # Record the start time and print the bar outline that the '|' marks fill in.
        self.start = time.time()
        self.fill = 0
        self.width = width
        self.onCallback = onCallback
        outline = '_' * self.width + '\n'
        sys.stdout.write(outline)
        sys.stdout.flush()

    def __del__(self):
        # Complete the bar, then report the elapsed wall-clock time.
        self.end = time.time()
        self.__show(self.width)
        elapsed = self.end - self.start
        sys.stdout.write('\nDone - ' + str(elapsed) + ' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
        if self.onCallback:
            self.onCallback()
        target = int(self.width * float(nDone) / float(nToDo))
        target = min((target, self.width))
        if target > self.fill:
            self.__show(target)

    def __show(self, n):
        # Emit just the bars needed to advance from the current fill level to n.
        sys.stdout.write('|' * (n - self.fill))
        self.fill = n
        sys.stdout.flush()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pydoc
import inspect
class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
    # NOTE(review): Python 2 era code - relies on the builtin reduce(), on
    # inspect.getargspec and on py2 map()/filter() semantics. Output is only
    # complete once the object is destroyed (__del__ flushes the sections).

    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title

        self.doc = pydoc.HTMLDoc()

        # html output - the header is written immediately, accumulated sections on destruction...
        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')

        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''

        # wiki output, mirroring the html output...
        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)

        self.wiki_variables = ''
        self.wiki_functions = ''
        self.wiki_classes = ''

    def __del__(self):
        # Writes out the accumulated variable/function/class sections and closes both files.
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()

        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()

    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works."""
        html = []
        wiki = []
        for i, line in enumerate(open(fn,'r').readlines()):
            # html rendering of the line - first line optionally bold, '<name>.py - '/' .txt - ' prefixes italicised...
            hl = line.replace('\n', '')
            if i==0 and fls:
                hl = '<strong>' + hl + '</strong>'
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)

            # wiki rendering of the same line...
            wl = line.strip()
            if i==0 and fls:
                wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)

        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')

    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc

        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc

    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        # NOTE(review): the replace() below appears to be a no-op as written - it
        # presumably once swapped non-breaking spaces for plain spaces; confirm
        # against the original source before touching it.
        self.html_functions += self.doc.docroutine(func).replace(' ',' ')
        self.html_functions += '\n'

        # Rebuild the signature text for the wiki output...
        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)

        # Left-pad defaults with None so it lines up with args...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None:
            arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None:
            arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc

    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        self.html_classes += self.doc.docclass(cls).replace(' ',' ')
        self.html_classes += '\n'

        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls))
        doc = inspect.getdoc(cls)

        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc

        # Document the methods, with __init__ sorted to the front...
        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)

        for name, method in methods:
            # NOTE(review): `not inspect.ismethod(method)` here makes the
            # `if inspect.ismethod(method)` branch below unreachable and filters
            # out ordinary (py2) methods entirely - this looks inverted, but the
            # intended filter is unclear, so it is left as-is; verify against output.
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None
                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)

                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None:
                    arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None:
                    arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

                # Fetch the docstring, walking up the mro if the class itself does not provide one...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None: return inspect.getdoc(method)
                    except: pass
                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret
                    return None
                doc = fetch_doc(cls, name)

                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc

        # Document simple class-level variables/descriptors...
        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import random
import math
from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp
# Provides various gamma-related functions...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE
#include <cmath>
// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
if (z<0.5)
{
// Use reflection formula, as approximation doesn't work down here...
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
}
else
{
double x = coeff[0];
for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
double t = z + 6.5;
return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
}
}
// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.
double ret = 0.0;
while (z<highVal)
{
ret -= 1.0/z;
z += 1.0;
}
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz4 = iz2*iz2;
double iz6 = iz4*iz2;
ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
return ret;
}
// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
static const double highVal = 8.0;
double ret = 0.0;
while (z<highVal)
{
ret += 1.0/(z*z);
z += 1.0;
}
z -= 1.0;
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz3 = iz1*iz2;
double iz5 = iz3*iz2;
double iz7 = iz5*iz2;
double iz9 = iz7*iz2;
ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
return ret;
}
#endif
"""
def lnGamma(z):
    """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function."""
    # Thin weave wrapper around the C++ lnGamma in gamma_code.
    code = start_cpp(gamma_code) + """
    return_val = lnGamma(z);
    """
    return weave.inline(code, ['z'], support_code=gamma_code)
def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Evaluates the digamma function via the C++ implementation in gamma_code."""
  src = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(src, ['z'], support_code=gamma_code)
def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Evaluates the trigamma function via the C++ implementation in gamma_code."""
  src = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(src, ['z'], support_code=gamma_code)
class TestFuncs(unittest.TestCase):
  """Unit tests for the gamma-related C++ functions - each is compared against the matching scipy implementation over randomly chosen inputs."""

  def test_compile(self):
    # Just verify that the support code compiles at all...
    weave.inline(start_cpp(gamma_code) + """
    """, support_code=gamma_code)

  def test_error_lngamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      assert(math.fabs(lnGamma(z) - gammaln(z)) < 1e-12)

  def test_error_digamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      assert(math.fabs(digamma(z) - psi(z)) < 1e-9)

  def test_error_trigamma(self):
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      assert(math.fabs(trigamma(z) - polygamma(1,z)) < 1e-9)
# If this file is run directly execute the unit tests above...
if __name__ == '__main__':
  unittest.main()
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import hashlib
def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionally insert a hash code of some other code into the returned directive. This latter feature is useful for working around the fact that scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Inspect the callers frame to find out which file/line the C++ snippet starts at...
  frame = inspect.currentframe().f_back
  info = inspect.getframeinfo(frame)  # info[0] = filename, info[1] = line number.

  if hash_str is None:
    return '#line %i "%s"\n'%(info[1],info[0])
  else:
    # md5 requires bytes - encode if given a unicode string (no-op for ascii under python 2)...
    if not isinstance(hash_str, bytes):
      hash_str = hash_str.encode('utf-8')
    h = hashlib.md5()
    h.update(hash_str)
    hash_val = h.hexdigest()
    return '#line %i "%s" // %s\n'%(info[1],info[0],hash_val)
| Python |
# -*- coding: utf-8 -*-
# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).
import cv
import numpy as np
def cv2array(im):
  """Converts a cv image into a numpy array, shaped (height, width, channels). The data is copied, so the returned array does not alias the cv image."""
  # Map the OpenCV depth constants to the matching numpy dtype names...
  depth2dtype = {
    cv.IPL_DEPTH_8U: 'uint8',
    cv.IPL_DEPTH_8S: 'int8',
    cv.IPL_DEPTH_16U: 'uint16',
    cv.IPL_DEPTH_16S: 'int16',
    cv.IPL_DEPTH_32S: 'int32',
    cv.IPL_DEPTH_32F: 'float32',
    cv.IPL_DEPTH_64F: 'float64',
  }

  # Copy the raw bytes out and then shape them into an image...
  a = np.fromstring(
    im.tostring(),
    dtype=depth2dtype[im.depth],
    count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a
def array2cv(a):
  """Converts a numpy array to a cv array, if possible - the dtype must be one of the supported ones. A 2D array is treated as a single channel image; a 3D array uses the third axis as the channels."""
  # Map numpy dtype names to the matching OpenCV depth constants...
  dtype2depth = {
    'uint8': cv.IPL_DEPTH_8U,
    'int8': cv.IPL_DEPTH_8S,
    'uint16': cv.IPL_DEPTH_16U,
    'int16': cv.IPL_DEPTH_16S,
    'int32': cv.IPL_DEPTH_32S,
    'float32': cv.IPL_DEPTH_32F,
    'float64': cv.IPL_DEPTH_64F,
  }

  # IndexError here means the array is 2D, i.e. a single channel image (narrowed from a bare except, which also swallowed KeyboardInterrupt etc.)...
  try:
    nChannels = a.shape[2]
  except IndexError:
    nChannels = 1

  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
                               dtype2depth[str(a.dtype)],
                               nChannels)
  cv.SetData(cv_im, a.tostring(),
             a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal
import unittest
def repeat(x):
  """Generator that yields x forever - pair it with mp_map to feed a constant argument alongside the varying ones."""
  while True:
    yield x
def run_code(code,args):
  """Internal use function that does the work in each process - rebuilds a function from its marshalled code object and applies it to args."""
  fn = types.FunctionType(marshal.loads(code), globals(), '_')
  return fn(*args)
def mp_map(func, *iters, **keywords):
  """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant arguments easier as they can just repeat for infinity. An existing multiprocessing Pool may be passed via the pool keyword; otherwise a temporary pool is created and shut down before returning (previously it was leaked)."""
  # Use the callers pool if one was provided, otherwise create one that we own and must clean up...
  pool = keywords.get('pool')
  ownPool = pool is None
  if ownPool:
    pool = mp.Pool()

  try:
    # Marshal the functions code object so it can be rebuilt inside each worker (__code__ exists in both python 2.6+ and python 3, unlike func_code)...
    code = marshal.dumps(func.__code__)

    # Queue every job, then collect the results in submission order...
    jobs = [pool.apply_async(run_code, (code, args)) for args in zip(*iters)]
    return [job.get() for job in jobs]
  finally:
    # Only tear down a pool we created ourselves...
    if ownPool:
      pool.close()
      pool.join()
class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map - each compares the multiprocess result against the equivalent single-process computation."""

  def test_simple1(self):
    # An identity function should return the input unchanged...
    letters = ['a','b','c','d']
    def noop(data):
      return data
    self.assertEqual(letters, mp_map(noop, letters))

  def test_simple2(self):
    nums = [x for x in xrange(1000)]
    self.assertEqual([n*2 for n in nums], mp_map(lambda a: a*2, nums))

  def test_gen(self):
    # Inputs can be generators, not just lists...
    def gen():
      for i in xrange(100):
        yield i
    self.assertEqual([i*2 for i in gen()], mp_map(lambda a: a*2, gen()))

  def test_repeat(self):
    # The repeat generator supplies a constant second argument...
    def mult(a,b):
      return a*b
    nums = [x for x in xrange(50,5000,5)]
    self.assertEqual([n*3 for n in nums], mp_map(mult, nums, repeat(3)))

  def test_none(self):
    # An empty input produces an empty output...
    self.assertEqual([], mp_map(lambda x: x*x, []))
# Run the unit tests when this file is executed directly...
if __name__ == '__main__':
  unittest.main()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import tempfile
import shutil
from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util
# Construct a default compiler object, so platform-specific file names can be queried - record failure as None rather than dying at import time, so make_mod can raise a clear error when actually used. (except Exception rather than a bare except, so KeyboardInterrupt/SystemExit are not swallowed.)
try:
  __default_compiler = distutils.ccompiler.new_compiler()
except Exception:
  __default_compiler = None
def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediately import the just compiled module. Note that on failure errors can be thrown - its your choice to catch them or not. name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternatively a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necessary setup - done like this so these settings can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler is None:
    raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything (any() is also safe for an empty source list, unlike the previous reduce)...
  if not isinstance(source, list):
    source = [source]
  source_path = [os.path.join(base, s) for s in source]
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # Backup the argv variable - distutils reads its commands from there...
    old_argv = sys.argv[:]
    temp_dir = None  # Initialised before the try so the finally block is safe even if mkdtemp throws.
    try:
      # Create a temporary directory to do all the build work in...
      temp_dir = tempfile.mkdtemp()

      # Prepare the extension - headers become dependencies rather than compilation units...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]
      comp_path = [s for s in source_path if not s.endswith('.h')]
      depends = [s for s in source_path if s.endswith('.h')]
      if openCL:
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Restore the argv variable and remove the temporary directory...
      sys.argv = old_argv
      if temp_dir is not None:
        shutil.rmtree(temp_dir, True)
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code
# Provides various functions to assist with manipulating python objects from c++ code.
# Everything here reads values out of a python object by attribute name, for use inside scipy.weave C++:
#  GetObjectBoolean/GetObjectInt/GetObjectFloat - fetch a named scalar attribute.
#  GetObjectByte1D/GetObjectFloat1D - copy a named 1D numpy array attribute into a freshly new[]-ed C array, which the caller must delete[]; the optional size pointer receives the array length.
# NOTE(review): GetObjectBoolean compares against Py_False only - any attribute that is not literally False reads as true; confirm that is the intended contract.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE
// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
PyObject * b = PyObject_GetAttrString(obj, name);
bool ret = b!=Py_False;
Py_DECREF(b);
return ret;
}
// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
PyObject * i = PyObject_GetAttrString(obj, name);
int ret = PyInt_AsLong(i);
Py_DECREF(i);
return ret;
}
// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
PyObject * f = PyObject_GetAttrString(obj, name);
float ret = PyFloat_AsDouble(f);
Py_DECREF(f);
return ret;
}
// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
unsigned char * ret = new unsigned char[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
Py_DECREF(nao);
return ret;
}
// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
float * ret = new float[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
Py_DECREF(nao);
return ret;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Defines helper functions for accessing numpy arrays...
# Each helper returns a *reference* to the element at the given indices, computed via the arrays strides, so they work for both reading and writing and cope with non-contiguous arrays. Variants exist for 1D/2D/3D arrays of float32 (Float*), uint8 (Byte*) and int32 (Int*) - the arrays actual dtype must match the variant used, or the bytes will be misinterpreted.
numpy_util_code = start_cpp() + """
#ifndef NUMPY_UTIL_CODE
#define NUMPY_UTIL_CODE
float & Float1D(PyArrayObject * arr, int index = 0)
{
return *(float*)(arr->data + index*arr->strides[0]);
}
float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
unsigned char & Byte1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index*arr->strides[0]);
}
unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
int & Int1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index*arr->strides[0]);
}
int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
#endif
"""
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen
# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions (make.make_mod was previously registered twice - the duplicate has been removed)...
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
  # Call the process control function - 15 is PR_SET_NAME on Linux, which renames the process/thread (the kernel truncates it to 15 characters plus terminator).
  # NOTE(review): c_char_p(name) expects a byte string - under Python 3 a str would need encoding first; this code appears Python 2 specific - confirm.
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv as well, since tools such as ps read the name from there - _dl_argv is a glibc internal symbol, so this part is fragile and may break on other libc implementations...
  charPP = POINTER(POINTER(c_char))
  argv = charPP.in_dll(libc,'_dl_argv')
  # Cap the copy at the length of the existing argv[0], so adjacent memory is never overwritten (a longer new name gets truncated)...
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0],c_char_p(name),size)
if __name__=='__main__':
  # Quick test that it works - print the process list before and after renaming, so the change can be seen by eye (both short and full format, as they source the name differently)...
  import os

  ps1 = 'ps'
  ps2 = 'ps -f'

  os.system(ps1)
  os.system(ps2)

  setProcName('wibble_wobble')

  os.system(ps1)
  os.system(ps2)
| Python |
#! /usr/bin/env python
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import dp_utils
from utils import doc_gen
# Setup the documentation generator for the dp_utils module...
doc = doc_gen.DocGen('dp_utils', 'Dirichlet Process Utilities', 'Utility library for handling Dirichlet processes')
doc.addFile('readme.txt', 'Overview')

# Variables - each is a string of C++ code intended for use as scipy.weave support code...
doc.addVariable('sampling_code', 'Code for sampling from various distributions - uniform, Gaussian, gamma and beta.')
doc.addVariable('conc_code', 'Contains code to sample a concentration parameter and two classes - one to represent the status of a concentration parameter - its prior and its estimated value, and another to do the same thing for when a concentration parameter is shared between multiple Dirichlet processes.')
doc.addVariable('dir_est_code', 'Contains a class for doing maximum likelihood estimation of a Dirichlet distrbution given multinomials that have been drawn from it.')
doc.addVariable('linked_list_code', 'A linked list implimentation - doubly linked, adds data via templated inheritance.')
doc.addVariable('linked_list_gc_code', 'A linked list with reference counting and garabge collection for its entries. Happens to be very good at representing a Dirichlet process.')
doc.addVariable('dp_utils_code', 'Combines all of the code provided in this module into a single variable.')
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from topic import *
import sys
if sys.modules.has_key('lda'):
import lda
elif sys.modules.has_key('lda_nmp'):
import lda_nmp as lda
else:
raise Exception('This module is not meant to be imported directly - import lda/lda_nmp instead.')
import solve_shared
class Corpus:
  """Contains a set of Document-s and a set of Topic-s associated with those Document-s. Also stores the alpha and beta parameters associated with the model."""

  def __init__(self, topicCount):
    """Basic setup, only input is the number of topics. Chooses default values for alpha and beta which you can change later before fitting the model."""
    # Create the array of documents and support variables...
    self.docs = []
    self.totalWords = 0
    self.maxWordIdentifier = 0

    # Create the array of topics - a topics ident is simply its index into this list...
    self.topics = [Topic(i) for i in xrange(topicCount)]

    # And then store some parameters...
    self.alpha = 1.0
    self.alphaMult = 10.0
    self.beta = 1.0

  def setTopicCount(self,topicCount):
    """Sets the number of topics. Note that this will reset the model, so after doing this all the model variables will be None etc."""
    # Recreate topics...
    self.topics = [Topic(i) for i in xrange(topicCount)]

    # Remove the now-invalid models from the documents...
    for doc in self.docs:
      doc.model = None

  def setAlpha(self, alpha):
    """Sets the alpha value - 1 is more often than not a good value, and is the default."""
    self.alpha = alpha

  def setAlphaMult(self, alphaMult):
    """Sets a multiplier of the alpha parameter used when the topic of a document is given - for increasing the prior for a given entry - can be used for semi-supervised classification. Defaults to a factor of 10.0"""
    self.alphaMult = alphaMult

  def setBeta(self, beta):
    """The authors of the paper observe that this is effectivly a scale parameter - use a low value to get a fine grained division into topics, or a high value to get just a few topics. Defaults to 1.0, which is a good number for most situations."""
    self.beta = beta

  def getAlpha(self):
    """Returns the current alpha value."""
    return self.alpha

  def getAlphaMult(self):
    """Returns the current alpha multiplier."""
    return self.alphaMult

  def getBeta(self):
    """Returns the current beta value."""
    return self.beta

  def add(self, doc):
    """Adds a document to the corpus."""
    doc.ident = len(self.docs)
    self.docs.append(doc)

    # Update the largest word identifier seen - take the maximum over the identifier column rather than assuming the final row holds it, so documents whose word arrays are not sorted by identifier are handled correctly; also skip documents with no words rather than crashing...
    if doc.words.shape[0]!=0:
      maxWordIdent = int(doc.words[:,0].max())
      if maxWordIdent>self.maxWordIdentifier:
        self.maxWordIdentifier = maxWordIdent

    self.totalWords += doc.dupWords()

  def setWordCount(self, wordCount):
    """Because the system autodetects words as being the identifiers 0..max where max is the largest identifier seen it is possible for you to tightly pack words but to want to reserve some past the end. Its also possible for a data set to never contain the last word, creating problems. This allows you to set the number of words, forcing the issue. Note that setting the number less than actually exist is a guaranteed crash, at a later time."""
    self.maxWordIdentifier = wordCount-1

  def fit(self, params = None, callback = None):
    """Fits a model to this Corpus. params is a Params object from solve-shared - if omitted a default constructed one is used. callback if provided should take two numbers - the first is the number of iterations done, the second the number of iterations that need to be done; used to report progress. Note that it will probably not be called for every iteration for reasons of efficiency."""
    # Construct the default lazily - a default argument of solve_shared.Params() would be evaluated just once, when the class is defined, and then shared by every call...
    if params is None:
      params = solve_shared.Params()
    lda.fit(self, params, callback)

  def maxWordIdent(self):
    """Returns the maximum word ident currently in the system; note that unlike Topic-s and Document-s this can have gaps in as its user set. Only a crazy user would do that though as it affects the result due to the system presuming that the gap words exist."""
    return self.maxWordIdentifier

  def maxDocumentIdent(self):
    """Returns the highest ident; documents will then be found in the range {0..max ident}. Returns -1 if no documents exist."""
    return len(self.docs)-1

  def maxTopicIdent(self):
    """Returns the highest ident; topics will then be found in the range {0..max ident}. Returns -1 if no topics exist."""
    return len(self.topics)-1

  def wordCount(self):
    """Number of words as far as a fitter will be concerned; doesn't mean that they all actually exist however."""
    return self.maxWordIdentifier+1

  def documentCount(self):
    """Number of documents."""
    return len(self.docs)

  def topicCount(self):
    """Number of topics."""
    return len(self.topics)

  def getDocument(self, ident):
    """Returns the Document associated with the given ident."""
    return self.docs[ident]

  def getTopic(self, ident):
    """Returns the Topic associated with the given ident."""
    return self.topics[ident]

  def documentList(self):
    """Returns a list of all documents."""
    return self.docs

  def topicList(self):
    """Returns a list of all topics."""
    return self.topics

  def topicsWords(self):
    """Constructs and returns a topics X words array that represents the learned models key part. Simply an array topics X words of P(topic,word). This is the data best saved for analysing future data - you can use the numpy.save/.load functions. Note that you often want P(word|topic), which you can obtain by normalising the rows - (a.T/a.sum(axis=1)).T"""
    # NOTE(review): numpy is only in scope here via the star import from topic - an explicit 'import numpy' in this module would be safer; confirm before changing.
    ret = numpy.empty((len(self.topics), self.maxWordIdentifier+1), dtype=numpy.float_)
    for topic in self.topics:
      ret[topic.getIdent(),:] = topic.getModel()
    return ret

  def totalWordCount(self):
    """Returns the total number of words used by all the Document-s - is used by the solver, but may be of interest to curious users."""
    return self.totalWords
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
class Topic:
  """Simple wrapper class for a topic - contains just the parameter vector for the multinomial distribution from which words in that topic are drawn from. The index into the vector is the ident of the word associated with each specific probability value."""
  def __init__(self, ident):
    """Initialises the model to be None, so it can be later calculated. ident is the offset of this topic into the Corpus in which this topic is stored. Only Corpus-s should initialise this object and hence should know."""
    self.model = None # Unnormalised parameter vector, indexed by word ident - None until setModel is called.
    self.mult = None # Multiplier of model to get P(word|topic)
    self.ident = ident # Offset of this topic within the owning Corpus.
  def getIdent(self):
    """Ident - just the offset into the array in the Corpus where this topic is stored."""
    return self.ident
  def getModel(self):
    """Returns the unnormalised parameter vector for the multinomial distribution from which words generated by the topic are drawn. (The probabilities are actually a list of P(topic,word) for this topic, noting that there are all the other topics. You may normalise it to get P(word|topic), or take the other vectors and manipulate them to get all the relevant distributions, i.e. P(topic|word), P(topic), P(word). )"""
    return self.model
  def getNormModel(self):
    """Returns the model but normalised so it is the multinomial P(word|topic)."""
    return self.model * self.mult
  def setModel(self,model):
    """Sets the model vector, and caches the normalising multiplier used to convert it to P(word|topic)."""
    self.model = model
    self.mult = 1.0/self.model.sum()
  def probWord(self, ident):
    """Returns the probability of the topic emitting the given word. Only call if the model has been calculated."""
    # 'is not None' rather than the old '!=None' - comparing a numpy array with != is elementwise, which makes the truth test in the assert raise an 'ambiguous truth value' error.
    assert((self.model is not None) and (self.mult is not None))
    return self.model[ident] * self.mult
  def getTopWords(self):
    """Returns an array of word identifiers ordered by the probability of the topic emitting them, most probable first."""
    return self.model.argsort()[::-1]
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import heapq
import numpy
class State:
  """The state required by the solvers - stored as a few large numpy arrays for conveniance and speed. It is generated from a Corpus, but once generated is stand alone. It includes the ability to split out the final answer into the Corpus, as this is not seen as time critical (Must be the exact same and unedited Corpus.). To support parallel sampling you can duplicate this object, run the solvers on each duplicate, then merge them together, before extracting the final answer back into the Corpus."""
  def __init__(self, corpus):
    """Given a Corpus calculates the state object by duplicating all of the needed information."""
    # Break if cloning - let the work be done elsewhere...
    # (clone() constructs with None and then copies every field across by hand.)
    if corpus==None:
      return
    # Counts of the assorted things avaliable...
    dCount = corpus.documentCount()
    tCount = corpus.topicCount()
    wCount = corpus.wordCount()
    # Create tempory variables...
    # (Scratch space reused by every call to sample(), to avoid reallocating each time.)
    self.topicWordTemp = numpy.empty((tCount, wCount), dtype=numpy.float_)
    self.docTopicTemp = numpy.empty((dCount,tCount), dtype=numpy.float_)
    # Copy over simple stuff...
    self.alpha = corpus.getAlpha()
    self.alphaMult = corpus.getAlphaMult()
    self.beta = corpus.getBeta()
    # Create the counters that are kept in synch with the current state, initialised to zero for the initialisation routine...
    # Number of times each word is assigned to each topic over all documents...
    self.topicWordCount = numpy.zeros((tCount, wCount), dtype=numpy.uint)
    # Number of words assigned to each topic over all documents...
    self.topicCount = numpy.zeros(tCount, dtype=numpy.uint)
    # Number of times in each document a word is assigned to a topic...
    self.docTopicCount = numpy.zeros((dCount,tCount), dtype=numpy.uint)
    # Number of words in each document (Constant)...
    self.docCount = numpy.zeros(dCount, dtype=numpy.uint)
    # Most important part is the state - this is a words x 3 matrix of uint32's, where the first column is the document ident, the second column the word ident and the third column the topic ident - first create the array, then fill it...
    self.state = numpy.empty((corpus.totalWordCount(),3), dtype=numpy.uint)
    index = 0
    for dIdent in xrange(dCount):
      doc = corpus.getDocument(dIdent)
      for uIndex in xrange(doc.uniqueWords()):
        wIdent, count = doc.getWord(uIndex)
        for c in xrange(count): # One state row per occurrence, so a word seen 'count' times gets 'count' rows.
          self.state[index,0] = dIdent
          self.state[index,1] = wIdent
          self.state[index,2] = 1000000000 # Dummy bad value.
          index += 1
    assert(index==self.state.shape[0])
    # The boost array - for each document this provides the index of which topic alpha should be increased for, or -1 if none.
    self.boost = numpy.empty(dCount,dtype=numpy.int_)
    for dIdent in xrange(dCount):
      val = corpus.getDocument(dIdent).getTopic()
      if val==None: self.boost[dIdent] = -1
      else: self.boost[dIdent] = val
    # The output, before its been dropped into the corpus's data format - simply the output model obtained from the samples so far. It is the mean of the output calculated from each sample, calculated incrimentally. As a pair of matrices, rather than the shattered vectors stored in the Corpus...
    self.sampleCount = 0
    self.topicModel = numpy.zeros((tCount,wCount), dtype=numpy.float_)
    self.documentModel = numpy.zeros((dCount,tCount), dtype=numpy.float_)
  def clone(self):
    """Returns a clone of the object in question - typically called just after creation to make clones for each process of a multi-process implimentation."""
    # Every array is .copy()-ed so each chain can mutate its own counts independently; the scalars are immutable so plain assignment suffices.
    ret = State(None)
    ret.topicWordTemp = self.topicWordTemp.copy()
    ret.docTopicTemp = self.docTopicTemp.copy()
    ret.alpha = self.alpha
    ret.alphaMult = self.alphaMult
    ret.beta = self.beta
    ret.topicWordCount = self.topicWordCount.copy()
    ret.topicCount = self.topicCount.copy()
    ret.docTopicCount = self.docTopicCount.copy()
    ret.docCount = self.docCount.copy()
    ret.state = self.state.copy()
    ret.boost = self.boost.copy()
    ret.sampleCount = self.sampleCount
    ret.topicModel = self.topicModel.copy()
    ret.documentModel = self.documentModel.copy()
    return ret
  def absorbClone(self, clone):
    """Given a previous clone this merges back in the sample information, effectivly combining the samples to get a better estimate ready for extraction. Note that the clone is no longer usable after this operation. This is evidently incorrect, as you can't combine topic vectors - there is not even a guarantee that they contain the same topics! However, in practise it works well enough, and any topic not well enough defined to appear in all samples is probably useless anyway and best smoothed out by being combined with different topics."""
    assert(self.sampleCount+clone.sampleCount>0)
    # Get reordering of topics in clone to best match the destination, if needed...
    if self.sampleCount==0:
      # No samples of our own yet - just take the clone's averages wholesale.
      self.sampleCount = clone.sampleCount
      self.topicModel[:] = clone.topicModel
      self.documentModel[:] = clone.documentModel
    else:
      indices = self.matchTopics(self.topicModel,clone.topicModel)
      # Calculate weights...
      # (Each side is weighted by its share of the combined sample count, giving the mean over all samples.)
      sWeight = float(self.sampleCount)/float(self.sampleCount+clone.sampleCount)
      cWeight = float(clone.sampleCount)/float(self.sampleCount+clone.sampleCount)
      # Combine them...
      self.sampleCount += clone.sampleCount
      self.topicModel = sWeight*self.topicModel + cWeight*clone.topicModel[indices,:]
      self.documentModel = sWeight*self.documentModel + cWeight*clone.documentModel[:,indices]
  def extractModel(self, corpus):
    """Extracts the calculated model into the given corpus, to be called once the fitting is done."""
    assert(self.sampleCount!=0)
    # (Note that the below renormalises the outputs, as they could of suffered numerical error in the incrimental averaging.)
    # First the topics...
    self.topicModel /= self.topicModel.sum()
    for t in xrange(self.topicModel.shape[0]):
      model = self.topicModel[t,:]
      corpus.getTopic(t).setModel(model)
    # Now the documents...
    for d in xrange(self.documentModel.shape[0]):
      model = self.documentModel[d,:]
      model /= model.sum()
      corpus.getDocument(d).setModel(model)
  def sample(self):
    """Samples the current state into the internal storage - needed by all solvers but not done regularly enough or is complex enough to require specialist optimisation."""
    # Calculate the topic model, with normalisation to get P(topic,word)...
    self.topicWordTemp[:] = numpy.asfarray(self.topicWordCount)
    self.topicWordTemp += self.beta
    self.topicWordTemp /= self.topicWordTemp.sum()
    #self.topicWordTemp = (self.topicWordTemp.T / (numpy.asfarray(self.topicCount) + self.topicWordCount.shape[1]*self.beta)).T # Normalisation to get P(words|topic), kept from the original implimentation.
    # Calculate the document model, with normalisation to get P(topic|doc)...
    self.docTopicTemp[:] = numpy.asfarray(self.docTopicCount)
    self.docTopicTemp += self.alpha
    # (self.boost+1).nonzero() gives the indices of documents that have a boost topic, as unboosted entries are -1.
    boostInd = (self.boost+1).nonzero()
    self.docTopicTemp[boostInd,self.boost[boostInd]] += self.alpha*(self.alphaMult-1.0)
    self.docTopicTemp = (self.docTopicTemp.T / (numpy.asfarray(self.docCount) + self.docTopicCount.shape[1]*self.alpha + numpy.where(self.boost+1,self.alpha*(self.alphaMult-1.0),0.0))).T
    # Store...
    # (Incremental mean: model += (new - model)/n keeps the running average without storing every sample.)
    self.sampleCount += 1
    self.topicModel += (self.topicWordTemp-self.topicModel) / float(self.sampleCount)
    self.documentModel += (self.docTopicTemp-self.documentModel) / float(self.sampleCount)
  def matchTopics(self,topicWordA,topicWordB):
    """Returns the indices into topicWordB that best equate with topicWordA, used as topics do not necesarilly appear in the same order for each sample. Uses symmetric KL-divergance with greedy selection."""
    # Below could be made a lot faster, but not worth it as topic counts are relativly small in the grand scheme of things.
    # Normalise topicWord arrays by row, so we have P(word|topic), as needed for this matching method...
    twA = (topicWordA.T/topicWordA.sum(axis=1)).T
    twB = (topicWordB.T/topicWordB.sum(axis=1)).T
    # Create a list of tuples - (cost, a index, b index), to get greedy on - this is basically calculating all the symmetric KL divergances...
    heap = []
    for aInd in xrange(twA.shape[0]):
      for bInd in xrange(twA.shape[0]): # NOTE(review): iterates twA.shape[0] for the b loop as well - relies on both inputs having the same topic count.
        cost = 0.5 * (twA[aInd,:]*numpy.log(twA[aInd,:]/twB[bInd,:]) + twB[bInd,:]*numpy.log(twB[bInd,:]/twA[aInd,:])).sum()
        heap.append((cost,aInd,bInd))
    # Turn the list into a heap...
    heapq.heapify(heap)
    # Keep pulling items from the heap to construct the translation table...
    # (Greedy: the globally cheapest still-unused (a,b) pairing is accepted each time.)
    aUsed = numpy.zeros(twA.shape[0],dtype=numpy.int_)
    bUsed = numpy.zeros(twA.shape[0],dtype=numpy.int_)
    remain = twA.shape[0]
    ret = numpy.zeros(twA.shape[0],dtype=numpy.int_)
    while remain!=0:
      match = heapq.heappop(heap)
      if (aUsed[match[1]]==0) and (bUsed[match[2]]==0):
        aUsed[match[1]] = 1
        bUsed[match[2]] = 1
        remain -= 1
        ret[match[1]] = match[2]
    return ret
class Params:
  """Parameters for running the fitter that are universal to all fitters."""
  def __init__(self):
    self.runs = 8      # Number of seperate chains to run.
    self.samples = 10  # Samples to draw from each chain.
    self.burnIn = 1000 # Gibbs iterations discarded before the first sample.
    self.lag = 100     # Gibbs iterations between consecutive samples.
  def setRuns(self,runs):
    """Sets the number of runs, i.e. how many seperate chains are run."""
    self.runs = runs
  def setSamples(self,samples):
    """Number of samples to extract from each chain - total number of samples going into the final estimate will then be samples*runs."""
    self.samples = samples
  def setBurnIn(self,burnIn):
    """Number of Gibbs iterations to do for burn in before sampling starts."""
    self.burnIn = burnIn
  def setLag(self,lag):
    """Number of Gibbs iterations to do between samples."""
    self.lag = lag
  def getRuns(self):
    """Returns the number of runs."""
    return self.runs
  def getSamples(self):
    """Returns the number of samples."""
    return self.samples
  def getBurnIn(self):
    """Returns the burn in length."""
    return self.burnIn
  def getLag(self):
    """Returns the lag length."""
    return self.lag
  def fromArgs(self,args,prefix = ''):
    """Extracts from an arg string, typically sys.argv[1:], the parameters, leaving them untouched if not given. Uses --runs, --samples, --burnIn and --lag. Can optionally provide a prefix which is inserted after the '--'"""
    # args[:-1] is searched so a matching flag is guaranteed to have a value after it.
    # Only ValueError is caught - raised both when the flag is absent (list.index) and when its value is not an integer (int()) - so that a bare except no longer swallows things like KeyboardInterrupt.
    try:
      ind = args[:-1].index('--'+prefix+'runs')
      self.runs = int(args[ind+1])
    except ValueError:
      pass
    try:
      ind = args[:-1].index('--'+prefix+'samples')
      self.samples = int(args[ind+1])
    except ValueError:
      pass
    try:
      ind = args[:-1].index('--'+prefix+'burnIn')
      self.burnIn = int(args[ind+1])
    except ValueError:
      pass
    try:
      ind = args[:-1].index('--'+prefix+'lag')
      self.lag = int(args[ind+1])
    except ValueError:
      pass
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# This loads in the entire libarary and provide the interface - only import needed by a user.
# Load in all the data structure types...
from document import Document
from topic import Topic
from corpus import Corpus
# Get the shared solvers params object...
from solve_shared import Params
# Load in a suitable solver - autodetect the most powerful supported...
# Fastest option first - each fallback is only tried if the previous import fails.
try:
  from solve_weave_mp import fit, fitDoc
  __fitter = 'multiprocess weave'
except:
  try:
    from solve_weave import fit, fitDoc
    __fitter = 'weave'
  except:
    try :
      from solve_python_mp import fit, fitDoc
      __fitter = 'multiprocess python'
    except:
      try:
        from solve_python import fit, fitDoc
        __fitter = 'python'
      except:
        # Nothing worked - the pure python solver failing suggests a broken install.
        raise Exception('All of the lda solvers failed to load.')
def getAlgorithm():
  """Returns a text string indicating which implimentation of the fitting algorithm was actually loaded."""
  # Read-only access to the module level variable, so no global statement is needed.
  return __fitter
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.random
import scipy
import solve_shared as shared
def iniGibbs(state):
  """Does the initialisation gibbs pass, where it incrimentally sets the starting topic assignments based on the documents so far fitted. Builds the count matrices/vectors in the state at the same time."""
  dist = numpy.empty(state.topicCount.shape[0], dtype = numpy.float_)
  # Extra alpha mass given to a document's boosted topic...
  boostQuant = state.alpha*(state.alphaMult-1.0)
  for w in xrange(state.state.shape[0]): # Loop the words that consititute the state
    # Calculate the unnormalised distribution...
    dist[:] = numpy.asfarray(state.docTopicCount[state.state[w,0],:]) + state.alpha
    if state.boost[state.state[w,0]]!=-1:
      boostAmount = boostQuant
      dist[state.boost[state.state[w,0]]] += boostQuant
    else:
      boostAmount = 0.0
    # boostAmount must also appear in the normalising denominator, as the boosted topic carries extra mass...
    dist /= numpy.asfarray(state.docCount[state.state[w,0]]) + state.topicCount.shape[0]*state.alpha + boostAmount
    dist *= numpy.asfarray(state.topicWordCount[:,state.state[w,1]]) + state.beta
    dist /= numpy.asfarray(state.topicCount) + state.topicWordCount.shape[1]*state.beta
    # Normalise...
    dist /= dist.sum()
    # Select and set the state...
    state.state[w,2] = numpy.nonzero(numpy.random.multinomial(1,dist))[0][0]
    # Incriment the relevant counts from each of the 4 arrays...
    state.topicWordCount[state.state[w,2],state.state[w,1]] += 1
    state.topicCount[state.state[w,2]] += 1
    state.docTopicCount[state.state[w,0],state.state[w,2]] += 1
    state.docCount[state.state[w,0]] += 1
def gibbs(state,iters,next):
  """Does iters number of full gibbs iterations - each iteration resamples the topic assignment of every word in the state, updating the count arrays as it goes. next is called once per completed iteration for progress reporting."""
  dist = numpy.empty(state.topicCount.shape[0], dtype = numpy.float_)
  # Extra alpha mass given to a document's boosted topic - this was previously referenced without ever being defined in this function (it only existed in iniGibbs), causing a NameError for any corpus that used topic boosting.
  boostQuant = state.alpha*(state.alphaMult-1.0)
  for i in xrange(iters):
    for w in xrange(state.state.shape[0]): # Loop the words that consititute the state
      # Decriment the relevant counts from each of the 4 arrays, removing this word from the model...
      state.topicWordCount[state.state[w,2],state.state[w,1]] -= 1
      state.topicCount[state.state[w,2]] -= 1
      state.docTopicCount[state.state[w,0],state.state[w,2]] -= 1
      state.docCount[state.state[w,0]] -= 1
      # Calculate the unnormalised distribution...
      dist[:] = numpy.asfarray(state.docTopicCount[state.state[w,0],:]) + state.alpha
      if state.boost[state.state[w,0]]!=-1:
        boostAmount = boostQuant
        dist[state.boost[state.state[w,0]]] += boostQuant
      else:
        boostAmount = 0.0
      # boostAmount must also appear in the normalising denominator, as the boosted topic carries extra mass...
      dist /= numpy.asfarray(state.docCount[state.state[w,0]]) + state.topicCount.shape[0]*state.alpha + boostAmount
      dist *= numpy.asfarray(state.topicWordCount[:,state.state[w,1]]) + state.beta
      dist /= numpy.asfarray(state.topicCount) + state.topicWordCount.shape[1]*state.beta
      # Normalise...
      dist /= dist.sum()
      # Select and set the state...
      state.state[w,2] = numpy.nonzero(numpy.random.multinomial(1,dist))[0][0]
      # Incriment the relevant counts from each of the 4 arrays...
      state.topicWordCount[state.state[w,2],state.state[w,1]] += 1
      state.topicCount[state.state[w,2]] += 1
      state.docTopicCount[state.state[w,0],state.state[w,2]] += 1
      state.docCount[state.state[w,0]] += 1
    # Update the iter count...
    next()
def fitModel(state,params,next):
  """Given a state object generates samples - runs the initialisation pass, then burn in, then draws params.samples samples seperated by params.lag iterations. next is called after each unit of work so progress can be reported."""
  iniGibbs(state)
  next()
  # Burn in only needs the part that exceeds the lag done before the sampling loop, as the loop itself starts with lag iterations...
  extra = params.burnIn - params.lag
  if extra>0:
    gibbs(state,extra,next)
  for _ in xrange(params.samples):
    gibbs(state,params.lag,next)
    state.sample()
    next()
def fit(corpus,params,callback = None):
  """Complete fitting function - given a corpus fits a model. params is a Params object from solve-shared. callback if provided should take two numbers - the first is the number of iterations done, the second the number of iterations that need to be done; used to report progress. Note that it will probably not be called for every iteration, as that would be frightfully slow."""
  # Class to allow progress to be reported...
  class Reporter:
    def __init__(self,params,callback):
      self.doneIters = 0
      # Total work: per run, one initialisation pass, the burn in, plus lag iterations and a sample step for each sample after the first...
      self.totalIters = params.runs * (1 + params.burnIn + params.samples + (params.samples-1)*params.lag)
      self.callback = callback
      if self.callback:
        self.callback(self.doneIters,self.totalIters)
    def next(self):
      self.doneIters += 1
      if self.callback:
        self.callback(self.doneIters,self.totalIters)
  report = Reporter(params,callback)
  s = shared.State(corpus)
  # Iterate and do each of the runs...
  # (Each run works on a clone of the shared starting state; the results are merged back in afterwards.)
  for r in xrange(params.runs):
    ss = s.clone()
    fitModel(ss,params,report.next)
    s.absorbClone(ss)
  # Extract the final model into the corpus...
  s.extractModel(corpus)
def iniGibbsDoc(state,topicCount,topicsWords,alpha):
  """Initialisation Gibbs pass for a single document - incrimentally samples a topic for each word (stored into state[w,1]) given the words assigned so far, building up topicCount at the same time. state is a words x 2 array of (word ident, topic); topicsWords has its rows normalised to P(word|topic) by fitDoc before this is called; alpha is the Dirichlet concentration."""
  dist = numpy.empty(topicCount.shape[0], dtype = numpy.float_)
  for w in xrange(state.shape[0]): # Loop the words that consititute the state
    # Calculate the unnormalised distribution...
    dist[:] = topicsWords[:,state[w,0]]
    dist *= numpy.asfarray(topicCount) + alpha
    # w is the number of words assigned so far, hence it is the count total in the denominator...
    dist /= w + topicCount.shape[0]*alpha
    # Normalise...
    dist /= dist.sum()
    # Select and set the state...
    state[w,1] = numpy.nonzero(numpy.random.multinomial(1,dist))[0][0]
    # Incriment the relevant count for the words-per-topic array...
    topicCount[state[w,1]] += 1
def gibbsDoc(state,topicCount,topicsWords,alpha,iters):
  """Does iters full Gibbs iterations over a single document - each iteration resamples the topic assignment (state[w,1]) of every word, keeping topicCount in synch. Parameters are as for iniGibbsDoc."""
  dist = numpy.empty(topicCount.shape[0], dtype = numpy.float_)
  for i in xrange(iters):
    for w in xrange(state.shape[0]): # Loop the words that consititute the state
      # Decriment the relevant count for the words-per-topic array...
      topicCount[state[w,1]] -= 1
      # Calculate the unnormalised distribution...
      dist[:] = topicsWords[:,state[w,0]]
      dist *= numpy.asfarray(topicCount) + alpha
      # -1.0 as the current word has just been removed from the counts...
      dist /= state.shape[0] - 1.0 + topicCount.shape[0]*alpha
      # Normalise...
      dist /= dist.sum()
      # Select and set the state...
      state[w,1] = numpy.nonzero(numpy.random.multinomial(1,dist))[0][0]
      # Incriment the relevant count for the words-per-topic array...
      topicCount[state[w,1]] += 1
def fitDocModel(state,topicsWords,alpha,params):
  """Runs one complete chain for a single document - initialisation, burn in, then params.samples samples - and returns the (unnormalised) sum of the sampled topic distributions."""
  # Storage required - number of words assigned to each topic in the document...
  topicCount = numpy.zeros(topicsWords.shape[0], dtype=numpy.uint)
  # Do the initialisation step...
  iniGibbsDoc(state,topicCount,topicsWords,alpha)
  # If required do burn in...
  # (Only the part of burnIn beyond lag, as the sampling loop below starts with lag iterations.)
  if params.burnIn>params.lag:
    gibbsDoc(state,topicCount,topicsWords,alpha,params.burnIn-params.lag)
  # Collect the samples...
  ret = numpy.zeros(topicsWords.shape[0],dtype=numpy.float_)
  prep = numpy.zeros(topicsWords.shape[0],dtype=numpy.float_)
  for i in xrange(params.samples):
    # Iterations...
    gibbsDoc(state,topicCount,topicsWords,alpha,params.lag)
    # Sample...
    prep[:] = numpy.asfarray(topicCount)
    prep += alpha
    prep /= float(state.shape[0]) + topicCount.shape[0]*alpha
    prep /= prep.sum()
    ret += prep
  # Return the model for combining...
  return ret # Normalisation left for fitDoc method, as each run will return same number of samples.
def fitDoc(doc,topicsWords,alpha,params):
  """Given a single document finds the documents model parameter in the same way as the rest of the system, i.e. Gibbs sampling. Provided with a document to calculate for, a topics-words array giving the already trained topic-to-word distribution, the alpha parameter and a Params object indicating how much sampling to do."""
  # Normalise input to get P(word|topic)...
  tw = (topicsWords.T/topicsWords.sum(axis=1)).T
  # First generate a two column array - first column word index, second column its currently assigned topic...
  # (One row per word occurrence; topic starts at a dummy 0 and is filled in by the initialisation pass.)
  state = numpy.empty((doc.dupWords(),2),dtype=numpy.uint)
  index = 0
  for uIndex in xrange(doc.uniqueWords()):
    wIdent,count = doc.getWord(uIndex)
    for c in xrange(count):
      state[index,0] = wIdent
      state[index,1] = 0
      index += 1
  # Zero out the model...
  doc.model = numpy.zeros(tw.shape[0],dtype=numpy.float_)
  # Iterate and do each of the runs...
  # (Each run gets its own copy of state so the chains are independent; results are summed then renormalised.)
  for i in xrange(params.runs):
    doc.model += fitDocModel(state.copy(),tw,alpha,params)
  # Renormalise the sum of models...
  doc.model /= doc.model.sum()
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import time
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import numpy
import solve_shared as shared
from solve_weave import fitModel
from solve_weave import fitDocModel
def fitModelWrapper(state,params,doneIters,seed):
  """Wrapper around fitModel to make it suitable for multiprocessing."""
  # Seed explicitly - without this every worker process could follow an identical chain.
  numpy.random.seed(seed)
  def reportStep(amount = 1):
    doneIters.value += amount
  fitModel(state,params,reportStep)
  return state
def fit(corpus,params,callback = None):
  """Complete fitting function - given a corpus fits a model. params is a Params object from solve-shared. callback if provided should take two numbers - the first is the number of iterations done, the second the number of iterations that need to be done; used to report progress. Note that it will probably not be called for every iteration, as that would be frightfully slow."""
  # Create the state from the corpus and a pool of worker proccesses...
  s = shared.State(corpus)
  pool = mp.Pool()
  # Create a value for sub-processes to report back their progress with...
  manager = mp.Manager()
  doneIters = manager.Value('i',0)
  totalIters = params.runs * (1 + params.burnIn + params.samples + (params.samples-1)*params.lag)
  # Create a callback for when a job completes...
  # (apply_async runs this in the parent process, so merging into s here is safe.)
  def onComplete(state):
    s.absorbClone(state)
  # Create all the jobs...
  try:
    jobs = []
    seeds = numpy.random.random_integers(0,10000000,params.runs)
    for r in xrange(params.runs):
      jobs.append(pool.apply_async(fitModelWrapper,(s.clone(),params,doneIters,seeds[r]),callback = onComplete))
  finally:
    # Close the pool and wait for all the jobs to complete...
    # (Polls rather than blocking so progress can be reported through callback while waiting.)
    pool.close()
    while len(jobs)!=0:
      if jobs[0].ready():
        del jobs[0]
        continue
      time.sleep(0.1)
      if callback!=None:
        callback(doneIters.value,totalIters)
    pool.join()
  # Extract the final model into the corpus...
  s.extractModel(corpus)
def fitDoc(doc,topicsWords,alpha,params):
  """Given a single document finds the documents model parameter in the same way as the rest of the system, i.e. Gibbs sampling. Provided with a document to calculate for, a topics-words array giving the already trained topic-to-word distribution, the alpha parameter and a Params object indicating how much sampling to do."""
  # Normalise input to get P(word|topic)...
  tw = (topicsWords.T/topicsWords.sum(axis=1)).T
  # First generate a two column array - first column word index, second column its currently assigned topic...
  state = numpy.empty((doc.dupWords(),2),dtype=numpy.uint)
  index = 0
  for uIndex in xrange(doc.uniqueWords()):
    wIdent,count = doc.getWord(uIndex)
    for c in xrange(count):
      state[index,0] = wIdent
      state[index,1] = 0
      index += 1
  # Zero out the model...
  doc.model = numpy.zeros(tw.shape[0],dtype=numpy.float_)
  # Create a pool of processes to run the fitting in...
  pool = mp.Pool()
  # Callback for when a process completes...
  # (Runs in the parent process, so the += on doc.model is safe.)
  def onComplete(model):
    doc.model += model
  # Create all the jobs...
  # (Each run gets its own copy of state so the chains are independent.)
  try:
    jobs = []
    for r in xrange(params.runs):
      jobs.append(pool.apply_async(fitDocModel,(state.copy(),tw,alpha,params),callback = onComplete))
  finally:
    # Close the pool and wait for all the jobs to complete...
    pool.close()
    pool.join()
  # Renormalise the sum of models...
  doc.model /= doc.model.sum()
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import scipy.weave as weave
import solve_shared as shared
def iniGibbs(s):
  """Does the initialisation gibbs pass, where it incrimentally sets the starting topic assignments based on the documents so far fitted. Builds the count matrices/vectors in the state at the same time."""
  # Local aliases - weave.inline can only be handed simple named variables...
  dist = numpy.empty(s.topicCount.shape[0], dtype = numpy.float_)
  topicWordCount = s.topicWordCount
  topicCount = s.topicCount
  docTopicCount = s.docTopicCount
  docCount = s.docCount
  state = s.state
  alpha = s.alpha
  beta = s.beta
  boostAmount = s.alpha*(s.alphaMult-1.0)
  boost = s.boost
  # One uniform random number per word, consumed by the C code when sampling each topic...
  rand = numpy.random.random(state.shape[0])
  code = """
  for (int w=0;w<Nstate[0];w++)
  {
   // Calculate the unnormalised distribution...
   float sum = 0.0;
   int bt = BOOST1(STATE2(w,0));
   for (int t=0;t<Ndist[0];t++)
   {
    float top1 = TOPICWORDCOUNT2(t,STATE2(w,1)) + beta;
    float bottom1 = TOPICCOUNT1(t) + NtopicWordCount[1]*beta;
    float top2 = DOCTOPICCOUNT2(STATE2(w,0),t) + alpha;
    if (bt==t) top2 += boostAmount;
    DIST1(t) = (top1/bottom1) * top2;
    sum += DIST1(t);
   }
   // Normalise the distribution...
   for (int t=0;t<Ndist[0];t++) DIST1(t) /= sum;
   // Select and set the state...
   sum = 0.0;
   for (int t=0;t<Ndist[0];t++)
   {
    STATE2(w,2) = t;
    sum += DIST1(t);
    if (sum>RAND1(w)) break;
   }
   // Incriment the relevant counts from each of the 4 arrays...
   TOPICWORDCOUNT2(STATE2(w,2),STATE2(w,1)) += 1;
   TOPICCOUNT1(STATE2(w,2)) += 1;
   DOCTOPICCOUNT2(STATE2(w,0),STATE2(w,2)) += 1;
   DOCCOUNT1(STATE2(w,0)) += 1;
  }
  """
  weave.inline(code, ['dist', 'topicWordCount', 'topicCount', 'docTopicCount', 'docCount', 'state', 'alpha', 'beta', 'rand', 'boostAmount', 'boost'])
def gibbs(s,iters,next):
  """Does iters number of Gibbs iterations."""
  # Variables...
  # (Local aliases, as weave.inline can only be handed simple named variables.)
  dist = numpy.empty(s.topicCount.shape[0], dtype = numpy.float_)
  topicWordCount = s.topicWordCount
  topicCount = s.topicCount
  docTopicCount = s.docTopicCount
  docCount = s.docCount
  state = s.state
  alpha = s.alpha
  beta = s.beta
  boostAmount = s.alpha*(s.alphaMult-1.0)
  boost = s.boost
  # Code...
  code = """
  for (int i=0;i<numIters;i++)
  {
   for (int w=0;w<Nstate[0];w++)
   {
    // Decriment the relevant counts from each of the 4 arrays...
    TOPICWORDCOUNT2(STATE2(w,2),STATE2(w,1)) -= 1;
    TOPICCOUNT1(STATE2(w,2)) -= 1;
    DOCTOPICCOUNT2(STATE2(w,0),STATE2(w,2)) -= 1;
    DOCCOUNT1(STATE2(w,0)) -= 1;
    // Calculate the unnormalised distribution...
    float sum = 0.0;
    int bt = BOOST1(STATE2(w,0));
    for (int t=0;t<Ndist[0];t++)
    {
     float top1 = TOPICWORDCOUNT2(t,STATE2(w,1)) + beta;
     float bottom1 = TOPICCOUNT1(t) + NtopicWordCount[1]*beta;
     float top2 = DOCTOPICCOUNT2(STATE2(w,0),t) + alpha;
     if (bt==t) top2 += boostAmount;
     float val = (top1/bottom1) * top2;
     DIST1(t) = val;
     sum += val;
    }
    // Select and set the state...
    float offset = 0.0;
    float threshold = RAND2(i,w) * sum;
    STATE2(w,2) = Ndist[0]-1;
    for (int t=0;t<Ndist[0];t++)
    {
     offset += DIST1(t);
     if (offset>threshold)
     {
      STATE2(w,2) = t;
      break;
     }
    }
    // Incriment the relevant counts from each of the 4 arrays...
    TOPICWORDCOUNT2(STATE2(w,2),STATE2(w,1)) += 1;
    TOPICCOUNT1(STATE2(w,2)) += 1;
    DOCTOPICCOUNT2(STATE2(w,0),STATE2(w,2)) += 1;
    DOCCOUNT1(STATE2(w,0)) += 1;
   }
  }
  """
  # Execution, taking care to not let the random number array get too large...
  # NOTE(review): integer division under Python 2; under Python 3 this '/' would yield a float chunk size.
  chunkSize = (8*1024*1024)/state.shape[0] + 1
  while iters!=0:
    numIters = min(chunkSize,iters)
    iters -= numIters
    rand = numpy.random.random((numIters,state.shape[0]))
    weave.inline(code, ['dist', 'topicWordCount', 'topicCount', 'docTopicCount', 'docCount', 'state', 'alpha', 'beta', 'rand', 'numIters', 'boostAmount', 'boost'])
    next(numIters)
def fitModel(state,params,next):
  """Given a state object generates samples - runs the initialisation pass, then burn in, then draws params.samples samples seperated by params.lag iterations. next is called after each unit of work so progress can be reported."""
  iniGibbs(state)
  next()
  # Burn in only needs the part that exceeds the lag done up front, as the sampling loop starts with lag iterations...
  extra = params.burnIn - params.lag
  if extra>0:
    gibbs(state,extra,next)
  for _ in xrange(params.samples):
    gibbs(state,params.lag,next)
    state.sample()
    next()
def fit(corpus,params,callback = None):
  """Complete fitting function - given a corpus fits a model. params is a Params object from solve-shared. callback if provided should take two numbers - the first is the number of iterations done, the second the number of iterations that need to be done; used to report progress. Note that it will probably not be called for every iteration, as that would be frightfully slow."""
  # Class to allow progress to be reported...
  class Reporter:
    def __init__(self,params,callback):
      self.doneIters = 0
      # Total work: per run, one initialisation pass, the burn in, plus lag iterations and a sample step for each sample after the first...
      self.totalIters = params.runs * (1 + params.burnIn + params.samples + (params.samples-1)*params.lag)
      self.callback = callback
      if self.callback:
        self.callback(self.doneIters,self.totalIters)
    def next(self,amount = 1):
      self.doneIters += amount
      if self.callback:
        self.callback(self.doneIters,self.totalIters)
  report = Reporter(params,callback)
  s = shared.State(corpus)
  # Iterate and do each of the runs...
  # (Each run works on a clone of the shared starting state; the results are merged back in afterwards.)
  for r in xrange(params.runs):
    ss = s.clone()
    fitModel(ss,params,report.next)
    s.absorbClone(ss)
  # Extract the final model into the corpus...
  s.extractModel(corpus)
def iniGibbsDoc(state,topicCount,topicsWords,alpha):
  """Initialisation Gibbs pass for a single document, with the inner loop done in inlined C - incrimentally samples a topic for each word (stored into state[w,1]) given the words assigned so far, building up topicCount at the same time."""
  dist = numpy.empty(topicCount.shape[0], dtype = numpy.float_)
  # One uniform random number per word, consumed by the C code when sampling each topic...
  rand = numpy.random.random(state.shape[0])
  code = """
  for (int w=0;w<Nstate[0];w++)
  {
   // Calculate the unnormalised distribution...
   float sum = 0.0;
   for (int t=0;t<Ndist[0];t++)
   {
    DIST1(t) = TOPICSWORDS2(t,STATE2(w,0));
    DIST1(t) *= TOPICCOUNT1(t) + alpha;
    DIST1(t) /= float(w) + NtopicCount[0]*alpha;
    sum += DIST1(t);
   }
   // Normalise...
   for (int t=0;t<Ndist[0];t++) DIST1(t) /= sum;
   // Select and set the state...
   sum = 0.0;
   for (int t=0;t<Ndist[0];t++)
   {
    STATE2(w,1) = t;
    sum += DIST1(t);
    if (sum>RAND1(w)) break;
   }
   // Incriment the relevant count for the words-per-topic array...
   TOPICCOUNT1(STATE2(w,1)) += 1;
  }
  """
  weave.inline(code,['state', 'topicCount', 'topicsWords', 'alpha', 'dist', 'rand'])
def gibbsDoc(state,topicCount,topicsWords,alpha,iters):
  """Runs iters full Gibbs sweeps over a single document - for each word it removes the word's current assignment from topicCount, resamples a topic conditioned on everything else, and reinstates the count. Mutates state and topicCount in place."""
  # Workspace for the per-word topic distribution...
  dist = numpy.empty(topicCount.shape[0], dtype = numpy.float_)

  # Code... (compiled by scipy.weave - must stay verbatim.)
  code = """
for (int i=0;i<numIters;i++)
{
for (int w=0;w<Nstate[0];w++)
{
// Decriment the relevant count for the words-per-topic array...
TOPICCOUNT1(STATE2(w,1)) -= 1;
// Calculate the unnormalised distribution...
float sum = 0.0;
for (int t=0;t<Ndist[0];t++)
{
DIST1(t) = TOPICSWORDS2(t,STATE2(w,0));
DIST1(t) *= TOPICCOUNT1(t) + alpha;
DIST1(t) /= Nstate[0] - 1.0 + NtopicCount[0]*alpha;
sum += DIST1(t);
}
// Normalise...
for (int t=0;t<Ndist[0];t++) DIST1(t) /= sum;
// Select and set the state...
sum = 0.0;
for (int t=0;t<Ndist[0];t++)
{
STATE2(w,1) = t;
sum += DIST1(t);
if (sum>RAND2(i,w)) break;
}
// Incriment the relevant count for the words-per-topic array...
TOPICCOUNT1(STATE2(w,1)) += 1;
}
}
"""

  # Run in chunks so the pre-generated table of random numbers stays below
  # roughly 8 million entries, bounding memory use for long runs...
  chunkSize = (8*1024*1024)/state.shape[0] + 1
  while iters!=0:
    numIters = min(iters,chunkSize)
    iters -= numIters
    # One uniform draw per (iteration, word)...
    rand = numpy.random.random((numIters,state.shape[0]))
    weave.inline(code,['state', 'topicCount', 'topicsWords', 'alpha', 'dist', 'rand', 'numIters'])
def fitDocModel(state,topicsWords,alpha,params):
  """Runs one complete sampling run over a single document - initialisation, burn in, then params.samples samples with params.lag Gibbs sweeps between each. Returns the unnormalised sum of the sampled topic distributions; the caller normalises after combining runs."""
  # Storage required - number of words assigned to each topic in the document...
  topicCount = numpy.zeros(topicsWords.shape[0], dtype=numpy.uint)

  # Do the initialisation step...
  iniGibbsDoc(state,topicCount,topicsWords,alpha)

  # If required do burn in... (params.lag sweeps always precede the first sample,
  # so only the excess over the lag is needed here.)
  if params.burnIn>params.lag:
    gibbsDoc(state,topicCount,topicsWords,alpha,params.burnIn-params.lag)

  # Collect the samples...
  ret = numpy.zeros(topicsWords.shape[0],dtype=numpy.float_)
  prep = numpy.zeros(topicsWords.shape[0],dtype=numpy.float_)
  for i in xrange(params.samples):
    # Iterations...
    gibbsDoc(state,topicCount,topicsWords,alpha,params.lag)

    # Sample - the smoothed topic distribution for the current assignment...
    prep[:] = numpy.asfarray(topicCount)
    prep += alpha
    prep /= float(state.shape[0]) + topicCount.shape[0]*alpha
    # NOTE(review): the explicit normalisation below makes the division above
    # redundant for the final value - presumably kept for clarity; verify.
    prep /= prep.sum()
    ret += prep

  # Return the model for combining...
  return ret # Normalisation left for fitDoc method, as each run will return same number of samples.
def fitDoc(doc,topicsWords,alpha,params):
  """Given a single document finds the documents model parameter in the same way as the rest of the system, i.e. Gibbs sampling. Provided with a document to calculate for, a topics-words array giving the already trained topic-to-word distribution, the alpha parameter and a Params object indicating how much sampling to do."""
  # Normalise input to get P(word|topic)...
  tw = (topicsWords.T/topicsWords.sum(axis=1)).T

  # Two column array - first column word index, second column its currently
  # assigned topic - filled in one unique word at a time via slice assignment...
  state = numpy.empty((doc.dupWords(),2),dtype=numpy.uint)
  pos = 0
  for ui in xrange(doc.uniqueWords()):
    wIdent,count = doc.getWord(ui)
    end = pos + count
    state[pos:end,0] = wIdent
    state[pos:end,1] = 0
    pos = end

  # Zero out the model...
  doc.model = numpy.zeros(tw.shape[0],dtype=numpy.float_)

  # Iterate and do each of the runs, summing the results...
  for _ in xrange(params.runs):
    doc.model += fitDocModel(state.copy(),tw,alpha,params)

  # Renormalise the sum of models...
  doc.model /= doc.model.sum()
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# This loads in the entire libarary and provide the interface - only import needed by a user.
# Version that doesn't consider using a multiprocess solver.
# Load in all the data structure types...
from document import Document
from topic import Topic
from corpus import Corpus
# Get the shared solvers params object...
from solve_shared import Params
# Load in a suitable solver - autodetect the most powerful supported...
# Try the weave solver first, as it is much faster; fall back to the pure
# Python implementation. The bare excepts are deliberate - importing the weave
# solver can fail with compiler errors rather than just ImportError...
try:
  from solve_weave import fit,fitDoc
  __fitter = 'weave'
except:
  try:
    from solve_python import fit,fitDoc
    __fitter = 'python'
  except:
    raise Exception('All of the lda solvers failed to load.')
def getAlgorithm():
  """Returns a text string indicating which implementation of the fitting algorithm is being used - 'weave' or 'python'."""
  # Module-level name set during solver autodetection; reading it needs no
  # global declaration.
  return __fitter
| Python |
# -*- coding: utf-8 -*-
# Package marker - only the lda module is exported from this namespace.
__all__ = ['lda']
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import time
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import numpy
import numpy.random
import scipy
import solve_shared as shared
from solve_python import fitModel
from solve_python import fitDocModel
def fitModelWrapper(state,params,doneIters):
  """Wrapper around fitModel to make it suitable for multiprocessing - bumps the shared iteration counter as progress is made and returns the mutated state so the parent can absorb it."""
  def step():
    doneIters.value += 1
  fitModel(state,params,step)
  return state
def fit(corpus,params,callback = None):
  """Complete fitting function - given a corpus fits a model. params is a Params object from solve-shared. callback if provided should take two numbers - the first is the number of iterations done, the second the number of iterations that need to be done; used to report progress. Note that it will probably not be called for every iteration, as that would be frightfully slow."""
  # Create the state from the corpus and a pool of worker proccesses...
  s = shared.State(corpus)
  pool = mp.Pool()

  # Create a value for sub-processes to report back their progress with...
  manager = mp.Manager()
  doneIters = manager.Value('i',0)
  totalIters = params.runs * (1 + params.burnIn + params.samples + (params.samples-1)*params.lag)

  # Create a callback for when a job completes - runs in the parent process,
  # merging each finished run back into the master state...
  def onComplete(state):
    s.absorbClone(state)

  # Create all the jobs - one independent run per job...
  try:
    jobs = []
    for r in xrange(params.runs):
      jobs.append(pool.apply_async(fitModelWrapper,(s.clone(),params,doneIters),callback = onComplete))
  finally:
    # Close the pool and wait for all the jobs to complete - polled at ~10Hz so
    # progress can be relayed to the caller while waiting...
    pool.close()
    while len(jobs)!=0:
      if jobs[0].ready():
        del jobs[0]
        continue
      time.sleep(0.1)
      if callback!=None:
        callback(doneIters.value,totalIters)
    pool.join()

  # Extract the final model into the corpus...
  s.extractModel(corpus)
def fitDoc(doc,topicsWords,alpha,params):
  """Given a single document finds the documents model parameter in the same way as the rest of the system, i.e. Gibbs sampling. Provided with a document to calculate for, a topics-words array giving the already trained topic-to-word distribution, the alpha parameter and a Params object indicating how much sampling to do."""
  # Normalise input to get P(word|topic)...
  tw = (topicsWords.T/topicsWords.sum(axis=1)).T

  # First generate a two column array - first column word index, second column its currently assigned topic...
  state = numpy.empty((doc.dupWords(),2),dtype=numpy.uint)
  index = 0
  for uIndex in xrange(doc.uniqueWords()):
    wIdent,count = doc.getWord(uIndex)
    for c in xrange(count):
      state[index,0] = wIdent
      state[index,1] = 0
      index += 1

  # Zero out the model...
  doc.model = numpy.zeros(tw.shape[0],dtype=numpy.float_)

  # Create a pool of processes to run the fitting in - one run per job...
  pool = mp.Pool()

  # Callback for when a process completes - runs in the parent process, so the
  # in-place accumulation into doc.model is safe...
  def onComplete(model):
    doc.model += model

  # Create all the jobs...
  try:
    jobs = []
    for r in xrange(params.runs):
      jobs.append(pool.apply_async(fitDocModel,(state.copy(),tw,alpha,params),callback = onComplete))
  finally:
    # Close the pool and wait for all the jobs to complete...
    pool.close()
    pool.join()

  # Renormalise the sum of models...
  doc.model /= doc.model.sum()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Some basic matrix operations that come in use...
# C++ support code for scipy.weave - provides MemSwap, a recursive Determinant
# and a Gauss-Jordan Inverse as templated inline functions. The string contents
# are compiled elsewhere and must be kept verbatim.
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE
template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
while(count!=0)
{
T t = *lhs;
*lhs = *rhs;
*rhs = t;
++lhs;
++rhs;
--count;
}
}
// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
if (stride==-1) stride = size;
if (size==1) return pos[0];
else
{
if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
else
{
T ret = 0.0;
for (int i=0; i<size; i++)
{
if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
if ((i+size)%2) ret += sub;
else ret -= sub;
}
for (int i=1; i<size; i++)
{
MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
}
return ret;
}
}
}
// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
if (stride==-1) stride = size;
for (int r=0; r<size; r++)
{
for (int c=0; c<size; c++)
{
out[r*stride + c] = (c==r)?1.0:0.0;
}
}
for (int r=0; r<size; r++)
{
// Find largest pivot and swap in, fail if best we can get is 0...
T max = in[r*stride + r];
int index = r;
for (int i=r+1; i<size; i++)
{
if (fabs(in[i*stride + r])>fabs(max))
{
max = in[i*stride + r];
index = i;
}
}
if (index!=r)
{
MemSwap(&in[index*stride], &in[r*stride], size);
MemSwap(&out[index*stride], &out[r*stride], size);
}
if (fabs(max-0.0)<1e-6) return false;
// Divide through the entire row...
max = 1.0/max;
in[r*stride + r] = 1.0;
for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
for (int i=0; i<size; i++) out[r*stride + i] *= max;
// Row subtract to generate 0's in the current column, so it matches an identity matrix...
for (int i=0; i<size; i++)
{
if (i==r) continue;
T factor = in[i*stride + r];
in[i*stride + r] = 0.0;
for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
}
}
return true;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
class ProgBar:
  """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""
  def __init__(self, width = 60, onCallback = None):
    # Record the start time and draw the empty bar...
    self.start = time.time()
    self.fill = 0
    self.width = width
    self.onCallback = onCallback
    sys.stdout.write('_'*self.width + '\n')
    sys.stdout.flush()

  def __del__(self):
    # Complete the bar and report how long everything took...
    self.end = time.time()
    self.__show(self.width)
    sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
    if self.onCallback:
      self.onCallback()
    progress = min(int(float(self.width)*float(nDone)/float(nToDo)), self.width)
    if progress>self.fill:
      self.__show(progress)

  def __show(self, target):
    # Emit just the bars needed to advance the display to target columns...
    sys.stdout.write('|'*(target-self.fill))
    sys.stdout.flush()
    self.fill = target
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pydoc
import inspect
class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # html output - header is written now, accumulated sections on __del__...
    self.doc = pydoc.HTMLDoc()
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')
    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # wiki output - same pattern...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)
    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # Flush the accumulated sections in a fixed order and close both files...
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')
    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')
    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')
    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works. fls makes the first line bold (html) / starred (wiki)."""
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line - embolden the first line, italicise file names...
      hl = line.replace('\n', '')
      if i==0 and fls:
        hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      # wiki version of the same line...
      wl = line.strip()
      if i==0 and fls:
        wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))
    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    self.html_functions += self.doc.docroutine(func).replace(' ',' ')
    self.html_functions += '\n'

    # Reconstruct the signature for the wiki output from the argspec...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)
    if defaults==None: defaults = list()
    # Pad defaults with None so it aligns with args (defaults apply to the tail)...
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None:
      arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None:
      arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    self.html_classes += self.doc.docclass(cls).replace(' ',' ')
    self.html_classes += '\n'

    # wiki output - class header with its parent classes...
    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Document the methods, with __init__ sorted to the front...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))

    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): with this condition ismethod(method) is always False in
      # the branch below, so the getargspec arm looks unreachable - presumably
      # intended to be an 'or'; verify before relying on argument output here.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          args = ['?']
          varargs = None
          keywords = None
          defaults = None
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None:
          arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None:
          arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk the mro looking for a docstring if the method itself lacks one...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None: return inspect.getdoc(method)
          except: pass
          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None
        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Document simple data members as well...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import random
import math
from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp
# Provides various gamma-related functions...
# (C++ support code for scipy.weave - lnGamma via Lanczos's approximation,
# digamma and trigamma via asymptotic series plus recurrence shifts. Compiled
# elsewhere, so the string contents must be kept verbatim; accuracy is pinned
# down by the unit tests below.)
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE
#include <cmath>
// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
if (z<0.5)
{
// Use reflection formula, as approximation doesn't work down here...
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
}
else
{
double x = coeff[0];
for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
double t = z + 6.5;
return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
}
}
// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.
double ret = 0.0;
while (z<highVal)
{
ret -= 1.0/z;
z += 1.0;
}
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz4 = iz2*iz2;
double iz6 = iz4*iz2;
ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
return ret;
}
// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
static const double highVal = 8.0;
double ret = 0.0;
while (z<highVal)
{
ret += 1.0/(z*z);
z += 1.0;
}
z -= 1.0;
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz3 = iz1*iz2;
double iz5 = iz3*iz2;
double iz7 = iz5*iz2;
double iz9 = iz7*iz2;
ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
return ret;
}
#endif
"""
def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns the logarithm of the gamma function, evaluated at z."""
  # Thin weave wrapper - the actual implementation lives in gamma_code, passed
  # in as support_code.
  code = start_cpp(gamma_code) + """
return_val = lnGamma(z);
"""
  return weave.inline(code, ['z'], support_code=gamma_code)
def digamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the digamma function at z."""
  # Thin weave wrapper around the digamma implementation in gamma_code.
  code = start_cpp(gamma_code) + """
return_val = digamma(z);
"""
  return weave.inline(code, ['z'], support_code=gamma_code)
def trigamma(z):
  """Pointless as scipy, a library this is dependent on, defines this, but useful for testing. Returns an evaluation of the trigamma function at z."""
  # Thin weave wrapper around the trigamma implementation in gamma_code.
  code = start_cpp(gamma_code) + """
return_val = trigamma(z);
"""
  return weave.inline(code, ['z'], support_code=gamma_code)
class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - compares the weave implementations against the scipy equivalents at random points."""
  def test_compile(self):
    # Just checks that the support code compiles at all...
    code = start_cpp(gamma_code) + """
"""
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    # Compare against scipy's gammaln to a tight tolerance...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    # Compare against scipy's psi - the series truncation limits this to ~9 digits...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    # Compare against scipy's polygamma(1,.)...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)
# If this file is run directly do the unit tests...
if __name__ == '__main__':
  unittest.main()
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import hashlib
def start_cpp(hash_str = None):
  """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
  # Where was start_cpp called from? That file/line is what the #line
  # directive must point at...
  caller = inspect.currentframe().f_back
  info = inspect.getframeinfo(caller)

  if hash_str==None:
    return '#line %i "%s"\n'%(info[1],info[0])

  # Embed an md5 of the supplied code in a trailing comment, forcing weave to
  # see a change whenever the support code changes...
  h = hashlib.md5()
  h.update(hash_str)
  return '#line %i "%s" // %s\n'%(info[1],info[0],h.hexdigest())
| Python |
# -*- coding: utf-8 -*-
# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).
import cv
import numpy as np
def cv2array(im):
  """Converts a cv array (IplImage) to a numpy array - the result has shape (height, width, channels) with the dtype implied by the image's depth."""
  # Map from OpenCV depth constants to the matching numpy dtype names...
  depth2dtype = {
    cv.IPL_DEPTH_8U: 'uint8',
    cv.IPL_DEPTH_8S: 'int8',
    cv.IPL_DEPTH_16U: 'uint16',
    cv.IPL_DEPTH_16S: 'int16',
    cv.IPL_DEPTH_32S: 'int32',
    cv.IPL_DEPTH_32F: 'float32',
    cv.IPL_DEPTH_64F: 'float64',
  }

  # (Removed an unused local that shadowed this lookup.)
  a = np.fromstring(
    im.tostring(),
    dtype=depth2dtype[im.depth],
    count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a
def array2cv(a):
  """Converts a numpy array to a cv array (IplImage), if possible - the array may be 2D (single channel) or 3D (channels in the final dimension), with a dtype from the supported set."""
  # Map from numpy dtype names to the matching OpenCV depth constants...
  dtype2depth = {
    'uint8': cv.IPL_DEPTH_8U,
    'int8': cv.IPL_DEPTH_8S,
    'uint16': cv.IPL_DEPTH_16U,
    'int16': cv.IPL_DEPTH_16S,
    'int32': cv.IPL_DEPTH_32S,
    'float32': cv.IPL_DEPTH_32F,
    'float64': cv.IPL_DEPTH_64F,
  }

  # A 2D array has no channel axis - treat it as single channel. (Was a bare
  # except; a.shape is always a tuple so only IndexError can occur here.)
  try:
    nChannels = a.shape[2]
  except IndexError:
    nChannels = 1

  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
                               dtype2depth[str(a.dtype)],
                               nChannels)
  # The step is bytes per row = itemsize * channels * width...
  cv.SetData(cv_im, a.tostring(),
             a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal
import unittest
def repeat(x):
    """Infinite generator yielding its argument forever - pair it with mp_map to supply a constant argument alongside varying ones."""
    while True:
        yield x
def run_code(code, args):
    """Internal use function that does the work in each process."""
    # Rebuild the function from its marshalled code object, then invoke it...
    func = types.FunctionType(marshal.loads(code), globals(), '_')
    return func(*args)
def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of the inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant arguments easier as they can just repeat for infinity. A multiprocessing.Pool may be passed via the 'pool' keyword; otherwise one is created internally and cleaned up before returning."""
    pool = keywords.get('pool')
    ownPool = pool is None  # Only close a pool we created ourselves.
    if ownPool: pool = mp.Pool()

    try:
        # Marshal the function's code object so run_code can rebuild it in each worker...
        code = marshal.dumps(func.__code__)  # __code__ rather than func_code - valid on python 2.6+ and 3.

        # Queue every job, then collect the results in order...
        jobs = [pool.apply_async(run_code, (code, args)) for args in zip(*iters)]
        return [job.get() for job in jobs]
    finally:
        # Shut the pool down if it was created here - previously it leaked its worker processes...
        if ownPool:
            pool.close()
            pool.join()
class TestMpMap(unittest.TestCase):
    """Unit tests for mp_map and the repeat generator."""
    def test_simple1(self):
        # An identity function should hand the input back unchanged.
        def identity(value):
            return value
        source = ['a', 'b', 'c', 'd']
        self.assertEqual(source, mp_map(identity, source))

    def test_simple2(self):
        source = list(xrange(1000))
        expected = map(lambda v: v*2, source)
        self.assertEqual(expected, mp_map(lambda v: v*2, source))

    def test_gen(self):
        # Feeding a generator rather than a list must also work.
        def counter():
            for value in xrange(100):
                yield value
        expected = map(lambda v: v*2, counter())
        self.assertEqual(expected, mp_map(lambda v: v*2, counter()))

    def test_repeat(self):
        # repeat() supplies a constant second argument for every item.
        def product(left, right):
            return left * right
        source = list(xrange(50, 5000, 5))
        expected = map(lambda v: v*3, source)
        self.assertEqual(expected, mp_map(product, source, repeat(3)))

    def test_none(self):
        # Empty input gives an empty output.
        self.assertEqual([], mp_map(lambda v: v*v, []))
# Allow running this file directly to execute the unit tests above.
if __name__ == '__main__':
unittest.main()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import tempfile
import shutil
from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util
# Construct a default compiler object once at import time, so make_mod can query
# platform-specific module filenames. Detection can fail on some platforms, in
# which case None is recorded and make_mod refuses to run (see its first line).
try:
    __default_compiler = distutils.ccompiler.new_compiler()
except Exception:  # Exception rather than bare except - do not swallow KeyboardInterrupt/SystemExit.
    __default_compiler = None
def make_mod(name, base, source, openCL = False):
    """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediately import the just compiled module. Note that on failure errors can be thrown - its your choice to catch them or not. name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternatively a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necessary setup - done like this so these settings can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
    if __default_compiler is None: raise Exception('No compiler!')

    # Work out the various file names - check if we actually need to do anything...
    if not isinstance(source, list): source = [source]
    source_path = [os.path.join(base, s) for s in source]
    library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

    # any() rather than reduce(or, ...) - also safe when source is empty...
    if any(distutils.dep_util.newer(s, library_path) for s in source_path):
        # Assign these BEFORE the try - if mkdtemp raised inside it the finally
        # block would hit a NameError that masked the real error...
        old_argv = sys.argv[:]
        temp_dir = tempfile.mkdtemp()
        try:
            # distutils is driven via argv - fake a 'build_ext' command line...
            sys.argv = ['', 'build_ext', '--build-lib', base, '--build-temp', temp_dir]

            # Prepare the extension - headers go in as dependencies, not compile units...
            comp_path = [s for s in source_path if not s.endswith('.h')]
            depends = [s for s in source_path if s.endswith('.h')]
            if openCL:
                ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
            else:
                ext = Extension(name, comp_path, depends=depends)

            # Compile...
            setup(name=name, version='1.0.0', ext_modules=[ext])
        finally:
            # Restore the argv variable and remove the temporary directory...
            sys.argv = old_argv
            shutil.rmtree(temp_dir, True)
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code
# Provides various functions to assist with manipulating python objects from c++ code.
# C++ source fragment (prefixed with the numpy helpers) providing attribute
# accessors for use inside scipy.weave inline code. Each GetObject* helper
# fetches the named attribute with PyObject_GetAttrString, converts it, DECREFs
# the temporary and returns the value; the *1D variants copy the numpy array
# into a new[] buffer that the CALLER must delete[], optionally reporting its
# length via the size pointer. NOTE(review): none of the helpers check for a
# NULL (missing attribute) result - every call site must guarantee the
# attribute exists, or this will crash; confirm before reuse.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE
// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
PyObject * b = PyObject_GetAttrString(obj, name);
bool ret = b!=Py_False;
Py_DECREF(b);
return ret;
}
// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
PyObject * i = PyObject_GetAttrString(obj, name);
int ret = PyInt_AsLong(i);
Py_DECREF(i);
return ret;
}
// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
PyObject * f = PyObject_GetAttrString(obj, name);
float ret = PyFloat_AsDouble(f);
Py_DECREF(f);
return ret;
}
// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
unsigned char * ret = new unsigned char[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
Py_DECREF(nao);
return ret;
}
// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
float * ret = new float[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
Py_DECREF(nao);
return ret;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Defines helper functions for accessing numpy arrays...
numpy_util_code = start_cpp() + """
#ifndef NUMPY_UTIL_CODE
#define NUMPY_UTIL_CODE
float & Float1D(PyArrayObject * arr, int index = 0)
{
return *(float*)(arr->data + index*arr->strides[0]);
}
float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
unsigned char & Byte1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index*arr->strides[0]);
}
unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
int & Int1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index*arr->strides[0]);
}
int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
#endif
"""
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen
# Generates the documentation for the utils module - run as a script.
# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions... (make.make_mod was previously registered twice - once is enough.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
def setProcName(name):
"""Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
# Call the process control function...
# 15 is PR_SET_NAME - sets the kernel-visible process/thread name (the kernel
# truncates it to 15 characters + NUL). NOTE(review): c_char_p wants a byte
# string - fine on Python 2 str; Python 3 callers would have to encode.
libc = cdll.LoadLibrary('libc.so.6')
libc.prctl(15, c_char_p(name), 0, 0, 0)
# Update argv...
# _dl_argv is presumably glibc's stored argv - overwriting argv[0] in place
# changes what tools reading the command line (e.g. 'ps -f' below) display.
charPP = POINTER(POINTER(c_char))
argv = charPP.in_dll(libc,'_dl_argv')
# Bounded by the current argv[0] length: a longer name is truncated (the old
# terminator at position 'size' still ends the string), a shorter one is
# NUL-padded by strncpy.
size = libc.strlen(argv[0])
libc.strncpy(argv[0],c_char_p(name),size)
# Manual smoke test - show the process listings before and after renaming so
# the change is visible by eye ('ps' shows one naming mechanism, 'ps -f' the
# argv-based one).
if __name__=='__main__':
# Quick test that it works...
import os
ps1 = 'ps'
ps2 = 'ps -f'
os.system(ps1)
os.system(ps2)
setProcName('wibble_wobble')
os.system(ps1)
os.system(ps2)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import lda
from utils import doc_gen
# Generates the documentation for the lda_gibbs module - run as a script.
# Setup...
doc = doc_gen.DocGen('lda_gibbs', 'Latent Dirichlet Allocation (Gibbs)', 'Gibbs sampling implimentation of latent Dirichlet allocation')
doc.addFile('readme.txt', 'Overview')
# Functions...
doc.addFunction(lda.getAlgorithm)
# Classes...
doc.addClass(lda.Document)
doc.addClass(lda.Topic)
doc.addClass(lda.Corpus)
doc.addClass(lda.Params)
| Python |
# -*- coding: utf-8 -*-
# Copyright 2011 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import scipy
import sys
# Work out which package this file was imported under, so the matching solver
# module is pulled in - the file is shared between the lda and lda_nmp
# packages. ('in' rather than the long-deprecated dict.has_key.)
if 'lda' in sys.modules:
    import lda
elif 'lda_nmp' in sys.modules:
    import lda_nmp as lda
else:
    raise Exception('This module is not meant to be imported directly - import lda/lda_nmp instead.')
class Document:
    """Representation of a document used by the system. Consists of two parts: a) A list of words; each is referenced by a natural number and is associated with a count of how many of that particular word exist in the document. Stored in a matrix. b) The vector parameterising the multinomial distribution from which topics are drawn for the document, if this has been calculated."""
    def __init__(self, dic):
        """Constructs a document given a dictionary dic[ident] = count, where ident is the natural number that indicates which word and count is how many times that word exists in the document. Excluded entries are effectively assumed to have a count of zero. Note that the solver will construct an array 0..{max word ident} and assume all words in that range exist, going so far as smoothing in words that are never actually seen."""
        # Create the data store - one row per unique word, columns are (ident, count)...
        self.words = numpy.empty((len(dic), 2), dtype=numpy.uint)

        # Copy in the data...
        index = 0
        self.wordCount = 0 # Total number of words, counting duplicates - cached as it is often useful.
        for key, value in dic.items():
            self.words[index,0] = key
            self.words[index,1] = value
            self.wordCount += value
            index += 1
        assert(index==self.words.shape[0])

        # Sort the rows by word ident - dictionary iteration order can not be relied upon, and later code depends on the rows being ordered...
        self.words = self.words[self.words[:,0].argsort(),:]

        # Will ultimately contain a numpy.array parametrising the multinomial distribution from which topics are drawn; None until calculated...
        self.model = None

        # Ident for the document, stored in here for convenience. Only assigned when the document is stuffed into a Corpus...
        self.ident = None

        # Topic index, for (semi/weakly-) supervised classification problems...
        self.topic = None

    def getDic(self):
        """Returns a dictionary object that represents the document, basically a recreated version of the dictionary handed in to the constructor."""
        ret = dict()
        for i in range(self.words.shape[0]):
            ret[self.words[i,0]] = self.words[i,1]
        return ret

    def getIdent(self):
        """Ident - just the offset into the array in the corpus where this document is stored, or None if it is yet to be stored anywhere."""
        return self.ident

    def setTopic(self, topic = None):
        """Allows you to 'set the topic' for the document, which is by default not set. This simply results in an increase in the relevant entry of the prior dirichlet distribution, the size of which is decided by a parameter in the Corpus object. The purpose of this is to allow (semi/weakly-) supervised classification problems to be done, rather than just unsupervised. Defaults to None, which is no topic bias. This is of course not really setting - it is only a prior, and the algorithm could disagree with you. This is arguably an advantage, for if there are mistakes in your training set. Note that this is only used for training a complete topic model - for fitting a document to an existing model this is ignored. The input should be None to unset (The default) or an integer offset into the topic list."""
        self.topic = topic

    def getTopic(self):
        """Returns the pre-assigned topic, as an integer offset into the topic list, or None if not set."""
        return self.topic

    def dupWords(self):
        """Returns the number of words in the document, counting duplicates."""
        return self.wordCount

    def uniqueWords(self):
        """Returns the number of unique words in the document, i.e. not counting duplicates."""
        return self.words.shape[0]

    def getWord(self, word):
        """Given an index 0..uniqueWords()-1 this returns the tuple (ident,count) for that word."""
        return (self.words[word,0], self.words[word,1])

    def getModel(self):
        """Returns the vector defining the multinomial from which topics are drawn, P(topic), if it has been calculated, or None if it hasn't."""
        return self.model

    def setModel(self, model):
        """Sets the vector parametrising the topic multinomial - typically called by the solver."""
        self.model = model

    def probTopic(self, topic):
        """Returns the probability of the document emitting the given topic, where topics are represented by their ident. Do not call if model not calculated."""
        assert(self.model is not None) # 'is not None' - != broadcasts elementwise when model is a numpy array, breaking the assert.
        return self.model[topic]

    def fit(self, topicsWords, alpha = 1.0, params = None):
        """Calculates a model for this document given a topics-words array, alpha value and a Params object (a fresh default-constructed solve_shared.Params is used when params is None). Note that the topic-words array is technically a point approximation of what is really a prior over a multinomial distribution, so this is not technically correct, but it is good enough for most purposes."""
        # None sentinel rather than a default-constructed Params in the signature - avoids a single shared instance being created at import time...
        if params is None: params = solve_shared.Params()

        # Call the fitDoc function provided by the solver system...
        lda.fitDoc(self, topicsWords, alpha, params)

    def negLogLikelihood(self, topicsWords):
        """Returns the negative log likelihood of the document given a topics-words array. (This document can be in the corpus that generated the list or not, just as long as it has a valid model. Can use fit if need be.) Ignores the priors given by alpha and beta - just the probability of the words given the topic multinomials and the documents multinomial. Note that it is assuming that the topics-words array and document model are both exactly right, rather than averages of samples taken from the distribution over these parameters, i.e. this is not correct, but is generally a good enough approximation."""
        # Normalise the rows of the topicsWords array, to get P(word|topic)...
        tw = (topicsWords.T/topicsWords.sum(axis=1)).T

        # First create a multinomial distribution for the sample of words that we have - mn is 1D, indexed by word ident...
        mn = (tw.T * self.model).sum(axis=1)
        mn /= mn.sum()

        # Now calculate the negative log likelihood of generating the sample.
        # (mn[idents] rather than mn[:,idents] - mn is 1D, so the extra slice
        # was one index too many.)
        ret = -(numpy.log(mn[self.words[:,0]])*self.words[:,1]).sum()

        # Need to factor in the normalising constant before returning...
        # (Have to be careful here - these numbers can get silly sized.)
        logInt = -numpy.log(numpy.arange(self.wordCount+1))
        logInt[0] = 0.0 # Overwrites the inf produced by log(0).
        for i in range(1, logInt.shape[0]): logInt[i] += logInt[i-1]

        ret += logInt[-1]
        for i in range(self.words.shape[0]):
            ret -= logInt[self.words[i,1]]

        return ret
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.random
# scipy.weave is optional (and absent from modern scipy) - record None when it
# is unavailable so give_birth can fall back to the pure python code path.
try: from scipy import weave
except Exception: weave = None # Exception rather than bare except - keep KeyboardInterrupt/SystemExit deliverable.
from utils.start_cpp import start_cpp
class Node:
"""Defines a node - these are the bread and butter of the system. Each decision tree is made out of nodes, each of which contains a binary test - if a feature vector passes the test then it travels to the true child node; if it fails it travels to the false child node (Note lowercase to avoid reserved word clash.). Eventually a leaf node is reached, where test==None, at which point the stats object is obtained, merged with the equivalent for all decision trees, and then provided as the answer to the user. Note that this python object uses the __slots__ techneque to keep it small - there will often be many thousands of these in a trained model."""
__slots__ = ['test', 'true', 'false', 'stats', 'summary']
def __init__(self, goal, gen, pruner, es, index = slice(None), weights = None, depth = 0, stats = None, entropy = None, code = None):
    """This recursively grows the tree until the pruner says to stop. goal is a Goal object, so it knows what to optimise, gen a Generator object that provides tests for it to choose between and pruner is a Pruner object that decides when to stop growing. The exemplar set to train on is then provided, optionally with the indices of which members to use and weights to assign to them (weights align with the exemplar set, not with the relative exemplar indices defined by index). depth is the depth of this node - part of the recursive construction and used by the pruner as a possible reason to stop growing. stats is optionally provided to save on duplicate calculation, as it will be calculated as part of working out the split. entropy should match up with stats. The static method initC can be called to generate code that can be used by this constructor to accelerate test selection, but only if it is passed in."""
    # Identity comparisons throughout - stats may be array-like, for which ==
    # broadcasts and breaks truth testing...
    if goal is None: return # Blank node - used by the clone method.

    # Calculate the stats if not provided...
    if stats is None:
        self.stats = goal.stats(es, index, weights)
    else:
        self.stats = stats
    self.summary = None

    # Use the give_birth method to do the actual growth...
    self.give_birth(goal, gen, pruner, es, index, weights, depth, entropy, code)
def give_birth(self, goal, gen, pruner, es, index = slice(None), weights = None, depth = 0, entropy = None, code = None):
    """This recursively grows the tree until the pruner says to stop. goal is a Goal object, so it knows what to optimise, gen a Generator object that provides tests for it to choose between and pruner is a Pruner object that decides when to stop growing. The exemplar set to train on is then provided, optionally with the indices of which members to use and weights to assign to them (weights align with the exemplar set, not with the relative exemplar indices defined by index). depth is the depth of this node - part of the recursive construction and used by the pruner as a possible reason to stop growing. entropy should match up with self.stats. The static method initC can be called to generate code that can be used to accelerate test selection, but only if it is passed in."""
    # 'is None' rather than ==None throughout - weights is a numpy array when
    # given, and == against None broadcasts elementwise, breaking truth tests...
    if entropy is None: entropy = goal.entropy(self.stats)

    # Select the best test...
    if isinstance(code, str) and weave is not None:
        # Do things in C...
        init = start_cpp(code) + """
        if (Nindex[0]!=0)
        {
         srand48(rand);

         // Create the Exemplar data structure, in triplicate!..
         Exemplar * items = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);
         Exemplar * splitItems = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);
         Exemplar * temp = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);

         for (int i=0; i<Nindex[0]; i++)
         {
          int ind = index[i];
          float we = weights[ind];

          items[i].index = ind;
          items[i].weight = we;
          items[i].next = &items[i+1];

          splitItems[i].index = ind;
          splitItems[i].weight = we;
          splitItems[i].next = &splitItems[i+1];

          temp[i].next = &temp[i+1];
         }

         items[Nindex[0]-1].next = 0;
         splitItems[Nindex[0]-1].next = 0;
         temp[Nindex[0]-1].next = 0;

         // Do the work...
         selectTest(out, data, items, splitItems, temp, entropy);

         // Clean up...
         free(temp);
         free(splitItems);
         free(items);
        }
        """

        data = es.tupleInputC()
        out = dict()
        rand = numpy.random.randint(-1000000000,1000000000)
        if weights is None: weights = numpy.ones(es.exemplars(), dtype=numpy.float32)

        weave.inline(init, ['out', 'data', 'index', 'weights', 'entropy', 'rand'], support_code=code)

        if index.shape[0]==0: return

        bestTest = out['bestTest']
        if bestTest is not None:
            bestInfoGain = out['bestInfoGain']
            trueStats = out['trueStats']
            trueEntropy = out['trueEntropy']
            trueIndex = out['trueIndex']
            falseStats = out['falseStats']
            falseEntropy = out['falseEntropy']
            falseIndex = out['falseIndex']

            trueIndex.sort() # Not needed to work - to improve cache coherance.
            falseIndex.sort() # "
    else:
        if index.shape[0]==0: return

        # Do things in python...
        ## Details of best test found so far...
        bestInfoGain = -1.0
        bestTest = None
        trueStats = None
        trueEntropy = None
        trueIndex = None
        falseStats = None
        falseEntropy = None
        falseIndex = None

        ## Get a bunch of tests and evaluate them against the goal...
        for test in gen.itertests(es, index, weights):
            # Apply the test - res is elementwise, so ==True/==False build the
            # pass/fail masks used to split index...
            res = gen.do(test, es, index)
            tIndex = index[res==True]
            fIndex = index[res==False]

            # A split that sends everything the same way is useless - skip it...
            if tIndex.shape[0]==0 or fIndex.shape[0]==0: continue

            # Calculate the statistics...
            tStats = goal.stats(es, tIndex, weights)
            fStats = goal.stats(es, fIndex, weights)

            # Calculate the information gain...
            tEntropy = goal.entropy(tStats)
            fEntropy = goal.entropy(fStats)
            if weights is None:
                tWeight = float(tIndex.shape[0])
                fWeight = float(fIndex.shape[0])
            else:
                tWeight = weights[tIndex].sum()
                fWeight = weights[fIndex].sum()
            div = tWeight + fWeight
            tWeight /= div
            fWeight /= div
            infoGain = entropy - tWeight*tEntropy - fWeight*fEntropy

            # Store if the best so far...
            if infoGain>bestInfoGain:
                bestInfoGain = infoGain
                bestTest = test
                trueStats = tStats
                trueEntropy = tEntropy
                trueIndex = tIndex
                falseStats = fStats
                falseEntropy = fEntropy
                falseIndex = fIndex

    # Use the pruner to decide if we should split or not, and if so do it...
    self.test = bestTest
    if bestTest is not None and pruner.keep(depth, trueIndex.shape[0], falseIndex.shape[0], bestInfoGain, self)==True:
        # We are splitting - time to recurse...
        self.true = Node(goal, gen, pruner, es, trueIndex, weights, depth+1, trueStats, trueEntropy, code)
        self.false = Node(goal, gen, pruner, es, falseIndex, weights, depth+1, falseStats, falseEntropy, code)
    else:
        self.test = None
        self.true = None
        self.false = None
def clone(self):
  """Returns a deep copy of this node. Note that it only copys the nodes - test, stats and summary are all assumed to contain invariant entities that are always replaced, never editted."""
  # Make an empty node and copy the shared (immutable-by-convention) members across...
  dup = Node(None, None, None, None)
  dup.test = self.test
  dup.stats = self.stats
  dup.summary = self.summary
  # Recursively clone the children, when they exist...
  if self.true is not None:
    dup.true = self.true.clone()
  else:
    dup.true = None
  if self.false is not None:
    dup.false = self.false.clone()
  else:
    dup.false = None
  return dup
@staticmethod
def initC(goal, gen, es):
  """Builds the C support code used (via scipy.weave) to select the best test for a node during growth. Combines the evaluateC code, the generator's test-generation code and the goal's stats/entropy code, then appends a selectTest C function. Returns the code string, or None if any component (generator, goal or exemplar set) does not support C code generation."""
  # Get the evaluateC code, which this code is dependent on (provides data accessors, do_test and the Exemplar struct)...
  code = Node.evaluateC(gen, es, 'es')
  if code==None: return None
  # Add in the generator code...
  escl = es.listCodeC('es')
  try:
    gCode, gState = gen.genCodeC('gen', escl)
  except NotImplementedError:
    return None
  code += gCode
  # Add in the goal code...
  try:
    gDic = goal.codeC('goal', escl)
  except NotImplementedError:
    return None
  try:
    code += gDic['stats']
    code += gDic['entropy']
  except KeyError:
    return None
  # And finally add in the code we need to specifically handle the selection of a test for a node...
  # (The %(gState)s placeholder is filled with the generator's state struct type below.)
  code += start_cpp() + """
  // out - A dictionary to output into; data - The list of entities that represent the exemplar set; items - The set of items to optimise the test for, splitItems - A copy of items, which will be screwed with; temp - Like items, same size, so we can keep a temporary copy; entropy - The entropy of the set of items.
  void selectTest(PyObject * out, PyObject * data, Exemplar * items, Exemplar * splitItems, Exemplar * temp, float entropy)
  {
   // Setup the generator...
   %(gState)s state;
   gen_init(state, data, items);

   // Loop the tests, scoring each one and keeping the best so far...
   void * bestTest = 0;
   size_t bestTestLen = 0;
   void * bestPassStats = 0;
   size_t bestPassStatsLen = 0;
   float bestPassEntropy = -1.0;
   Exemplar * bestPassItems = temp;
   int bestPassItemsLen = 0;
   void * bestFailStats = 0;
   size_t bestFailStatsLen = 0;
   float bestFailEntropy = -1.0;
   Exemplar * bestFailItems = 0;
   int bestFailItemsLen = 0;
   float bestGain = 0.0;

   Exemplar * pass = splitItems;
   void * passStats = 0;
   size_t passStatsLength = 0;
   Exemplar * fail = 0;
   void * failStats = 0;
   size_t failStatsLength = 0;

   while (gen_next(state, data, items))
   {
    // Apply the test...
    Exemplar * newPass = 0;
    float passWeight = 0.0;
    Exemplar * newFail = 0;
    float failWeight = 0.0;

    while (pass)
    {
     Exemplar * next = pass->next;
     if (do_test(data, state.test, state.length, pass->index))
     {
      pass->next = newPass;
      newPass = pass;
      passWeight += pass->weight;
     }
     else
     {
      pass->next = newFail;
      newFail = pass;
      failWeight += pass->weight;
     }
     pass = next;
    }

    while (fail)
    {
     Exemplar * next = fail->next;
     if (do_test(data, state.test, state.length, fail->index))
     {
      fail->next = newPass;
      newPass = fail;
      passWeight += fail->weight;
     }
     else
     {
      fail->next = newFail;
      newFail = fail;
      failWeight += fail->weight;
     }
     fail = next;
    }

    pass = newPass;
    fail = newFail;

    if ((pass==0)||(fail==0))
    {
     // All data has gone one way - this scernario can not provide an advantage so ignore it.
     continue;
    }

    // Generate the stats objects and entropy...
    goal_stats(data, pass, passStats, passStatsLength);
    goal_stats(data, fail, failStats, failStatsLength);

    float passEntropy = goal_entropy(passStats, passStatsLength);
    float failEntropy = goal_entropy(failStats, failStatsLength);

    // Calculate the information gain...
    float div = passWeight + failWeight;
    passWeight /= div;
    failWeight /= div;

    float gain = entropy - passWeight*passEntropy - failWeight*failEntropy;

    // If it is the largest store its output for future consumption...
    if (gain>bestGain)
    {
     bestTestLen = state.length;
     bestTest = realloc(bestTest, bestTestLen);
     memcpy(bestTest, state.test, bestTestLen);

     bestPassStatsLen = passStatsLength;
     bestPassStats = realloc(bestPassStats, bestPassStatsLen);
     memcpy(bestPassStats, passStats, bestPassStatsLen);

     bestFailStatsLen = failStatsLength;
     bestFailStats = realloc(bestFailStats, bestFailStatsLen);
     memcpy(bestFailStats, failStats, bestFailStatsLen);

     bestPassEntropy = passEntropy;
     bestFailEntropy = failEntropy;
     bestGain = gain;

     Exemplar * storeA = bestPassItems;
     Exemplar * storeB = bestFailItems;
     bestPassItems = 0;
     bestPassItemsLen = 0;
     bestFailItems = 0;
     bestFailItemsLen = 0;

     Exemplar * targPass = pass;
     while (targPass)
     {
      // Get an output node...
      Exemplar * out;
      if (storeA)
      {
       out = storeA;
       storeA = storeA->next;
      }
      else
      {
       out = storeB;
       storeB = storeB->next;
      }

      // Store it...
      out->next = bestPassItems;
      bestPassItems = out;
      bestPassItemsLen++;

      out->index = targPass->index;
      targPass = targPass->next;
     }

     Exemplar * targFail = fail;
     while (targFail)
     {
      // Get an output node...
      Exemplar * out;
      if (storeA)
      {
       out = storeA;
       storeA = storeA->next;
      }
      else
      {
       out = storeB;
       storeB = storeB->next;
      }

      // Store it...
      out->next = bestFailItems;
      bestFailItems = out;
      bestFailItemsLen++;

      out->index = targFail->index;
      targFail = targFail->next;
     }
    }
   }

   // Output the best into the provided dictionary - quite a lot of information...
   if (bestTest!=0)
   {
    PyObject * t = PyFloat_FromDouble(bestGain);
    PyDict_SetItemString(out, "bestInfoGain", t);
    Py_DECREF(t);

    t = PyString_FromStringAndSize((char*)bestTest, bestTestLen);
    PyDict_SetItemString(out, "bestTest", t);
    Py_DECREF(t);

    t = PyString_FromStringAndSize((char*)bestPassStats, bestPassStatsLen);
    PyDict_SetItemString(out, "trueStats", t);
    Py_DECREF(t);

    t = PyFloat_FromDouble(bestPassEntropy);
    PyDict_SetItemString(out, "trueEntropy", t);
    Py_DECREF(t);

    PyArrayObject * ta = (PyArrayObject*)PyArray_FromDims(1, &bestPassItemsLen, NPY_INT32);
    int i = 0;
    while (bestPassItems)
    {
     *(int*)(ta->data + ta->strides[0]*i) = bestPassItems->index;
     i++;
     bestPassItems = bestPassItems->next;
    }
    PyDict_SetItemString(out, "trueIndex", (PyObject*)ta);
    Py_DECREF(ta);

    t = PyString_FromStringAndSize((char*)bestFailStats, bestFailStatsLen);
    PyDict_SetItemString(out, "falseStats", t);
    Py_DECREF(t);

    t = PyFloat_FromDouble(bestFailEntropy);
    PyDict_SetItemString(out, "falseEntropy", t);
    Py_DECREF(t);

    ta = (PyArrayObject*)PyArray_FromDims(1, &bestFailItemsLen, NPY_INT32);
    i = 0;
    while (bestFailItems)
    {
     *(int*)(ta->data + ta->strides[0]*i) = bestFailItems->index;
     i++;
     bestFailItems = bestFailItems->next;
    }
    PyDict_SetItemString(out, "falseIndex", (PyObject*)ta);
    Py_DECREF(ta);
   }
   else
   {
    PyDict_SetItemString(out, "bestTest", Py_None);
    Py_INCREF(Py_None);
   }

   // Clean up...
   free(bestTest);
   free(bestPassStats);
   free(bestFailStats);
   free(passStats);
   free(failStats);
  }
  """%{'gState':gState}
  return code
def evaluate(self, out, gen, es, index = slice(None), code=None):
  """Given a set of exemplars, and possibly an index, this outputs the infered stats entities. Requires the generator so it can apply the tests. The output goes into out, a list indexed by exemplar position. If code is set to a string generated by evaluateC it uses that, for speed."""
  # Normalise a slice index into an explicit integer array...
  if isinstance(index, slice): index = numpy.arange(*index.indices(es.exemplars()))
  if isinstance(code, str) and weave!=None:
    # Fast path - build the exemplar linked list in C and recurse through the tree there...
    init = start_cpp(code) + """
    if (Nindex[0]!=0)
    {
     // Create the Exemplar data structure...
      Exemplar * test_set = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);
      for (int i=0; i<Nindex[0]; i++)
      {
       test_set[i].index = index[i];
       test_set[i].next = &test_set[i+1];
      }
      test_set[Nindex[0]-1].next = 0;

     // Do the work...
      evaluate(self, data, test_set, out);

     // Clean up...
      free(test_set);
    }
    """

    data = es.tupleInputC()
    weave.inline(init, ['self', 'data', 'index', 'out'], support_code=code)
    return

  if self.test==None:
    # At a leaf - store this nodes stats object for the relevent nodes...
    for val in index: out[val] = self.stats
  else:
    # Need to split the index and send it down the two branches, as needed...
    # NOTE(review): the recursive calls do not forward code - harmless when the C
    # path above consumed it at the root, but the pure-Python fallback never
    # re-enables C lower down; confirm this is intentional.
    res = gen.do(self.test, es, index)
    tIndex = index[res==True]
    fIndex = index[res==False]

    if tIndex.shape[0]!=0: self.true.evaluate(out, gen, es, tIndex)
    if fIndex.shape[0]!=0: self.false.evaluate(out, gen, es, fIndex)
@staticmethod
def evaluateC(gen, es, esclName = 'es'):
  """For a given generator and exemplar set this returns the C code (Actually the support code.) that evaluate can use to accelerate its run time, or None if the various components involved do not support C code generation."""
  # First do accessors for the data set...
  try:
    escl = es.listCodeC(esclName)
  except NotImplementedError: return None

  code = ''
  for channel in escl:
    code += channel['get'] + '\n'
    code += channel['exemplars'] + '\n'
    code += channel['features'] + '\n'

  # Now throw in the test code...
  try:
    code += gen.testCodeC('do_test', escl) + '\n'
  except NotImplementedError: return None

  # Finally add in the code that recurses through and evaluates the nodes on the provided data...
  code += start_cpp() + """
  // So we can use an inplace modified linkied list to avoid malloc's during the real work (Weight is included because this code is reused by the generator system, which needs it.)...
  struct Exemplar
  {
   int index;
   float weight;
   Exemplar * next;
  };

  // Recursivly does the work...
  // node - node of the tree; for an external user this will always be the root.
  // data - python tuple containing the inputs needed at each stage.
  // test_set - Linked list of entities to analyse.
  // out - python list in which the output is to be stored.
  void evaluate(PyObject * node, PyObject * data, Exemplar * test_set, PyObject * out)
  {
   PyObject * test = PyObject_GetAttrString(node, "test");
   if (test==Py_None)
   {
    // Leaf node - assign the relevent stats to the members of the test-set...
    PyObject * stats = PyObject_GetAttrString(node, "stats");

    while (test_set)
    {
     Py_INCREF(stats);
     PyList_SetItem(out, test_set->index, stats);
     test_set = test_set->next;
    }

    Py_DECREF(stats);
   }
   else
   {
    // Branch node - use the test to split the test_set and recurse...
    // Tests...
    Exemplar * pass = 0;
    Exemplar * fail = 0;

    void * test_ptr = PyString_AsString(test);
    size_t test_len = PyString_Size(test);

    while (test_set)
    {
     Exemplar * next = test_set->next;

     if (do_test(data, test_ptr, test_len, test_set->index))
     {
      test_set->next = pass;
      pass = test_set;
     }
     else
     {
      test_set->next = fail;
      fail = test_set;
     }

     test_set = next;
    }

    // Recurse...
    if (pass)
    {
     PyObject * child = PyObject_GetAttrString(node, "true");
     evaluate(child, data, pass, out);
     Py_DECREF(child);
    }

    if (fail)
    {
     PyObject * child = PyObject_GetAttrString(node, "false");
     evaluate(child, data, fail, out);
     Py_DECREF(child);
    }
   }

   Py_DECREF(test);
  }
  """
  return code
def size(self):
  """Returns how many nodes this (sub-)tree consists of."""
  # A leaf counts itself; a branch counts itself plus both subtrees.
  if self.test is None:
    return 1
  return 1 + self.true.size() + self.false.size()
def error(self, goal, gen, es, index = slice(None), weights = None, inc = False, store = None, code = None):
  """Once a tree is trained this method allows you to determine how good it is, using a test set, which would typically be its out-of-bag (oob) test set. Given a test set, possibly weighted, it will return its error rate, as defined by the goal. goal is the Goal object used for trainning, gen the Generator. Also supports incrimental testing, where the information gleened from the test set is stored such that new test exemplars can be added. This is the inc variable - True to store this (potentially large) quantity of information, and update it if it already exists, False to not store it and therefore disallow incrimental learning whilst saving memory. Note that the error rate will change by adding more training data as well as more testing data - you can call it with es==None to get an error score without adding more testing exemplars, assuming it has previously been called with inc==True. store is for internal use only. code can be provided by the relevent parameter, as generated by the errorC method, allowing a dramatic speedup."""
  if code!=None and weave!=None:
    # Fast path - build the exemplar linked list in C and do everything there.
    # NOTE(review): this branch indexes `index` as an array (Nindex[0]) - a
    # caller using the default slice index must pass code=None; confirm callers do.
    init = start_cpp(code) + """
    float err = 0.0;
    float weight = 0.0;

    if (dummy==0) // To allow for a dummy run.
    {
     if (Nindex[0]!=0)
     {
      Exemplar * test_set = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);
      for (int i=0; i<Nindex[0]; i++)
      {
       int ind = index[i];
       test_set[i].index = ind;
       test_set[i].weight = weights[ind];
       test_set[i].next = &test_set[i+1];
      }
      test_set[Nindex[0]-1].next = 0;

      error(self, data, test_set, err, weight, incNum==1);

      free(test_set);
     }
     else
     {
      error(self, data, 0, err, weight, incNum==1);
     }
    }

    return_val = err;
    """

    data = es.tupleInputC()
    dummy = 1 if self==None else 0  # Allows a dummy call purely to force weave compilation.
    incNum = 1 if inc else 0
    if weights==None: weights = numpy.ones(es.exemplars(), dtype=numpy.float32)
    return weave.inline(init, ['self', 'data', 'index', 'weights', 'incNum', 'dummy'], support_code=code)
  else:
    # Book-keeping - work out if we need to return a score; make sure there is a store list...
    # (Only the top-level call creates store; recursive calls append into it.)
    ret = store==None
    if ret:
      store = []
      if isinstance(index, slice): index = numpy.arange(*index.indices(es.exemplars()))

    # Update the summary at this node if needed...
    summary = None
    if es!=None and index.shape[0]!=0:
      if self.summary==None: summary = goal.summary(es, index, weights)
      else: summary = goal.updateSummary(self.summary, es, index, weights)
      if inc: self.summary = summary

    # Either recurse to the leafs or include this leaf...
    if self.test==None:
      # A leaf...
      if summary!=None: store.append(goal.error(self.stats, summary))
    else:
      # Not a leaf...
      if es!=None:
        res = gen.do(self.test, es, index)
        tIndex = index[res==True]
        fIndex = index[res==False]

        # When a branch gets no exemplars but inc is on we still recurse with
        # es=None so previously stored summaries contribute to the score...
        if tIndex.shape[0]!=0: self.true.error(goal, gen, es, tIndex, weights, inc, store)
        elif inc==True: self.true.error(goal, gen, None, tIndex, weights, inc, store)

        if fIndex.shape[0]!=0: self.false.error(goal, gen, es, fIndex, weights, inc, store)
        elif inc==True: self.false.error(goal, gen, None, fIndex, weights, inc, store)
      else:
        self.true.error(goal, gen, es, index, weights, inc, store)
        self.false.error(goal, gen, es, index, weights, inc, store)

    # Calculate the weighted average of all the leafs all at once, to avoid an inefficient incrimental calculation, or just sum them up if a weight of None has been provided at any point...
    if ret and len(store)!=0:
      if None in map(lambda t: t[1], store):
        return sum(map(lambda t: t[0], store))
      else:
        store = numpy.asarray(store, dtype=numpy.float32)
        return numpy.average(store[:,0], weights=store[:,1])
@staticmethod
def errorC(goal, gen, es, esclName = 'es'):
  """Provides C code that can be used by the error method to go much faster. Makes use of a goal, a generator, and an exampler set, and the code will be unique for each keying combination of these. Will return None if C code generation is not supported for the particular combination."""
  # First do accessors for the data set...
  try:
    escl = es.listCodeC(esclName)
  except NotImplementedError: return None

  code = ''
  for channel in escl:
    code += channel['get'] + '\n'
    code += channel['exemplars'] + '\n'
    code += channel['features'] + '\n'

  # Throw in the test code...
  try:
    code += gen.testCodeC('do_test', escl) + '\n'
  except NotImplementedError: return None

  # Definition of Exemplar...
  code += start_cpp() + """
  // So we can use an inplace modified linkied list to avoid malloc's during the real work...
  struct Exemplar
  {
   int index;
   float weight;
   Exemplar * next;
  };
  """

  # Add the needed goal code...
  try:
    gDic = goal.codeC('goal', escl)
  except NotImplementedError:
    return None
  try:
    code += gDic['summary']
    code += gDic['updateSummary']
    code += gDic['error']
  except KeyError:
    return None

  # The actual code...
  code += start_cpp() + """
  // Recursivly calculates the error whilst updating the summaries...
  // node - node of the tree; for an external user this will always be the root.
  // data - python tuple containing the inputs needed at each stage.
  // test_set - Linked list of entities to use to generate/update the error.
  // err - Variable into which the error will be output. Must be 0.0 on call.
  // weight - Weight that can be used in the error calculation - basically temporary storage. Must be 0.0 on call.
  void error(PyObject * node, PyObject * data, Exemplar * test_set, float & err, float & weight, bool inc)
  {
   // Calculate/update the summary at this node, but only store it if inc is true...
   void * sum = 0;
   size_t sumLen = 0;

   PyObject * summary = PyObject_GetAttrString(node, "summary");
   if (summary==Py_None)
   {
    goal_summary(data, test_set, sum, sumLen);
   }
   else
   {
    sumLen = PyString_Size(summary);
    sum = realloc(sum, sumLen);
    memcpy(sum, PyString_AsString(summary), sumLen);
    goal_updateSummary(data, test_set, sum, sumLen);
   }
   Py_DECREF(summary);

   if (inc)
   {
    PyObject * t = PyString_FromStringAndSize((char*)sum, sumLen);
    PyObject_SetAttrString(node, "summary", t);
    Py_DECREF(t);
   }

   // If there is a test then recurse, otherwise calculate and include the error...
   PyObject * test = PyObject_GetAttrString(node, "test");
   if (test==Py_None)
   {
    // Leaf node - calculate and store the error...
    PyObject * stats = PyObject_GetAttrString(node, "stats");
    void * s = PyString_AsString(stats);
    size_t sLen = PyString_Size(stats);

    goal_error(s, sLen, sum, sumLen, err, weight);

    Py_DECREF(stats);
   }
   else
   {
    // Branch node - use the test to split the test_set and recurse...
    // Tests...
    Exemplar * pass = 0;
    Exemplar * fail = 0;

    void * test_ptr = PyString_AsString(test);
    size_t test_len = PyString_Size(test);

    while (test_set)
    {
     Exemplar * next = test_set->next;

     if (do_test(data, test_ptr, test_len, test_set->index))
     {
      test_set->next = pass;
      pass = test_set;
     }
     else
     {
      test_set->next = fail;
      fail = test_set;
     }

     test_set = next;
    }

    // Recurse...
    if ((pass!=0)||inc)
    {
     PyObject * child = PyObject_GetAttrString(node, "true");
     error(child, data, pass, err, weight, inc);
     Py_DECREF(child);
    }

    if ((fail!=0)||inc)
    {
     PyObject * child = PyObject_GetAttrString(node, "false");
     error(child, data, fail, err, weight, inc);
     Py_DECREF(child);
    }
   }

   // Clean up...
   Py_DECREF(test);
   free(sum);
  }
  """
  return code
def removeIncError(self):
  """Culls the information for incrimental testing from the data structure, either to reset ready for new information or just to shrink the data structure after learning is finished."""
  # Drop this node's summary, then recurse into both children of a branch.
  self.summary = None
  if self.test is not None:
    self.true.removeIncError()
    self.false.removeIncError()
def addTrain(self, goal, gen, es, index = slice(None), weights = None, code = None):
  """This allows you to update the nodes with more data, as though it was used for trainning. The actual tests are not affected, only the statistics at each node - part of incrimental learning. You can optionally proivde code generated by the addTrainC method to give it go faster stripes."""
  # Normalise a slice index into an explicit integer array...
  if isinstance(index, slice): index = numpy.arange(*index.indices(es.exemplars()))
  if code!=None:
    # Fast path - build the exemplar linked list in C and recurse there.
    # The Nindex[0]!=0 guard matches the one in error(): without it an empty
    # index would malloc(0) and then write test_set[-1].next out of bounds.
    init = start_cpp(code) + """
    if ((dummy==0)&&(Nindex[0]!=0)) // dummy allows a compile-only run; skip entirely on an empty index.
    {
     Exemplar * test_set = (Exemplar*)malloc(sizeof(Exemplar)*Nindex[0]);
     for (int i=0; i<Nindex[0]; i++)
     {
      int ind = index[i];
      test_set[i].index = ind;
      test_set[i].weight = weights[ind];
      test_set[i].next = &test_set[i+1];
     }
     test_set[Nindex[0]-1].next = 0;

     addTrain(self, data, test_set);

     free(test_set);
    }
    """

    data = es.tupleInputC()
    dummy = 1 if self==None else 0  # Allows a dummy call purely to force weave compilation.
    if weights==None: weights = numpy.ones(es.exemplars(), dtype=numpy.float32)
    return weave.inline(init, ['self', 'data', 'index', 'weights', 'dummy'], support_code=code)
  else:
    # Update this nodes stats...
    self.stats = goal.updateStats(self.stats, es, index, weights)

    # Check if it has children that need updating...
    if self.test!=None:
      # Need to split the index and send it down the two branches, as needed...
      res = gen.do(self.test, es, index)
      tIndex = index[res==True]
      fIndex = index[res==False]

      if tIndex.shape[0]!=0: self.true.addTrain(goal, gen, es, tIndex, weights)
      if fIndex.shape[0]!=0: self.false.addTrain(goal, gen, es, fIndex, weights)
@staticmethod
def addTrainC(goal, gen, es, esclName = 'es'):
  """Provides C code that the addTrain method can use to accelerate itself - standard rules about code being unique for each combination of input types applies."""
  # First do accessors for the data set...
  try:
    escl = es.listCodeC(esclName)
  except NotImplementedError: return None

  code = ''
  for channel in escl:
    code += channel['get'] + '\n'
    code += channel['exemplars'] + '\n'
    code += channel['features'] + '\n'

  # Throw in the test code...
  try:
    code += gen.testCodeC('do_test', escl) + '\n'
  except NotImplementedError: return None

  # Definition of Exemplar...
  code += start_cpp() + """
  // So we can use an inplace modified linkied list to avoid malloc's during the real work...
  struct Exemplar
  {
   int index;
   float weight;
   Exemplar * next;
  };
  """

  # Add the needed goal code...
  try:
    gDic = goal.codeC('goal', escl)
  except NotImplementedError:
    return None
  try:
    code += gDic['updateStats']
  except KeyError:
    return None

  # The recursive worker - updates stats at every node the exemplars touch...
  code += start_cpp() + """
  void addTrain(PyObject * node, PyObject * data, Exemplar * test_set)
  {
   // Update the stats at this node...
   PyObject * stats = PyObject_GetAttrString(node, "stats");
   size_t stLen = PyString_Size(stats);
   void * st = malloc(stLen);
   memcpy(st, PyString_AsString(stats), stLen);

   goal_updateStats(data, test_set, st, stLen);

   PyObject * t = PyString_FromStringAndSize((char*)st, stLen);
   PyObject_SetAttrString(node, "stats", t);
   Py_DECREF(t);

   free(st);
   Py_DECREF(stats);

   // If its not a leaf recurse down and do its children also...
   PyObject * test = PyObject_GetAttrString(node, "test");
   if (test!=Py_None)
   {
    // Tests...
    Exemplar * pass = 0;
    Exemplar * fail = 0;

    void * test_ptr = PyString_AsString(test);
    size_t test_len = PyString_Size(test);

    while (test_set)
    {
     Exemplar * next = test_set->next;

     if (do_test(data, test_ptr, test_len, test_set->index))
     {
      test_set->next = pass;
      pass = test_set;
     }
     else
     {
      test_set->next = fail;
      fail = test_set;
     }

     test_set = next;
    }

    // Recurse...
    if (pass!=0)
    {
     PyObject * child = PyObject_GetAttrString(node, "true");
     addTrain(child, data, pass);
     Py_DECREF(child);
    }

    if (fail!=0)
    {
     PyObject * child = PyObject_GetAttrString(node, "false");
     addTrain(child, data, fail);
     Py_DECREF(child);
    }
   }
   Py_DECREF(test);
  }
  """
  return code
def grow(self, goal, gen, pruner, es, index = slice(None), weights = None, depth = 0, code = None):
  """This is called on a tree that has already grown - it recurses to the children and continues as though growth never stopped. This can be to grow the tree further using a less stritc pruner or to grow the tree after further information has been added. code can be passed in as generated by the initC static method, and will be used to optimise test generation."""
  # Normalise a slice index into an explicit integer array - without this the
  # array indexing below (index[res==True]) fails when the default slice is used.
  # Matches the handling in evaluate/addTrain.
  if isinstance(index, slice): index = numpy.arange(*index.indices(es.exemplars()))
  if self.test==None:
    # Previously a leaf - attempt to split it further...
    self.give_birth(goal, gen, pruner, es, index, weights, depth, code = code)
  else:
    # Already split - route the exemplars down both branches and recurse...
    res = gen.do(self.test, es, index)
    tIndex = index[res==True]
    fIndex = index[res==False]

    self.true.grow(goal, gen, pruner, es, tIndex, weights, depth+1, code)
    self.false.grow(goal, gen, pruner, es, fIndex, weights, depth+1, code)
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
class Kernel:
  """Enum of supported kernel types, with some helpful static methods."""
  linear = 0 # dot(x1,x2)
  homo_polynomial = 1 # dot(x1,x2)^p1
  polynomial = 2 # (dot(x1,x2)+1)^p1
  rbf = 3 # exp(-p1||x1-x2||^2)
  sigmoid = 4 # tanh(p2*dot(x1,x2) + p1)
  # gbf must be a distinct value: it previously shared the value 3 with rbf,
  # which made the dictionaries below silently drop the rbf entries (duplicate
  # key), so rbf requests produced the gbf formula with the wrong p1 semantics.
  gbf = 5 # exp(-||x1-x2||^2 / 2p1^2)

  def getList():
    """Returns a list of all the kernels."""
    return [Kernel.linear, Kernel.homo_polynomial, Kernel.polynomial, Kernel.rbf, Kernel.gbf, Kernel.sigmoid]

  def toName(kernel):
    """Returns the full name of the kernel."""
    data = {Kernel.linear:'Linear', Kernel.homo_polynomial:'Homogeneous Polynomial', Kernel.polynomial:'Polynomial', Kernel.rbf:'Radial Basis Function', Kernel.gbf:'Gaussian Basis Function', Kernel.sigmoid:'Sigmoid'}
    return data[kernel]

  def toShortName(kernel):
    """Returns the short name of the kernel."""
    data = {Kernel.linear:'lin', Kernel.homo_polynomial:'homo-poly', Kernel.polynomial:'poly', Kernel.rbf:'rbf', Kernel.gbf:'gbf', Kernel.sigmoid:'sig'}
    return data[kernel]

  def toEquation(kernel):
    """Return a textural representation of the equation implimented by the kernel."""
    data = {Kernel.linear:'dot(x1,x2)', Kernel.homo_polynomial:'dot(x1,x2)^p1', Kernel.polynomial:'(dot(x1,x2)+1)^p1', Kernel.rbf:'exp(-p1||x1-x2||^2)', Kernel.gbf:'exp(-||x1-x2||^2 / 2p1^2)', Kernel.sigmoid:'tanh(p2*dot(x1,x2) + p1)'}
    return data[kernel]

  def toCode(kernel,p1,p2):
    """Given the two parameters this returns the C code for a kernel that calculates the function given the two vectors."""
    # Head...
    ret = 'double kernel(int length, double * x1, double * x2)\n'
    ret += '{\n'

    # If kernel requires a dot product generate it...
    if kernel in [Kernel.linear, Kernel.homo_polynomial, Kernel.polynomial, Kernel.sigmoid]:
      ret += ' double dot = 0.0;\n'
      ret += ' for (int i=0;i<length;i++)\n'
      ret += ' {\n'
      ret += '  dot += x1[i]*x2[i];\n'
      ret += ' }\n\n'

    # If kernel requires distance generate it, as distance squared...
    if kernel in [Kernel.rbf, Kernel.gbf]:
      ret += ' double dist2 = 0.0;\n'
      ret += ' for (int i=0;i<length;i++)\n'
      ret += ' {\n'
      ret += '  double diff = x1[i] - x2[i];\n'
      ret += '  dist2 += diff*diff;\n'
      ret += ' }\n\n'

    # Add in the return statement, which is unique to each kernel. Also remove the polynomial 'pow' commands if its to the power of 2...
    data = {Kernel.linear:'dot', Kernel.homo_polynomial:'pow(dot,{p1})', Kernel.polynomial:'pow(dot+1.0,{p1})', Kernel.rbf:'exp(-{p1}*dist2)', Kernel.gbf:'exp(-dist2 / (2.0*{p1}*{p1}))', Kernel.sigmoid:'tanh({p2}*dot + {p1})'}
    if (abs(p1-2.0)<1e-6) and (kernel in [Kernel.homo_polynomial, Kernel.polynomial]):
      # Special-case degree 2 - straight multiplication is cheaper than pow.
      if kernel==Kernel.homo_polynomial: exp = 'dot*dot'
      else: exp = '(dot+1.0)*(dot+1.0)'
    else:
      exp = data[kernel]
      exp = exp.replace('{p1}',str(p1))
      exp = exp.replace('{p2}',str(p2))
    ret += ' return ' + exp + ';\n'

    # Tail and return...
    ret += '}\n'
    return ret

  getList = staticmethod(getList)
  toName = staticmethod(toName)
  toShortName = staticmethod(toShortName)
  toEquation = staticmethod(toEquation)
  toCode = staticmethod(toCode)
class Params:
  """Parameters for the svm algorithm - softness and kernel. Defaults to a C value of 10 and a linear kernel."""
  def __init__(self):
    # The 'softness' parameter, and if it is rebalanced in the case of an unbalanced dataset...
    self.c = 10.0
    self.rebalance = True

    # Which kernel to use and two parameters whose meanings depend on the kernel...
    self.kernel = Kernel.linear
    self.p1 = 1.0
    self.p2 = 1.0

  def __str__(self):
    return '<C=' + str(self.c) + '(' + str(self.rebalance) + '); ' + Kernel.toShortName(self.kernel) + '(' + str(self.p1) + ',' + str(self.p2) + ')>'

  def getC(self):
    """Returns c, the softness parameter."""
    return self.c

  def setC(self,c):
    """Sets the c value, which indicates how soft the answer can be. (0 don't care, infinity means perfect seperation.) Default is 10.0"""
    self.c = c

  def getRebalance(self):
    """Returns whether the c value is rebalanced or not - defaults to true."""
    return self.rebalance

  def setRebalance(self,rebalance):
    """Sets if c is rebalanced or not."""
    self.rebalance = rebalance

  def getKernel(self):
    """Returns which kernel is being used; see the Kernel enum for translations of the value."""
    return self.kernel

  def getP1(self):
    """Returns kernel parameter 1, not always used."""
    return self.p1

  def getP2(self):
    """Returns kernel parameter 2, not always used."""
    return self.p2

  def setKernel(self,kernel, p1 = None, p2 = None):
    """Sets the kernel to use, and the parameters if need be."""
    self.kernel = kernel
    if p1!=None: self.p1 = p1
    if p2!=None: self.p2 = p2

  def setP1(self, p1):
    """Sets parameter p1."""
    self.p1 = p1

  def setP2(self, p2):
    """Sets parameter p2."""
    self.p2 = p2

  def setLinear(self):
    """Sets it to use the linear kernel."""
    # Bug fix: the attribute is Kernel.linear - 'Kernel.Linear' raised AttributeError.
    self.kernel = Kernel.linear

  def setHomoPoly(self,degree):
    """Sets it to use a homogenous polynomial, with the given degree."""
    self.kernel = Kernel.homo_polynomial
    self.p1 = degree

  def setPoly(self,degree):
    """Sets it to use a polynomial, with the given degree."""
    self.kernel = Kernel.polynomial
    self.p1 = degree

  def setRBF(self,scale):
    """Sets it to use a radial basis function, with the given distance scale."""
    self.kernel = Kernel.rbf
    self.p1 = scale

  def setGBF(self,sd):
    """Sets it to use a gaussian basis function, with the given standard deviation. (This is equivalent to a RBF with the scale set to 1/(2*sd^2))"""
    self.kernel = Kernel.gbf
    self.p1 = sd

  def setSigmoid(self,scale,offset):
    """Sets it to be a sigmoid, with the given parameters."""
    self.kernel = Kernel.sigmoid
    self.p1 = offset
    self.p2 = scale

  def getCode(self):
    """Returns the code for a function that impliments the specified kernel, with the parameters hard coded in."""
    return Kernel.toCode(self.kernel,self.p1,self.p2)

  def kernelKey(self):
    """Returns a string unique to the kernel/kernel parameters combo."""
    ret = Kernel.toShortName(self.kernel)
    if self.kernel!=Kernel.linear:
      ret += ':' + str(self.p1)
      if self.kernel==Kernel.sigmoid:
        ret += ':' + str(self.p2)
    return ret
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
# Some code used by the smo module...
cacheCode = """
// Right now only provides the most basic of caching, by pre-caclulating the diagonal, which is admitedly used very heavilly...
npy_intp * dmSize;
double * dm;
double * diagBuff;
void cacheBegin(npy_intp * dmSizeIn, double * dmIn)
{
dmSize = dmSizeIn;
dm = dmIn;
diagBuff = (double*)malloc(dmSize[0] * sizeof(double));
for (int i=0;i<dmSize[0];i++)
{
double * vec = dm + i*dmSize[1];
diagBuff[i] = kernel(dmSize[1], vec, vec);
}
}
double cacheK(int a,int b) // Indices of two vectors from the data matrix.
{
if (a!=b)
{
return kernel(dmSize[1], dm + a*dmSize[1], dm + b*dmSize[1]);
}
else
{
return diagBuff[a];
}
}
void cacheEnd()
{
free(diagBuff);
}
"""
# Core SMO loop (weave C code): WSS-2 style working-set selection - v1 maximises
# the gradient-derived score, v2 minimises the second-order objective estimate.
# Bug fix: the objective used 'a' (uninitialized on first use - it was only
# assigned when a pair was accepted) instead of the candidate's own quadratic
# coefficient 'na'; the second-order objective is -(b*b)/na.
smoCoreCode = """
// Constant...
const double eps = 1e-3;

// Initialise the cache...
cacheBegin(Ndm,dm);

// Iterate until convergance...
int pv1 = -1;
int pv2 = -1;
//long long int maxIter = ((long long int)Ny[0])*((long long int)Ny[0])*2; // Cap iters, to avoid any chance of it getting stuck in an infinite loop if a cyclic set of edits were to appear. Note that this number is really high - typically it going to need a few more than Ny[0] iterations.
//for (long long int iter=0;iter<maxIter;iter++)
while (true)
{
 // Determine which pair we are going to optimise, break if all are optimised...
 double maxG = -1e100;
 double minG = 1e100;
 double minObj = 1e100;

 // Select the first member of the pair, v1...
 int v1 = -1;
 for (int i=0;i<Ny[0];i++)
 {
  double c = (y[i]<0)?cn:cp;
  if (((y[i]>0)&&(alpha[i]<c))||((y[i]<0)&&(alpha[i]>0.0)))
  {
   double g = -y[i] * gradient[i];
   if (g >= maxG)
   {
    v1 = i;
    maxG = g;
   }
  }
 }

 // Select the second member of the pair, v2...
 int v2 = -1;
 double a;
 for (int i=0;i<Ny[0];i++)
 {
  double c = (y[i]<0)?cn:cp;
  if (((y[i]>0)&&(alpha[i]>0.0))||((y[i]<0)&&(alpha[i]<c)))
  {
   double g = -y[i] * gradient[i];
   if (g <= minG) minG = g;

   double b = maxG - g;
   if (b>0)
   {
    double na = cacheK(v1,v1) + cacheK(i,i) - 2.0*cacheK(v1,i);
    if (na<=0.0) na = 1e12;

    double obj = -(b*b)/na; // Second order objective estimate for this candidate - must use na, the candidate's own quadratic coefficient.
    if (obj <= minObj)
    {
     if ((i!=pv2)&&(v1!=pv1)) // Prevents it selecting the same pair twice in a row - this can cause an infinite loop.
     {
      v2 = i;
      a = na;
      minObj = obj;
     }
    }
   }
  }
 }

 // Check for convergance/algorithm has done its best...
 if (v2==-1) break;
 if ((maxG-minG)<eps) break;
 pv1 = v1;
 pv2 = v2;

 // Calculate new alpha values, to reduce the objective function...
 double b = -y[v1]*gradient[v1] + y[v2]*gradient[v2];
 double oldA1 = alpha[v1];
 double oldA2 = alpha[v2];
 alpha[v1] += y[v1]*b/a;
 alpha[v2] -= y[v2]*b/a;

 // Correct for alpha being out of range...
 double sum = y[v1]*oldA1 + y[v2]*oldA2;

 double c = (y[v1]<0)?cn:cp;
 if (alpha[v1]<0.0) alpha[v1] = 0.0;
 else { if (alpha[v1]>c) alpha[v1] = c; }
 alpha[v2] = y[v2] * (sum - y[v1]*alpha[v1]);

 c = (y[v2]<0)?cn:cp;
 if (alpha[v2]<0.0) alpha[v2] = 0.0;
 else { if (alpha[v2]>c) alpha[v2] = c; }
 alpha[v1] = y[v1] * (sum - y[v2]*alpha[v2]);

 // Update the gradient...
 double dA1 = alpha[v1] - oldA1;
 double dA2 = alpha[v2] - oldA2;
 for (int i=0;i<Ny[0];i++)
 {
  gradient[i] += y[i] * y[v1] * cacheK(i,v1) * dA1;
  gradient[i] += y[i] * y[v2] * cacheK(i,v2) * dA2;
 }
}

// Deinitialise the cache...
cacheEnd();
"""
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from params import *
from smo_aux import *
from model import *
import numpy
from scipy.weave import inline
class SMO:
    """Implimentation of the 'Sequential Minimal Optimization' SVM solving method of Platt, using the WSS 3 pair selection method of Fan, Chen and Lin. This just solves for the alpha values - it is upto the wrapping code to do something useful with them. Makes extensive use of scipy.weave, so you need that working."""
    def __init__(self):
        """Initalises the parameters to a suitable default, but does not fill in a dataset - at the very least you will have to provide that."""
        self.params = Params()
        self.dataMatrix = None # 2D array - one feature vector per row.
        self.y = None # 1D array of +/-1 labels, aligned with the rows of dataMatrix.
        self.model = None # Model produced by the last call to solve().
        self.alpha = None # Alpha vector produced by the last call to solve().

    def setParams(self, params):
        """Sets the parameters, i.e. which model to fit."""
        self.params = params

    def getParams(self):
        """Returns the parameters object - by default this is a linear model with a C of 10.0"""
        return self.params

    def setData(self, dataMatrix, y=None):
        """Sets the data matrix and corresponding y vector of +/- 1 values. If given only one value this function assumes its a tuple of (dataMatrix,y), as returned by the Dataset getTrainData method."""
        # 'is None' rather than '==None' - y can be a numpy array, for which == broadcasts elementwise and the resulting array cannot be used as a truth value...
        if y is None:
            self.dataMatrix = dataMatrix[0]
            self.y = dataMatrix[1]
        else:
            self.dataMatrix = dataMatrix
            self.y = y
        assert self.y.shape[0]==self.dataMatrix.shape[0] , 'dataMatrix and feature vector lengths do not match.'

    def getDataMatrix(self):
        """Returns the current data matrix, where each row is a feature vector."""
        return self.dataMatrix

    def getY(self):
        """Returns the y vector, that is the labels for the feature vector."""
        return self.y

    def solve(self, alpha = None):
        """Solves for the current information and replaces the current model, if any. You can optionally provide an alpha vector of alpha values for each vector - this can speed up convergance if initialised better than the typical zeroed vector."""
        # Check for having no samples of one type - handle elegantly...
        dm = self.dataMatrix
        y = self.y
        pCount = numpy.nonzero(y>0)[0].shape[0]
        nCount = y.shape[0] - pCount
        if pCount==0 or nCount==0:
            # Degenerate case - no support vectors; b alone votes for whichever class is present...
            self.alpha = numpy.zeros(0,dtype=numpy.float_)
            if pCount>0: b = 1.0
            else: b = -1.0
            self.model = Model(self.params, numpy.zeros((0,self.dataMatrix.shape[1]),dtype=numpy.float_), numpy.zeros(0,dtype=numpy.float_), b)
            return

        # First do the heavy weight task - calculate the alpha weights for the vectors...
        support = self.params.getCode()
        support += cacheCode
        kernelKey = '// Kernel = '+self.params.kernelKey()+'\n' # Embedded comment makes weave recompile when the kernel changes.

        # 'is None' rather than '==None' - alpha is typically a numpy array, for which ==None misbehaves (elementwise comparison)...
        if alpha is None: alpha = numpy.zeros(self.y.shape[0], dtype=numpy.double)
        else: alpha = alpha.copy()
        gradient = numpy.ones(self.y.shape[0],dtype=numpy.double)
        gradient *= -1.0

        if self.params.getRebalance():
            # Scale the per-class caps so both classes contribute equal total weight...
            r = self.params.getC() * y.shape[0]/(2.0*pCount*nCount)
            cp = r * nCount
            cn = r * pCount
        else:
            cp = self.params.getC()
            cn = self.params.getC()

        inline(kernelKey+smoCoreCode, ['dm','y','alpha','gradient','cp','cn'], support_code = support)

        # Now build the model so far, but set b to zero...
        self.alpha = alpha
        indices = numpy.nonzero(alpha>=1e-3)[0]
        self.model = Model(self.params, self.dataMatrix[indices], numpy.asfarray(self.y[indices]) * alpha[indices], 0.0)

        # Finally, calculate the b offset value and stuff it into the model - its easier this way as we can use the model to calculate the offsets...
        # (Note the below code 'handles' the scenario where all vectors are at 0 or c - this shouldn't happen, but better safe than screwed by numerical error.)
        minB = -1e100
        maxB = 1e100
        actualB = 0.0
        numActualB = 0
        dec = self.model.multiDecision(self.dataMatrix)
        for i in xrange(y.shape[0]):
            if self.y[i]<0: cap = cn
            else: cap = cp

            if alpha[i]<1e-3:
                # Non-support vector - only bounds b from one side...
                if y[i]<0:
                    maxB = min((maxB,self.y[i] - dec[i]))
                else:
                    minB = max((minB,self.y[i] - dec[i]))
            elif alpha[i]>(cap-1e-3):
                # Vector at the cost cap - bounds b from the other side...
                if y[i]<0:
                    minB = max((minB,self.y[i] - dec[i]))
                else:
                    maxB = min((maxB,self.y[i] - dec[i]))
            else:
                # Unbound support vector - gives an exact estimate of b; accumulate an incremental mean of such estimates...
                numActualB += 1
                actualB += (self.y[i] - dec[i] - actualB) / float(numActualB)

        if numActualB>0:
            self.model.b = actualB
        else:
            self.model.b = 0.5*(minB + maxB)

    def getModel(self):
        """Returns the model from the last call to solve, or None if solve has never been called."""
        return self.model

    def getAlpha(self):
        """Returns the alpha vector that the algorithm converged to - can be useful for initialising a new but similar model."""
        return self.alpha

    def getIndices(self):
        """Returns an array of the indices of the vectors from the input dataset that form the support vectors of the current model, or None if solve has never been called."""
        return numpy.nonzero(self.alpha>=1e-3)[0]
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from params import Params
from smo import SMO
from loo import looPair, looPairRange, looPairBrute
import math
import time
import multiprocessing as mp
import numpy
def mpLooPairRange(params, data, lNeg, lPos, looDist):
    """Multiprocess wrapper around looPairRange (not looPair, despite the name pattern) - passes the label indices through so the result can be routed back to the right pair, returning a flat (neg label index, pos label index, loo score, model) tuple that can be pickled across processes."""
    model = looPairRange(params, data, looDist)
    return (lNeg,lPos,model[0],model[1])
class MultiModel:
    """This represents a model with multiple labels - uses one against one voting. Even if you only have two labels you are best off using this interface, as it makes everything neat. Supports model selection as well."""
    def __init__(self, params, dataset, weightSVM = True, callback = None, pool = None, looDist = 1.1):
        """Trains the model given the dataset and either a params object or a iterator of params objects. If a list it trys all entrys of the list for each pairing, and selects the one that gives the best loo, i.e. does model selection. If weightSVM is True (The default) then it makes use of the leave one out scores calculated during model selection to weight the classification boundaries - this can result in slightly better behavour at the meeting points of multiple classes in feature space. The pool parameter can be passed in a Pool() object from the multiprocessing python module, or set to True to have it create an instance itself. This enables multiprocessor mode for doing each loo calculation required - good if you have lots of models to test and/or lots of labels."""
        self.weightSVM = weightSVM

        # Get a list of labels, create all the relevant pairings. A mapping from labels to numbers is used...
        self.labels = dataset.getLabels()
        self.labelToNum = dict()
        for i,label in enumerate(self.labels):
            self.labelToNum[label] = i

        # One entry per unordered label pair - filled in below with (loo score, model) tuples...
        self.models = dict()
        for lNeg in xrange(len(self.labels)):
            for lPos in xrange(lNeg+1,len(self.labels)):
                self.models[(lNeg,lPos)] = None

        # Generate the list of models that need solving...
        solveList = []
        for lNeg,lPos in self.models.keys():
            if isinstance(params,Params):
                solveList.append((lNeg,lPos,params))
            else:
                for p in params:
                    solveList.append((lNeg,lPos,p))

        # Loop through all models and solve them, reporting progress if required...
        # ('is None' used throughout instead of '==None' - the stored entries can contain numpy data, and it is the correct idiom regardless.)
        if pool is None:
            # Single process implimentation...
            for i,data in enumerate(solveList):
                lNeg,lPos,params = data
                if callback: callback(i,len(solveList))
                model = looPairRange(params, dataset.getTrainData(self.labels[lNeg], self.labels[lPos]), looDist)
                if self.models[lNeg,lPos] is None or model[0]>self.models[lNeg,lPos][0]:
                    self.models[lNeg,lPos] = model
        else:
            # Multiprocess implimentation...
            # Create a pool if it hasn't been provided...
            if type(pool)==type(True):
                pool = mp.Pool()

            # Callback for when each job completes - runs in this process, so updating self is safe...
            self.numComplete = 0
            if callback: callback(self.numComplete,len(solveList))

            def taskComplete(ret):
                self.numComplete += 1
                if callback: callback(self.numComplete,len(solveList))

                lNeg = ret[0]
                lPos = ret[1]
                model = (ret[2],ret[3])

                if self.models[lNeg,lPos] is None or model[0]>self.models[lNeg,lPos][0]:
                    self.models[lNeg,lPos] = model

            try:
                # Create all the jobs, set them running...
                jobs = []
                for lNeg,lPos,params in solveList:
                    jobs.append(pool.apply_async(mpLooPairRange,(params,dataset.getTrainData(self.labels[lNeg], self.labels[lPos]), lNeg, lPos, looDist), callback = taskComplete))
            finally:
                # Wait for them all to complete...
                while len(jobs)!=0:
                    if jobs[0].ready():
                        del jobs[0]
                        continue
                    time.sleep(0.1)

    def getLabels(self):
        """Returns a list of the labels supported."""
        return self.labels

    def getModel(self,labA,labB):
        """Returns a tuple of (model,neg label,pos label, loo) where model is the model between the pair and the two labels indicate which label is associated with the negative result and which with the positive result. loo is the leave one out score of this particular boundary."""
        la = self.labelToNum[labA]
        lb = self.labelToNum[labB]
        if la<lb:
            return (self.models[(la,lb)][1],labA,labB,self.models[(la,lb)][0])
        else:
            return (self.models[(lb,la)][1],labB,labA,self.models[(lb,la)][0])

    def paramsList(self):
        """Returns a list of parameters objects used by the model - good for curiosity."""
        return map(lambda x: x[1].getParams(),self.models.values())

    def classify(self,feature):
        """Classifies a single feature vector - returns the most likelly label."""
        if self.weightSVM:
            # Each boundary votes with a cost of -log(loo) for the side it picks and -log(1-loo) for the other side; lowest total cost wins...
            cost = numpy.zeros(len(self.labels),dtype=numpy.float_)
            for lNeg,lPos in self.models.keys():
                m = self.models[lNeg,lPos]
                cg = -math.log(max((m[0],1e-3))) # max required incase its zero.
                cb = -math.log(max((1.0-m[0],1e-3))) # max required incase its perfect.
                val = m[1].classify(feature)
                if val<0:
                    cost[lNeg] += cg
                    cost[lPos] += cb
                else:
                    cost[lNeg] += cb
                    cost[lPos] += cg
            return self.labels[cost.argmin()]
        else:
            # Plain one-against-one voting...
            score = numpy.zeros(len(self.labels),dtype=numpy.int_)
            for lNeg,lPos in self.models.keys():
                val = self.models[lNeg,lPos][1].classify(feature)
                if val<0: score[lNeg] += 1
                else: score[lPos] += 1
            return self.labels[score.argmax()]

    def multiClassify(self,features):
        """Given a matrix where every row is a feature - returns a list of labels for the rows."""
        if self.weightSVM:
            cost = numpy.zeros((features.shape[0], len(self.labels)), dtype=numpy.float_)
            for lNeg,lPos in self.models.keys():
                m = self.models[lNeg,lPos]
                cg = -math.log(max((m[0],1e-3))) # max required incase its zero - fixed to match classify, which already clamped.
                cb = -math.log(max((1.0-m[0],1e-3))) # max required incase its perfect.
                vals = m[1].multiClassify(features)
                cost[numpy.nonzero(vals<0)[0],lNeg] += cg
                cost[numpy.nonzero(vals>0)[0],lNeg] += cb
                cost[numpy.nonzero(vals>0)[0],lPos] += cg
                cost[numpy.nonzero(vals<0)[0],lPos] += cb
            ret = []
            for i in xrange(features.shape[0]):
                ret.append(self.labels[cost[i,:].argmin()])
            return ret
        else:
            score = numpy.zeros((features.shape[0], len(self.labels)), dtype=numpy.int_)
            for lNeg,lPos in self.models.keys():
                vals = self.models[lNeg,lPos][1].multiClassify(features)
                score[numpy.nonzero(vals<0)[0],lNeg] += 1
                score[numpy.nonzero(vals>0)[0],lPos] += 1
            ret = []
            for i in xrange(features.shape[0]):
                ret.append(self.labels[score[i,:].argmax()])
            return ret
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from smo import SMO
import copy
import numpy
def looPair(params,data):
    """Takes a parameters object plus a (data matrix, y) pair (as returned by dataset.getTrainData) and returns a tuple of (approximate leave one out score, model trained on *all* the data). The approximation assumes that removing a non-supporting vector never changes the decision, which holds the vast majority of the time - consequently only the support vectors need a retrain, keeping this relativly fast."""
    feats, labels = data
    total = labels.shape[0]

    # Fit a model to the complete dataset - this is also the model that gets returned...
    solver = SMO()
    solver.setParams(params)
    solver.setData(feats, labels)
    solver.solve()
    fullModel = copy.deepcopy(solver.getModel())

    # Non-supporting vectors are assumed to classify identically without themselves - count how many of them the full model gets right...
    support = solver.getIndices()
    margin = fullModel.multiClassify(feats) * labels
    correct = (margin>0).sum() - (margin[support]>0).sum()

    # Each support vector gets a retrain without it, followed by a test on it...
    for index in support:
        remain = numpy.array(range(index) + range(index+1, total))
        solver.setData(feats[remain], labels[remain])
        solver.solve()
        if solver.getModel().classify(feats[index]) * labels[index] > 0:
            correct += 1

    # Hand back the loo estimate alongside the everything-model...
    return (float(correct)/float(total), fullModel)
def looPairRange(params, data, dist = 1.1):
    """Identical to looPair, except you specifiy a distance from the boundary and it retrains for all points within that range, but not for ones outside that range. For a value of one, ignoring rounding error, it should be identical to looPair, though in practise you should never do this - dist should always be >1.0."""
    dataMatrix,y = data

    # First train on all the data...
    smo = SMO()
    smo.setParams(params)
    smo.setData(dataMatrix,y)
    smo.solve()
    onAll = copy.deepcopy(smo.getModel())

    # Get set of indices to retrain with, collate statistics for all the remaining vectors...
    # (Fixed: multiDecision gives the real-valued decision function, so scores is the signed margin of each vector. Previously multiClassify was used - its +/-1 output is always below dist, which made every single vector get retrained, i.e. brute force.)
    scores = onAll.multiDecision(dataMatrix)*y
    indices = numpy.nonzero(scores<dist)[0]
    correct = (scores>0).sum() - (scores[indices]>0).sum()

    # Now iterate and retrain without each of the candidate vectors, collating the statistics...
    for i in xrange(indices.shape[0]):
        index = indices[i]
        noIndex = numpy.array(range(index)+range(index+1,y.shape[0]))
        smo.setData(dataMatrix[noIndex],y[noIndex])
        smo.solve()
        res = smo.getModel().classify(dataMatrix[index]) * y[index]
        if res>0: correct += 1

    # Return the loo and initial trainning on all the data...
    return (float(correct)/float(y.shape[0]),onAll)
def looPairBrute(params,data):
    """Computes the leave one out score exactly - every vector is held out in turn and the model retrained from scratch, so there is no approximation here. Returns a (loo score, model trained on all the data) pair, exactly as looPair does."""
    feats, labels = data
    total = labels.shape[0]

    # Model over the full dataset, kept for returning...
    solver = SMO()
    solver.setParams(params)
    solver.setData(feats, labels)
    solver.solve()
    fullModel = copy.deepcopy(solver.getModel())

    # Hold out each vector in turn, retrain, then test on the held out vector...
    correct = 0
    for i in xrange(total):
        remain = numpy.array(range(i) + range(i+1, total))
        solver.setData(feats[remain], labels[remain])
        solver.solve()
        if solver.getModel().classify(feats[i]) * labels[i] > 0:
            correct += 1

    return (float(correct)/float(total), fullModel)
def looPairSelect(paramsList,data):
    """Given an iterator of parameters this returns a pair of the loo score and model of the best set of parameters - just loops over looPair keeping the highest scoring result."""
    best = None
    for params in paramsList:
        res = looPair(params,data)
        # 'is None' rather than '==None' - correct idiom, and res can contain numpy data for which == misbehaves...
        if best is None or res[0]>best[0]:
            best = res
    return best
| Python |
# -*- coding: utf-8 -*-
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from params import Kernel
from params import Params
from copy import copy
class ParamsRange:
    """A parameters object where each variable takes a list rather than a single value - it then pretends to be a list of Params objects, which consists of every combination implied by the ranges."""
    def __init__(self):
        """Initialises to contain just the default."""
        self.c = [10.0]
        self.rebalance = [True]

        # Which kernel to use and two parameters whose meanings depend on the kernel...
        self.kernel = [Kernel.linear]
        self.p1 = [1.0]
        self.p2 = [1.0]

    def getCList(self):
        """Returns the list of c parameters."""
        return self.c

    def setCList(self,c):
        """Sets the list of c values."""
        self.c = c

    def getRebalanceList(self):
        """Returns the list of rebalance options - can only ever be two"""
        return self.rebalance

    def setRebalanceList(self,rebalance):
        """Sets if c is rebalanced or not."""
        self.rebalance = rebalance

    def setKernelList(self,kernel):
        """Sets the list of kernels."""
        self.kernel = kernel

    def setP1List(self,p1):
        """Sets the list of P1 values."""
        self.p1 = p1

    def setP2List(self,p2):
        """Sets the list of P2 values."""
        self.p2 = p2

    def getKernelList(self):
        """Returns the list of kernels."""
        return self.kernel

    def getP1List(self):
        """returns the list of kernel parameters 1, not always used."""
        return self.p1

    def getP2List(self):
        """returns the list of kernel parameters 2, not always used."""
        return self.p2

    def permutations(self):
        """Generator that yields a copy of a Params object for every combination of the stored value lists."""
        p = Params()
        for c in self.c:
            p.setC(c)
            for rebalance in self.rebalance:
                p.setRebalance(rebalance) # Fixed: previously passed c here by mistake, so the rebalance flag was never actually varied.
                for kernel in self.kernel:
                    p.setKernel(kernel)
                    for p1 in self.p1:
                        p.setP1(p1)
                        for p2 in self.p2:
                            p.setP2(p2)
                            yield copy(p)

    def __iter__(self):
        return self.permutations()
class ParamsSet:
    """Pretends to be a list of parameters, when instead it is a list of parameter ranges, where each set of ranges defines a search grid - used for model selection, typically by being passed as the params input to the MultiModel class."""
    def __init__(self, incDefault = False, incExtra = False):
        """Initialises the parameter set - with the default constructor this is empty. However, initalising it with ParamsSet(True) gets you a good default set to model select with (That is the addLinear and addPoly methods are called with default parameters.), whilst ParamsSet(True,True) gets you an insanely large default set for if your feeling particularly patient (It being all the add methods with default parameters.)."""
        self.ranges = []

        if incDefault:
            self.addLinear()
            self.addPoly()

        if incExtra:
            self.addHomoPoly()
            self.addBasisFuncs()
            self.addSigmoid()

    def addRange(self, ran):
        """Adds a new ParamsRange to the set."""
        self.ranges.append(ran)

    def addLinear(self, cExpLow = -3, cExpHigh = 3, cExp = 10.0, rebalance = True):
        """Adds a standard linear model to the set, with a range of c values. These values will range from cExp^cExpLow to cExp^cExpHigh, and by default are the set {0.001,0.01,0.1,1,10,100,1000}, which is typically good enough."""
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        self.addRange(ran)

    def addHomoPoly(self, maxDegree = 6, cExpLow = -3, cExpHigh = 3, cExp = 10.0, rebalance = True):
        """Adds the homogenous polynomial to the set, from an exponent of 2 to the given value inclusive, which defaults to 6. Same c controls as for addLinear."""
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        ran.setKernelList([Kernel.homo_polynomial])
        ran.setP1List(range(2,maxDegree+1))
        self.addRange(ran)

    def addPoly(self, maxDegree = 6, cExpLow = -3, cExpHigh = 3, cExp = 10.0, rebalance = True):
        """Adds the polynomial to the set, from an exponent of 2 to the given value inclusive, which defaults to 6. Same c controls as for addLinear."""
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        ran.setKernelList([Kernel.polynomial])
        ran.setP1List(range(2,maxDegree+1))
        self.addRange(ran)

    def addBasisFuncs(self, rExpHigh = 6, rExp = 2.0, sdExpHigh = 6, sdExp = 2.0, cExpLow = -3, cExpHigh = 3, cExp = 10.0, rebalance = True):
        """Adds the basis functions to the set, both Radial and Gaussian. The parameter for the radial basis functions go from rExp^0 to rExp^rExpHigh, whilst the parameter for the Gaussian does the same thing, but with the sd parameters. Same c controls as for addLinear."""
        # Radial basis functions...
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        ran.setKernelList([Kernel.rbf])
        ran.setP1List(map(lambda x:rExp**x,xrange(rExpHigh+1)))
        self.addRange(ran)

        # Gaussian basis functions...
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        ran.setKernelList([Kernel.gbf])
        ran.setP1List(map(lambda x:sdExp**x,xrange(sdExpHigh+1)))
        self.addRange(ran)

    def addSigmoid(self, sExpLow = -3, sExpHigh = 3, sExp = 10.0, oExpLow = -3, oExpHigh = 3, oExp = 10.0, cExpLow = -3, cExpHigh = 3, cExp = 10.0, rebalance = True):
        """Add sigmoids to the set - the parameters use s for the scale component and o for the offset component; these parameters use the same exponential scheme as for c and others. Same c controls as for addLinear."""
        ran = ParamsRange()
        ran.setCList(map(lambda x:cExp**x,xrange(cExpLow,cExpHigh+1)))
        ran.setRebalanceList([rebalance])
        ran.setKernelList([Kernel.sigmoid])
        ran.setP1List(map(lambda x:oExp**x,xrange(oExpLow,oExpHigh+1)))
        ran.setP2List(map(lambda x:sExp**x,xrange(sExpLow,sExpHigh+1)))
        self.addRange(ran)

    def permutations(self):
        """Generator that yields every Params object implied by every contained range, in order."""
        for ran in self.ranges:
            for p in ran:
                yield p

    def __iter__(self):
        return self.permutations()
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
from params import *
from scipy.weave import inline
class Model:
    """Defines a model - this will consist of a parameters object to define the kernel (C is ignored, but will be the same as the trainning parameter if needed for reference.), a list of support vectors in a dataMatrix and then a vector of weights, plus the b parameter. The weights are the multiple of the y value and alpha value. Uses weave to make evaluation of new features fast."""
    def __init__(self, params, supportVectors, supportWeights, b):
        """Sets up a model given the parameters. Note that if given a linear kernel and multiple support vectors it does the obvious optimisation."""
        self.params = params
        self.supportVectors = supportVectors # 2D array - one support vector per row.
        self.supportWeights = supportWeights # 1D array - y[i]*alpha[i] per support vector.
        self.b = b # Additive offset of the decision function.

        # Get the kernel code ready for the weave call...
        self.kernel = self.params.getCode()
        self.kernelKey = self.params.kernelKey() # Embedded as a comment in each weave block so a kernel change forces recompilation.

        # Optimise the linear kernel if needed...
        # (For a linear kernel the weighted sum of dot products collapses into a single dot product with one precomputed vector of weight 1.)
        if self.params.getKernel()==Kernel.linear and len(self.supportWeights)>1:
            self.supportVectors = (self.supportVectors.T * self.supportWeights).sum(axis=1)
            self.supportVectors = self.supportVectors.reshape((1, self.supportVectors.shape[0]))
            self.supportWeights = numpy.array((1.0,), dtype=numpy.double)

    def getParams(self):
        """Returns the parameters the svm was trainned with."""
        return self.params

    def getSupportVectors(self):
        """Returns a 2D array where each row is a support vector."""
        return self.supportVectors

    def getSupportWeights(self):
        """Returns the vector of weights matching the support vectors."""
        return self.supportWeights

    def getB(self):
        """Returns the addative offset of the function defined by the support vectors to locate the decision boundary at 0."""
        return self.b

    def decision(self,feature):
        """Given a feature vector this returns its decision boundary evaluation, specifically the weighted sum of each of the kernel evaluations for the support vectors against the given feature vector, plus b."""
        # SW1/SV2 are the weave-generated accessor macros for the sw and sv arrays; Nsw/Nsv give their shapes...
        code = '// Kernel = '+self.kernelKey+'\n' + """
        double ret = b;
        for (int v=0;v<Nsw[0];v++)
        {
         ret += SW1(v) * kernel(Nsv[1],feature,&SV2(v,0));
        }
        return_val = ret;
        """
        sv = self.supportVectors
        sw = self.supportWeights
        b = self.b
        return inline(code,['feature','sv','sw','b'], support_code=self.kernel)

    def classify(self,feature):
        """Classifies a single feature vector - returns -1 or +1 depending on its class. Just the sign of the decision method."""
        if self.decision(feature)<0.0: return -1
        else: return 1

    def multiDecision(self,features):
        """Given a matrix where every row is a feature returns the decision boundary evaluation for each feature as an array of values."""
        # Same evaluation as decision(), but done for every row of features in one weave call, writing into the preallocated ret array...
        code = '// Kernel = '+self.kernelKey+'\n' + """
        for (int f=0;f<Nfeatures[0];f++)
        {
         RET1(f) = b;
         for (int v=0;v<Nsw[0];v++)
         {
          RET1(f) += SW1(v) * kernel(Nsv[1],&FEATURES2(f,0),&SV2(v,0));
         }
        }
        """
        sv = self.supportVectors
        sw = self.supportWeights
        b = self.b
        ret = numpy.empty(features.shape[0], dtype=numpy.float_)
        inline(code, ['features','sv','sw','b','ret'], support_code=self.kernel)
        return ret

    def multiClassify(self,features):
        """Given a matrix where every row is a feature returns - returns -1 or +1 depending on the class of each vector, as an array. Just the sign of the multiDecision method. Be warned the classification vector is returned with a type of int8."""
        dec = self.multiDecision(features)
        ret = numpy.zeros(features.shape[0],dtype=numpy.int8)
        code = """
        for (int i=0;i<Ndec[0];i++)
        {
         if (dec[i]<0.0) ret[i] = -1;
         else ret[i] = 1;
        }
        """
        inline(code,['dec','ret'])
        return ret
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
from params import Kernel
from params import Params
from params_sets import ParamsRange
from params_sets import ParamsSet
from dataset import Dataset
from model import Model
from loo import looPair
from loo import looPairRange
from loo import looPairBrute
from loo import looPairSelect
from multiclass import MultiModel
import smo
def solvePair(params,dataset,negLabel,posLabel):
    """Trains a single SVM separating two labels from the given dataset - negLabel is mapped to -1 and posLabel to +1. Returns the resulting Model object."""
    solver = smo.SMO()
    solver.setParams(params)
    solver.setData(dataset.getTrainData(negLabel,posLabel))
    solver.solve()
    return solver.getModel()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Some basic matrix operations that come in use...
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE
template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
while(count!=0)
{
T t = *lhs;
*lhs = *rhs;
*rhs = t;
++lhs;
++rhs;
--count;
}
}
// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
if (stride==-1) stride = size;
if (size==1) return pos[0];
else
{
if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
else
{
T ret = 0.0;
for (int i=0; i<size; i++)
{
if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
if ((i+size)%2) ret += sub;
else ret -= sub;
}
for (int i=1; i<size; i++)
{
MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
}
return ret;
}
}
}
// Inverts a square matrix via Gauss-Jordan elimination with partial pivoting.
// Returns true on success; returns false for singular matrices (and, very
// rarely, for badly conditioned non-singular ones, as the pivot test uses an
// absolute 1e-6 threshold). in is the input matrix - be warned that it is
// destroyed in the process - and out receives the inverse. You must provide
// the size (it is square), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
 if (stride==-1) stride = size;

 // out starts as the identity, and has every row operation mirrored onto it...
 for (int row=0; row<size; row++)
 {
  for (int col=0; col<size; col++)
  {
   out[row*stride + col] = (col==row)?1.0:0.0;
  }
 }

 for (int row=0; row<size; row++)
 {
  // Partial pivoting - find the largest entry in this column at or below the
  // diagonal and swap its row up; if even that is (near) zero the matrix is
  // singular and we give up...
  T pivot = in[row*stride + row];
  int pivotRow = row;
  for (int r=row+1; r<size; r++)
  {
   if (fabs(in[r*stride + row])>fabs(pivot))
   {
    pivot = in[r*stride + row];
    pivotRow = r;
   }
  }

  if (pivotRow!=row)
  {
   MemSwap(&in[pivotRow*stride], &in[row*stride], size);
   MemSwap(&out[pivotRow*stride], &out[row*stride], size);
  }

  if (fabs(pivot-0.0)<1e-6) return false;

  // Scale the pivot row so the diagonal entry becomes one...
  pivot = 1.0/pivot;
  in[row*stride + row] = 1.0;
  for (int c=row+1; c<size; c++) in[row*stride + c] *= pivot;
  for (int c=0; c<size; c++) out[row*stride + c] *= pivot;

  // Subtract multiples of the pivot row from every other row, zeroing the
  // rest of this column so in converges on the identity...
  for (int r=0; r<size; r++)
  {
   if (r==row) continue;
   T factor = in[r*stride + row];
   in[r*stride + row] = 0.0;
   for (int c=row+1; c<size; c++) in[r*stride + c] -= factor * in[row*stride + c];
   for (int c=0; c<size; c++) out[r*stride + c] -= factor * out[row*stride + c];
  }
 }

 return true;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
class ProgBar:
  """Console progress bar. The bar starts when the object is constructed and completes when it is destroyed, so object lifetime delimits the processing being measured."""
  def __init__(self, width = 60, onCallback = None):
    """width gives the bar length in characters; onCallback, if provided, is invoked each time callback() is called."""
    self.width = width
    self.onCallback = onCallback
    self.fill = 0
    self.start = time.time()

    # Draw the empty bar outline...
    sys.stdout.write('_'*self.width + '\n')
    sys.stdout.flush()

  def __del__(self):
    # Complete the bar, then report how long everything took...
    self.end = time.time()
    self.__show(self.width)
    sys.stdout.write('\nDone - ' + str(self.end-self.start) + ' seconds\n\n')
    sys.stdout.flush()

  def callback(self, nDone, nToDo):
    """Hand this into code that reports progress - call it repeatedly with the number of completed work units (nDone) out of the total required (nToDo)."""
    if self.onCallback:
      self.onCallback()

    # Only the not-yet-drawn portion of the bar gets emitted...
    filled = int(float(self.width)*float(nDone)/float(nToDo))
    filled = min(filled, self.width)
    if filled > self.fill:
      self.__show(filled)

  def __show(self, n):
    # Emit the bar characters between the current fill level and n...
    sys.stdout.write('|'*(n-self.fill))
    sys.stdout.flush()
    self.fill = n
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pydoc
import inspect
class DocGen:
  """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code. Output is buffered and only written in full when the object is destroyed."""
  def __init__(self, name, title = None, summary = None):
    """name is the module name - primarilly used for the file names (<name>.html and <name>.wiki). title is the title used as applicable - if not provided it just uses the name. summary is an optional line to go below the title."""
    if title==None: title = name
    if summary==None: summary = title

    # pydoc does the heavy lifting for the html rendering...
    self.doc = pydoc.HTMLDoc()

    # html output - header written immediately, sections buffered until __del__...
    self.html = open('%s.html'%name,'w')
    self.html.write('<html>\n')
    self.html.write('<head>\n')
    self.html.write('<title>%s</title>\n'%title)
    self.html.write('</head>\n')
    self.html.write('<body>\n')

    self.html_variables = ''
    self.html_functions = ''
    self.html_classes = ''

    # wiki output, same arrangement...
    self.wiki = open('%s.wiki'%name,'w')
    self.wiki.write('#summary %s\n\n'%summary)
    self.wiki.write('= %s= \n\n'%title)

    self.wiki_variables = ''
    self.wiki_functions = ''
    self.wiki_classes = ''

  def __del__(self):
    # The buffered variable/function/class sections are only flushed here, on
    # destruction - the object has to die before the output files are complete...
    if self.html_variables!='':
      self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))

    if self.html_functions!='':
      self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))

    if self.html_classes!='':
      self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))

    self.html.write('</body>\n')
    self.html.write('</html>\n')
    self.html.close()

    if self.wiki_variables!='':
      self.wiki.write('= Variables =\n\n')
      self.wiki.write(self.wiki_variables)
      self.wiki.write('\n')

    if self.wiki_functions!='':
      self.wiki.write('= Functions =\n\n')
      self.wiki.write(self.wiki_functions)
      self.wiki.write('\n')

    if self.wiki_classes!='':
      self.wiki.write('= Classes =\n\n')
      self.wiki.write(self.wiki_classes)
      self.wiki.write('\n')

    self.wiki.close()

  def addFile(self, fn, title, fls = True):
    """Given a filename and section title adds the contents of said file to the output. fls makes the first line bold (html) / starred (wiki); lines mentioning '<something>.py - ' or '<something>.txt - ' get the file name emphasised."""
    html = []
    wiki = []
    for i, line in enumerate(open(fn,'r').readlines()):
      # html version of the line...
      hl = line.replace('\n', '')
      if i==0 and fls:
        hl = '<strong>' + hl + '</strong>'
      for ext in ['py','txt']:
        if '.%s - '%ext in hl:
          s = hl.split('.%s - '%ext, 1)
          hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
      html.append(hl)

      # wiki version of the same line...
      wl = line.strip()
      if i==0 and fls:
        wl = '*%s*'%wl
      for ext in ['py','txt']:
        if '.%s - '%ext in wl:
          s = wl.split('.%s - '%ext, 1)
          wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
      wiki.append(wl)

    self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))

    self.wiki.write('== %s ==\n'%title)
    self.wiki.write('\n'.join(wiki))
    self.wiki.write('----\n\n')

  def addVariable(self, var, desc):
    """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
    self.html_variables += '<strong>%s</strong><br/>'%var
    self.html_variables += '%s<br/><br/>\n'%desc

    self.wiki_variables += '*`%s`*\n'%var
    self.wiki_variables += ' %s\n\n'%desc

  def addFunction(self, func):
    """Adds a function to the documentation. You provide the actual function instance."""
    # html - pydoc does the work.
    # NOTE(review): this replace looks like a no-op as rendered - the first
    # argument was presumably '&nbsp;' or a non-breaking space character that
    # got mangled; verify against the original file.
    self.html_functions += self.doc.docroutine(func).replace(' ',' ')
    self.html_functions += '\n'

    # wiki - reconstruct the signature by hand from the argspec...
    name = func.__name__
    args, varargs, keywords, defaults = inspect.getargspec(func)
    doc = inspect.getdoc(func)

    # Left-pad defaults with None so it aligns with args - note that a real
    # default of None is therefore indistinguishable from 'no default' here...
    if defaults==None: defaults = list()
    defaults = (len(args)-len(defaults)) * [None] + list(defaults)

    arg_str = ''
    if len(args)!=0:
      arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
    if varargs!=None:
      arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
    if keywords!=None:
      arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

    self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
    self.wiki_functions += ' %s\n\n'%doc

  def addClass(self, cls):
    """Adds a class to the documentation. You provide the actual class object."""
    # html - pydoc does the work. NOTE(review): same suspicious replace as in
    # addFunction - presumably '&nbsp;' originally; verify.
    self.html_classes += self.doc.docclass(cls).replace(' ',' ')
    self.html_classes += '\n'

    # wiki - class header, listing its ancestors...
    name = cls.__name__
    parents = filter(lambda a: a!=cls, inspect.getmro(cls))
    doc = inspect.getdoc(cls)

    par_str = ''
    if len(parents)!=0:
      par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))

    self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
    self.wiki_classes += ' %s\n\n'%doc

    # Methods, sorted alphabetically but with __init__ forced to the front
    # (via the '___' sort key, which sorts before any normal name)...
    methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
    def method_key(pair):
      if pair[0]=='__init__': return '___'
      else: return pair[0]
    methods.sort(key=method_key)

    for name, method in methods:
      # NOTE(review): 'not inspect.ismethod(method)' excludes every regular
      # (py2) method here, which makes the ismethod branch below unreachable -
      # this looks like it was meant to be an 'or'; confirm against the
      # generated wiki output before changing.
      if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
        if inspect.ismethod(method):
          args, varargs, keywords, defaults = inspect.getargspec(method)
        else:
          # C-implemented routines expose no argspec - show a placeholder...
          args = ['?']
          varargs = None
          keywords = None
          defaults = None

        # Same default-alignment trick as in addFunction...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)

        arg_str = ''
        if len(args)!=0:
          arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None:
          arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None:
          arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords

        # Walk up the mro to find a docstring if this class does not override it...
        def fetch_doc(cls, name):
          try:
            method = getattr(cls, name)
            if method.__doc__!=None: return inspect.getdoc(method)
          except: pass

          for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
            ret = fetch_doc(parent, name)
            if ret!=None: return ret
          return None

        doc = fetch_doc(cls, name)

        self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_classes += ' %s\n\n'%doc

    # Class-level constants/descriptors...
    variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
    for name, var in variables:
      if not name.startswith('__'):
        # NOTE(review): plain ints/strings/floats also have a __doc__ (their
        # type's docstring), so the str(var) fallback may never trigger and the
        # wiki likely shows type documentation instead of the value; verify.
        if hasattr(var, '__doc__'): d = var.__doc__
        else: d = str(var)
        self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import random
import math
from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp
# Provides various gamma-related functions...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE
#include <cmath>
// Returns the natural logarithm of the Gamma function, using Lanczos's
// approximation (g=7, 9 coefficients). Inputs below 0.5 are handled by the
// reflection formula, as the approximation is not valid there.
double lnGamma(double z)
{
 static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};

 if (z<0.5)
 {
  // Reflection: Gamma(z) * Gamma(1-z) = pi / sin(pi*z), taken in log space...
  return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
 }

 // Lanczos partial fraction sum...
 double sum = coeff[0];
 for (int i=1;i<9;i++) sum += coeff[i]/(z+i-1);

 double base = z + 6.5; // z + g - 0.5, in terms of the shifted argument.
 return log(sqrt(2.0*M_PI)) + (z-0.5)*log(base) - base + log(sum);
}
// The digamma function, i.e. the derivative of the log of the Gamma function.
// Uses the recurrence psi(z) = psi(z+1) - 1/z to push the argument above a
// threshold, then a 4-term truncation of the asymptotic series, which is
// accurate for large arguments.
double digamma(double z)
{
 // With this threshold the final series term is of order 1e-10, so expect at
 // least 9 digits of accuracy past the decimal point...
 static const double highVal = 13.0;

 double ret = 0.0;
 for (; z<highVal; z+=1.0) ret -= 1.0/z;

 // Asymptotic series: ln z - 1/(2z) - 1/(12z^2) + 1/(120z^4) - 1/(252z^6)...
 double inv = 1.0/z;
 double inv2 = inv*inv;
 double inv4 = inv2*inv2;
 double inv6 = inv4*inv2;
 ret += log(z) - inv/2.0 - inv2/12.0 + inv4/120.0 - inv6/252.0;

 return ret;
}
// The trigamma function - same approach as digamma: the recurrence
// psi1(z) = psi1(z+1) + 1/z^2 lifts the argument above a threshold, then a
// truncated series that is accurate for large values finishes the job.
double trigamma(double z)
{
 static const double highVal = 8.0;

 double ret = 0.0;
 for (; z<highVal; z+=1.0) ret += 1.0/(z*z);

 // The series below is expressed in terms of z-1, absorbing one recurrence
 // step into the expansion...
 z -= 1.0;
 double inv = 1.0/z;
 double inv2 = inv*inv;
 double inv3 = inv*inv2;
 double inv5 = inv3*inv2;
 double inv7 = inv5*inv2;
 double inv9 = inv7*inv2;
 ret += inv - 0.5*inv2 + inv3/6.0 - inv5/30.0 + inv7/42.0 - inv9/30.0;

 return ret;
}
#endif
"""
def lnGamma(z):
  """Pointless as scipy, a library this is dependent on, provides this (gammaln), but useful for testing the inline C++ implementation. Returns the logarithm of the gamma function, evaluated by the C++ lnGamma in gamma_code."""
  # start_cpp folds the hash of gamma_code into the inline snippet, forcing
  # weave to recompile whenever the support code changes...
  code = start_cpp(gamma_code) + """
  return_val = lnGamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)
def digamma(z):
  """Pointless as scipy, a library this is dependent on, provides this (psi), but useful for testing the inline C++ implementation. Returns an evaluation of the digamma function."""
  # start_cpp folds the hash of gamma_code into the snippet so edits to the
  # support code trigger a weave recompile...
  code = start_cpp(gamma_code) + """
  return_val = digamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)
def trigamma(z):
  """Pointless as scipy, a library this is dependent on, provides this (polygamma(1, z)), but useful for testing the inline C++ implementation. Returns an evaluation of the trigamma function."""
  # start_cpp folds the hash of gamma_code into the snippet so edits to the
  # support code trigger a weave recompile...
  code = start_cpp(gamma_code) + """
  return_val = trigamma(z);
  """
  return weave.inline(code, ['z'], support_code=gamma_code)
class TestFuncs(unittest.TestCase):
  """Test code for the assorted gamma-related functions - each compares the inline C++ implementation against its scipy reference over random inputs."""
  def test_compile(self):
    # Just verifies that gamma_code compiles cleanly under weave...
    code = start_cpp(gamma_code) + """
    """
    weave.inline(code, support_code=gamma_code)

  def test_error_lngamma(self):
    # Against scipy.special.gammaln - the Lanczos approximation should agree
    # to roughly 12 decimal digits...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = lnGamma(z)
      good = gammaln(z)
      assert(math.fabs(own-good)<1e-12)

  def test_error_digamma(self):
    # Against scipy.special.psi - the series truncation limits this to about
    # 9 digits...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = digamma(z)
      good = psi(z)
      assert(math.fabs(own-good)<1e-9)

  def test_error_trigamma(self):
    # Against scipy.special.polygamma(1, z)...
    for _ in xrange(1000):
      z = random.uniform(0.01, 100.0)
      own = trigamma(z)
      good = polygamma(1,z)
      assert(math.fabs(own-good)<1e-9)
# If this file is run do the unit tests...
if __name__ == '__main__':
unittest.main()
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import hashlib
def start_cpp(hash_str = None):
  """Does two jobs for scipy.weave code. First, it emits a #line directive pointing at the calling python file, so compiler errors reference useful locations for debugging. Second, it can optionally embed a hash of some other code into the returned line: weave only recompiles when the hash of the inline code changes and ignores support_code, so passing the support code through here forces a recompile whenever it is edited. Typical usage: <code variable> = start_cpp([support_code variable]) + <triple-quoted block of C++>."""
  # The caller's frame gives the file name and line number for #line...
  caller = inspect.currentframe().f_back
  info = inspect.getframeinfo(caller)

  if hash_str==None:
    return '#line %i "%s"\n'%(info[1],info[0])

  # Append the md5 of the support code as a comment - changing it changes the
  # inline code's hash, which is what triggers weave's recompile...
  digest = hashlib.md5()
  digest.update(hash_str)
  return '#line %i "%s" // %s\n'%(info[1],info[0],digest.hexdigest())
| Python |
# -*- coding: utf-8 -*-
# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).
import cv
import numpy as np
def cv2array(im):
  """Converts a cv (IplImage) array to a numpy array of shape (height, width, channels). The pixel data is copied, not shared. Raises KeyError if the image depth has no numpy equivalent listed below."""
  depth2dtype = {
    cv.IPL_DEPTH_8U: 'uint8',
    cv.IPL_DEPTH_8S: 'int8',
    cv.IPL_DEPTH_16U: 'uint16',
    cv.IPL_DEPTH_16S: 'int16',
    cv.IPL_DEPTH_32S: 'int32',
    cv.IPL_DEPTH_32F: 'float32',
    cv.IPL_DEPTH_64F: 'float64',
  }

  # (An unused local that stored im.depth has been removed - the depth is
  # looked up directly in the table below.)
  a = np.fromstring(
    im.tostring(),
    dtype=depth2dtype[im.depth],
    count=im.width*im.height*im.nChannels)
  a.shape = (im.height,im.width,im.nChannels)
  return a
def array2cv(a):
  """Converts a numpy array to a cv array, if possible. 2D arrays become single channel images; 3D arrays use the third axis as the channel count. Raises KeyError if the dtype has no cv equivalent listed below."""
  dtype2depth = {
    'uint8': cv.IPL_DEPTH_8U,
    'int8': cv.IPL_DEPTH_8S,
    'uint16': cv.IPL_DEPTH_16U,
    'int16': cv.IPL_DEPTH_16S,
    'int32': cv.IPL_DEPTH_32S,
    'float32': cv.IPL_DEPTH_32F,
    'float64': cv.IPL_DEPTH_64F,
  }

  # Greyscale arrays arrive as 2D - treat a missing third axis as one channel.
  # (Replaces a bare except that could have hidden unrelated errors.)
  nChannels = a.shape[2] if len(a.shape)>2 else 1

  cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
                               dtype2depth[str(a.dtype)],
                               nChannels)
  cv.SetData(cv_im, a.tostring(),
             a.dtype.itemsize*nChannels*a.shape[1])
  return cv_im
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal
import unittest
def repeat(x):
  """A generator that yields x forever - handy with mp_map for arguments that should stay constant across every call."""
  while 1:
    yield x
def run_code(code, args):
  """Internal use function that does the work in each process - rebuilds a function from its marshalled bytecode and applies it to the given argument tuple."""
  func = types.FunctionType(marshal.loads(code), globals(), '_')
  return func(*args)
def mp_map(func, *iters, **keywords):
  """A multiprocess version of map. func must limit itself to the data handed to it - anything else it touches (globals, closed-over locals) will not survive the trip to a worker process; the repeat generator in this module helps with constant arguments. Unlike map this iterates the shortest of the inputs rather than the longest, which is what makes infinite generators such as repeat usable. An existing pool can be supplied via the 'pool' keyword; otherwise a fresh default pool is created."""
  # Use the caller-supplied pool when given, otherwise spin up a default one...
  if 'pool' in keywords: pool = keywords['pool']
  else: pool = mp.Pool()

  # Ship the function as marshalled bytecode - one async task per argument tuple...
  blob = marshal.dumps(func.func_code)
  pending = [pool.apply_async(run_code, (blob, args)) for args in zip(*iters)]

  # Collect the results, preserving input order...
  return [task.get() for task in pending]
class TestMpMap(unittest.TestCase):
  """Unit tests for mp_map - each pushes a simple pure function through the process pool and checks the result against the ordinary map equivalent."""
  def test_simple1(self):
    # Identity function over a small list...
    data = ['a','b','c','d']
    def noop(data):
      return data
    data_noop = mp_map(noop, data)
    self.assertEqual(data, data_noop)

  def test_simple2(self):
    # A lambda over a larger list...
    data = [x for x in xrange(1000)]
    data_double = mp_map(lambda a: a*2, data)
    self.assertEqual(map(lambda a: a*2,data), data_double)

  def test_gen(self):
    # mp_map must accept generators, not just sequences...
    def gen():
      for i in xrange(100): yield i
    data_double = mp_map(lambda a: a*2, gen())
    self.assertEqual(map(lambda a: a*2,gen()), data_double)

  def test_repeat(self):
    # The repeat generator supplies a constant second argument for every call...
    def mult(a,b):
      return a*b
    data = [x for x in xrange(50,5000,5)]
    data_triple = mp_map(mult, data, repeat(3))
    self.assertEqual(map(lambda a: a*3,data),data_triple)

  def test_none(self):
    # Empty input must give empty output...
    data = []
    data_sqr = mp_map(lambda x: x*x, data)
    self.assertEqual([],data_sqr)
if __name__ == '__main__':
unittest.main()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import tempfile
import shutil
from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util
try:
__default_compiler = distutils.ccompiler.new_compiler()
except:
__default_compiler = None
def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module on demand - it only compiles if the module does not exist or is older than the given source, after which the program can continue and immediatly import the just compiled module. Errors propagate as exceptions - catching them is the callers choice. name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want 'os.path.dirname(__file__)'; the compiled module is output to this directory. source is the filename of the source code, or a list of filenames - files ending in .h are treated as dependencies rather than compilation units. openCL indicates if OpenCL is used by the module, in which case the necessary include/library paths are configured - kept centralised here so per-platform changes happen in one place."""
  if __default_compiler==None: raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list): source = [source]
  source_path = [os.path.join(base, s) for s in source]
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # any() short circuits and, unlike the previous reduce, copes with an empty
  # source list (which now simply means there is nothing to do)...
  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # Backup the argv variable and create the temporary build directory - both
    # before the try block, so the finally clause below can never reference an
    # unbound name if either step throws. (A stray debugging print has also
    # been removed from here.)
    old_argv = sys.argv[:]
    temp_dir = tempfile.mkdtemp()
    try:
      # Prepare the extension - headers become dependencies, the rest is compiled...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]
      comp_path = [s for s in source_path if not s.endswith('.h')]
      depends = [s for s in source_path if s.endswith('.h')]
      if openCL:
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Restore argv and remove the temporary directory...
      sys.argv = old_argv
      shutil.rmtree(temp_dir, True)
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code
# Provides various functions to assist with manipulating python objects from c++ code.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE
// Extracts a boolean from an object attribute.
// NOTE(review): no NULL check - if the attribute does not exist
// PyObject_GetAttrString returns NULL and the Py_DECREF below will crash;
// confirm callers only request attributes that exist. Also note the result is
// 'attribute is not the False singleton', which is true for any non-False
// value (None, 0, etc.) - not general Python truthiness.
bool GetObjectBoolean(PyObject * obj, const char * name)
{
 PyObject * b = PyObject_GetAttrString(obj, name);
 bool ret = b!=Py_False;
 Py_DECREF(b);
 return ret;
}
// Extracts an int from an object attribute.
// NOTE(review): no error handling - a missing attribute gives NULL and the
// Py_DECREF crashes; a non-int attribute makes PyInt_AsLong return -1 with a
// Python exception pending. The long is also silently narrowed to int.
int GetObjectInt(PyObject * obj, const char * name)
{
 PyObject * i = PyObject_GetAttrString(obj, name);
 int ret = PyInt_AsLong(i);
 Py_DECREF(i);
 return ret;
}
// Extracts a float from an object attribute.
// NOTE(review): no error handling, same caveats as GetObjectInt above; the
// double from PyFloat_AsDouble is also silently narrowed to float.
float GetObjectFloat(PyObject * obj, const char * name)
{
 PyObject * f = PyObject_GetAttrString(obj, name);
 float ret = PyFloat_AsDouble(f);
 Py_DECREF(f);
 return ret;
}
// Extracts an array attribute from an object, returning it as a new[]
// unsigned char array - the caller takes ownership and must delete[] it. You
// can also pass in a pointer to an int to have the size of the array stored.
// NOTE(review): assumes the attribute really is a 1D numpy uint8 array - no
// type, rank or existence checking is done.
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 unsigned char * ret = new unsigned char[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
 Py_DECREF(nao);
 return ret;
}
// Extracts an array attribute from an object, returning it as a new[] float
// array - the caller takes ownership and must delete[] it. You can also pass
// in a pointer to an int to have the size of the array stored.
// NOTE(review): assumes the attribute really is a 1D numpy float32 array - no
// type, rank or existence checking is done.
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
 PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
 float * ret = new float[nao->dimensions[0]];
 if (size) *size = nao->dimensions[0];
 for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
 Py_DECREF(nao);
 return ret;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Defines helper functions for accessing numpy arrays...
numpy_util_code = start_cpp() + """
#ifndef NUMPY_UTIL_CODE
#define NUMPY_UTIL_CODE
// Element accessors for numpy arrays of float32 - they index via the data
// pointer and strides, so any memory layout works, and they return references
// so they can be used for writing as well as reading.
// NOTE: the dtype is assumed, not checked.
float & Float1D(PyArrayObject * arr, int index = 0)
{
 return *(float*)(arr->data + index*arr->strides[0]);
}

float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
 return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}

float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
 return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
// As above, but for numpy arrays of unsigned bytes (uint8) - dtype assumed,
// not checked.
unsigned char & Byte1D(PyArrayObject * arr, int index = 0)
{
 //assert(arr->strides[0]==sizeof(unsigned char));
 return *(unsigned char*)(arr->data + index*arr->strides[0]);
}

unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
 //assert(arr->strides[0]==sizeof(unsigned char));
 return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}

unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
 //assert(arr->strides[0]==sizeof(unsigned char));
 return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
// As above, but for numpy arrays of (C) int - dtype assumed, not checked.
int & Int1D(PyArrayObject * arr, int index = 0)
{
 //assert(arr->strides[0]==sizeof(int));
 return *(int*)(arr->data + index*arr->strides[0]);
}

int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
 //assert(arr->strides[0]==sizeof(int));
 return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}

int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
 //assert(arr->strides[0]==sizeof(int));
 return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
#endif
"""
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen
# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions...
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)
# Bug fix: make.make_mod was registered a second time here, duplicating its
# documentation entry - it is already added at the top of the function list.

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
def setProcName(name):
    """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
    # Call the process control function...
    # 15 is PR_SET_NAME - renames the kernel task (the 'comm' name, truncated
    # by the kernel to 15 characters).
    # NOTE(review): name is passed straight to c_char_p, which needs bytes - this
    # assumes Python 2 str; under Python 3 a str would need encoding first. Verify.
    libc = cdll.LoadLibrary('libc.so.6')
    libc.prctl(15, c_char_p(name), 0, 0, 0)

    # Update argv...
    # _dl_argv is a glibc-private symbol holding the original argv; overwriting
    # argv[0] in place changes what 'ps -f' style listings show. The copy is
    # truncated to the original argv[0] length so adjacent memory is untouched.
    charPP = POINTER(POINTER(c_char))
    argv = charPP.in_dll(libc,'_dl_argv')
    size = libc.strlen(argv[0])
    libc.strncpy(argv[0],c_char_p(name),size)
if __name__=='__main__':
    # Quick test that it works...
    # Show the process listing before the rename, do the rename, then show it
    # again - 'ps' reveals the comm-style name, 'ps -f' the argv-based name.
    import os

    ps1 = 'ps'
    ps2 = 'ps -f'

    os.system(ps1)
    os.system(ps2)

    setProcName('wibble_wobble')

    os.system(ps1)
    os.system(ps2)
| Python |
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import operator
class Dataset:
    """Contains a dataset - lots of pairs of feature vectors and labels. For conveniance labels can be arbitrary python objects, or at least python objects that work for indexing a dictionary."""
    def __init__(self):
        # Labels are internally stored as consecutive integers - these two
        # structures do the conversion in both directions...
        self.labelToNum = dict()
        self.numToLabel = []

        # Store of data blocks - each block is a (data matrix, label list) pair.
        # (A lot of blocks could be of length one of course.)...
        self.blocks = []

    def _labelNum(self, label):
        """Returns the internal integer code for the given label, registering the label first if it has not been seen before."""
        if label not in self.labelToNum:
            self.labelToNum[label] = len(self.numToLabel)
            self.numToLabel.append(label)
        return self.labelToNum[label]

    def add(self, featVect, label):
        """Adds a single feature vector and label."""
        l = self._labelNum(label)
        self.blocks.append((featVect.reshape((1, featVect.shape[0])).astype(numpy.double), [l]))

    def addMatrix(self, dataMatrix, labels):
        """This adds a data matrix alongside a list of labels for it. The number of rows in the matrix should match the number of labels in the list."""
        assert(dataMatrix.shape[0]==len(labels))

        # Convert the given labels list to a list of numerical labels,
        # registering any unseen labels as we go. (Materialised as a list, not a
        # lazy map object, so getCounts/subsampleData can take len() and iterate
        # it repeatedly - also makes this Python 3 safe.)...
        ls = [self._labelNum(l) for l in labels]

        # Store...
        self.blocks.append((dataMatrix.astype(numpy.double), ls))

    def getLabels(self):
        """Returns a list of all the labels in the data set."""
        return self.numToLabel

    def getCounts(self):
        """Returns a how many features with each label have been seen - as a list which aligns with the output of getLabels."""
        ret = [0]*len(self.numToLabel)
        for block in self.blocks:
            for label in block[1]: ret[label] += 1
        return ret

    def subsampleData(self, count):
        """Returns a new dataset object which contains count instances of the data, sampled from the data contained within without repetition. Returned Dataset could miss some of the classes."""
        size = sum(len(block[1]) for block in self.blocks)
        subset = numpy.random.permutation(size)[:count]
        subset.sort()

        ret = Dataset()
        # Bug fix: with an empty selection (count<=0 or an empty dataset) the
        # loop below indexed subset[0] before checking the bound - now we just
        # return the empty Dataset.
        if subset.shape[0]==0: return ret

        pos = 0   # Number of exemplars in the blocks already passed.
        index = 0 # Next entry of subset to satisfy.
        for block in self.blocks:
            while subset[index]<(pos+len(block[1])):
                loc = subset[index] - pos
                ret.add(block[0][loc,:], block[1][loc])
                index += 1
                if index==subset.shape[0]: return ret
            pos += len(block[1])
        return ret

    def getTrainData(self, lNeg, lPos):
        """Given two labels this returns a pair of a data matrix and a y vector, where lPos features have +1 and lNeg features have -1. Features that do not have one of these two labels will not be included."""
        # Convert the given labels to label numbers; -1 (never a valid internal
        # label) if a label has not been seen...
        ln = self.labelToNum.get(lNeg, -1)
        lp = self.labelToNum.get(lPos, -1)

        # Go through the blocks and extract the relevant rows and signs...
        dataList = []
        yList = []
        for dataMatrix, labels in self.blocks:
            inds = [i for i, l in enumerate(labels) if l==lp or l==ln]
            if len(inds)!=0:
                # lPos checked first, matching the original sign convention.
                # (numpy.float64 replaces the removed numpy.float_ alias.)
                y = numpy.array([(1.0 if labels[i]==lp else -1.0) for i in inds], dtype=numpy.float64)
                data = dataMatrix[numpy.array(inds),:]
                dataList.append(data)
                yList.append(y)

        # Glue it all together into big blocks, and return 'em...
        return (numpy.vstack(dataList), numpy.concatenate(yList))
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2010 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import svm
from utils import doc_gen
# Setup...
# Build the documentation object for the svm module, then register each public
# function and class with it so doc_gen can emit the module's documentation.
doc = doc_gen.DocGen('svm', 'Support Vector Machine', 'Support vector machines, classification only')
doc.addFile('readme.txt', 'Overview')

# Functions...
doc.addFunction(svm.solvePair)
doc.addFunction(svm.looPair)
doc.addFunction(svm.looPairSelect)

# Classes...
doc.addClass(svm.Kernel)
doc.addClass(svm.Params)
doc.addClass(svm.ParamsRange)
doc.addClass(svm.ParamsSet)
doc.addClass(svm.Dataset)
doc.addClass(svm.Model)
doc.addClass(svm.MultiModel)
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.random
from utils.start_cpp import start_cpp
class Generator:
    """Abstract interface for a test generator. A generator supplies candidate test entities that split an exemplar set; during tree construction it is handed the exemplar set, an index of the relevant exemplars and an optional weighting, and yields test entities for the goal to score so the best can be kept. This matches the extremely-random-forest approach, though nothing prevents a goal-aware generator that optimises and yields a single test. The generator carries the key parameters of the decision forest - how tests are made and how many are tried - so choosing the right generator is essential for performance. A concrete Generator is also expected to inherit from its matching Test object so it provides the do method, which a test entity needs to function."""

    def clone(self):
        """Produces and returns a deep copy of this generator."""
        raise NotImplementedError

    def itertests(self, es, index, weights = None):
        """Yields test entities, one at a time, that split the exemplars indexed by index within the exemplar set es into two groups, so the caller can pick the best according to the current goal. Implementations may construct these however they wish. weights, if given, aligns with es and provides a per-exemplar weight."""
        raise NotImplementedError
        # The unreachable yield below makes this a generator function, so the
        # NotImplementedError surfaces on the first iteration rather than at
        # call time - matching how concrete implementations behave.
        yield

    def genCodeC(self, name, exemplar_list):
        """Provides C code for the generator as a 2-tuple of (code, state type name). The state type exposes `void * test` and `size_t length`, and the code defines `void <name>_init(<state> & state, PyObject * data, Exemplar * test_set)` plus `bool <name>_next(...)` - call init once then next repeatedly, reading the state while next returns true; false means iteration is done and the state has been cleaned up. Raises NotImplementedError when no C implementation exists."""
        raise NotImplementedError
class MergeGen(Generator):
    """As most generators only handle a specific kind of data (discrete, continuous, one channel at a time.) the need arises to merge multiple generators for a given problem, in the sense that when iterating the generators tests it provides the union of all tests by all of the contained generators. Alternativly, the possibility exists to get better results by using multiple generators with different properties, as the best test from all provided will ultimatly be selected. This class merges upto 256 generators as one. The 256 limit comes from the fact the test entities provided by it have to encode which generator made them, so that the do method can send the test entity to the right test object, and it only uses a byte - in the unlikelly event that more are needed a hierarchy can be used, though your almost certainly doing it wrong if you get that far."""
    def __init__(self, *args):
        """By default constructs the object without any generators in it, but you can provide generators to it as parameters to the constructor."""
        # Bug fix: args is a tuple, but add() appends to self.gens - store a
        # list so construction with arguments followed by add() works.
        self.gens = list(args)
        assert(len(self.gens)<=256)

    def clone(self):
        """Returns a deep copy - each contained generator is cloned."""
        ret = MergeGen()
        # List comprehension rather than map() so self.gens stays a list with a
        # working len() under both Python 2 and 3.
        ret.gens = [gen.clone() for gen in self.gens]
        return ret

    def add(self, gen):
        """Adds a generator to the provided set. Generators can be in multiple MergeGen/RandomGen objects, just as long as a loop is not formed."""
        self.gens.append(gen)
        assert(len(self.gens)<=256)

    def itertests(self, es, index, weights = None):
        """Yields the union of the tests of all contained generators, each prefixed with one byte recording which generator produced it."""
        for c, gen in enumerate(self.gens):
            code = chr(c)
            for test in gen.itertests(es, index, weights):
                yield code+test

    def do(self, test, es, index = slice(None)):
        """Dispatches the test to the generator identified by its first byte, stripping that byte off first."""
        code = ord(test[0])
        return self.gens[code].do(test[1:], es, index)

    def testCodeC(self, name, exemplar_list):
        """Emits C code for all child tests plus a dispatch function that switches on the leading generator-index byte."""
        # Add the children...
        ret = ''
        for i, gen in enumerate(self.gens):
            ret += gen.testCodeC(name + '_%i'%i, exemplar_list)

        # Put in the final test function...
        ret += start_cpp()
        ret += 'bool %s(PyObject * data, void * test, size_t test_length, int exemplar)\n'%name
        ret += '{\n'
        ret += 'void * sub_test = ((char*)test) + 1;\n'
        ret += 'size_t sub_test_length = test_length - 1;\n'
        ret += 'int which = *(unsigned char*)test;\n'
        ret += 'switch(which)\n'
        ret += '{\n'
        for i in range(len(self.gens)):
            ret += 'case %i: return %s_%i(data, sub_test, sub_test_length, exemplar);\n'%(i, name, i)
        ret += '}\n'
        ret += 'return 0;\n' # To stop the compiler issuing a warning.
        ret += '}\n'

        return ret

    def genCodeC(self, name, exemplar_list):
        """Emits C code for all child generators plus a state struct and init/next functions that iterate the children in sequence, prefixing each emitted test with the child's index byte."""
        code = ''
        states = []
        for i, gen in enumerate(self.gens):
            c, s = gen.genCodeC(name+'_%i'%i, exemplar_list)
            code += c
            states.append(s)

        code += start_cpp() + """
struct State%(name)s
{
void * test;
size_t length;
"""%{'name':name}
        for i,s in enumerate(states):
            code += ' %s gen_%i;\n'%(s,i)
        code += start_cpp() + """
int upto;
};
void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
state.test = 0;
state.length = 0;
"""%{'name':name}
        for i in range(len(self.gens)):
            code += '%(name)s_%(i)i_init(state.gen_%(i)i, data, test_set);\n'%{'name':name, 'i':i}
        code += start_cpp() + """
state.upto = 0;
}
bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
switch (state.upto)
{
"""%{'name':name}
        for i in range(len(self.gens)):
            code += start_cpp() + """
case %(i)i:
if (%(name)s_%(i)i_next(state.gen_%(i)i, data, test_set))
{
state.length = 1 + state.gen_%(i)i.length;
state.test = realloc(state.test, state.length);
((unsigned char*)state.test)[0] = %(i)i;
memcpy((unsigned char*)state.test+1, state.gen_%(i)i.test, state.gen_%(i)i.length);
return true;
}
else state.upto += 1;
"""%{'name':name, 'i':i}
        code += start_cpp() + """
}
free(state.test);
return false;
}
"""
        return (code, 'State'+name)
class RandomGen(Generator):
    """This generator contains several generators, and randomly selects one to provide the tests each time itertests is called - not entirly sure what this could be used for, but it can certainly add some more randomness, for good or for bad. Supports weighting and merging multiple draws from the set of generators contained within. Has the same limit of 256 that MergeGen has, for the same reasons."""
    def __init__(self, draws = 1, *args):
        """draws is the number of draws from the list of generators to merge to provide the final output. Note that it is drawing with replacement, and will call an underlying generator twice if it gets selected twice. After the draws parameter you can optionally provide generators, which will be put into the created object, noting that they will all have a selection weight of 1."""
        # Stored as a list of (generator, weight) pairs; a list comprehension
        # (not map) so add()/len() work under both Python 2 and 3.
        self.gens = [(a, 1.0) for a in args]
        self.draws = draws
        assert(len(self.gens)<=256)

    def clone(self):
        """Returns a deep copy - each contained generator is cloned, with its selection weight preserved."""
        # Bug fix: this previously constructed MergeGen(self.draws) - wrong
        # class, and draws was swallowed as a generator argument - and called
        # clone() directly on the (generator, weight) tuples, which would have
        # raised AttributeError. Now clones the generator half of each pair.
        ret = RandomGen(self.draws)
        ret.gens = [(gen.clone(), weight) for gen, weight in self.gens]
        return ret

    def add(self, gen, weight = 1.0):
        """Adds a generator to the provided set. Generators can be in multiple MergeGen/RandomGen objects, just as long as a loop is not formed. You can also provide a weight, to bias how often particular generators are selected."""
        self.gens.append((gen, weight))
        assert(len(self.gens)<=256)

    def itertests(self, es, index, weights = None):
        """Draws self.draws generators (with replacement, proportional to weight) and yields all their tests, each prefixed with the owning generator's index byte."""
        # Select which generators get to play...
        w = numpy.asarray([g[1] for g in self.gens])
        w /= w.sum()
        toDo = numpy.random.multinomial(self.draws, w)

        # Go through and iterate the tests of each generator in turn, the number of times requested...
        for genInd in numpy.where(toDo!=0)[0]:
            code = chr(genInd)
            for _ in range(toDo[genInd]):
                for test in self.gens[genInd][0].itertests(es, index, weights):
                    yield code+test

    def do(self, test, es, index = slice(None)):
        """Dispatches the test to the generator identified by its first byte, stripping that byte off first."""
        code = ord(test[0])
        return self.gens[code][0].do(test[1:], es, index)

    def testCodeC(self, name, exemplar_list):
        """Emits C code for all child tests plus a dispatch function that switches on the leading generator-index byte."""
        # Add the children...
        ret = ''
        for i, (gen, _) in enumerate(self.gens):
            ret += gen.testCodeC(name + '_%i'%i, exemplar_list)

        # Put in the final test function...
        ret += start_cpp()
        ret += 'bool %s(PyObject * data, void * test, size_t test_length, int exemplar)\n'%name
        ret += '{\n'
        ret += 'void * sub_test = ((char*)test) + 1;\n'
        ret += 'size_t sub_test_length = test_length - 1;\n'
        ret += 'int which = *(unsigned char*)test;\n'
        ret += 'switch(which)\n'
        ret += '{\n'
        for i in range(len(self.gens)):
            ret += 'case %i: return %s_%i(data, sub_test, sub_test_length, exemplar);\n'%(i, name, i)
        ret += '}\n'
        ret += 'return 0;\n' # To stop the compiler issuing a warning.
        ret += '}\n'

        return ret

    def genCodeC(self, name, exemplar_list):
        """Emits C code for all child generators plus a state struct and init/next functions. init fills a random sequence of draws (weighted via nested thresholds on drand48()); next then iterates the drawn generators in order, prefixing each emitted test with its index byte."""
        code = ''
        states = []
        for i, gen in enumerate(self.gens):
            c, s = gen[0].genCodeC(name+'_%i'%i, exemplar_list)
            code += c
            states.append(s)

        code += start_cpp() + """
struct State%(name)s
{
void * test;
size_t length;
"""%{'name':name}
        for i,s in enumerate(states):
            code += ' %s gen_%i;\n'%(s,i)
        code += start_cpp() + """
int upto;
int * seq; // Sequence of things to try.
};
void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
state.test = 0;
state.length = 0;
state.upto = -1;
state.seq = (int*)malloc(sizeof(int)*%(draws)i);
for (int i=0;i<%(draws)i;i++)
{
float weight = drand48();
"""%{'name':name, 'draws':self.draws, 'count':len(self.gens)}
        total = sum(g[1] for g in self.gens)
        ssf = 0.0
        for i,gen in enumerate(self.gens):
            ssf += gen[1]/total
            code += start_cpp() + """
if (weight<%(thres)f) state.seq[i] = %(i)i;
else
"""%{'i':i, 'thres':ssf}
        code += start_cpp() + """
state.seq[i] = %(count)i-1;
}
}
bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
while (state.upto<%(draws)i)
{
if (state.upto!=-1)
{
switch (state.seq[state.upto])
{
"""%{'name':name, 'draws':self.draws, 'count':len(self.gens)}
        for i in range(len(self.gens)):
            code += start_cpp() + """
case %(i)i:
if (%(name)s_%(i)i_next(state.gen_%(i)i, data, test_set))
{
state.length = 1 + state.gen_%(i)i.length;
state.test = realloc(state.test, state.length);
((unsigned char*)state.test)[0] = %(i)i;
memcpy((unsigned char*)state.test+1, state.gen_%(i)i.test, state.gen_%(i)i.length);
return true;
}
break;
"""%{'name':name, 'i':i}
        code += start_cpp() + """
}
}
state.upto++;
if (state.upto<%(draws)i)
{
switch(state.seq[state.upto])
{
""" %{'draws':self.draws}
        for i in range(len(self.gens)):
            code += start_cpp() + """
case %(i)i:
%(name)s_%(i)i_init(state.gen_%(i)i, data, test_set);
break;
"""%{'name':name, 'i':i}
        code += start_cpp() + """
}
}
}
free(state.test);
free(state.seq);
return false;
}
"""
        return (code, 'State'+name)
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
from utils.start_cpp import start_cpp
class Test:
"""Interface for a test definition. This provides the concept of a test that an exemplar either passes or fails. The test is actually defined by some arbitary entity made by a matching generator, but this object is required to actually do the test - contains the relevant code and any shared parameters to keep memory consumption low, as there could be an aweful lot of tests. The seperation of test from generator is required as there are typically many methods to generate a specific test - generators inherit from the relevant test object."""
def do(self, test, es, index = slice(-1)):
"""Does the test. Given the entity that defines the actual test and an ExemplarSet to run the test on. An optional index for the features can be provided (Passed directly through to the exemplar sets [] operator, so its indexing rules apply.), but if omitted it runs for all. Return value is a boolean numpy array indexed by the relative exemplar indices, that gives True if it passsed the test, False if it failed."""
raise NotImplementedError
def testCodeC(self, name, exemplar_list):
"""Provides C code to perform the test - provides a C function that is given the test object as a pointer to the first byte and the index of the exemplar to test; it then returns true or false dependeing on if it passes the test or not. Returned string contains a function with the calling convention `bool <name>(PyObject * data, void * test, size_t test_length, int exemplar)`. data is a python tuple indexed by channel containning the object to be fed to the exemplar access function. To construct this function it needs the return value of listCodeC for an ExemplarSet, so it can get the calling convention to access the channel. When compiled the various functions must be avaliable."""
raise NotImplementedError
class AxisSplit(Test):
    """Possibly the simplest test you can apply to continuous data - an axis-aligned split plane. Can also be applied to discrete data if that happens to make sense. This stores which channel to apply the tests to, whilst each test entity is a 8 byte string, encoding an int32 then a float32 - the first indexes the feature to use from the channel, the second the offset, such that an input has this value subtracted and then fails the test if the result is less than zero or passes if it is greater than or equal to."""
    def __init__(self, channel):
        """Needs to know which channel this test is applied to."""
        self.channel = channel

    def do(self, test, es, index = slice(None)):
        # frombuffer replaces the deprecated binary-mode numpy.fromstring; the
        # resulting arrays are read-only views, which is fine as they are only
        # read from here.
        value_index = numpy.frombuffer(test[0:4], dtype=numpy.int32, count=1)
        offset = numpy.frombuffer(test[4:8], dtype=numpy.float32, count=1)

        values = es[self.channel, index, value_index[0]]
        return (values-offset[0])>=0.0

    def testCodeC(self, name, exemplar_list):
        """Emits a C function that decodes the (int32 feature, float32 offset) test entity and applies the threshold to the indicated channel/feature."""
        ret = start_cpp() + """
bool %(name)s(PyObject * data, void * test, size_t test_length, int exemplar)
{
int feature = *(int*)test;
float offset = *((float*)test + 1);
%(channelType)s channel = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
float value = (float)%(channelName)s_get(channel, exemplar, feature);
return (value-offset)>=0.0;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype']}
        return ret
class LinearSplit(Test):
    """Does a linear split of data based on some small set of values. Can be applied to discrete data, though that would typically be a bit strange. This object stores both the channel to which the test is applied and how many dimensions are used, whilst the test entity is a string encoding three things in sequence. First are the int32 indices of the features from the exemplars channel to use, second are the float32 values forming the vector that is dot producted with the extracted values to project to the line perpendicular to the plane, and finally the float32 offset, subtracted from the line position to make it a negative to fail, zero or positive to pass decision."""
    def __init__(self, channel, dims):
        """Needs to know which channel it is applied to and how many dimensions are to be considered."""
        self.channel = channel
        self.dims = dims

    def do(self, test, es, index = slice(None)):
        # Byte offsets of the three packed sections: dims int32 feature
        # indices, dims float32 plane coefficients, one float32 offset.
        ss1 = 4*self.dims
        ss2 = 2*ss1
        ss3 = ss2+4
        # frombuffer replaces the deprecated binary-mode numpy.fromstring; the
        # read-only views it returns are fine as these are only read.
        value_indices = numpy.frombuffer(test[0:ss1], dtype=numpy.int32, count=self.dims)
        plane_axis = numpy.frombuffer(test[ss1:ss2], dtype=numpy.float32, count=self.dims)
        offset = numpy.frombuffer(test[ss2:ss3], dtype=numpy.float32, count=1)

        values = es[self.channel, index, value_indices]
        return ((values*plane_axis.reshape((1,-1))).sum(axis=1) - offset)>=0.0

    def testCodeC(self, name, exemplar_list):
        """Emits a C function that decodes the packed test entity, projects the selected features onto the plane axis and applies the offset threshold."""
        ret = start_cpp() + """
bool %(name)s(PyObject * data, void * test, size_t test_length, int exemplar)
{
int * feature = (int*)test;
float * plane_axis = (float*)test + %(dims)i;
float offset = *((float*)test + %(dims)i*2);
%(channelType)s channel = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
float value = 0.0;
for (int i=0;i<%(dims)i;i++)
{
float v = (float)%(channelName)s_get(channel, exemplar, feature[i]);
value += v*plane_axis[i];
}
return (value-offset)>=0.0;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'dims':self.dims}
        return ret
class DiscreteBucket(Test):
    """For discrete values. The test is applied to a single value, and consists of a list of values such that if it is equal to one of them it passes, but if it is not equal to any of them it fails. Basically a binary split of categorical data. The test entity is a string encoding first a int32 of the index of which feature to use, followed by the remainder of the string forming a list of int32's that constitute the values that result in success."""
    def __init__(self, channel):
        """Needs to know which channel this test is applied to."""
        self.channel = channel

    def do(self, test, es, index = slice(None)):
        # frombuffer replaces the deprecated binary-mode numpy.fromstring;
        # t[0] is the feature index, t[1:] the accepted values.
        t = numpy.frombuffer(test, dtype=numpy.int32)
        values = es[self.channel, index, t[0]]
        return numpy.in1d(values, t[1:])

    def testCodeC(self, name, exemplar_list):
        """Emits a C function that decodes the int32 list and linearly scans the accepted values for a match."""
        ret = start_cpp() + """
bool %(name)s(PyObject * data, void * test, size_t test_length, int exemplar)
{
size_t steps = test_length>>2;
int * accept = (int*)test;
%(channelType)s channel = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
int value = (int)%(channelName)s_get(channel, exemplar, accept[0]);
for (size_t i=1; i<steps; i++)
{
if (accept[i]==value) return true;
}
return false;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype']}
        return ret
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
class Pruner:
    """Abstract interface that decides when to stop growing a tree - concrete implementations examine the statistics of each proposed split and veto further growth once some condition is met."""

    def clone(self):
        """Produces and returns a copy of this pruner."""
        raise NotImplementedError

    def keep(self, depth, trueCount, falseCount, infoGain, node):
        """Called each time a node is split, to decide whether the split survives. Returning True keeps it (its children are then split recursively and judged in turn); returning False throws the children away and stops. depth is the node's depth (root is 0, its children 1, and so on), trueCount/falseCount are how many training exemplars go down each branch, and infoGain is the information gained by the split. node is the actual node, for any deeper analysis - at call time its test and stats exist, but nothing else does."""
        raise NotImplementedError
class PruneCap(Pruner):
    """A simple but effective Pruner implimentation - simply provides a set of thresholds on depth, number of training samples required to split and information gained - when any one of the thresholds is tripped it stops further branching."""
    def __init__(self, maxDepth = 8, minTrain = 8, minGain = 1e-3, minDepth = 2):
        """maxDepth is the maximum depth of a node in the tree, after which it stops - remember that the maximum node count based on this threshold increases dramatically as this number goes up, so don't go too crazy. minTrain is the smallest size node it will consider for further splitting. minGain is a lower limit on how much information gain a split must provide to be accepted. minDepth overrides the minimum node size - as long as the node count does not reach zero in either branch it will always split to the given depth - used to force it to at least learn something."""
        self.maxDepth = maxDepth
        self.minDepth = minDepth
        self.minTrain = minTrain
        self.minGain = minGain

    def clone(self):
        # Bug fix: minDepth was previously not forwarded, so clones silently
        # reverted to the default minimum depth.
        return PruneCap(self.maxDepth, self.minTrain, self.minGain, self.minDepth)

    def keep(self, depth, trueCount, falseCount, infoGain, node):
        # Hard depth cap first; then the minimum-size rule (only enforced once
        # past the forced minDepth); then the information-gain floor.
        if depth>=self.maxDepth: return False
        if depth>=self.minDepth and (trueCount+falseCount)<self.minTrain: return False
        if infoGain<self.minGain: return False
        return True

    def setMinDepth(self, minDepth):
        """Sets the minimum tree growing depth - trees will be grown at least this deep, baring insurmountable issues."""
        self.minDepth = minDepth

    def setMaxDepth(self, maxDepth):
        """Sets the depth cap on the trees."""
        self.maxDepth = maxDepth

    def setMinTrain(self, minTrain):
        """Sets the minimum number of nodes allowed to be split."""
        self.minTrain = minTrain

    def setMinGain(self, mingain):
        """Sets the minimum gain that is allowed for a split to be accepted."""
        self.minGain = mingain
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.random
try:
from multiprocessing import Pool, Manager, cpu_count
except:
Pool = None
from exemplars import *
from goals import *
from pruners import *
from nodes import *
from tests import *
from generators import *
from gen_median import *
from gen_random import *
from gen_classify import *
class DF:
  """Master object for the decision forest system - provides the entire interface. Typical use consists of setting up the system - its goal, pruner and generator(s), providing data to train a model and then using the model to analyse new exemplars. Incrimental learning is also supported however, albeit a not very sophisticated implimentation. Note that this class is compatable with pythons serialisation routines, for if you need to save/load a trained model."""
  def __init__(self, other = None):
    """Initialises as a blank model, ready to be setup and run. Can also act as a copy constructor if you provide an instance of DF as a single parameter."""
    if isinstance(other, DF):
      self.goal = other.goal.clone() if other.goal!=None else None
      self.pruner = other.pruner.clone() if other.pruner!=None else None
      self.gen = other.gen.clone() if other.gen!=None else None
      self.trees = map(lambda t: (t[0].clone(), t[1], t[2].copy() if t[2]!=None else None), other.trees)
      self.inc = other.inc
      self.grow = other.grow
      self.trainCount = other.trainCount
      self.evaluateCodeC = dict(other.evaluateCodeC)
      self.addCodeC = dict(other.addCodeC)
      self.errorCodeC = dict(other.errorCodeC)
      self.addTrainCodeC = dict(other.addTrainCodeC)
      self.useC = other.useC
    else:
      self.goal = None
      self.pruner = PruneCap()
      self.gen = None
      self.trees = [] # A list of tuples: (root node, oob score, draw) Last entry is None if self.grow is False, otherwise a numpy array of repeat counts for trainning for the exemplars.
      self.inc = False # True to support incrimental learning, False to not.
      self.grow = False # If true then during incrimental learning it checks pre-existing trees to see if they can grow some more each time.
      self.trainCount = 0 # Count of how many trainning examples were used to train with - this is so it knows how to split up the data when doing incrimental learning (between new and old exmeplars.). Also used to detect if trainning has occured.

      # Assorted code caches, keyed by the ExemplarSet's key()...
      self.evaluateCodeC = dict()
      self.addCodeC = dict()
      self.errorCodeC = dict()
      self.addTrainCodeC = dict()
      self.useC = True

  def setGoal(self, goal):
    """Allows you to set a goal object, of type Goal - must be called before doing anything, and must not be changed after anything is done."""
    assert(self.trainCount==0)
    # Goal-dependent code caches are stale once the goal changes...
    self.addCodeC = dict()
    self.errorCodeC = dict()
    self.addTrainCodeC = dict()
    self.goal = goal

  def getGoal(self):
    """Returns the curent Goal object."""
    return self.goal

  def setPruner(self, pruner):
    """Sets the pruner, which controls when to stop growing each tree. By default this is set to the PruneCap object with default parameters, though you might want to use getPruner to get it so you can adjust its parameters to match the problem at hand, as the pruner is important for avoiding overfitting."""
    assert(self.trainCount==0)
    self.pruner = pruner

  def getPruner(self):
    """Returns the current Pruner object."""
    return self.pruner

  def setGen(self, gen):
    """Allows you to set the Generator object from which node tests are obtained - must be set before anything happens. You must not change this once trainning starts."""
    assert(self.trainCount==0)
    # All cached code depends on the generator...
    self.evaluateCodeC = dict()
    self.addCodeC = dict()
    self.errorCodeC = dict()
    self.addTrainCodeC = dict()
    self.gen = gen

  def getGen(self):
    """Returns the Generator object for the system."""
    return self.gen

  def setInc(self, inc, grow = False):
    """Set this to True to support incrimental learning, False to not. Having incrimental learning on costs extra memory, but has little if any computational affect. If incrimental learning is on you can also switch grow on, in which case as more data arrives it tries to split the leaf nodes of trees that have already been grown. Requires a bit more memory be used, as it needs to keep the indices of the training set for future growth. Note that the default pruner is entirly inappropriate for this mode - the pruner has to be set such that as more data arrives it will allow future growth."""
    assert(self.trainCount==0)
    self.inc = inc
    self.grow = grow

  def getInc(self):
    """Returns the status of incrimental learning - True if its enabled, False if it is not."""
    return self.inc

  def getGrow(self):
    """Returns True if the trees will be subject to further growth during incrimental learning, when they have gained enough data to subdivide further."""
    return self.grow

  def allowC(self, allow):
    """By default the system will attempt to compile and use C code instead of running the (much slower) python code - this allows you to force it to not use C code, or switch C back on if you had previously switched it off. Typically only used for speed comparisons and debugging, but also useful if the use of C code doesn't work on your system. Just be aware that the speed difference is galactic."""
    self.useC = allow

  def addTree(self, es, weightChannel = None, ret = False, dummy = False):
    """Adds an entirely new tree to the system given all of the new data. Uses all exemplars in the ExemplarSet, which can optionally include a channel with a single feature in it to weight the vectors; indicated via weightChannel. Typically this is used indirectly via the learn method, rather than by the user of an instance of this class."""
    # Arrange for code...
    if self.useC:
      key = es.key()
      if key not in self.addCodeC:
        self.addCodeC[key] = Node.initC(self.goal, self.gen, es)
      code = self.addCodeC[key]
      if key not in self.errorCodeC:
        self.errorCodeC[key] = Node.errorC(self.goal, self.gen, es)
      errCode = self.errorCodeC[key]
    else:
      code = None
      errCode = None

    # Special case code for a dummy run - compiles the C code without doing any real work, to dodge multiprocess compilation races...
    if dummy:
      i = numpy.zeros(0, dtype=numpy.int32)
      w = numpy.ones(0, dtype=numpy.float32)
      if code!=None:
        Node(self.goal, self.gen, self.pruner, es, i, w, code=code)
      if errCode!=None:
        Node.error.im_func(None, self.goal, self.gen, es, i, w, self.inc, code = errCode)
      return

    # First select which samples are to be used for trainning, and which for testing, calculating the relevant weights...
    draw = numpy.random.poisson(size=es.exemplars()) # Equivalent to a bootstrap sample, assuming an infinite number of exemplars are avaliable. Correct thing to do given that incrimental learning is an option.
    train = numpy.asarray(numpy.where(draw!=0)[0], dtype=numpy.int32)
    test = numpy.asarray(numpy.where(draw==0)[0], dtype=numpy.int32)

    if weightChannel==None:
      trainWeight = numpy.asarray(draw, dtype=numpy.float32)
      testWeight = None
    else:
      weights = es[weightChannel,:,0]
      trainWeight = numpy.asarray(draw * weights, dtype=numpy.float32)
      testWeight = numpy.asarray(weights, dtype=numpy.float32)

    if train.shape[0]==0: return # Safety for if it selects to use none of the items - do nothing...

    # Grow a tree...
    tree = Node(self.goal, self.gen, self.pruner, es, train, trainWeight, code=code)

    # Apply the goal-specific post processor to the tree...
    self.goal.postTreeGrow(tree, self.gen)

    # Calculate the oob error for the tree...
    if test.shape[0]!=0:
      error = tree.error(self.goal, self.gen, es, test, testWeight, self.inc, code=errCode)
    else:
      error = 1e100 # Can't calculate an error - record a high value so we lose the tree at the first avaliable opportunity, which is sensible behaviour given that we don't know how good it is.

    # Store it...
    if self.grow==False: draw = None
    if ret: return (tree, error, draw)
    else: self.trees.append((tree, error, draw))

  def lumberjack(self, count):
    """Once a bunch of trees have been learnt this culls them, reducing them such that there are no more than count. It terminates those with the highest error rate first, and does nothing if there are not enough trees to excede count. Typically this is used by the learn method, rather than by the object user."""
    if len(self.trees)>count:
      self.trees.sort(key = lambda t: t[1])
      self.trees = self.trees[:count]

  def learn(self, trees, es, weightChannel = None, clamp = None, mp = True, callback = None):
    """This learns a model given data, and, when it is switched on, will also do incrimental learning. trees is how many new trees to create - for normal learning this is just how many to make, for incrimental learning it is how many to add to those that have already been made - more is always better, within reason, but it is these that cost you computation and memory. es is the ExemplarSet containing the data to train on. For incrimental learning you always provide the previous data, at the same indices, with the new exemplars appended to the end. weightChannel allows you to give a channel containing a single feature if you want to weight the importance of the exemplars. clamp is only relevent to incrimental learning - it is effectivly a maximum number of trees to allow, where it throws away the weakest trees first. This is how incrimental learning works, and so must be set for that - by constantly adding new trees as new data arrives and updating the error metrics of the older trees (The error will typically increase with new data.) the less-well trainned (and typically older) trees will be culled. mp indicates if multiprocessing should be used or not - True to do so, False to not. Will automatically switch itself off if not supported."""
    # Prepare for multiprocessing...
    if Pool==None: mp = False
    elif cpu_count()<2: mp = False
    if mp:
      pool = Pool()
      manager = Manager()
      treesDone = manager.Value('i',0)
    result = None
    totalTrees = len(self.trees) + trees

    # If this is an incrimental pass then first update all the pre-existing trees...
    if self.trainCount!=0:
      assert(self.inc)
      newCount = es.exemplars() - self.trainCount

      key = es.key()
      code = self.addCodeC[key] if key in self.addCodeC else None
      errCode = self.errorCodeC[key] if key in self.errorCodeC else None
      if key not in self.addTrainCodeC:
        c = Node.addTrainC(self.goal, self.gen, es)
        self.addTrainCodeC[key] = c
        if c!=None:
          # Do a dummy run, to avoid multiproccess race conditions...
          i = numpy.zeros(0, dtype=numpy.int32)
          w = numpy.ones(0, dtype=numpy.float32)
          Node.addTrain.im_func(None, self.goal, self.gen, es, i, w, c)
      addCode = self.addTrainCodeC[key]

      if mp:
        result = pool.map_async(updateTree, map(lambda tree_tup: (self.goal, self.gen, self.pruner if self.grow else None, tree_tup, self.trainCount, newCount, es, weightChannel, (code, errCode, addCode), treesDone, numpy.random.randint(1000000000)), self.trees))
      else:
        newTrees = []
        for ti, tree_tup in enumerate(self.trees):
          if callback: callback(ti, totalTrees)
          data = (self.goal, self.gen, self.pruner if self.grow else None, tree_tup, self.trainCount, newCount, es, weightChannel, (code, errCode, addCode))
          newTrees.append(updateTree(data))
        self.trees = newTrees

    # Record how many exemplars were trained with most recently - needed for incrimental learning...
    self.trainCount = es.exemplars()

    # Create new trees...
    if trees!=0:
      if mp and trees>1:
        # There is a risk of a race condition caused by compilation - do a dummy run to make sure we compile in advance...
        self.addTree(es, weightChannel, dummy=True)

        # Set the runs going...
        newTreesResult = pool.map_async(mpGrowTree, map(lambda _: (self, es, weightChannel, treesDone, numpy.random.randint(1000000000)), xrange(trees)))

        # Wait for the runs to complete...
        while (not newTreesResult.ready()) and ((result==None) or (not result.ready())):
          newTreesResult.wait(0.1)
          if result: result.wait(0.1)
          if callback: callback(treesDone.value, totalTrees)

        # Put the result into the dta structure...
        if result: self.trees = result.get()
        self.trees += filter(lambda tree: tree!=None, newTreesResult.get())
      else:
        for ti in xrange(trees):
          if callback: callback(len(self.trees)+ti, totalTrees)
          self.addTree(es, weightChannel)

    # Prune trees down to the right number of trees if needed...
    if clamp!=None: self.lumberjack(clamp)

    # Clean up if we have been multiprocessing...
    if mp:
      pool.close()
      pool.join()

  def answer_types(self):
    """Returns a dictionary giving all the answer types that can be requested using the which parameter of the evaluate method. The keys give the string to be provided to which, whilst the values give human readable descriptions of what will be returned. 'best' is always provided, as a point estimate of the best answer; most models also provide 'prob', which is a probability distribution over 'best', such that 'best' is the argmax of 'prob'."""
    return self.goal.answer_types()

  def evaluate(self, es, index = slice(None), which = 'best', mp = False, callback = None):
    """Given some exemplars returns a list containing the output of the model for each exemplar. The returned list will align with the index, which defaults to everything and hence if not provided is aligned with es, the ExemplarSet. The meaning of the entrys in the list will depend on the Goal of the model and which: which can either be a single answer type from the goal object or a list of answer types, to get a tuple of answers for each list entry - the result is what the Goal-s answer method returns. The answer_types method passes through to provide relevent information. Can be run in multiprocessing mode if you set the mp variable to True - only worth it if you have a lot of data (Also note that it splits by tree, so each process does all data items but for just one of the trees.). Should not be called if size()==0."""
    if isinstance(index, slice): index = numpy.arange(*index.indices(es.exemplars()))

    # Handle the generation of C code, with caching...
    if self.useC:
      es_type = es.key()
      if es_type not in self.evaluateCodeC:
        self.evaluateCodeC[es_type] = Node.evaluateC(self.gen, es)
      code = self.evaluateCodeC[es_type]
    else:
      code = None

    # If multiprocessing has been requested set it up...
    if Pool==None: mp = False
    elif cpu_count()<2: mp = False
    if mp:
      pool = Pool()
      pool_size = cpu_count()
      manager = Manager()
      treesDone = manager.Value('i',0)

    # Collate the relevent stats objects...
    store = []
    if mp:
      # Dummy run, to avoid a race condition during compilation...
      if code!=None:
        ei = numpy.zeros(0, dtype=index.dtype)
        self.trees[0][0].evaluate([], self.gen, es, ei, code)

      # Do the actual work...
      result = pool.map_async(treeEval, map(lambda tree_error: (tree_error[0], self.gen, es, index, treesDone, code), self.trees))

      while not result.ready():
        result.wait(0.1)
        if callback: callback(treesDone.value, len(self.trees))

      store += result.get()
    else:
      for ti, (tree, _, _) in enumerate(self.trees):
        if callback: callback(ti, len(self.trees))
        res = [None] * es.exemplars()
        tree.evaluate(res, self.gen, es, index, code)
        store.append(res)

    # Merge and obtain answers for the output...
    if mp and index.shape[0]>1:
      # Partition index into pool_size contiguous slices - the first excess slices take step+1 entries each, the rest take step. Bug fix: this previously read ranges[-1] before ranges was defined (a NameError), and the offset arithmetic was wrong even with starts - the base of the equal-sized slices is excess*(step+1)...
      step = index.shape[0]//pool_size
      excess = index.shape[0] - step*pool_size
      starts = map(lambda i: i*(step+1), xrange(excess))
      starts += map(lambda i: excess*(step+1) + i*step, xrange(pool_size-excess))
      starts += [index.shape[0]]
      ranges = map(lambda a, b: slice(a, b), starts[:-1], starts[1:])

      ret = pool.map(getAnswer, map(lambda ran: (self.goal, map(lambda i: map(lambda s: s[i], store), index[ran]), which, es, index[ran], map(lambda t: t[0], self.trees)), ranges))
      ret = reduce(lambda a,b: a+b, ret)
    else:
      ret = self.goal.answer_batch(map(lambda i: map(lambda s: s[i], store), index), which, es, index, map(lambda t: t[0], self.trees))

    # Clean up if we have been multiprocessing...
    if mp:
      pool.close()
      pool.join()

    # Return the answer...
    return ret

  def size(self):
    """Returns the number of trees within the forest."""
    return len(self.trees)

  def nodes(self):
    """Returns the total number of nodes in all the trees."""
    return sum(map(lambda t: t[0].size(), self.trees))

  def error(self):
    """Returns the average error of all the trees - meaning depends on the Goal at hand, but should provide an idea of how well the model is working."""
    return numpy.mean(map(lambda t: t[1], self.trees))
def mpGrowTree(data):
  """Worker for the multiprocessing system - seeds the random number generator (so each process draws a different bootstrap), grows one new tree via DF.addTree and increments the shared completion counter. Returns the (tree, error, draw) tuple, or None if the bootstrap selected no exemplars."""
  forest, exemplars, weightChannel, treesDone, seed = data
  numpy.random.seed(seed)
  grown = forest.addTree(exemplars, weightChannel, True)
  treesDone.value += 1
  return grown
def updateTree(data):
  """Updates a single tree with freshly arrived exemplars - kept external to the DF class for the purpose of multiprocessing. data is (goal, gen, pruner, (tree, error, old_draw), prevCount, newCount, es, weightChannel, (code, errCode, addCode)), optionally followed by a shared progress counter and a random seed. prevCount is how many exemplars the tree has already seen; the new ones occupy indices [prevCount, prevCount+newCount). Returns the updated (tree, error, draw) tuple."""
  goal, gen, pruner, (tree, error, old_draw), prevCount, newCount, es, weightChannel, (code, errCode, addCode) = data[:9]
  if len(data)>10: numpy.random.seed(data[10])

  # Choose which of the new samples are train and which are test - Poisson draws emulate a bootstrap sample, matching addTree. Cast to int32 for consistency with addTree, which the C code expects...
  draw = numpy.random.poisson(size=newCount)
  train = numpy.asarray(numpy.where(draw!=0)[0] + prevCount, dtype=numpy.int32)
  test = numpy.asarray(numpy.where(draw==0)[0] + prevCount, dtype=numpy.int32)

  # Weight arrays must align with the full exemplar set, so zero-pad the region covering the previously seen exemplars. Bug fix: the unweighted branch previously skipped the padding, leaving an array of length newCount that train/test (offset by prevCount) would index out of bounds...
  pad = numpy.zeros(prevCount, dtype=numpy.float32)
  if weightChannel==None:
    trainWeight = numpy.append(pad, numpy.asarray(draw, dtype=numpy.float32))
    testWeight = None
  else:
    # Per-exemplar weights for the new block - indexed [channel, exemplar, feature], consistent with addTree's es[weightChannel,:,0] (previously the exemplar slice was misplaced into the feature position)...
    weights = numpy.asarray(es[weightChannel, prevCount:, 0], dtype=numpy.float32)
    trainWeight = numpy.append(pad, numpy.asarray(draw * weights, dtype=numpy.float32))
    testWeight = numpy.append(pad, weights)

  # Update both test and train for the tree...
  if train.shape[0]!=0:
    tree.addTrain(goal, gen, es, train, trainWeight, addCode)
  error = tree.error(goal, gen, es, test, testWeight, True, code=errCode)

  # If we are growing its time to grow the tree - use 'is None' as old_draw can be a numpy array, for which ==None is an elementwise comparison...
  draw = None if old_draw is None else numpy.append(old_draw, draw)
  if pruner!=None:
    index = numpy.asarray(numpy.where(draw!=0)[0], dtype=numpy.int32)
    # draw is now full length, so the weights must cover every exemplar (previously only the new block was fetched, a shape mismatch against draw)...
    if weightChannel==None: weights = numpy.asarray(draw, dtype=numpy.float32)
    else: weights = numpy.asarray(es[weightChannel, :, 0] * draw, dtype=numpy.float32)
    tree.grow(goal, gen, pruner, es, index, weights, 0, code)

  # If provided update the trees updated count...
  if len(data)>9: data[9].value += 1

  # Return the modified tree in a tuple with the updated error and updated draw array...
  return (tree, error, draw)
def treeEval(data):
  """Worker for DF.evaluate when multiprocessing - pushes the requested exemplars through a single tree and returns the per-exemplar results list, bumping the shared completion counter when done."""
  tree, gen, es, index, treesDone, code = data
  answers = [None for _ in range(es.exemplars())]
  tree.evaluate(answers, gen, es, index, code)
  treesDone.value += 1
  return answers
def getAnswer(data):
  """Worker for multiprocessing the calls to the Goal's answer_batch method - unpacks the argument tuple (goal, stores, which, es, indices, trees) and forwards everything after the goal straight through."""
  goal = data[0]
  return goal.answer_batch(*data[1:])
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.random
from generators import Generator
from tests import *
from utils.start_cpp import start_cpp
class AxisMedianGen(Generator, AxisSplit):
  """Provides a generator for axis-aligned split planes that split the data set in half, i.e. uses the median. Has random selection of the dimension to split the axis on."""
  def __init__(self, channel, count, ignoreWeights = False):
    """channel is which channel to select the values from, whilst count is how many tests it will return, where each has been constructed around a randomly selected feature from the channel. Setting ignore weights to True means it will not consider the weights when calculating the median."""
    AxisSplit.__init__(self, channel)
    self.count = count # Number of candidate tests yielded per call to itertests.
    self.ignoreWeights = ignoreWeights

  def clone(self):
    """Returns a duplicate of this generator, copying all three construction parameters."""
    return AxisMedianGen(self.channel, self.count, self.ignoreWeights)

  def itertests(self, es, index, weights = None):
    # Yields self.count candidate tests; each is the byte string of an int32 feature index followed by a float32 median threshold - the layout the AxisSplit test consumes.
    for _ in xrange(self.count):
      # Pick a random feature from the channel and fetch its values for the exemplars at this node...
      ind = numpy.random.randint(es.features(self.channel))
      values = es[self.channel, index, ind]
      if weights==None or self.ignoreWeights:
        median = numpy.median(values)
      else:
        # Weighted median via linear interpolation on the cumulative weight.
        # NOTE(review): unlike the C implementation below, values are not sorted by value before the cumulative scan - confirm this is intentional.
        # NOTE(review): if the first entry alone holds over half the total weight then pos is 0 and cw[pos-1]/values[pos-1] wrap to the last element - confirm weights make this unreachable.
        cw = numpy.cumsum(weights[index])
        half = 0.5*cw[-1]
        pos = numpy.searchsorted(cw, half)
        t = (half - cw[pos-1]) / max(cw[pos] - cw[pos-1], 1e-6)
        median = (1.0-t)*values[pos-1] + t*values[pos]
      yield numpy.asarray([ind], dtype=numpy.int32).tostring() + numpy.asarray([median], dtype=numpy.float32).tostring()

  def genCodeC(self, name, exemplar_list):
    """Returns (code, state struct name) - C source implementing this generator for the weave-accelerated tree builder. The emitted functions follow the init/next protocol: _init counts the exemplars and allocates the test buffer and a (value, weight) scratch array; _next picks a random feature, gathers and sorts the (value, weight) pairs, computes the (optionally weighted) median and writes the test (int32 feature + float32 threshold) into state.test, returning false and freeing everything after count tests."""
    code = start_cpp() + """
struct State%(name)s
{
 void * test; // Will be the length of a 32 bit int followed by a float.
 size_t length;
 int countRemain;
 float * temp; // Temporary used for calculating the median.
};

int %(name)s_float_comp(const void * lhs, const void * rhs)
{
 float l = (*(float*)lhs);
 float r = (*(float*)rhs);
 if (l<r) return -1;
 if (l>r) return 1;
 return 0;
}

void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 assert(sizeof(int)==4);

 int count = 0;
 while (test_set)
 {
  count++;
  test_set = test_set->next;
 }

 state.length = sizeof(int) + sizeof(float);
 state.test = malloc(state.length);
 state.countRemain = %(count)i;
 state.temp = (float*)malloc(sizeof(float) * 2 * count);
}

bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 // Check if we are done...
 if (state.countRemain==0)
 {
  free(state.test);
  free(state.temp);
  return false;
 }
 state.countRemain--;

 // Select a random feature...
 %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
 int feat = lrand48() %% %(channelName)s_features(cd);

 // Extract the values...
 int count = 0;
 while (test_set)
 {
  state.temp[count*2] = %(channelName)s_get(cd, test_set->index, feat);
  state.temp[count*2+1] = test_set->weight;
  count++;
  test_set = test_set->next;
 }

 // Sort them...
 qsort(state.temp, count, sizeof(float)*2, %(name)s_float_comp);

 // Pull out the median...
 float median;
 if (%(ignoreWeights)s||(count<2))
 {
  int half = count/2;
  if ((count%%2)==1) median = state.temp[half*2];
  else median = 0.5*(state.temp[(half-1)*2] + state.temp[half*2]);
 }
 else
 {
  // Convert to a cumulative sum...
  for (int i=1;i<count;i++)
  {
   state.temp[i*2+1] += state.temp[i*2-1];
  }
  float half = 0.5*state.temp[(count-1)*2+1];

  // Find the position just after the half way point...
  int low = 0;
  int high = count-1;
  while (low<high)
  {
   int middle = (low+high)/2;
   if (state.temp[middle*2+1]<half)
   {
    if (low==middle) middle++;
    low = middle;
   }
   else
   {
    if (high==middle) middle--;
    high = middle;
   }
  }

  // Use linear interpolation to select a value...
  float t = half - state.temp[low*2-1];
  float div = state.temp[low*2+1] - state.temp[low*2-1];
  if (div<1e-6) div = 1e-6;
  t /= div;
  median = (1.0-t) * state.temp[low*2-2] + t * state.temp[low*2];
 }

 // Store the test and return...
 ((int*)state.test)[0] = feat;
 ((float*)state.test)[1] = median;
 return true;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'count':self.count, 'ignoreWeights':('true' if self.ignoreWeights else 'false')}
    return (code, 'State'+name)
class LinearMedianGen(Generator, LinearSplit):
  """Provides a generator for split planes that uses the median of the features projected perpendicular to the plane direction, such that it splits the data set in half. Randomly selects which dimensions to work with and the orientation of the split plane."""
  def __init__(self, channel, dims, dimCount, dirCount, ignoreWeights = False):
    """channel is which channel to select for and dims how many features (dimensions) to test on for any given test. dimCount is how many sets of dimensions to randomly select to generate tests for, whilst dirCount is how many random dimensions (From a uniform distribution over a hyper-sphere.) to try. It actually generates the two independantly and trys every combination, as generating uniform random directions is somewhat expensive. Setting ignore weights to True means it will not consider the weights when calculating the median."""
    LinearSplit.__init__(self, channel, dims)
    self.dimCount = dimCount # Number of random feature subsets to try.
    self.dirCount = dirCount # Number of random directions tried per feature subset, so dimCount*dirCount tests are yielded in total.
    self.ignoreWeights = ignoreWeights

  def clone(self):
    """Returns a duplicate of this generator, copying all five construction parameters."""
    return LinearMedianGen(self.channel, self.dims, self.dimCount, self.dirCount, self.ignoreWeights)

  def itertests(self, es, index, weights = None):
    # Generate random points on the hyper-sphere (normalised Gaussian draws)...
    dirs = numpy.random.normal(size=(self.dirCount, self.dims))
    dirs /= numpy.sqrt(numpy.square(dirs).sum(axis=1)).reshape((-1,1))

    # Iterate and select a set of dimensions before trying each direction on them...
    for _ in xrange(self.dimCount):
      #dims = numpy.random.choice(es.features(self.channel), size=self.dims, replace=False) For when numpy 1.7.0 is common
      # Draw self.dims distinct feature indices without replacement - the increment keeps earlier picks from colliding with later ones...
      dims = numpy.zeros(self.dims, dtype=numpy.int32)
      feats = es.features(self.channel)
      for i in xrange(self.dims):
        dims[i] = numpy.random.randint(feats-i)
        dims[i] += (dims[:i]<=dims[i]).sum()

      for di in dirs:
        # Project each exemplar onto the direction, then threshold at the (weighted) median of the projections...
        dists = (es[self.channel, index, dims] * di.reshape((1,-1))).sum(axis=1)
        if weights==None or self.ignoreWeights:
          median = numpy.median(dists)
        else:
          # NOTE(review): as in AxisMedianGen, dists are not sorted before the cumulative-weight scan, unlike the C implementation below - confirm intentional; also pos==0 wraps to the last element.
          cw = numpy.cumsum(weights[index])
          half = 0.5*cw[-1]
          pos = numpy.searchsorted(cw,half)
          t = (half - cw[pos-1])/max(cw[pos] - cw[pos-1], 1e-6)
          median = (1.0-t)*dists[pos-1] + t*dists[pos]
        # Test layout consumed by LinearSplit: dims int32 feature indices, dims float32 direction components, one float32 threshold.
        yield numpy.asarray(dims, dtype=numpy.int32).tostring() + numpy.asarray(di, dtype=numpy.float32).tostring() + numpy.asarray([median], dtype=numpy.float32).tostring()

  def genCodeC(self, name, exemplar_list):
    """Returns (code, state struct name) - C source implementing this generator for the weave-accelerated tree builder. _init counts the exemplars, allocates buffers and pre-generates dirCount unit directions via Box-Muller; _next walks the dimCount x dirCount combinations, selecting a fresh feature subset whenever the directions for the current subset are exhausted, projecting the exemplars, sorting the (projection, weight) pairs and writing the test (feature indices, direction, median threshold) into state.test; it returns false and frees everything when all combinations are done. If dims exceeds the available feature count _init cancels all work."""
    code = start_cpp() + """
struct State%(name)s
{
 void * test;
 size_t length;

 int dimRemain;
 int dirRemain;

 float * dirs; // Vectors giving points uniformly distributed on the hyper-sphere.
 int * feat; // The features to index at this moment.
 float * temp; // Temporary used for calculating the median.
};

int %(name)s_float_comp(const void * lhs, const void * rhs)
{
 float l = (*(float*)lhs);
 float r = (*(float*)rhs);
 if (l<r) return -1;
 if (l>r) return 1;
 return 0;
}

void %(name)s_init(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 assert(sizeof(int)==4);

 // Count how many exemplars are in the input...
 int count = 0;
 while (test_set)
 {
  count++;
  test_set = test_set->next;
 }

 // Setup the output...
 state.length = sizeof(int) * %(dims)i + sizeof(float) * (%(dims)i+1);
 state.test = malloc(state.length);

 // Counters so we know when we are done...
 state.dimRemain = %(dimCount)i;
 state.dirRemain = 0;

 // Generate a bunch of random directions...
 state.dirs = (float*)malloc(sizeof(float)*%(dims)i*%(dirCount)i);
 for (int d=0;d<%(dirCount)i;d++)
 {
  float length = 0.0;
  int base = %(dims)i * d;
  for (int f=0; f<%(dims)i; f++)
  {
   double u = 1.0-drand48();
   double v = 1.0-drand48();
   float bg = sqrt(-2.0*log(u)) * cos(2.0*M_PI*v);
   length += bg*bg;
   state.dirs[base+f] = bg;
  }

  length = sqrt(length);
  for (int f=0; f<%(dims)i; f++)
  {
   state.dirs[base+f] /= length;
  }
 }

 // Which features are currently being used...
 state.feat = (int*)malloc(sizeof(int)*%(dims)i);

 // Temporary for median calculation...
 state.temp = (float*)malloc(sizeof(float) * 2 * count);

 // Safety...
 %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
 int featCount = %(channelName)s_features(cd);
 if (%(dims)i>featCount)
 {
  state.dimRemain = 0; // Effectivly cancels work.
 }
}

bool %(name)s_next(State%(name)s & state, PyObject * data, Exemplar * test_set)
{
 // Need access to the data...
 %(channelType)s cd = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);

 // If we are done for this set of features select a new set...
 if (state.dirRemain==0)
 {
  if (state.dimRemain==0)
  {
   free(state.test);
   free(state.dirs);
   free(state.feat);
   free(state.temp);
   return false;
  }
  state.dimRemain--;

  // Select a new set of features...
  int featCount = %(channelName)s_features(cd);
  for (int f=0; f<%(dims)i; f++)
  {
   state.feat[f] = lrand48() %% (featCount-f);
   for (int j=0; j<f; j++)
   {
    if (state.feat[j]<=state.feat[f]) state.feat[f]++;
   }
  }

  // Reset the counter...
  state.dirRemain = %(dirCount)i;
 }
 state.dirRemain--;

 // Extract the values, projecting them using the current direction...
 int count = 0;
 while (test_set)
 {
  float val = 0.0;
  int base = %(dims)i * state.dirRemain;
  for (int f=0; f<%(dims)i; f++)
  {
   val += state.dirs[base+f] * %(channelName)s_get(cd, test_set->index, state.feat[f]);
  }

  state.temp[count*2] = val;
  state.temp[count*2+1] = test_set->weight;
  count++;
  test_set = test_set->next;
 }

 // Sort them...
 qsort(state.temp, count, sizeof(float)*2, %(name)s_float_comp);

 // Pull out the median...
 float median;
 if (%(ignoreWeights)s||(count<2))
 {
  int half = count/2;
  if ((count%%2)==1) median = state.temp[half*2];
  else median = 0.5*(state.temp[(half-1)*2] + state.temp[half*2]);
 }
 else
 {
  // Convert to a cumulative sum...
  for (int i=1;i<count;i++)
  {
   state.temp[i*2+1] += state.temp[i*2-1];
  }
  float half = 0.5*state.temp[(count-1)*2+1];

  // Find the position just after the half way point...
  int low = 0;
  int high = count-1;

  while (low<high)
  {
   int middle = (low+high)/2;
   if (state.temp[middle*2+1]<half)
   {
    if (low==middle) middle++;
    low = middle;
   }
   else
   {
    if (high==middle) middle--;
    high = middle;
   }
  }

  // Use linear interpolation to select a value...
  float t = half - state.temp[low*2-1];
  float div = state.temp[low*2+1] - state.temp[low*2-1];
  if (div<1e-6) div = 1e-6;
  t /= div;

  median = (1.0-t) * state.temp[low*2-2] + t * state.temp[low*2];
 }

 // Store it all in the output...
 for (int i=0; i<%(dims)i;i++)
 {
  ((int*)state.test)[i] = state.feat[i];
 }

 int base = %(dims)i * state.dirRemain;
 for (int i=0; i<%(dims)i;i++)
 {
  ((float*)state.test)[%(dims)i+i] = state.dirs[base+i];
 }

 ((float*)state.test)[2*%(dims)i] = median;

 return true;
}
"""%{'name':name, 'channel':self.channel, 'channelName':exemplar_list[self.channel]['name'], 'channelType':exemplar_list[self.channel]['itype'], 'dims':self.dims, 'dimCount':self.dimCount, 'dirCount':self.dirCount, 'ignoreWeights':('true' if self.ignoreWeights else 'false')}
    return (code, 'State'+name)
| Python |
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import numpy
import numpy.linalg
import numpy.random
from exemplars import MatrixFS
import scipy.weave as weave
from utils.start_cpp import start_cpp
class Goal:
  """Interface that defines the purpose of a decision forest - defines what the tree is optimising, what statistics to store at each node and what is returned to the user as the answer when they provide a novel feature to the forest (i.e. how to combine the statistics)."""

  def clone(self):
    """Returns a deep copy of this object."""
    raise NotImplementedError

  def stats(self, es, index, weights = None):
    """Generates a statistics entity for a node, based on the features that make it to the node. The statistics entity is decided by the task at hand, but must allow the nodes entropy to be calculated, plus a collection of these is used to generate the answer when a feature is given to the decision forest. fs is a feature set, index the indices of the features in fs that have made it to this node. weights is an optional set of weights for the features, weighting how many features they are worth - will be a 1D numpy.float32 array aligned with the feature set, and can contain fractional weights."""
    raise NotImplementedError

  def updateStats(self, stats, es, index, weights = None):
    """Given a stats entity, as generated by the stats method, this returns a copy of that stats entity that has had additional exemplars factored in, specifically those passed in. This allows a tree to be updated with further trainning examples (Or, at least its stats to be updated - its structure is set in stone once built.) Needed for incrimental learning."""
    raise NotImplementedError

  def entropy(self, stats):
    """Given a statistics entity this returns the associated entropy - this is used to choose which test is best."""
    raise NotImplementedError

  def postTreeGrow(self, root, gen):
    """After a tree is initially grown (At which point its shape is locked, but incrimental learning could still be applied.) this method is given the root node of the tree, and can do anything it likes to it - a post processing step, in case the stats objects need some extra cleverness. Most Goal-s do not need to impliment this. Also provided the generator for the tests in the tree."""
    pass

  def answer_types(self):
    """When classifying a new feature an answer is to be provided, of which several possibilities exist. This returns a dictionary of those possibilities (key==name, value=human readable description of what it is.), from which the user can select. By convention 'best' must always exist, as the best guess that the algorithm can give (A point estimate of the answer the user is after.). If a probability distribution over 'best' can be provided then that should be avaliable as 'prob' (It is highly recomended that this be provided.)."""
    return {'best':'Point estimate of the best guess at an answer, in the same form as provided to the trainning stage.'}

  def answer(self, stats_list, which, es, index, trees):
    """Given a feature then using a forest a list of statistics entitys can be obtained from the leaf nodes that the feature ends up in, one for each tree (Could be as low as just one entity.). This converts that statistics entity list into an answer, to be passed to the user, possibly using the es with the index of the one entry that the stats list is for as well. As multiple answer types exist (As provided by the answer_types method.) you provide the one(s) you want to the which variable - if which is a string then that answer type is returned, if it is a list of strings then a tuple aligned with it is returned, containing multiple answers. If multiple types are needed then returning a list should hopefuly be optimised by this method to avoid duplicate calculation. Also requires the trees themselves, as a list aligned with stats_list."""
    raise NotImplementedError

  def answer_batch(self, stats_lists, which, es, indices, trees):
    """A batch version of answer, that does multiple stat lists at once. The stats_list now consists of a list of lists, where the outer list matches tne entrys in index (A numpy array), and the inner list are the samples, aligned with the trees list. es is the exemplar object that matches up with index, and which gives the output(s) to provide. Return value is a list, matching index, that contains the answer for each, which can be a tuple if which is alist/tuple. A default implimentation is provided."""
    # Comprehension rather than map(lambda (i, sl): ...) - tuple-unpacking lambdas
    # are Python 2 only syntax; this form behaves identically on Python 2 and 3.
    return [self.answer(stats_list, which, es, indices[i], trees) for i, stats_list in enumerate(stats_lists)]

  def summary(self, es, index, weights = None):
    """Once a tree has been grown a testing set (The 'out-of-bag' set) is typically run through to find out how good it is. This consists of two steps, the first of which is to generate a summary of the oob set that made it to each leaf. This generates the summary, and must be done such that the next step - the use of a stats and summary entity to infer an error metric with a weight for averaging the error metrics from all leafs, can be performed. For incrimental learning it is also required to be able to add new exemplars at a later time."""
    raise NotImplementedError

  def updateSummary(self, summary, es, index, weights = None):
    """For incrimental learning the summaries need to be updated with further testing examples - this does that. Given a summary and some exemplars it returns a copy of the summary updated with the new exemplars."""
    raise NotImplementedError

  def error(self, stats, summary):
    """Given a stats entity and a summary entity (i.e. the details of the testing and trainning sets that have reached a leaf) this returns the error of the testing set versus the model learnt from the trainning set. The actual return is a pair - (error, weight), so that the errors from all the leafs can be combined in a weighted average. The error metric is arbitary, but the probability of 'being wrong' is a good choice. An alternate mode exists, where weight is set to None - in this case no averaging occurs and the results from all nodes are just summed together."""
    raise NotImplementedError

  def codeC(self, name, escl):
    """Returns a dictionary of strings containing C code, that impliment the Goal's methods in C - name is a prefix on the names used, escl the result of listCodeC on the exemplar set from which it will get its data. The contents of its return value must contain some of: `{'stats': 'void <name>_stats(PyObject * data, Exemplar * index, void *& out, size_t & outLen)' - data is the list of channels for the exemplar object, index the exemplars to use. The stats object is stuck into out, and the size updated accordingly. If the provided out object is too small it will be free-ed and then a large enough buffer malloc-ed; null is handled correctly if outLen is 0., 'updateStats': 'void <name>_updateStats(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)' - Same as stats, except the inout data arrives already containing a stats object, which is to be updated with the provided exemplars., 'entropy':'float <name>_entropy(void * stats, size_t statsLen) - Given a stats object returns its entropy.', 'summary': 'void <name>_summary(PyObject * data, Exemplar * index, void *& out, size_t & outLen)' - Basically the same as stats, except this time it is using the exemplars to calculate a summary. Interface works in the same way., 'updateSummary': 'void <name>_updateSummary(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)' - Given a summary object, using the inout variables it updates it with the provided exemplars., 'error': 'void <name>_error(void * stats, size_t statsLen, void * summary, size_t summaryLen, float & error, float & weight)' - Given two buffers, representing the stats and the summary, this calculates the error, which is put into the reference error. This should be done incrimentally, such that errors from all nodes in a tree can be merged - error will be initialised at 0, and addtionally weight is provided which can be used as it wishes (Incremental mean is typical.), also initialised as 0.}`. 
    Optional - if it throws the NotImplementedError (The default) everything will be done in python, if some C code is dependent on a missing C method it will also be done in python. The code can be dependent on the associated exempler code where applicable."""
    raise NotImplementedError

  def key(self):
    """Provides a unique string that can be used to hash the results of codeC, to avoid repeated generation. Must be implimented if codeC is implimented."""
    raise NotImplementedError
class Classification(Goal):
"""The standard goal of a decision forest - classification. When trainning expects the existence of a discrete channel containing a single feature for each exemplar, the index of which is provided. Each discrete feature indicates a different trainning class, and they should be densly packed, starting from 0 inclusive, i.e. belonging to the set {0, ..., # of classes-1}. Number of classes is typically provided, though None can be provided instead in which case it will automatically resize data structures as needed to make them larger as more classes (Still densly packed.) are seen. A side effect of this mode is when it returns arrays indexed by class the size will be data driven, and from the view of the user effectivly arbitrary - user code will have to handle this."""
def __init__(self, classCount, channel):
    """You provide firstly how many classes exist (Or None if unknown.), and secondly the index of the channel that contains the ground truth for the exemplars. This channel must contain a single integer value, ranging from 0 inclusive to the number of classes, exclusive."""
    # Number of classes, or None to let the histograms grow on demand as new
    # (densely packed) class labels are encountered during training.
    self.classCount = classCount
    # Index of the exemplar channel that holds each exemplar's class label.
    self.channel = channel
def clone(self):
return Classification(self.classCount, self.channel)
def stats(self, es, index, weights = None):
if len(index)!=0:
ret = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights!=None else None)
ret = numpy.asarray(ret, dtype=numpy.float32)
else:
ret = numpy.zeros(self.classCount if self.classCount!=None else 1, dtype=numpy.float32)
if self.classCount!=None and ret.shape[0]<self.classCount: ret = numpy.concatenate((ret, numpy.zeros(self.classCount-ret.shape[0], dtype=numpy.float32))) # When numpy 1.6.0 becomes common this line can be flipped to a minlength term in the bincount call.
return ret.tostring()
def updateStats(self, stats, es, index, weights = None):
ret = numpy.fromstring(stats, dtype=numpy.float32)
toAdd = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights!=None else None)
if ret.shape[0]<toAdd.shape[0]:
ret = numpy.append(ret, numpy.zeros(toAdd.shape[0]-ret.shape[0], dtype=numpy.float32))
ret[:toAdd.shape[0]] += toAdd
return ret.tostring()
def entropy(self, stats):
dist = numpy.fromstring(stats, dtype=numpy.float32)
dist = dist[dist>1e-6] / dist.sum()
return -(dist*numpy.log(dist)).sum() # At the time of coding scipy.stats.distributions.entropy is broken-ish <rolls eyes> (Gives right answer at the expense of filling your screen with warnings about zeros.).
def answer_types(self):
return {'best':'An integer indexing the class this feature is most likelly to belong to given the model.',
'prob':'A categorical distribution over class membership, represented as a numpy array of float32 type. Gives the probability of it belonging to each class, P(class|data).',
'prob_samples':'The prob result is obtained by averaging a set of probability distributions, one from each tree - this outputs that list of distributions instead, so its varaibility can be accessed.',
'gen':'The default probability returned by the system is discriminative - this instead returns a generative result, P(data|class). A numpy array of float32 type containing the data probability for each class - will not sum to 1.',
'gen_list':'Is to gen as prob_samples is to prob. Gives a list of probabilities representing P(class|data), so the consistancy can be accessed.'}
def answer(self, stats_list, which, es, index, trees):
# Convert to a list, and process like that, before correcting for the return - simpler...
single = isinstance(which, str)
if single: which = [which]
# Calulate the probability distribution over class membership, both discriminativly and generativly...
needGen = ('gen' in which) or ('gen_list' in which)
prob_list = []
if needGen: gen_list = []
cCount = self.classCount if self.classCount!=None else 1
prob = numpy.zeros(cCount, dtype=numpy.float32)
if needGen: gen = numpy.zeros(cCount, dtype=numpy.float32)
for stats, tree in zip(stats_list, trees):
cat = numpy.fromstring(stats, dtype=numpy.float32)
dist = cat / cat.sum()
prob_list.append(dist)
if dist.shape[0]>prob.shape[0]:
prob = numpy.append(prob, numpy.zeros(dist.shape[0]-prob.shape[0], dtype=numpy.float32))
prob[:dist.shape[0]] += dist
if needGen:
div = numpy.fromstring(tree.stats, dtype=numpy.float32)
use = numpy.where(div[:cat.shape[0]]>0.0)
g = numpy.zeros(cat.shape[0], dtype=numpy.float32)
g[use] = cat[use] / div[use]
gen_list.append(g)
if g.shape[0]>gen.shape[0]:
gen = numpy.append(gen, numpy.zeros(g.shape[0]-gen.shape[0], dtype=numpy.float32))
gen[:g.shape[0]] += g
prob /= prob.sum()
if needGen: gen /= len(gen_list)
# Prepare the return...
def make_answer(t):
if t=='prob': return prob
elif t=='best': return prob.argmax()
elif t=='prob_samples': return prob_list
elif t=='gen': return gen
elif t=='gen_list': return gen_list
ret = map(make_answer, which)
# Make sure the correct thing is returned...
if single: return ret[0]
else: return tuple(ret)
def answer_batch(self, stats_lists, which, es, indices, trees):
    """Batch version of answer - processes many exemplars at once. When scipy.weave is available the whole loop runs as inline C for speed; otherwise it falls back to calling answer per exemplar. NOTE(review): unlike the Python path, the C 'gen'/'gen_list' branch divides by the root histogram without guarding against zero entries - presumably the root histogram is strictly positive for every class a leaf has seen; confirm before relying on it."""
    # As this version might be dealing with lots of data we include a scipy.weave based optimisation...
    if weave!=None:
        code = start_cpp() + """
        // Find out what we need to calculate...
        bool doProbList = false;
        bool doGen = false;
        bool doGenList = false;
        
        int wLength = PyList_Size(which);
        int * wCodes = (int*)malloc(sizeof(int) * wLength);
        for (int i=0; i<wLength; i++)
        {
         char * s = PyString_AsString(PyList_GetItem(which, i));
         wCodes[i] = 0; // prob
         if (strcmp(s,"best")==0) wCodes[i] = 1;
         if (strcmp(s,"prob_samples")==0) {doProbList = true; wCodes[i] = 2;}
         if (strcmp(s,"gen")==0) {doGen = true; wCodes[i] = 3;}
         if (strcmp(s,"gen_list")==0) {doGenList = true; wCodes[i] = 4;}
        }
        
        // Buffers that are needed...
        float * probBuf = 0;
        float * genBuf = 0;
        
        // Prep the return value...
        int item_count = PyList_Size(stats_lists);
        PyObject * ret = PyList_New(item_count);
        
        // Loop through and do each exemplar in turn, adding its result to the return list...
        for (int i=0; i<item_count; i++)
        {
         // Get the list of stats objects...
         PyObject * stats = PyList_GetItem(stats_lists, i);
         int statCount = PyList_Size(stats);
         
         // Iterate the list and calculate the size of the largest element...
         npy_intp vecLength = 0;
         for (int j=0; j<statCount; j++)
         {
          PyObject * s = PyList_GetItem(stats, j);
          int len = PyString_Size(s) / sizeof(float);
          if (len>vecLength) vecLength = len;
         }
         
         // Resize the buffers accordingly, zero them...
         probBuf = (float*)realloc(probBuf, sizeof(float)*vecLength);
         for (int j=0; j<vecLength; j++) probBuf[j] = 0.0;
         
         if (doGen)
         {
          genBuf = (float*)realloc(genBuf, sizeof(float)*vecLength);
          for (int j=0; j<vecLength; j++) genBuf[j] = 0.0;
         }
         
         // Iterate the list and generate the various outputs we need (There are potentially 4 of them.)...
         PyObject * probList = 0;
         PyObject * genList = 0;
         if (doProbList) probList = PyList_New(statCount);
         if (doGenList) genList = PyList_New(statCount);
         
         for (int j=0; j<statCount; j++)
         {
          PyObject * s = PyList_GetItem(stats, j);
          int len = PyString_Size(s) / sizeof(float);
          float * dist = (float*)(void*)PyString_AsString(s);
          
          float sum = 0.0;
          for (int k=0; k<len; k++) sum += dist[k];
          if (sum<1e-6) sum = 1e-6; // For safety against divide by zero.
          
          for (int k=0; k<len; k++) probBuf[k] += dist[k] / sum;
          
          if (doProbList)
          {
           PyObject * arr = PyArray_ZEROS(1, &vecLength, NPY_FLOAT, 0);
           for (int k=0; k<len; k++) *(float*)PyArray_GETPTR1(arr, k) = dist[k] / sum;
           PyList_SetItem(probList, j, arr);
          }
          
          if ((doGen)||(doGenList))
          {
           PyObject * t = PyList_GetItem(root_stats, j);
           float * div = (float*)(void*)PyString_AsString(t);
           
           if (doGen)
           {
            for (int k=0; k<len; k++) genBuf[k] += dist[k] / div[k];
           }
           
           if (doGenList)
           {
            PyObject * arr = PyArray_ZEROS(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<len; k++) *(float*)PyArray_GETPTR1(arr, k) = dist[k] / div[k];
            PyList_SetItem(genList, j, arr);
           }
          }
         }
         
         // Normalise the buffers...
         {
          float sum = 0.0;
          for (int j=0; j<vecLength; j++) sum += probBuf[j];
          for (int j=0; j<vecLength; j++) probBuf[j] /= sum;
         }
         
         if (doGen)
         {
          for (int j=0; j<vecLength; j++) genBuf[j] /= statCount;
         }
         
         // Iterate the proxy for which, and store the required items in the correct positions...
         PyObject * ans = PyTuple_New(wLength);
         
         for (int j=0; j<wLength; j++)
         {
          PyObject * obj = 0;
          switch(wCodes[j])
          {
           case 0: // prob
           {
            obj = PyArray_EMPTY(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<vecLength; k++) *(float*)PyArray_GETPTR1(obj, k) = probBuf[k];
           }
           break;
           case 1: // best
           {
            int best = 0;
            for (int k=1; k<vecLength; k++)
            {
             if (probBuf[k]>probBuf[best]) best = k;
            }
            obj = PyInt_FromLong(best);
           }
           break;
           case 2: // prob_samples
           {
            obj = probList;
            Py_INCREF(obj);
           }
           break;
           case 3: // gen
           {
            obj = PyArray_EMPTY(1, &vecLength, NPY_FLOAT, 0);
            for (int k=0; k<vecLength; k++) *(float*)PyArray_GETPTR1(obj, k) = genBuf[k];
           }
           break;
           case 4: // gen_list
           {
            obj = genList;
            Py_INCREF(obj);
           }
           break;
          }
          
          PyTuple_SetItem(ans, j, obj);
         }
         
         // Store the answer tuple for this exemplar...
         PyList_SetItem(ret, i, ans);
         
         // Some cleaning up...
         Py_XDECREF(genList);
         Py_XDECREF(probList);
        }
        
        // Clean up...
        free(probBuf);
        free(genBuf);
        free(wCodes);
        
        // Return the list of results...
        return_val = ret;
        Py_XDECREF(ret);
        """
        
        # The C code divides each leaf histogram by its tree's root histogram for
        # the generative modes, so pass the root stats through aligned with trees.
        root_stats = map(lambda t: t.stats, trees)
        
        # The C path always works on a list of modes and returns tuples; unwrap
        # afterwards when the caller asked for a single mode string.
        single = isinstance(which, str)
        if single: which = [which]
        
        ret = weave.inline(code, ['stats_lists', 'which', 'root_stats'])
        
        if single: ret = map(lambda r: r[0], ret)
        return ret
    else:
        # Pure Python fallback - one answer() call per exemplar.
        return map(lambda (i, stats_list): self.answer(stats_list, which, es, indices[i], trees), enumerate(stats_lists))
def summary(self, es, index, weights = None):
ret = numpy.bincount(es[self.channel, index, 0], weights=weights[index] if weights!=None else None)
ret = numpy.asarray(ret, dtype=numpy.float32)
if self.classCount!=None and ret.shape[0]<self.classCount: ret = numpy.append(ret, numpy.zeros(self.classCount-ret.shape[0], dtype=numpy.float32)) # When numpy 1.6.0 becomes common this line can be flipped to a minlength term in the bincount call.
return ret.tostring()
def updateSummary(self, summary, es, index, weights = None):
ret = numpy.fromstring(summary, dtype=numpy.float32)
toAdd = numpy.bincount(es[self.channel, index,0], weights=weights[index] if weights!=None else None)
if ret.shape[0]<toAdd.shape[0]:
ret = numpy.append(ret, numpy.zeros(toAdd.shape[0]-ret.shape[0], dtype=numpy.float32))
ret[:toAdd.shape[0]] += toAdd
return ret.tostring()
def error(self, stats, summary):
# Treats the histogram of trainning samples as a probability distribution from which the answer is drawn from - the error is then the average probability of getting each sample in the sample wrong, and the weight the number of exemplars that went into the sample...
## Fetch the distribution/counts...
dist = numpy.fromstring(stats, dtype=numpy.float32)
dist /= dist.sum()
test = numpy.fromstring(summary, dtype=numpy.float32)
count = test.sum()
if dist.shape[0] < test.shape[0]:
dist = numpy.append(dist, numpy.zeros(test.shape[0]-dist.shape[0], dtype=numpy.float32))
# Calculate and average the probabilities...
avgError = ((1.0-dist[:test.shape[0]])*test).sum() / count
return (avgError, count)
def codeC(self, name, escl):
    """Generates the C implementations of this goal's methods for the weave fast path - see Goal.codeC for the contract. Each template mirrors the corresponding Python method: stats/summary build a weighted class histogram into a realloc-grown buffer, updateStats/updateSummary add exemplars into an existing buffer, entropy computes the Shannon entropy of a histogram, and error accumulates an incremental weighted mean of the misclassification probability."""
    # Histogram construction - C analogue of the stats method.
    cStats = start_cpp() + """
    void %(name)s_stats(PyObject * data, Exemplar * index, void *& out, size_t & outLen)
    {
     // Make sure the output it at least as large as classCount, and zero it out...
     if (outLen<(sizeof(float)*%(classCount)i))
     {
      outLen = sizeof(float) * %(classCount)i;
      out = realloc(out, outLen);
     }
     
     for (int i=0; i<(outLen/sizeof(float)); i++)
     {
      ((float*)out)[i] = 0.0;
     }
     
     // Iterate and play weighted histogram, growing out as needed...
     %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int maxSeen = %(classCount)i;
     while (index)
     {
      int cls = %(channelName)s_get(cData, index->index, 0);
      int cap = cls+1;
      if (cap>maxSeen) maxSeen = cap;
      if ((cap*sizeof(float))>outLen)
      {
       int zero_start = outLen / sizeof(float);
       outLen = cap*sizeof(float);
       out = realloc(out, outLen);
       
       for (int i=zero_start; i<cap; i++)
       {
        ((float*)out)[i] = 0.0;
       }
      }
      
      ((float*)out)[cls] += index->weight;
      
      index = index->next;
     }
     
     // Correct the output size if needed (It could be too large)...
     outLen = maxSeen * sizeof(float);
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype'], 'classCount':self.classCount if self.classCount!=None else 1}
    
    # Incremental histogram update - C analogue of updateStats.
    cUpdateStats = start_cpp() + """
    void %(name)s_updateStats(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)
    {
     // Iterate and play weighted histogram, growing out as needed...
     %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int maxSeen = inoutLen / sizeof(float);
     while (index)
     {
      int cls = %(channelName)s_get(cData, index->index, 0);
      int cap = cls+1;
      if (cap>maxSeen) maxSeen = cap;
      if ((cap*sizeof(float))>inoutLen)
      {
       int zero_start = inoutLen / sizeof(float);
       inoutLen = cap*sizeof(float);
       inout = realloc(inout, inoutLen);
       
       for (int i=zero_start; i<cap; i++)
       {
        ((float*)inout)[i] = 0.0;
       }
      }
      
      ((float*)inout)[cls] += index->weight;
      
      index = index->next;
     }
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype']}
    
    # Shannon entropy of a histogram buffer - C analogue of entropy.
    cEntropy = start_cpp() + """
    float %(name)s_entropy(void * stats, size_t statsLen)
    {
     float sum = 0.0;
     int length = statsLen>>2;
     for (int i=0; i<length; i++)
     {
      sum += ((float*)stats)[i];
     }
     
     float ret = 0.0;
     for (int i=0; i<length; i++)
     {
      float val = ((float*)stats)[i];
      if (val>1e-6)
      {
       val /= sum;
       ret -= val * log(val);
      }
     }
     
     return ret;
    }
    """%{'name':name}
    
    # Out-of-bag histogram - C analogue of summary (same buffer layout as stats).
    cSummary = start_cpp() + """
    void %(name)s_summary(PyObject * data, Exemplar * index, void *& out, size_t & outLen)
    {
     // Make sure the output it at least as large as classCount, and zero it out...
     if (outLen<(sizeof(float)*%(classCount)i))
     {
      outLen = sizeof(float) * %(classCount)i;
      out = realloc(out, outLen);
     }
     
     for (int i=0; i<(outLen/sizeof(float)); i++)
     {
      ((float*)out)[i] = 0.0;
     }
     
     // Iterate and play weighted histogram, growing out as needed...
     %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int maxSeen = %(classCount)i;
     while (index)
     {
      int cls = %(channelName)s_get(cData, index->index, 0);
      int cap = cls+1;
      if (cap>maxSeen) maxSeen = cap;
      if ((cap*sizeof(float))>outLen)
      {
       int zero_start = outLen / sizeof(float);
       outLen = cap*sizeof(float);
       out = realloc(out, outLen);
       
       for (int i=zero_start; i<cap; i++)
       {
        ((float*)out)[i] = 0.0;
       }
      }
      
      ((float*)out)[cls] += index->weight;
      
      index = index->next;
     }
     
     // Correct the output size if needed (It could be too large)...
     outLen = maxSeen * sizeof(float);
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype'], 'classCount':self.classCount if self.classCount!=None else 1}
    
    # Incremental out-of-bag update - C analogue of updateSummary.
    cUpdateSummary = start_cpp() + """
    void %(name)s_updateSummary(PyObject * data, Exemplar * index, void *& inout, size_t & inoutLen)
    {
     // Iterate and play weighted histogram, growing out as needed...
     %(channelType)s cData = (%(channelType)s)PyTuple_GetItem(data, %(channel)i);
     int maxSeen = inoutLen / sizeof(float);
     while (index)
     {
      int cls = %(channelName)s_get(cData, index->index, 0);
      int cap = cls+1;
      if (cap>maxSeen) maxSeen = cap;
      if ((cap*sizeof(float))>inoutLen)
      {
       int zero_start = inoutLen / sizeof(float);
       inoutLen = cap*sizeof(float);
       inout = realloc(inout, inoutLen);
       
       for (int i=zero_start; i<cap; i++)
       {
        ((float*)inout)[i] = 0.0;
       }
      }
      
      ((float*)inout)[cls] += index->weight;
      
      index = index->next;
     }
    }
    """%{'name':name, 'channel':self.channel, 'channelName':escl[self.channel]['name'], 'channelType':escl[self.channel]['itype']}
    
    # Incremental-mean misclassification error - C analogue of error.
    cError = start_cpp() + """
    void %(name)s_error(void * stats, size_t statsLen, void * summary, size_t summaryLen, float & error, float & weight)
    {
     // Sum the stuff in stats...
     int statsSize = statsLen / sizeof(float);
     float statsSum = 0.0;
     for (int i=0; i<statsSize; i++) statsSum += ((float*)stats)[i];
    
     // Go through and factor in each class from the summary in turn, using an incrimental mean...
     int summarySize = summaryLen / sizeof(float);
     for (int c=0; c<summarySize; c++)
     {
      float avgErr = (c<statsSize)?(1.0 - ((float*)stats)[c]/statsSum):1.0;
      float w = ((float*)summary)[c];
      
      weight += w;
      if (weight>1e-3)
      {
       error += (avgErr-error) * w/weight;
      }
     }
    }
    """%{'name':name}
    
    return {'stats':cStats, 'updateStats':cUpdateStats, 'entropy':cEntropy, 'summary':cSummary, 'updateSummary':cUpdateSummary, 'error':cError}
def key(self):
return ('Classification|%i'%self.channel) + ('' if self.classCount==None else (':%i'%self.classCount))
class DensityGaussian(Goal):
"""Provides the ability to construct a density estimate, using Gaussian distributions to represent the density at each node in the tree. A rather strange thing to be doing with a decision forest, and I am a little suspicious of it, but it does give usable results, at least for low enough dimensionalities where everything remains sane. Due to its nature it can be very memory consuming if your doing incrmental learning - the summary has to store all the provided samples. Requires a channel to contain all the features that are fed into the density estimate (It is to this that a Gaussian is fitted.), which is always in channel 0. Other features can not exist, so typically input data would only have 1 channel. Because the divisions between nodes are sharp (This is a mixture model only between trees, not between leaf nodes within each tree.) the normalisation constant for each Gaussian has to be adjusted to take this into account. This is acheived by sampling - sending samples from the Gaussian down the tree and counting what percentage make the node. Note that when calculating the Gaussian at each node a prior is used, to avoid degeneracies, with a default weight of 1, so if weights are provided they should be scaled accordingly. Using a decision tree for density estimation is a bit hit and miss based on my experiance - you need to pay very close attention to tuning the min train parameter of the pruner, as information gain is a terrible stopping metric in this case. You also need a lot of trees to get something smooth out, which means it is quite computationally expensive."""
def __init__(self, feats, samples = 1024, prior_weight = 1.0):
    """feats is the number of features to be found in channel 0 of the data, which are uses to fit a Gaussian at each node. samples is how many samples per node it sends down the tree, to weight that node according to the samples that can actually reach it. prior_weight is the weight assigned to a prior used on each node to avoid degeneracies - it defaults to 1, with 0 removing it entirly (Not recomended.)."""
    # Dimensionality of the Gaussian fitted at each node.
    self.feats = feats
    # Number of Monte Carlo samples used per node by postTreeGrow.
    self.samples = samples
    # Weight of the symmetric prior mixed into each node's covariance.
    self.prior_weight = prior_weight
    # Scratch space (2 rows of feats floats) used by the weave answer_batch path.
    self.temp = numpy.empty((2, feats), dtype=numpy.float32)
def clone(self):
return DensityGaussian(self.feats, self.samples, self.prior_weight)
def stats(self, es, index, weights = None):
# First calculate the weighted mean of the samples we have...
data = es[0, index, :].copy()
w = weights[index] if weights!=None else None
weight = w.sum() if w!=None else float(data.shape[0])
mean = numpy.asarray(numpy.average(data, axis=0, weights=w), dtype=numpy.float32)
# Offset the data matrix by the mean...
data -= mean.reshape((1,-1))
# Calculate the size of a symmetric Gaussian, to be used as a prior to avoid degenerate situations...
sym_var = numpy.square(data).mean()
if sym_var<1e-3: sym_var = 1e-3 # For safety.
# Now calculate the covariance for a general Gaussian fitted to the data, with a symmetric prior with a weight of one...
covar = numpy.identity(self.feats, dtype=numpy.float32)
covar *= sym_var * self.prior_weight
if weights!=None:
covar += numpy.dot(data.T, data * w.reshape((-1,1)))
pw = self.prior_weight + weight
covar *= pw / (pw**2.0 - self.prior_weight**2.0 - numpy.square(w).sum())
else:
covar += numpy.dot(data.T, data)
covar /= self.prior_weight + weight
prec = numpy.linalg.inv(covar)
# Encode what we have in the required format and return it...
params = numpy.zeros(3, dtype=numpy.float32)
params[0] = self.prior_weight + weight
return params.tostring() + mean.tostring() + prec.tostring()
def updateStats(self, stats, es, index, weights = None):
# Extract the previous state...
params = numpy.fromstring(stats[:12], dtype=numpy.float32)
precStart = 12 + 4*self.feats
mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
covar = numpy.linalg.inv(prec)
# Calculate the weighted mean of the new samples...
exData = es[0, index, :].copy()
exMean = numpy.empty(self.feats, dtype=numpy.float32)
if weights==None:
exMean[:] = exData.mean(axis=0)
weight = float(exData.shape[0])
else:
w = weights[index]
weight = w.sum()
exMean[:] = (exData * w.reshape((-1,1))).sum(axis=0)
exMean /= weight
# Offset the data matrix by said mean...
exData -= exMean.reshape((1,-1))
# Calculate the covariance matrix...
exCovar = numpy.zeros((self.feats, self.feats), dtype=numpy.float32)
if weights!=None: exData[:,:] *= w.reshape((-1,1))
exCovar += numpy.dot(exData.T, exData)
exCovar /= weight
# Update the previous model with the new samples...
newWeight = params[0] + weight
newMean = (params[0]*mean + weight*exMean) / newWeight
meanDiff = exMean - mean
newCovar = covar + exCovar + (params[0]*weight/newWeight) * numpy.outer(meanDiff)
newPrec = numpy.linalg.inv(newCovar)
# Update the log of the normalising constant...
params[2] = numpy.log(params[1]) + 0.5*numpy.linalg.slogdet(prec)[1] - 0.5*self.feats*numpy.log(2.0*numpy.pi)
# Encode what we have in the required format and return it...
return params.tostring() + newMean.tostring() + newPrec.tostring()
def entropy(self, stats):
# Extract precision...
precStart = 12 + 4*self.feats
prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
# Calculate and return the distributions entropy...
return 0.5 * (numpy.log(2.0*numpy.pi*numpy.e) * self.feats - numpy.linalg.slogdet(prec)[1])
def postTreeGrow(self, root, gen):
    """Post-processing pass over a freshly grown tree: for each node it estimates, by Monte Carlo, what fraction of that node's fitted Gaussian survives the decision boundaries above it, and uses that plus the node's share of the training weight to fill in params[1] (truncation weight) and params[2] (log normalising constant) of the node's serialised stats."""
    # Count the total weight in the system, to weight the nodes by the percentage of trainning samples they see...
    def sumWeight(node):
        # params[0] includes the prior weight, so subtract it to get the real
        # exemplar weight at this node before recursing into the children.
        w = numpy.fromstring(node.stats[:4], dtype=numpy.float32)[0] - self.prior_weight
        if node.test!=None:
            w += sumWeight(node.true)
            w += sumWeight(node.false)
        return w
    totalWeight = sumWeight(root)

    # Define a recursive function to analyse each node...
    def weightNode(node, parents):
        # Decode the samples stats...
        params = numpy.fromstring(node.stats[:12], dtype=numpy.float32)
        precStart = 12 + 4*self.feats
        mean = numpy.fromstring(node.stats[12:precStart], dtype=numpy.float32)
        prec = numpy.fromstring(node.stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
        covar = numpy.linalg.inv(prec)

        # Send samples down the chain and see how many arrive at the node, to measure how much it has been truncated by the decision boundaries...
        ## Draw the set of samples to send, and stick them into an exemplar set...
        samples = numpy.random.multivariate_normal(mean, covar, (self.samples,))
        samples = numpy.asarray(samples, dtype=numpy.float32)
        es = MatrixFS(samples)
        index = numpy.arange(self.samples)

        ## Go through the parents, culling samples at each step...
        # parents is a list of (ancestor node, branch taken) pairs; only samples
        # that take the same branch at every ancestor reach this node.
        for par,path in parents:
            res = gen.do(par.test, es, index)
            index = index[res==path]
            if index.shape[0]==0: break

        ## Count the survivors and factor in the weighting to get the tree-shape part of the normalising constant...
        # max(...,1.0) guards the divide when no samples survive the cull.
        tsWeight = (params[0] - self.prior_weight) / totalWeight
        tsWeight *= float(self.samples) / max(index.shape[0], 1.0)

        # Calculate the normalising constant...
        logNorm = numpy.log(tsWeight) + 0.5*numpy.linalg.slogdet(prec)[1] - 0.5*self.feats*numpy.log(2.0*numpy.pi)

        # Rencode the nodes stats with the updates...
        params[1] = tsWeight
        params[2] = logNorm
        node.stats = params.tostring() + node.stats[12:]

        # If it has children recurse to them...
        if node.test!=None:
            weightNode(node.true, parents + [(node,True)])
            weightNode(node.false, parents + [(node,False)])

    # Do each node recursivly, starting from the root...
    weightNode(root, [])
def answer_types(self):
return {'best':'Point estimate of the probability of the input point'}
def answer(self, stats_list, which, es, index, trees):
# Process each stat in turn, and calculate the average of the samples probability from each...
p = 0.0
for stats in stats_list:
# Extract the details from the stat object...
params = numpy.fromstring(stats[:12], dtype=numpy.float32)
precStart = 12 + 4*self.feats
mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
# Calculate the probability and add it in...
delta = es[0,index,:] - mean
p += numpy.exp(params[2] - 0.5 * numpy.dot(delta, numpy.dot(prec, delta)))
if isinstance(which, str):
return p / len(stats_list)
else:
return tuple([p / len(stats_list)]*len(which))
def answer_batch(self, stats_lists, which, es, indices, trees):
    """Batch version of answer - evaluates the averaged Gaussian density for each (stats list, exemplar index) pair using inline C when scipy.weave is available, falling back on repeated calls to the pure Python answer method otherwise. Returns a list of probabilities, or a list of tuples when which is not a string."""
    if weave!=None:
        # Fetch the C accessor for channel 0 of the exemplar set - provides the
        # get function, the input object and the C type name used below...
        esAccess = es.codeC(0, 'es')
        code = start_cpp() + """
// Prepare the access to the es...
%(itype)s es = (%(itype)s)PyList_GetItem(esData, 0);
// Iterate and process each stat list in turn...
int item_count = PyList_Size(stats_lists);
PyObject * ret = PyList_New(item_count);
for (int i=0; i<item_count; i++)
{
 // Get the list of stats objects...
  PyObject * stats = PyList_GetItem(stats_lists, i);
  int statCount = PyList_Size(stats);
 // Iterate the list and handle each element in turn...
  float p = 0.0;
  for (int j=0; j<statCount; j++)
  {
   // Extract the information regarding the specific stat object...
    float * params = (float*)(void*)PyString_AsString(PyList_GetItem(stats, j));
    float * mean = params + 3;
    float * prec = mean + feats;
   // Put the delta into the temporary storage...
    for (int k=0; k<feats; k++)
    {
     TEMP2(0, k) = es_get(es, indices[i], k) - mean[k];
     TEMP2(1, k) = 0.0; // Preparation for the next bit.
    }
   // Calculate the multiplication with the precision...
    for (int k=0; k<feats; k++)
    {
     for (int l=0; l<feats; l++)
     {
      TEMP2(1, k) += prec[feats*k+l] * TEMP2(0, l);
     }
    }
    float d = 0.0;
    for (int k=0; k<feats; k++)
    {
     d += TEMP2(0, k) * TEMP2(1, k);
    }
   // Do the final parts required...
    p += exp(params[2] - 0.5 * d);
  }
  p /= statCount;
 // Store the calculated probability...
  PyObject * ans = PyFloat_FromDouble(p);
  PyList_SetItem(ret, i, ans);
}
// Return...
return_val = ret;
Py_XDECREF(ret);
"""%{'itype':esAccess['itype']}
        # Locals handed into the inline C - temp provides the TEMP2 scratch array...
        feats = self.feats
        esData = [esAccess['input']]
        temp = self.temp
        ret = weave.inline(code, ['stats_lists', 'indices', 'feats', 'esData', 'temp'], support_code = esAccess['get'])
        # Replicate each probability per answer type when a list of types was requested...
        if isinstance(which, str): return ret
        else:
            return map(lambda p: tuple([p] * len(which)) , ret)
    else:
        # No weave - fall back on the per-item Python implementation...
        # NOTE: tuple-unpacking lambda below is Python 2 only.
        return map(lambda (i, stats_list): self.answer(stats_list, which, es, indices[i], trees), enumerate(stats_lists))
def summary(self, es, index, weights = None):
    """Builds a summary for the exemplars selected by index - a tightly packed float32 blob where each row is [weight, feature vector]. weights, if given, is indexed by index to obtain the per-exemplar weights; otherwise every exemplar gets a weight of one. Returns the raw bytes - note this can consume a lot of space."""
    # Pull out the selected feature vectors as float32...
    data = numpy.asarray(es[0,index,:], dtype=numpy.float32)
    # Default to a weight of one per exemplar when no weights are provided...
    if weights is None: weights = numpy.ones(data.shape[0], dtype=numpy.float32)
    else: weights = weights[index]
    # Prepend the weight as column zero and serialise...
    # (tobytes rather than the deprecated/removed tostring - identical output.)
    data = numpy.append(weights.reshape((-1,1)), data, axis=1)
    return data.tobytes()
def updateSummary(self, summary, es, index, weights = None):
    """Returns the given summary blob with the exemplars selected by index appended, using the same [weight, feature vector] float32 packing as the summary method. weights, if given, is indexed by index; otherwise ones are used."""
    # Same packing as summary...
    data = numpy.asarray(es[0,index,:], dtype=numpy.float32)
    if weights is None: weights = numpy.ones(data.shape[0], dtype=numpy.float32)
    else: weights = weights[index]
    data = numpy.append(weights.reshape((-1,1)), data, axis=1)
    # tobytes rather than the deprecated/removed tostring - identical output...
    return summary + data.tobytes()
def error(self, stats, summary):
    """Error is defined as the negative logarithm of the probability of the data provided in the summary, evaluated under the Gaussian encoded by stats. Returns a (error, None) pair."""
    # Extract the details from the stats object...
    # Packed layout: 3 float32 parameters, then the mean vector, then the precision matrix.
    params = numpy.fromstring(stats[:12], dtype=numpy.float32)
    precStart = 12 + 4*self.feats
    mean = numpy.fromstring(stats[12:precStart], dtype=numpy.float32)
    prec = numpy.fromstring(stats[precStart:], dtype=numpy.float32).reshape((self.feats, self.feats))
    # Factor in each feature vector from the summary, by summing in its negative log liklihood...
    # Each summary row is [weight, feature vector] - the packing produced by the summary method.
    summary = numpy.fromstring(summary, dtype=numpy.float32).reshape((-1, self.feats + 1))
    delta = summary[:,1:] - mean.reshape((1,-1))
    # Per-row squared Mahalanobis distances under the precision matrix...
    vmv = (delta * numpy.dot(prec, delta.T).T).sum(axis=1)
    # Weighted sum of distances, minus the log normalising constant (params[2]) per unit of weight...
    err = 0.5 * (summary[:,0] * vmv).sum()
    err -= summary[:,0].sum() * params[2]
    return (err, None)
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Some basic matrix operations that come in use...
matrix_code = start_cpp() + """
#ifndef MATRIX_CODE
#define MATRIX_CODE
template <typename T>
inline void MemSwap(T * lhs, T * rhs, int count = 1)
{
while(count!=0)
{
T t = *lhs;
*lhs = *rhs;
*rhs = t;
++lhs;
++rhs;
--count;
}
}
// Calculates the determinant - you give it a pointer to the first elment of the array, and its size (It must be square), plus its stride, which would typically be identical to size, which is the default.
template <typename T>
inline T Determinant(T * pos, int size, int stride = -1)
{
if (stride==-1) stride = size;
if (size==1) return pos[0];
else
{
if (size==2) return pos[0]*pos[stride+1] - pos[1]*pos[stride];
else
{
T ret = 0.0;
for (int i=0; i<size; i++)
{
if (i!=0) MemSwap(&pos[0], &pos[stride*i], size-1);
T sub = Determinant(&pos[stride], size-1, stride) * pos[stride*i + size-1];
if ((i+size)%2) ret += sub;
else ret -= sub;
}
for (int i=1; i<size; i++)
{
MemSwap(&pos[(i-1)*stride], &pos[i*stride], size-1);
}
return ret;
}
}
}
// Inverts a square matrix, will fail on singular and very occasionally on
// non-singular matrices, returns true on success. Uses Gauss-Jordan elimination
// with partial pivoting.
// in is the input matrix, out the output matrix, just be aware that the input matrix is trashed.
// You have to provide its size (Its square, obviously.), and optionally a stride if different from size.
template <typename T>
inline bool Inverse(T * in, T * out, int size, int stride = -1)
{
if (stride==-1) stride = size;
for (int r=0; r<size; r++)
{
for (int c=0; c<size; c++)
{
out[r*stride + c] = (c==r)?1.0:0.0;
}
}
for (int r=0; r<size; r++)
{
// Find largest pivot and swap in, fail if best we can get is 0...
T max = in[r*stride + r];
int index = r;
for (int i=r+1; i<size; i++)
{
if (fabs(in[i*stride + r])>fabs(max))
{
max = in[i*stride + r];
index = i;
}
}
if (index!=r)
{
MemSwap(&in[index*stride], &in[r*stride], size);
MemSwap(&out[index*stride], &out[r*stride], size);
}
if (fabs(max-0.0)<1e-6) return false;
// Divide through the entire row...
max = 1.0/max;
in[r*stride + r] = 1.0;
for (int i=r+1; i<size; i++) in[r*stride + i] *= max;
for (int i=0; i<size; i++) out[r*stride + i] *= max;
// Row subtract to generate 0's in the current column, so it matches an identity matrix...
for (int i=0; i<size; i++)
{
if (i==r) continue;
T factor = in[i*stride + r];
in[i*stride + r] = 0.0;
for (int j=r+1; j<size; j++) in[i*stride + j] -= factor * in[r*stride + j];
for (int j=0; j<size; j++) out[i*stride + j] -= factor * out[r*stride + j];
}
}
return true;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import time
class ProgBar:
    """Simple console progress bar class. Note that object creation and destruction matter, as they indicate when processing starts and when it stops."""
    def __init__(self, width = 60, onCallback = None):
        """width is the length of the bar in characters; onCallback, if provided, is invoked each time callback is called."""
        self.start = time.time()
        self.fill = 0
        self.width = width
        self.onCallback = onCallback
        # Draw the outline the bar will fill in...
        sys.stdout.write(('_'*self.width)+'\n')
        sys.stdout.flush()

    def __del__(self):
        """Completes the bar and reports the total elapsed time."""
        self.end = time.time()
        self.__render(self.width)
        sys.stdout.write('\nDone - '+str(self.end-self.start)+' seconds\n\n')
        sys.stdout.flush()

    def callback(self, nDone, nToDo):
        """Hand this into the callback of methods to get a progress bar - it works by users repeatedly calling it to indicate how many units of work they have done (nDone) out of the total number of units required (nToDo)."""
        if self.onCallback:
            self.onCallback()
        # Work out how many characters should now be filled, clamped to the bar width...
        filled = min(int(float(self.width)*float(nDone)/float(nToDo)), self.width)
        # Only draw when the bar actually advances...
        if filled > self.fill:
            self.__render(filled)

    def __render(self, n):
        """Extends the drawn bar out to n characters."""
        sys.stdout.write('|'*(n-self.fill))
        sys.stdout.flush()
        self.fill = n
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import pydoc
import inspect
class DocGen:
    """A helper class that is used to generate documentation for the system. Outputs multiple formats simultaneously, specifically html for local reading with a webbrowser and the markup used by the wiki system on Google code."""
    def __init__(self, name, title = None, summary = None):
        """name is the module name - primarilly used for the file names. title is the title used as applicable - if not provide it just uses the name. summary is an optional line to go below the title."""
        if title==None: title = name
        if summary==None: summary = title
        self.doc = pydoc.HTMLDoc()
        # Open the html output and write the boilerplate header...
        self.html = open('%s.html'%name,'w')
        self.html.write('<html>\n')
        self.html.write('<head>\n')
        self.html.write('<title>%s</title>\n'%title)
        self.html.write('</head>\n')
        self.html.write('<body>\n')
        # Section contents accumulate in these strings and are only written out by __del__...
        self.html_variables = ''
        self.html_functions = ''
        self.html_classes = ''
        # Open the wiki output and write its header...
        self.wiki = open('%s.wiki'%name,'w')
        self.wiki.write('#summary %s\n\n'%summary)
        self.wiki.write('= %s= \n\n'%title)
        self.wiki_variables = ''
        self.wiki_classes = ''
        self.wiki_functions = ''
    def __del__(self):
        # Flushes the accumulated sections and closes both output files - documentation is
        # only complete once the DocGen object has been destroyed.
        if self.html_variables!='':
            self.html.write(self.doc.bigsection('Synonyms', '#ffffff', '#8d50ff', self.html_variables))
        if self.html_functions!='':
            self.html.write(self.doc.bigsection('Functions', '#ffffff', '#eeaa77', self.html_functions))
        if self.html_classes!='':
            self.html.write(self.doc.bigsection('Classes', '#ffffff', '#ee77aa', self.html_classes))
        self.html.write('</body>\n')
        self.html.write('</html>\n')
        self.html.close()
        if self.wiki_variables!='':
            self.wiki.write('= Variables =\n\n')
            self.wiki.write(self.wiki_variables)
            self.wiki.write('\n')
        if self.wiki_functions!='':
            self.wiki.write('= Functions =\n\n')
            self.wiki.write(self.wiki_functions)
            self.wiki.write('\n')
        if self.wiki_classes!='':
            self.wiki.write('= Classes =\n\n')
            self.wiki.write(self.wiki_classes)
            self.wiki.write('\n')
        self.wiki.close()
    def addFile(self, fn, title, fls = True):
        """Given a filename and section title adds the contents of said file to the output. Various flags influence how this works."""
        html = []
        wiki = []
        for i, line in enumerate(open(fn,'r').readlines()):
            # html version - bold first line (when fls), italicise filenames of the form 'x.py - ' or 'x.txt - '...
            hl = line.replace('\n', '')
            if i==0 and fls:
                hl = '<strong>' + hl + '</strong>'
            for ext in ['py','txt']:
                if '.%s - '%ext in hl:
                    s = hl.split('.%s - '%ext, 1)
                    hl = '<i>' + s[0] + '.%s</i> - '%ext + s[1]
            html.append(hl)
            # wiki version - same transforms using wiki markup...
            wl = line.strip()
            if i==0 and fls:
                wl = '*%s*'%wl
            for ext in ['py','txt']:
                if '.%s - '%ext in wl:
                    s = wl.split('.%s - '%ext, 1)
                    wl = '`' + s[0] + '.%s` - '%ext + s[1] + '\n'
            wiki.append(wl)
        self.html.write(self.doc.bigsection(title, '#ffffff', '#7799ee', '<br/>'.join(html)))
        self.wiki.write('== %s ==\n'%title)
        self.wiki.write('\n'.join(wiki))
        self.wiki.write('----\n\n')
    def addVariable(self, var, desc):
        """Adds a variable to the documentation. Given the nature of this you provide it as a pair of strings - one referencing the variable, the other some kind of description of its use etc.."""
        self.html_variables += '<strong>%s</strong><br/>'%var
        self.html_variables += '%s<br/><br/>\n'%desc
        self.wiki_variables += '*`%s`*\n'%var
        self.wiki_variables += ' %s\n\n'%desc
    def addFunction(self, func):
        """Adds a function to the documentation. You provide the actual function instance."""
        # NOTE(review): this replace appears garbled - presumably it was
        # replace('&nbsp;', ' ') originally; verify against the upstream source.
        self.html_functions += self.doc.docroutine(func).replace(' ',' ')
        self.html_functions += '\n'
        # Reconstruct the signature for the wiki output...
        name = func.__name__
        args, varargs, keywords, defaults = inspect.getargspec(func)
        doc = inspect.getdoc(func)
        # Left-pad defaults with None so it lines up with args...
        if defaults==None: defaults = list()
        defaults = (len(args)-len(defaults)) * [None] + list(defaults)
        arg_str = ''
        if len(args)!=0:
            arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
        if varargs!=None:
            arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
        if keywords!=None:
            arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords
        self.wiki_functions += '*`%s(%s)`*\n'%(name, arg_str)
        self.wiki_functions += ' %s\n\n'%doc
    def addClass(self, cls):
        """Adds a class to the documentation. You provide the actual class object."""
        # NOTE(review): this replace appears garbled - presumably '&nbsp;' -> ' '; verify.
        self.html_classes += self.doc.docclass(cls).replace(' ',' ')
        self.html_classes += '\n'
        name = cls.__name__
        parents = filter(lambda a: a!=cls, inspect.getmro(cls))
        doc = inspect.getdoc(cls)
        par_str = ''
        if len(parents)!=0:
            par_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda p: p.__name__, parents))
        self.wiki_classes += '== %s(%s) ==\n'%(name, par_str)
        self.wiki_classes += ' %s\n\n'%doc
        # Collect and sort the methods, with __init__ forced to the front...
        methods = inspect.getmembers(cls, lambda x: inspect.ismethod(x) or inspect.isbuiltin(x) or inspect.isroutine(x))
        def method_key(pair):
            if pair[0]=='__init__': return '___'
            else: return pair[0]
        methods.sort(key=method_key)
        for name, method in methods:
            # Skip name-mangled privates and dunders...
            if not name.startswith('_%s'%cls.__name__) and (not inspect.ismethod(method) and name[:2]!='__'):
                if inspect.ismethod(method):
                    args, varargs, keywords, defaults = inspect.getargspec(method)
                else:
                    # Builtins and other routines hide their signature...
                    args = ['?']
                    varargs = None
                    keywords = None
                    defaults = None
                if defaults==None: defaults = list()
                defaults = (len(args)-len(defaults)) * [None] + list(defaults)
                arg_str = ''
                if len(args)!=0:
                    arg_str += reduce(lambda a, b: '%s, %s'%(a,b), map(lambda arg, d: arg if d==None else '%s = %s'%(arg,d), args, defaults))
                if varargs!=None:
                    arg_str += ', *%s'%varargs if arg_str!='' else '*%s'%varargs
                if keywords!=None:
                    arg_str += ', **%s'%keywords if arg_str!='' else '**%s'%keywords
                # Walk up the mro to find a docstring if this class does not define one...
                def fetch_doc(cls, name):
                    try:
                        method = getattr(cls, name)
                        if method.__doc__!=None: return inspect.getdoc(method)
                    except: pass
                    for parent in filter(lambda a: a!=cls, inspect.getmro(cls)):
                        ret = fetch_doc(parent, name)
                        if ret!=None: return ret
                    return None
                doc = fetch_doc(cls, name)
                self.wiki_classes += '*`%s(%s)`*\n'%(name, arg_str)
                self.wiki_classes += ' %s\n\n'%doc
        # Document simple class-level variables as well...
        variables = inspect.getmembers(cls, lambda x: inspect.ismemberdescriptor(x) or isinstance(x, int) or isinstance(x, str) or isinstance(x, float))
        for name, var in variables:
            if not name.startswith('__'):
                if hasattr(var, '__doc__'): d = var.__doc__
                else: d = str(var)
                self.wiki_classes += '*`%s`* = %s\n\n'%(name, d)
| Python |
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import unittest
import random
import math
from scipy.special import gammaln, psi, polygamma
from scipy import weave
from utils.start_cpp import start_cpp
# Provides various gamma-related functions...
gamma_code = start_cpp() + """
#ifndef GAMMA_CODE
#define GAMMA_CODE
#include <cmath>
// Returns the natural logarithm of the Gamma function...
// (Uses Lanczos's approximation.)
double lnGamma(double z)
{
static const double coeff[9] = {0.99999999999980993, 676.5203681218851, -1259.1392167224028, 771.32342877765313, -176.61502916214059, 12.507343278686905, -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7};
if (z<0.5)
{
// Use reflection formula, as approximation doesn't work down here...
return log(M_PI) - log(sin(M_PI*z)) - lnGamma(1.0-z);
}
else
{
double x = coeff[0];
for (int i=1;i<9;i++) x += coeff[i]/(z+i-1);
double t = z + 6.5;
return log(sqrt(2.0*M_PI)) + (z-0.5)*log(t) - t + log(x);
}
}
// Calculates the Digamma function, i.e. the derivative of the log of the Gamma function - uses a partial expansion of an infinite series to 4 terms that is good for high values, and an identity to express lower values in terms of higher values...
double digamma(double z)
{
static const double highVal = 13.0; // A bit of fiddling shows that the last term with this is of the order 1e-10, so we can expect at least 9 digits of accuracy past the decimal point.
double ret = 0.0;
while (z<highVal)
{
ret -= 1.0/z;
z += 1.0;
}
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz4 = iz2*iz2;
double iz6 = iz4*iz2;
ret += log(z) - iz1/2.0 - iz2/12.0 + iz4/120.0 - iz6/252.0;
return ret;
}
// Calculates the trigamma function - uses a partial expansion of an infinite series that is accurate for large values, and then uses an identity to express lower values in terms of higher values - same approach as for the digamma function basically...
double trigamma(double z)
{
static const double highVal = 8.0;
double ret = 0.0;
while (z<highVal)
{
ret += 1.0/(z*z);
z += 1.0;
}
z -= 1.0;
double iz1 = 1.0/z;
double iz2 = iz1*iz1;
double iz3 = iz1*iz2;
double iz5 = iz3*iz2;
double iz7 = iz5*iz2;
double iz9 = iz7*iz2;
ret += iz1 - 0.5*iz2 + iz3/6.0 - iz5/30.0 + iz7/42.0 - iz9/30.0;
return ret;
}
#endif
"""
def lnGamma(z):
    """Evaluates the log-gamma function via the C implementation in gamma_code. Redundant given that scipy provides gammaln, but useful for testing the C code."""
    src = start_cpp(gamma_code) + """
return_val = lnGamma(z);
"""
    return weave.inline(src, ['z'], support_code=gamma_code)
def digamma(z):
    """Evaluates the digamma function via the C implementation in gamma_code. Redundant given that scipy provides psi, but useful for testing the C code."""
    src = start_cpp(gamma_code) + """
return_val = digamma(z);
"""
    return weave.inline(src, ['z'], support_code=gamma_code)
def trigamma(z):
    """Evaluates the trigamma function via the C implementation in gamma_code. Redundant given that scipy provides polygamma, but useful for testing the C code."""
    src = start_cpp(gamma_code) + """
return_val = trigamma(z);
"""
    return weave.inline(src, ['z'], support_code=gamma_code)
class TestFuncs(unittest.TestCase):
    """Test code for the assorted gamma-related functions."""
    def test_compile(self):
        # Just verifies that the C support code compiles...
        code = start_cpp(gamma_code) + """
"""
        weave.inline(code, support_code=gamma_code)
    def test_error_lngamma(self):
        # Compares the C lnGamma against scipy's gammaln over random inputs...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = lnGamma(z)
            good = gammaln(z)
            assert(math.fabs(own-good)<1e-12)
    def test_error_digamma(self):
        # Compares the C digamma against scipy's psi over random inputs...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = digamma(z)
            good = psi(z)
            assert(math.fabs(own-good)<1e-9)
    def test_error_trigamma(self):
        # Compares the C trigamma against scipy's polygamma over random inputs...
        for _ in xrange(1000):
            z = random.uniform(0.01, 100.0)
            own = trigamma(z)
            good = polygamma(1,z)
            assert(math.fabs(own-good)<1e-9)
# If this file is run directly, execute the unit tests above...
if __name__ == '__main__':
    unittest.main()
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import inspect
import hashlib
def start_cpp(hash_str = None):
    """This method does two things - firstly it adds the correct line numbers to scipy.weave code (Good for debugging) and secondly it can optionaly inserts a hash code of some other code into the code. This latter feature is useful for working around the fact the scipy.weave only recompiles if the hash of the code changes, but ignores the support_code - passing the support_code into start_cpp avoids this problem by putting its hash into the code and forcing a recompile when that code changes. Usage is <code variable> = start_cpp([support_code variable]) + <3 quotations to start big comment with code in, typically going over many lines.>"""
    # Inspect the caller's frame to discover where the C code literally starts...
    caller = inspect.currentframe().f_back
    info = inspect.getframeinfo(caller)
    filename, lineno = info[0], info[1]
    if hash_str is None:
        return '#line %i "%s"\n'%(lineno, filename)
    # Embed an md5 of the support code as a comment, so weave sees a change...
    digest = hashlib.md5()
    digest.update(hash_str)
    return '#line %i "%s" // %s\n'%(lineno, filename, digest.hexdigest())
| Python |
# -*- coding: utf-8 -*-
# Code copied from http://opencv.willowgarage.com/wiki/PythonInterface - license unknown, but presumed to be at least as liberal as bsd (The license for opencv.).
import cv
import numpy as np
def cv2array(im):
    """Converts a cv array (IplImage-style object) to a numpy array of shape (height, width, channels), with the dtype chosen to match the image depth."""
    # Map from OpenCV depth constants to numpy dtype names...
    depth2dtype = {
        cv.IPL_DEPTH_8U: 'uint8',
        cv.IPL_DEPTH_8S: 'int8',
        cv.IPL_DEPTH_16U: 'uint16',
        cv.IPL_DEPTH_16S: 'int16',
        cv.IPL_DEPTH_32S: 'int32',
        cv.IPL_DEPTH_32F: 'float32',
        cv.IPL_DEPTH_64F: 'float64',
    }
    # Copy out the raw pixel data and reinterpret it with the right dtype...
    # (Removed the unused 'arrdtype = im.depth' local that was here.)
    a = np.fromstring(
        im.tostring(),
        dtype=depth2dtype[im.depth],
        count=im.width*im.height*im.nChannels)
    a.shape = (im.height,im.width,im.nChannels)
    return a
def array2cv(a):
    """Converts a numpy array to a cv array, if possible. A 2D array becomes a single channel image; a 3D array's last axis provides the channel count."""
    # Map from numpy dtype names to OpenCV depth constants...
    dtype2depth = {
        'uint8': cv.IPL_DEPTH_8U,
        'int8': cv.IPL_DEPTH_8S,
        'uint16': cv.IPL_DEPTH_16U,
        'int16': cv.IPL_DEPTH_16S,
        'int32': cv.IPL_DEPTH_32S,
        'float32': cv.IPL_DEPTH_32F,
        'float64': cv.IPL_DEPTH_64F,
    }
    # Only a missing third axis means 'single channel' - catch just IndexError
    # rather than everything, so genuinely bad inputs still surface...
    try:
        nChannels = a.shape[2]
    except IndexError:
        nChannels = 1
    cv_im = cv.CreateImageHeader((a.shape[1],a.shape[0]),
                                 dtype2depth[str(a.dtype)],
                                 nChannels)
    # tobytes rather than the deprecated/removed tostring - identical output...
    cv.SetData(cv_im, a.tobytes(),
               a.dtype.itemsize*nChannels*a.shape[1])
    return cv_im
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import multiprocessing as mp
import multiprocessing.synchronize # To make sure we have all the functionality.
import types
import marshal
import unittest
def repeat(x):
    """A generator that repeats the input forever - can be used with the mp_map function to give data to a function that is constant."""
    # Never terminates - the consumer (zip in mp_map) decides when to stop...
    while True:
        yield x
def run_code(code, args):
    """Internal use function that does the work in each process - rebuilds a function from its marshalled code object and applies it to args."""
    restored = types.FunctionType(marshal.loads(code), globals(), '_')
    return restored(*args)
def mp_map(func, *iters, **keywords):
    """A multiprocess version of the map function. Note that func must limit itself to the data provided - if it accesses anything else (globals, locals to its definition.) it will fail. There is a repeat generator provided in this module to work around such issues. Note that, unlike map, this iterates the length of the shortest of inputs, rather than the longest - whilst this makes it not a perfect substitute it makes passing constant argumenmts easier as they can just repeat for infinity. An existing pool can be passed in via the 'pool' keyword; otherwise one is created for the call and cleaned up afterwards."""
    # Use a caller-provided pool when given, otherwise create (and own) one...
    if 'pool' in keywords: pool = keywords['pool']
    else: pool = None
    own_pool = pool is None
    if own_pool:
        pool = mp.Pool()
    try:
        # __code__ rather than the Python 2-only func_code attribute...
        code = marshal.dumps(func.__code__)
        # Queue every job asynchronously, then collect results in order...
        jobs = []
        for args in zip(*iters):
            jobs.append(pool.apply_async(run_code,(code,args)))
        for i in xrange(len(jobs)):
            jobs[i] = jobs[i].get()
        return jobs
    finally:
        # Only clean up a pool we created - leaking it would leave worker processes alive...
        if own_pool:
            pool.close()
            pool.join()
class TestMpMap(unittest.TestCase):
    """Unit tests for the mp_map function."""
    def test_simple1(self):
        # A do-nothing function should return the input unchanged...
        data = ['a','b','c','d']
        def noop(data):
            return data
        data_noop = mp_map(noop, data)
        self.assertEqual(data, data_noop)
    def test_simple2(self):
        # Doubling a list of integers matches the builtin map...
        data = [x for x in xrange(1000)]
        data_double = mp_map(lambda a: a*2, data)
        self.assertEqual(map(lambda a: a*2,data), data_double)
    def test_gen(self):
        # Generators are accepted as input sequences...
        def gen():
            for i in xrange(100): yield i
        data_double = mp_map(lambda a: a*2, gen())
        self.assertEqual(map(lambda a: a*2,gen()), data_double)
    def test_repeat(self):
        # The repeat generator supplies a constant second argument...
        def mult(a,b):
            return a*b
        data = [x for x in xrange(50,5000,5)]
        data_triple = mp_map(mult, data, repeat(3))
        self.assertEqual(map(lambda a: a*3,data),data_triple)
    def test_none(self):
        # An empty input gives an empty output...
        data = []
        data_sqr = mp_map(lambda x: x*x, data)
        self.assertEqual([],data_sqr)
# Run the unit tests when this file is executed directly...
if __name__ == '__main__':
    unittest.main()
| Python |
# Copyright (c) 2012, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import sys
import os.path
import tempfile
import shutil
from distutils.core import setup, Extension
import distutils.ccompiler
import distutils.dep_util
# Construct the default compiler once at import time - make_mod uses it to work
# out the platform's shared object filename; None signals that no compiler is
# available on this system, in which case make_mod raises.
try:
    __default_compiler = distutils.ccompiler.new_compiler()
except:
    __default_compiler = None
def make_mod(name, base, source, openCL = False):
  """Uses distutils to compile a python module - really just a set of hacks to allow this to be done 'on demand', so it only compiles if the module does not exist or is older than the current source, and after compilation the program can continue on its merry way, and immediatly import the just compiled module. Note that on failure erros can be thrown - its your choice to catch them or not. name is the modules name, i.e. what you want to use with the import statement. base is the base directory for the module, which contains the source file - often you would want to set this to 'os.path.dirname(__file__)', assuming the .py file that imports the module is in the same directory as the code. It is this directory that the module is output to. source is the filename of the source code to compile, or alternativly a list of filenames. openCL indicates if OpenCL is used by the module, in which case it does all the necesary setup - done like this so these setting can be kept centralised, so when they need to be different for a new platform they only have to be changed in one place."""
  if __default_compiler is None: raise Exception('No compiler!')

  # Work out the various file names - check if we actually need to do anything...
  if not isinstance(source, list): source = [source]
  source_path = map(lambda s: os.path.join(base, s), source)
  library_path = os.path.join(base, __default_compiler.shared_object_filename(name))

  # any() is safe on an empty sequence, unlike reduce(lambda a,b: a or b, ...),
  # and short-circuits on the first stale file...
  if any(distutils.dep_util.newer(s, library_path) for s in source_path):
    # Backup the argv variable before entering the try block, and pre-declare
    # temp_dir so the finally clause cannot hit a NameError if mkdtemp fails...
    old_argv = sys.argv[:]
    temp_dir = None
    try:
      # Create a temporary directory to do all build work in...
      temp_dir = tempfile.mkdtemp()

      # Prepare the extension - distutils is driven through argv, so fake it;
      # header files go into depends so edits to them also trigger a rebuild...
      sys.argv = ['','build_ext','--build-lib', base, '--build-temp', temp_dir]
      comp_path = filter(lambda s: not s.endswith('.h'), source_path)
      depends = filter(lambda s: s.endswith('.h'), source_path)
      if openCL:
        ext = Extension(name, comp_path, include_dirs=['/usr/local/cuda/include', '/opt/AMDAPP/include'], libraries = ['OpenCL'], library_dirs = ['/usr/lib64/nvidia', '/opt/AMDAPP/lib/x86_64'], depends=depends)
      else:
        ext = Extension(name, comp_path, depends=depends)

      # Compile...
      setup(name=name, version='1.0.0', ext_modules=[ext])
    finally:
      # Restore the argv variable and remove the temporary directory (if it
      # was created); ignore_errors=True as cleanup failure is non-fatal...
      sys.argv = old_argv
      if temp_dir is not None:
        shutil.rmtree(temp_dir, True)
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
from numpy_help_cpp import numpy_util_code
# Provides various functions to assist with manipulating python objects from c++ code.
# The value below is a C++ source fragment (for scipy.weave-style inclusion), built by
# concatenating the numpy helpers with a boilerplate prefix; the PYTHON_OBJ_CODE guard
# lets it be concatenated into larger code strings without duplicate definitions.
# NOTE(review): the Get* helpers do not check PyObject_GetAttrString for NULL -
# presumably callers guarantee the attribute exists; confirm before reuse.
python_obj_code = numpy_util_code + start_cpp() + """
#ifndef PYTHON_OBJ_CODE
#define PYTHON_OBJ_CODE
// Extracts a boolean from an object...
bool GetObjectBoolean(PyObject * obj, const char * name)
{
PyObject * b = PyObject_GetAttrString(obj, name);
bool ret = b!=Py_False;
Py_DECREF(b);
return ret;
}
// Extracts an int from an object...
int GetObjectInt(PyObject * obj, const char * name)
{
PyObject * i = PyObject_GetAttrString(obj, name);
int ret = PyInt_AsLong(i);
Py_DECREF(i);
return ret;
}
// Extracts a float from an object...
float GetObjectFloat(PyObject * obj, const char * name)
{
PyObject * f = PyObject_GetAttrString(obj, name);
float ret = PyFloat_AsDouble(f);
Py_DECREF(f);
return ret;
}
// Extracts an array from an object, returning it as a new[] unsigned char array. You can also pass in a pointer to an int to have the size of the array stored...
unsigned char * GetObjectByte1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
unsigned char * ret = new unsigned char[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Byte1D(nao,i);
Py_DECREF(nao);
return ret;
}
// Extracts an array from an object, returning it as a new[] float array. You can also pass in a pointer to an int to have the size of the array stored...
float * GetObjectFloat1D(PyObject * obj, const char * name, int * size = 0)
{
PyArrayObject * nao = (PyArrayObject*)PyObject_GetAttrString(obj, name);
float * ret = new float[nao->dimensions[0]];
if (size) *size = nao->dimensions[0];
for (int i=0;i<nao->dimensions[0];i++) ret[i] = Float1D(nao,i);
Py_DECREF(nao);
return ret;
}
#endif
"""
| Python |
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from start_cpp import start_cpp
# Defines helper functions for accessing numpy arrays...
# The value below is a C++ source fragment (for scipy.weave-style inclusion); each
# helper returns a reference computed from the array's data pointer plus strides,
# so it works for non-contiguous arrays too. Guarded by NUMPY_UTIL_CODE so it can
# appear in multiple concatenated code strings without redefinition.
# NOTE(review): no bounds or dtype checking is done - the caller must pass an array
# whose element type matches the helper used.
numpy_util_code = start_cpp() + """
#ifndef NUMPY_UTIL_CODE
#define NUMPY_UTIL_CODE
float & Float1D(PyArrayObject * arr, int index = 0)
{
return *(float*)(arr->data + index*arr->strides[0]);
}
float & Float2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
float & Float3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
return *(float*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
unsigned char & Byte1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index*arr->strides[0]);
}
unsigned char & Byte2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
unsigned char & Byte3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(unsigned char));
return *(unsigned char*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
int & Int1D(PyArrayObject * arr, int index = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index*arr->strides[0]);
}
int & Int2D(PyArrayObject * arr, int index1 = 0, int index2 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1]);
}
int & Int3D(PyArrayObject * arr, int index1 = 0, int index2 = 0, int index3 = 0)
{
//assert(arr->strides[0]==sizeof(int));
return *(int*)(arr->data + index1*arr->strides[0] + index2*arr->strides[1] + index3*arr->strides[2]);
}
#endif
"""
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2011, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import cvarray
import mp_map
import prog_bar
import numpy_help_cpp
import python_obj_cpp
import matrix_cpp
import gamma_cpp
import setProcName
import start_cpp
import make
import doc_gen
# Setup...
doc = doc_gen.DocGen('utils', 'Utilities/Miscellaneous', 'Library of miscellaneous stuff - most modules depend on this.')
doc.addFile('readme.txt', 'Overview')

# Variables...
doc.addVariable('numpy_help_cpp.numpy_util_code', 'Assorted utility functions for accessing numpy arrays within scipy.weave C++ code.')
doc.addVariable('python_obj_cpp.python_obj_code', 'Assorted utility functions for interfacing with python objects from scipy.weave C++ code.')
doc.addVariable('matrix_cpp.matrix_code', 'Matrix manipulation routines for use in scipy.weave C++')
doc.addVariable('gamma_cpp.gamma_code', 'Gamma and related functions for use in scipy.weave C++')

# Functions...
# (make.make_mod was previously registered twice; once is sufficient.)
doc.addFunction(make.make_mod)
doc.addFunction(cvarray.cv2array)
doc.addFunction(cvarray.array2cv)
doc.addFunction(mp_map.repeat)
doc.addFunction(mp_map.mp_map)
doc.addFunction(setProcName.setProcName)
doc.addFunction(start_cpp.start_cpp)

# Classes...
doc.addClass(prog_bar.ProgBar)
doc.addClass(doc_gen.DocGen)
| Python |
#! /usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2010, Tom SF Haines
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from ctypes import *
def setProcName(name):
  """Sets the process name, linux only - useful for those programs where you might want to do a killall, but don't want to slaughter all the other python processes. Note that there are multiple mechanisms, and that the given new name can be shortened by differing amounts in differing cases."""
  # Call the process control function - option 15 is PR_SET_NAME, which renames
  # the task as seen by e.g. killall (kernel truncates to 16 bytes)...
  libc = cdll.LoadLibrary('libc.so.6')
  libc.prctl(15, c_char_p(name), 0, 0, 0)

  # Update argv, so 'ps'-style listings also show the new name - this overwrites
  # argv[0]'s memory in place via a glibc-private symbol...
  charPP = POINTER(POINTER(c_char))
  # NOTE(review): '_dl_argv' is glibc-internal; presumably unavailable on other
  # libc implementations - verify before porting.
  argv = charPP.in_dll(libc,'_dl_argv')
  # Copy at most strlen(argv[0]) bytes, so a longer name is truncated rather
  # than overrunning the original buffer; the old terminator stays in place.
  size = libc.strlen(argv[0])
  libc.strncpy(argv[0],c_char_p(name),size)
if __name__=='__main__':
  # Quick test that it works - show the process listings, rename, show again...
  import os

  listings = ('ps', 'ps -f')
  for cmd in listings:
    os.system(cmd)
  setProcName('wibble_wobble')
  for cmd in listings:
    os.system(cmd)
| Python |
#! /usr/bin/env python
# Copyright 2012 Tom SF Haines
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
import df
from utils import doc_gen
# Setup...
doc = doc_gen.DocGen('df', 'Decision Forests', 'Extensive random forests implimentation')
doc.addFile('readme.txt', 'Overview')

# Classes - registered in presentation order...
for klass in (df.DF,
              df.ExemplarSet,
              df.MatrixES,
              df.MatrixGrow,
              df.Goal,
              df.Classification,
              df.DensityGaussian,
              df.Pruner,
              df.PruneCap,
              df.Test,
              df.AxisSplit,
              df.LinearSplit,
              df.DiscreteBucket,
              df.Generator,
              df.MergeGen,
              df.RandomGen,
              df.AxisMedianGen,
              df.LinearMedianGen,
              df.AxisRandomGen,
              df.LinearRandomGen,
              df.DiscreteRandomGen,
              df.AxisClassifyGen,
              df.LinearClassifyGen,
              df.DiscreteClassifyGen,
              df.SVMClassifyGen,
              df.Node):
  doc.addClass(klass)
| Python |
Subsets and Splits
SQL Console for ajibawa-2023/Python-Code-Large
Provides a useful breakdown of language distribution in the training data, showing which languages have the most samples and helping identify potential imbalances across different language groups.