code stringlengths 1 2.01M | repo_name stringlengths 3 62 | path stringlengths 1 267 | language stringclasses 231
values | license stringclasses 13
values | size int64 1 2.01M |
|---|---|---|---|---|---|
function tf = gr_is_connected(G)
%GR_IS_CONNECTED Test graph connectivity
%
%   tf = GR_IS_CONNECTED(G);
%       Returns whether G is a connected graph. G must be an
%       undirected graph struct that carries neighbor information.
%
%   Created by Dahua Lin, on Jan 25, 2012
%

%% verify input arguments

ok = is_gr(G) && isequal(G.dty, 'u') && G.has_nbs;
if ~ok
    error('gr_is_connected:invalidarg', ...
        'G should be an undirected graph struct (with neighbors).');
end

%% main

tf = gr_is_connected_cimp(G);
| zzhangumd-smitoolbox | graph/algs/gr_is_connected.m | MATLAB | mit | 482 |
function [edges, ccs] = gr_kruskal(G, w, K)
%GR_KRUSKAL Kruskal's Algorithm for minimum spanning tree/forest
%
%   edges = GR_KRUSKAL(G, w);
%   edges = GR_KRUSKAL(G, w, K);
%
%       Finds the minimum spanning tree/forest of the given graph G.
%
%       Input arguments:
%       - G:        The input graph, which should be an undirected graph
%                   struct.
%
%       - w:        The edge weights (a vector of length G.m, either a
%                   row or a column vector).
%
%       - K:        The number of trees(clusters) in the forest.
%                   If K is omitted, it is by default set to 1, meaning
%                   to find the minimum spanning tree.
%
%                   Note: generally, K is the minimum allowable number of
%                   trees. If G in itself have K' > K connected components,
%                   then K' trees will actually be found.
%
%       Output arguments:
%       - edges:    The list of edge indices in the spanning tree/forest.
%
%                   Let m be the number of all edges in G, and e = edges(i)
%                   be the index of the i-th edge found in edges, whose
%                   value ranges from 1 to 2*m.
%
%                   Note the edges are in non-decreasing order of edge
%                   weights.
%
%   [edges, ccs] = GR_KRUSKAL( ... );
%
%       Additionally returns a cell array of clusters. In particular,
%       ccs{k} is the vector of vertex indices in the k-th cluster.
%
%   Created by Dahua Lin, on Jan 26, 2012
%

%% verify input argument

if ~(is_gr(G) && isequal(G.dty, 'u'))
    error('gr_kruskal:invalidarg', ...
        'G should be an undirected graph struct.');
end

if ~(isfloat(w) && isreal(w) && ~issparse(w) && numel(w) == G.m)
    error('gr_kruskal:invalidarg', ...
        'w should be a real vector of length G.m.');
end

% duplicate the weights for the two directed copies of each undirected
% edge (edge indices range over 1:2*m). Force w into a row vector first:
% the numel check above also admits a column vector, for which [w w]
% would yield an m x 2 matrix instead of a 1 x 2m vector.
w = w(:).';
w = [w w];

if nargin < 3
    K = int32(1);
else
    if ~(isnumeric(K) && isscalar(K) && isreal(K) && K == fix(K) && ...
            K >= 1 && K < G.n)
        error('gr_kruskal:invalidarg', ...
            'K should be a positive integer scalar with K < G.n.');
    end
    K = int32(K);
end

%% main

if nargout <= 1
    edges = gr_kruskal_cimp(G, w, K);
else
    [edges, ccs] = gr_kruskal_cimp(G, w, K);
end
| zzhangumd-smitoolbox | graph/algs/gr_kruskal.m | MATLAB | mit | 2,226 |
function [Q, H] = hmm_infer(Pi, T, LL)
%HMM_INFER HMM E-step inference
%
%   Q = HMM_INFER(Pi, T, LL);
%       Infers the (posterior) marginal distributions of all states in
%       a Hidden Markov Model.
%
%       Suppose there are K distinct state values, and n steps along
%       the chain.
%
%       Input arguments:
%       - Pi:       The initial distribution of states [K x 1]
%       - T:        The transition probability matrix [K x K]
%       - LL:       The matrix of log-likelihood values [K x n]
%
%       In the output, Q is a matrix of K x n, where Q(k, i) is the
%       posterior probability of the state at i-th step being k,
%       given all observations.
%
%   [Q, H] = HMM_INFER(Pi, T, LL);
%       Also computes H, the accumulated transition counts.
%
%       Specifically, H is a K x K matrix, with
%
%           H(u, v) = sum_{i=2}^n pr(x_{i-1}=u, x_i=v | all observations)
%
%   Remarks
%   -------
%       - This function actually performs the E-step in E-M estimation
%         of a Hidden Markov model.
%
%   Created by Dahua Lin, in Feb 1, 2012
%

%% main

% run forward-backward recursion:
%   A(k,i) = p(x_i = k | obs_1..i)   (normalized alpha values)
%   Lc(i)  = log p(obs_i | obs_1..i-1)
%   B(k,i) = normalized beta values
[A, Lc] = hmm_forward(Pi, T, LL); % hmm_forward will verify all inputs
B = hmm_backward(T, LL, Lc);

% extract outputs

% posterior marginals: Q(k,i) proportional to alpha(k,i) * beta(k,i)
Q = A .* B;

% calculate H
if nargout < 2; return; end

% E(k,j) = p(obs_{j+1} | x_{j+1} = k) / p(obs_{j+1} | obs_1..j), j = 1..n-1
E = exp(bsxfun(@minus, LL(:,2:end), Lc(2:end)));
% H(u,v) = sum_i alpha_{i-1}(u) * T(u,v) * E(v,i-1) * beta_i(v)
H = (A(:,1:end-1) * (E .* B(:,2:end))') .* T;
function [x, v] = chain_viterbi(A, B, w)
%CHAIN_VITERBI Viterbi algorithm on a Hidden Markov chain
%
%   [x, v] = CHAIN_VITERBI(A, B);
%   [x, v] = CHAIN_VITERBI(A, B, w);
%
%       Performs the Viterbi algorithm along a chain that finds a sequence
%       of states that maximizes the following objective
%
%           sum_{i=1}^n a_i(x_i) + sum_{i=1}^{n-1} w(i) * b(x_i, x_{i+1})
%
%       Suppose there are K distinct states and n nodes of the chain.
%
%       Input arguments:
%
%       - A:        The matrix of first-order (additive) potentials
%                   [size: K x n].
%
%       - B:        The matrix of second-order (additive) potentials,
%                   which can be in either of the following two forms:
%                   - K x K matrix, then b(u, v) = B(u, v).
%                   - 1 x 2 vector, then b(u, v) = B(1) when u == v
%                     and b(u, v) = B(2) when u != v.
%
%       - w:        The second-order potential weights [1 x (n-1)].
%                   If w is omitted or empty, all weights are assumed
%                   to be 1.
%
%       Output arguments:
%       - x:        The resultant sequence of states [1 x n].
%
%       - v:        The objective (total potential) of the optimal state
%                   sequence.
%
%   Created by Dahua Lin, on Feb 2, 2012
%

%% verify input arguments

if ~(isfloat(A) && isreal(A) && ~issparse(A) && ndims(A) == 2)
    error('chain_viterbi:invalidarg', ...
        'A should be a non-sparse real matrix.');
end
[K, n] = size(A);

if ~( isfloat(B) && isreal(B) && ~issparse(B) && ...
        (isequal(size(B), [K K]) || numel(B) == 2) )
    error('chain_viterbi:invalidarg', ...
        'B should be a non-sparse real matrix of size K x K or 1 x 2.');
end

if nargin < 3 || isempty(w)
    % treat an explicit empty w as "omitted" (unit weights). This also
    % covers the single-node chain (n == 1), whose legal weight vector
    % is 1 x 0 and would otherwise be rejected since isvector([]) is
    % false in MATLAB.
    w = [];
else
    if ~(isfloat(w) && isreal(w) && ~issparse(w) && isvector(w) && ...
            length(w) == n - 1)
        error('chain_viterbi:invalidarg', ...
            'w should be a non-sparse real vector of length n - 1.');
    end
end

%% main

% the mex implementation requires double inputs
if ~isa(A, 'double'); A = double(A); end
if ~isa(B, 'double'); B = double(B); end

[x, v] = chain_viterbi_cimp([], A, B, w);
function [x, v] = hmm_decode(Pi, T, LL)
%HMM_DECODE Find the most probable state sequence for HMM
%
%   [x, v] = HMM_DECODE(Pi, T, LL);
%
%       Finds the state sequence based on a Hidden Markov Model using
%       the Viterbi algorithm.
%
%       Suppose there are K states and n nodes of the chain.
%
%       Input arguments:
%       - Pi:       The initial distribution of states [K x 1]
%       - T:        The transition probability matrix [K x K]
%       - LL:       The matrix of log-likelihood values [K x n]
%
%       Output arguments:
%       - x:        The solved sequence of states
%       - v:        The joint log-likelihood of the resultant sequence.
%
%   Created on Feb 2, 2012
%

%% verify input arguments

if ~(isfloat(Pi) && isreal(Pi) && ~issparse(Pi) && isvector(Pi))
    error('hmm_decode:invalidarg', ...
        'Pi should be a non-sparse real vector.');
end

K = size(T, 1);
if ~(isfloat(T) && isreal(T) && ~issparse(T) && ndims(T) == 2 && ...
        K == size(T,2))
    error('hmm_decode:invalidarg', ...
        'T should be a non-sparse real square matrix.');
end

if numel(Pi) ~= K
    error('hmm_decode:invalidarg', ...
        'The sizes of Pi and T are inconsistent.');
end

if ~(isfloat(LL) && isreal(LL) && ~issparse(LL) && ndims(LL) == 2 && ...
        size(LL,1) == K)
    error('hmm_decode:invalidarg', ...
        'LL should be a non-sparse real matrix of size K x n.');
end

%% main

% the mex implementation requires double inputs
if ~isa(Pi, 'double'); Pi = double(Pi); end
if ~isa(T, 'double'); T = double(T); end
if ~isa(LL, 'double'); LL = double(LL); end

% decode in the log domain: first-order potentials are the log-likelihoods
% (with log(Pi) added to the first node) and second-order potentials are
% log transition probabilities; no link weights are used.
[x, v] = chain_viterbi_cimp(log(Pi), LL, log(T), []);
function [Alpha, Lc] = hmm_forward(Pi, T, LL)
%HMM_FORWARD  HMM Forward recursion
%
%   [Alpha, Lc] = HMM_FORWARD(Pi, T, LL);
%
%       Performs forward recursion to evaluate the alpha values for
%       Hidden Markov Model based inference.
%
%       Input arguments:
%       - Pi:       The initial distribution of states [K x 1]
%       - T:        The transition probability matrix [K x K]
%       - LL:       The log-likelihood matrix [K x n]
%
%       Output arguments:
%       - Alpha:    The matrix of normalized alpha-values [K x n]
%                   Alpha(k, i) := pr(z_i = k | x_1, ..., x_i)
%
%       - Lc:       The vector of log conditional probabilities [1 x n]
%                   Lc(i) = log p(x_i | x_1, ..., x_{i-1})
%
%
%   Created by Dahua Lin, on Feb 1, 2012
%

%% verify input arguments

if ~(isfloat(Pi) && isreal(Pi) && ~issparse(Pi) && isvector(Pi))
    error('hmm_forward:invalidarg', ...
        'Pi should be a non-sparse real vector.');
end

K = size(T, 1);
if ~(isfloat(T) && isreal(T) && ~issparse(T) && ndims(T) == 2 && ...
        K == size(T,2))
    error('hmm_forward:invalidarg', ...
        'T should be a non-sparse real square matrix.');
end

if numel(Pi) ~= K
    error('hmm_forward:invalidarg', ...
        'The sizes of Pi and T are inconsistent.');
end

if ~(isfloat(LL) && isreal(LL) && ~issparse(LL) && ndims(LL) == 2 && ...
        size(LL,1) == K)
    error('hmm_forward:invalidarg', ...
        'LL should be a non-sparse real matrix of size K x n.');
end

%% main

% the mex implementation requires double inputs
if ~isa(Pi, 'double'); Pi = double(Pi); end
if ~isa(T, 'double'); T = double(T); end
if ~isa(LL, 'double'); LL = double(LL); end

[Alpha, Lc] = hmm_forward_cimp(Pi, T, LL);
function Beta = hmm_backward(T, LL, Lc)
%HMM_BACKWARD  HMM Backward recursion
%
%   Beta = HMM_BACKWARD(T, LL, Lc);
%
%       Performs backward recursion to evaluate the beta values for
%       Hidden Markov Model based inference.
%
%       Input arguments:
%       - T:        The transition probability matrix [K x K]
%       - LL:       The log-likelihood matrix [K x n]
%       - Lc:       The log conditional likelihood vector [1 x n]
%                   (This is the 2nd output argument of hmm_forward).
%
%       Output arguments:
%       - Beta:     The matrix of normalized beta values [K x n]
%
%
%   Created by Dahua Lin, on Feb 1, 2012
%

%% verify input arguments

K = size(T, 1);
if ~(isfloat(T) && isreal(T) && ~issparse(T) && ndims(T) == 2 && ...
        K == size(T,2))
    error('hmm_backward:invalidarg', ...
        'T should be a non-sparse real square matrix.');
end

if ~(isfloat(LL) && isreal(LL) && ~issparse(LL) && ndims(LL) == 2 && ...
        size(LL,1) == K)
    error('hmm_backward:invalidarg', ...
        'LL should be a non-sparse real matrix of size K x n.');
end
n = size(LL, 2);

% reject sparse Lc as well, as the error message promises: the mex
% implementation reads Lc as a dense double array.
if ~(isfloat(Lc) && isreal(Lc) && ~issparse(Lc) && isequal(size(Lc), [1 n]))
    error('hmm_backward:invalidarg', ...
        'Lc should be a non-sparse real vector of size 1 x n.');
end

%% main

% the mex implementation requires double inputs
if ~isa(T, 'double'); T = double(T); end
if ~isa(LL, 'double'); LL = double(LL); end
if ~isa(Lc, 'double'); Lc = double(Lc); end

Beta = hmm_backward_cimp(T, LL, Lc);
/**********************************************************
*
* chain_viterbi_cimp.cpp
*
* The C++ mex implementation of chain_viterbi
*
* Created by Dahua Lin, on Feb 1, 2012
*
**********************************************************/
#include <bcslib/matlab/bcs_mex.h>

#include <cmath>
#include <vector>
using namespace bcs;
using namespace bcs::matlab;
// Full K x K table of second-order potentials (column-major), shared by
// all links; the step index t is ignored.
struct FullSO
{
    FullSO(int K, const double *B)
    : m_dim(K)
    , m_tab(B)
    {
    }

    // potential of the transition (i -> j) on the link after node t
    double operator() (int t, int i, int j) const
    {
        return m_tab[j * m_dim + i];
    }

    int m_dim;
    const double *m_tab;
};
// Full K x K second-order potential table (column-major), scaled per link
// by the weight vector w: potential(t, i, j) = B(i, j) * w[t].
struct FullSO_W
{
    FullSO_W(int K, const double *B, const double *w)
    : m_dim(K)
    , m_tab(B)
    , m_wts(w)
    {
    }

    double operator() (int t, int i, int j) const
    {
        return m_wts[t] * m_tab[j * m_dim + i];
    }

    int m_dim;
    const double *m_tab;
    const double *m_wts;
};
// Two-valued second-order potential: one value when the two states agree,
// another when they differ; the step index t is ignored.
struct SimpleSO
{
    SimpleSO(const double *B)
    : val_same(B[0])
    , val_diff(B[1])
    {
    }

    double operator() (int t, int i, int j) const
    {
        if (i == j) return val_same;
        return val_diff;
    }

    double val_same;
    double val_diff;
};
// Two-valued second-order potential, scaled per link by the weight
// vector w: potential(t, i, j) = (i == j ? B[0] : B[1]) * w[t].
struct SimpleSO_W
{
    SimpleSO_W(const double *B, const double *w)
    : val_same(B[0])
    , val_diff(B[1])
    , m_wts(w)
    {
    }

    double operator() (int t, int i, int j) const
    {
        double b = (i == j) ? val_same : val_diff;
        return b * m_wts[t];
    }

    double val_same;
    double val_diff;
    const double *m_wts;
};
// Forward pass of the Viterbi dynamic program.
//
//   K:  number of states; n: number of chain nodes (n >= 1).
//   A0: optional extra first-order potential for the first node (may be NULL).
//   A:  K x n first-order potentials, column-major.
//   B:  functor: B(t, i, j) is the second-order potential on the link
//       between node t and node t+1 (0-based).
//   V:  output, K x n table of accumulated optimal potentials.
//   R:  output, K x (n-1) backtracking table; R(k, t) is the optimal
//       predecessor state for node t+1 being in state k.
//   final_v / final_s: optimal total potential and optimal final state.
template<class SecondOrd>
void viterbi_forward(int K, int n,
        const double *A0, const double *A, const SecondOrd& B,
        double *V, int *R, double& final_v, int& final_s)
{
    // initialize: V(:,0) = A(:,0), plus A0 if supplied
    if (A0)
    {
        for (int k = 0; k < K; ++k) V[k] = A0[k] + A[k];
    }
    else
    {
        for (int k = 0; k < K; ++k) V[k] = A[k];
    }

    // main loop: V(k,i) = max_l [ V(l,i-1) + B(i-1,l,k) ] + A(k,i)
    double opt_v = 0;
    int opt_s = 0;

    for (int i = 1; i < n; ++i)
    {
        const double *U = V;    // U = previous column of V
        V += K;                 // advance V, A to column i
        A += K;

        for (int k = 0; k < K; ++k)
        {
            // maximize over the predecessor state l
            opt_v = U[0] + B(i-1, 0, k);
            opt_s = 0;

            for (int l = 1; l < K; ++l)
            {
                double v = U[l] + B(i-1, l, k);
                if (v > opt_v)
                {
                    opt_v = v;
                    opt_s = l;
                }
            }

            V[k] = opt_v + A[k];
            R[k] = opt_s;       // remember the argmax for backtracking
        }

        R += K;
    }

    // finalize: pick the best state at the last node
    opt_v = V[0];
    opt_s = 0;

    for (int k = 1; k < K; ++k)
    {
        if (V[k] > opt_v)
        {
            opt_v = V[k];
            opt_s = k;
        }
    }

    final_v = opt_v;
    final_s = opt_s;
}
// Recover the optimal state sequence (1-based, written as doubles for
// MATLAB) from the backtracking table R; column t of R (K x (n-1))
// holds the optimal predecessors for node t+1. final_s is the optimal
// state of the last node.
void viterbi_backtrace(int K, int n, int final_s, const int *R,
        double *x)
{
    int s = final_s;
    x[n - 1] = (double)(s + 1);

    for (int i = n - 2; i >= 0; --i)
    {
        s = R[i * K + s];
        x[i] = (double)(s + 1);
    }
}
template<class SecondOrd>
inline double do_viterbi(int K, int n,
const double *A0, const double *A, const SecondOrd& B,
double *x)
{
double *V = new double[K * n];
int *R = new int[K * (n-1)];
double final_v;
int final_s;
viterbi_forward(K, n, A0, A, B, V, R, final_v, final_s);
viterbi_backtrace(K, n, final_s, R, x);
delete[] V;
delete[] R;
return final_v;
}
/**
 * main entry:
 *
 *  Input
 *    [0]:  A0:   additional first-order potential to the first node [empty or K x 1]
 *    [1]:  A:    the first-order potential [K x n]
 *    [2]:  B:    the second-order potential [K x K or 1 x 2]
 *    [3]:  w:    the second-order link weights [empty or 1 x (n-1)]
 *
 *  Output
 *    [0]:  x:    the solved sequence
 *    [1]:  v:    the optimal total potential
 *
 *  Note: all four inputs are required (the MATLAB wrappers chain_viterbi
 *  and hmm_decode always supply them); inputs are assumed to have been
 *  validated on the MATLAB side.
 */
void bcsmex_main(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // take inputs
    const_marray mA0(prhs[0]);
    const_marray mA(prhs[1]);
    const_marray mB(prhs[2]);
    const_marray mW(prhs[3]);

    int K = (int)mA.nrows();
    int n = (int)mA.ncolumns();

    // A0 and w are optional: an empty array means "not given"
    const double *A0 = !mA0.is_empty() ? mA0.data<double>() : NULL;
    const double *A = mA.data<double>();
    const double *w = !mW.is_empty() ? mW.data<double>() : NULL;

    // prepare output
    marray mX = create_marray<double>(1, n);
    double *x = mX.data<double>();
    double v = 0;

    // main: dispatch on the form of B (1x2 same/diff pair vs. full K x K
    // table) and on whether per-link weights are present
    if (mB.nelems() == 2)
    {
        if (w)
        {
            SimpleSO_W B(mB.data<double>(), w);
            v = do_viterbi(K, n, A0, A, B, x);
        }
        else
        {
            SimpleSO B(mB.data<double>());
            v = do_viterbi(K, n, A0, A, B, x);
        }
    }
    else
    {
        if (w)
        {
            FullSO_W B(K, mB.data<double>(), w);
            v = do_viterbi(K, n, A0, A, B, x);
        }
        else
        {
            FullSO B(K, mB.data<double>());
            v = do_viterbi(K, n, A0, A, B, x);
        }
    }

    // output
    plhs[0] = mX.mx_ptr();
    plhs[1] = mxCreateDoubleScalar(v);  // NOTE(review): set even when nlhs < 2
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/markov/private/chain_viterbi_cimp.cpp | C++ | mit | 5,153 |
/**********************************************************
*
* chain_bp_cimp.cpp
*
* The C++ mex implementation of chain_bp
*
* Created by Dahua Lin, on Feb 1, 2012
*
**********************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include <cmath>
using namespace bcs;
using namespace bcs::matlab;
// Full K x K table of (exponentiated) second-order potentials,
// stored column-major.
struct FullSO
{
    FullSO(int K, const double *B)
    : m_dim(K)
    , m_tab(B)
    {
    }

    double operator() (int i, int j) const
    {
        return m_tab[j * m_dim + i];
    }

    int m_dim;
    const double *m_tab;
};
// Two-valued second-order potential: one value when the two states
// agree, another when they differ.
struct SimpleSO
{
    SimpleSO(const double *B)
    : val_same(B[0])
    , val_diff(B[1])
    {
    }

    double operator() (int i, int j) const
    {
        if (i == j) return val_same;
        return val_diff;
    }

    double val_same;
    double val_diff;
};
// In-place softmax: x[i] <- exp(x[i]) / sum_j exp(x[j]).
// The entries are shifted by their maximum before exponentiation for
// numerical stability.
inline void normalize_exp(int K, double *x)
{
    double m = x[0];
    for (int i = 1; i < K; ++i)
    {
        if (m < x[i]) m = x[i];
    }

    double total = 0;
    for (int i = 0; i < K; ++i)
    {
        x[i] = std::exp(x[i] - m);
        total += x[i];
    }

    const double inv = 1.0 / total;
    for (int i = 0; i < K; ++i)
    {
        x[i] *= inv;
    }
}
// In-place rescaling of x so that its entries sum to one.
inline void normalize(int len, double *x)
{
    double total = 0;
    for (int i = 0; i < len; ++i)
    {
        total += x[i];
    }

    const double inv = 1.0 / total;
    for (int i = 0; i < len; ++i)
    {
        x[i] *= inv;
    }
}
// Fold an incoming message M into the node's log-potentials L:
//   U[k] <- exp(g[k] - max_k g[k]),  g[k] = L[k] + log(M[k])
// (M may be NULL, in which case g[k] = L[k]). The max-shift guards
// against overflow; the result is NOT normalized to sum to one.
inline void update_u(int K, const double *M, const double *L, double *U)
{
    if (M)
    {
        for (int k = 0; k < K; ++k)
        {
            U[k] = std::log(M[k]) + L[k];
        }
    }
    else
    {
        for (int k = 0; k < K; ++k)
        {
            U[k] = L[k];
        }
    }

    double m = U[0];
    for (int k = 1; k < K; ++k)
    {
        if (m < U[k]) m = U[k];
    }

    for (int k = 0; k < K; ++k)
    {
        U[k] = std::exp(U[k] - m);
    }
}
// Forward message passing along the chain.
//
//   LA:   K x n first-order log-potentials (column-major).
//   B:    functor: B(l, k) is the (already exponentiated) second-order
//         potential between consecutive states l and k.
//   fmsg: output, K x (n-1) normalized forward messages; column i-1 is
//         the message passed from node i-1 into node i.
//   U:    scratch buffer of length K holding the current node's
//         exp-shifted belief (message times node potential).
template<class SecondOrdMat>
void chain_forward(int K, int n,
    const double *LA, const SecondOrdMat& B, double *fmsg, double *U)
{
    // initialize: first node has no incoming message
    update_u(K, 0, LA, U);

    // main loop
    for (int i = 1; i < n; ++i)
    {
        // compute fmsg: fmsg[k] = sum_l U[l] * B(l, k), then normalize
        for (int k = 0; k < K; ++k)
        {
            double v = 0;
            for (int l = 0; l < K; ++l)
            {
                v += U[l] * B(l, k);
            }
            fmsg[k] = v;
        }
        normalize(K, fmsg);

        // switch to next: fold the new message into node i's belief.
        // Skipped after the last message so LA/fmsg are not advanced
        // past their final columns.
        if (i < n-1)
        {
            LA += K;
            update_u(K, fmsg, LA, U);
            fmsg += K;
        }
    }
}
// Backward message passing along the chain (mirror of chain_forward).
//
//   LA:   K x n first-order log-potentials (column-major).
//   B:    functor: B(k, l) is the (already exponentiated) second-order
//         potential between consecutive states k and l.
//   bmsg: output, K x (n-1) normalized backward messages; column i-1 is
//         the message passed from node i into node i-1.
//   U:    scratch buffer of length K (exp-shifted node beliefs).
template<class SecondOrdMat>
void chain_backward(int K, int n,
    const double *LA, const SecondOrdMat& B, double *bmsg, double *U)
{
    // initialize: start from the last node, which has no incoming message
    LA += (n - 1) * K;
    bmsg += (n - 2) * K;
    update_u(K, 0, LA, U);

    // main loop
    for (int i = n-1; i > 0; --i)
    {
        // compute bmsg: bmsg[k] = sum_l U[l] * B(k, l), then normalize.
        // The entries are assigned (not accumulated into), so the result
        // no longer depends on the caller handing in a zero-initialized
        // buffer (the original += only worked because mxCreate* arrays
        // are zero-filled); this also matches chain_forward's structure.
        for (int k = 0; k < K; ++k)
        {
            double v = 0;
            for (int l = 0; l < K; ++l)
            {
                v += U[l] * B(k, l);
            }
            bmsg[k] = v;
        }
        normalize(K, bmsg);

        // switch to next: fold the new message into node i-1's belief
        if (i > 1)
        {
            LA -= K;
            update_u(K, bmsg, LA, U);
            bmsg -= K;
        }
    }
}
// Combine node log-potentials with the incoming messages to produce the
// per-node marginals:
//   mu(:,i) ~ exp( LA(:,i) + log fmsg(:,i-1) + log bmsg(:,i) ), normalized.
// The first node has no forward message and the last node no backward
// message. Assumes n >= 2 (the MATLAB wrapper chain_bp handles n == 1
// separately).
void evaluate_belief(int K, int n, const double *LA,
    const double *fmsg, const double *bmsg, double *mu)
{
    // first one: only a backward message arrives
    for (int k = 0; k < K; ++k)
    {
        mu[k] = LA[k] + std::log(bmsg[k]);
    }
    normalize_exp(K, mu);

    mu += K;
    LA += K;
    bmsg += K;

    // middle ones: both forward and backward messages arrive
    for (int i = 1; i < n-1; ++i)
    {
        for (int k = 0; k < K; ++k)
        {
            mu[k] = LA[k] + std::log(fmsg[k]) + std::log(bmsg[k]);
        }
        normalize_exp(K, mu);

        mu += K;
        LA += K;
        fmsg += K;
        bmsg += K;
    }

    // last one: only a forward message arrives
    for (int k = 0; k < K; ++k)
    {
        mu[k] = LA[k] + std::log(fmsg[k]);
    }
    normalize_exp(K, mu);
}
// Run the full sum-product pass: forward messages, then backward
// messages, then combine both into the node marginals. The mu buffer
// doubles as the scratch vector U during the two message passes before
// being overwritten with the beliefs.
template<class SecondOrdMat>
inline void do_chain_bp(int K, int n,
    const double *LA, const SecondOrdMat& B,
    double *mu, double *fmsg, double *bmsg)
{
    chain_forward(K, n, LA, B, fmsg, mu);
    chain_backward(K, n, LA, B, bmsg, mu);
    evaluate_belief(K, n, LA, fmsg, bmsg, mu);
}
/**
 * main entry:
 *
 *  Input
 *    [0]:  LA:     the first-order log-potential [K x n]
 *    [1]:  B:      the second-order potential [K x K or 1 x 2]
 *                  (already exponentiated by the MATLAB wrapper)
 *
 *  Output
 *    [0]:  mu:     the marginal distributions [K x n]
 *    [1]:  fmsg:   the forward messages [K x (n-1)]
 *    [2]:  bmsg:   the backward messages [K x (n-1)]
 *
 *  Assumes n >= 2; the MATLAB wrapper chain_bp handles single-node
 *  chains without calling this routine.
 */
void bcsmex_main(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // take inputs
    const_marray mLA(prhs[0]);
    const_marray mB(prhs[1]);

    int K = (int)mLA.nrows();
    int n = (int)mLA.ncolumns();

    const double *LA = mLA.data<double>();

    // prepare output (mxCreate-backed arrays are zero-initialized)
    marray mMu = create_marray<double>(K, n);
    marray mFmsg = create_marray<double>(K, n-1);
    marray mBmsg = create_marray<double>(K, n-1);

    double *mu = mMu.data<double>();
    double *fmsg = mFmsg.data<double>();
    double *bmsg = mBmsg.data<double>();

    // main: dispatch on the form of B (1x2 same/diff pair vs. K x K table)
    if (mB.nelems() == 2)
    {
        SimpleSO B(mB.data<double>());
        do_chain_bp(K, n, LA, B, mu, fmsg, bmsg);
    }
    else
    {
        FullSO B(K, mB.data<double>());
        do_chain_bp(K, n, LA, B, mu, fmsg, bmsg);
    }

    // output
    plhs[0] = mMu.mx_ptr();
    plhs[1] = mFmsg.mx_ptr();
    plhs[2] = mBmsg.mx_ptr();
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/markov/private/chain_bp_cimp.cpp | C++ | mit | 5,691 |
/********************************************************************
*
* hmm_backward_cimp.cpp
*
* The C++ mex implementation of hmm_backward
*
* Created by Dahua Lin, on Feb 1, 2012
*
********************************************************************/
#include <bcslib/matlab/bcs_mex.h>

#include <cmath>
#include <vector>
using namespace bcs;
using namespace bcs::matlab;
// Dense matrix-vector product y <- T * x, where T is a K x K
// column-major matrix. Accumulates column by column.
inline void mult_mat_vec(int K, const double *T, const double *x, double *y)
{
    for (int i = 0; i < K; ++i)
    {
        y[i] = 0;
    }

    const double *col = T;
    for (int j = 0; j < K; ++j, col += K)
    {
        const double xj = x[j];
        for (int i = 0; i < K; ++i)
        {
            y[i] += col[i] * xj;
        }
    }
}
// Maximum entry of a vector of the given length (len >= 1).
inline double vmax(int len, const double *v)
{
    double best = v[0];
    for (int i = 1; i < len; ++i)
    {
        if (best < v[i])
        {
            best = v[i];
        }
    }
    return best;
}
// core function
//
// Backward recursion producing the normalized beta values:
//   B(:,n) = 1
//   B(:,i-1) = T * ( B(:,i) .* exp(L(:,i)) ) / exp(Lc(i))
// computed with a max-shift on L(:,i) for numerical stability.
// All matrices are column-major; Lc comes from the forward pass.
void do_backward(int K, int n, const double *T, const double *L,
        const double *Lc, double *B)
{
    // last column: beta = 1
    double *b = B + (n - 1) * K;
    for (int k = 0; k < K; ++k) b[k] = 1.0;

    const double *ll = L + (n - 1) * K;

    // scratch buffer: std::vector instead of the raw new/delete pair so
    // the memory is released even if an exception propagates out
    std::vector<double> u((size_t)K);

    for (int i = n-1; i > 0; --i)
    {
        // u[k] = beta_i(k) * exp(ll(k) - max ll), shifted for stability
        double maxll = vmax(K, ll);

        for (int k = 0; k < K; ++k)
        {
            u[k] = b[k] * std::exp(ll[k] - maxll);
        }

        b -= K;
        mult_mat_vec(K, T, &u[0], b);

        // undo the shift and divide by the conditional likelihood
        double c = std::exp(maxll - Lc[i]);
        for (int k = 0; k < K; ++k)
        {
            b[k] *= c;
        }

        ll -= K;
    }
}
/**
 * main entry:
 *
 *  Input
 *    [0]:  T:      the transition matrix [K x K]
 *    [1]:  L:      the log-likelihood matrix [K x n]
 *    [2]:  Lc:     the log-conditional vector [1 x n]
 *                  (2nd output of hmm_forward)
 *
 *  Output
 *    [0]:  Beta:   The resultant matrix of beta values [K x n]
 *
 *  Inputs are assumed dense doubles, validated by hmm_backward.m.
 */
void bcsmex_main(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // take inputs
    const_marray mT(prhs[0]);
    const_marray mL(prhs[1]);
    const_marray mLc(prhs[2]);

    int K = (int)mT.nrows();
    int n = (int)mL.ncolumns();

    const double *T = mT.data<double>();
    const double *L = mL.data<double>();
    const double *Lc = mLc.data<double>();

    // prepare output
    marray mBeta = create_marray<double>(K, n);
    double *B = mBeta.data<double>();

    // main
    do_backward(K, n, T, L, Lc, B);

    // output
    plhs[0] = mBeta.mx_ptr();
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/markov/private/hmm_backward_cimp.cpp | C++ | mit | 2,472 |
/********************************************************************
*
* hmm_forward_cimp.cpp
*
* The C++ mex implementation of hmm_forward
*
* Created by Dahua Lin, on Feb 1, 2012
*
********************************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include <cmath>
using namespace bcs;
using namespace bcs::matlab;
// One normalization step of the forward recursion:
//   A[k] <- P0[k] * exp(L[k] - max L), then rescale A to sum to one;
//   Lc   <- log of the (unshifted) normalizing constant.
// Safe to call with A aliasing P0: each index is read before it is
// written.
inline void nrm_process(int K, const double *P0, const double *L,
        double *A, double& Lc)
{
    double m = L[0];
    for (int k = 1; k < K; ++k)
    {
        if (m < L[k]) m = L[k];
    }

    double total = 0;
    for (int k = 0; k < K; ++k)
    {
        A[k] = P0[k] * std::exp(L[k] - m);
        total += A[k];
    }

    const double inv = 1.0 / total;
    for (int k = 0; k < K; ++k)
    {
        A[k] *= inv;
    }

    Lc = m + std::log(total);
}
// core function
//
// Forward recursion producing the normalized alpha values and the
// per-step log conditional likelihoods:
//   A(:,1) ~ Pi .* exp(L(:,1)),           normalized, Lc(1) = log norm.
//   A(:,i) ~ (T' * A(:,i-1)) .* exp(L(:,i)), normalized, per nrm_process.
// All matrices are column-major; T(u,v) = T[u + v*K].
void do_forward(int K, int n,
        const double *Pi, const double *T, const double *L,
        double *A, double *Lc)
{
    // initialize with the prior Pi
    nrm_process(K, Pi, L, A, Lc[0]);

    const double *A0 = A;   // previous column of A
    A += K;
    L += K;

    // recursively proceed
    for (int i = 1; i < n; ++i)
    {
        // predict: A[k] = sum_u A0[u] * T(u, k)
        for (int k = 0; k < K; ++k)
        {
            double v = 0;
            for (int u = 0; u < K; ++u)
            {
                v += A0[u] * T[u + k * K];
            }
            A[k] = v;
        }

        // update: weight by the likelihood and renormalize.
        // Passing A as both the prior and the output is safe because
        // nrm_process reads P0[k] before writing A[k] at each index.
        nrm_process(K, A, L, A, Lc[i]);

        A0 = A;
        A += K;
        L += K;
    }
}
/**
 * main entry:
 *
 *  Input
 *    [0]:  Pi:     the initial distribution [K x 1]
 *    [1]:  T:      the transition matrix [K x K]
 *    [2]:  L:      the log-likelihood matrix [K x n]
 *
 *  Output
 *    [0]:  Alpha:  The resultant matrix of alpha values [K x n]
 *    [1]:  Lc:     The log conditional likelihoods [1 x n]
 *
 *  Inputs are assumed dense doubles, validated by hmm_forward.m.
 */
void bcsmex_main(int nlhs, mxArray* plhs[], int nrhs, const mxArray* prhs[])
{
    // take inputs
    const_marray mPi(prhs[0]);
    const_marray mT(prhs[1]);
    const_marray mL(prhs[2]);

    int K = (int)mT.nrows();
    int n = (int)mL.ncolumns();

    const double *Pi = mPi.data<double>();
    const double *T = mT.data<double>();
    const double *L = mL.data<double>();

    // prepare output
    marray mAlpha = create_marray<double>(K, n);
    double *A = mAlpha.data<double>();

    marray mLc = create_marray<double>(1, n);
    double *Lc = mLc.data<double>();

    // main
    do_forward(K, n, Pi, T, L, A, Lc);

    // output
    plhs[0] = mAlpha.mx_ptr();
    plhs[1] = mLc.mx_ptr();     // NOTE(review): set even when nlhs < 2
}
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/markov/private/hmm_forward_cimp.cpp | C++ | mit | 2,548 |
function T = markov_tpm(K, seqs, w, pri_c)
%MARKOV_TPM Estimation of Markov Transition Probability Matrix
%
%   T = MARKOV_TPM(K, seqs);
%   T = MARKOV_TPM(K, seqs, w);
%   T = MARKOV_TPM(K, seqs, [], pri_c);
%   T = MARKOV_TPM(K, seqs, w, pri_c);
%
%       Estimates the transition probability from observed sequences
%
%       Input arguments:
%       - K:        The number of different states
%
%       - seqs:     The sequences, which can be in either of the following
%                   form.
%
%                   - a row vector of states
%
%                   - a K x n matrix, seqs(k, i) gives the probability that
%                     the i-th step is in the k-th state.
%                     Each column of this matrix needs to sum to 1.
%
%                   - a cell array. The element in each cell is in either
%                     of the above forms.
%
%       - w:        The weights of the sequences. Suppose there are
%                   m sequences, then w should be a vector of length m.
%
%       - pri_c:    The prior count. (If pri_c is given, it assumes a
%                   Dirichlet prior with alpha = pri_c + 1, and MAP
%                   estimation is to be performed.)
%
%                   pri_c can be either a scalar or a K x K matrix.
%
%   Created by Dahua Lin, on Jan 31, 2012
%

%% verify input arguments

if ~(isnumeric(K) && isscalar(K) && K >= 2 && K == fix(K))
    error('markov_tpm:invalidarg', 'K must be an integer with K >= 2.');
end

if isnumeric(seqs)
    m = 1;
else
    m = numel(seqs);
    if m == 1
        seqs = seqs{1};     % unwrap a singleton cell to the plain form
    end
end

if nargin >= 3 && ~isempty(w)
    if ~(isfloat(w) && isreal(w) && numel(w) == m)
        error('markov_tpm:invalidarg', ...
            'w should be a real array with m elements.');
    end
else
    w = [];
end

if nargin >= 4
    if ~(isfloat(pri_c) && ...
            ((isscalar(pri_c) && pri_c >= 0) || isequal(size(pri_c), [K K])) )
        error('markov_tpm:invalidarg', ...
            'pri_c should be a non-negative real scalar or a K x K real matrix.');
    end
else
    pri_c = 0;
end

%% main

% summarize observations: accumulate (weighted) transition counts
if m == 1
    sH = count_ts(K, seqs);
    if ~isempty(w) && w ~= 1
        sH = sH * w;
    end
else
    sH = zeros(K, K);
    if isempty(w)
        for i = 1 : m
            H = count_ts(K, seqs{i});
            sH = sH + H;
        end
    else
        for i = 1 : m
            H = count_ts(K, seqs{i});
            sH = sH + H * w(i);
        end
    end
end

% solve estimation: add prior counts (MAP w.r.t. a Dirichlet prior),
% then normalize each row to sum to one
if ~isequal(pri_c, 0)
    sH = sH + pri_c;
end

T = bsxfun(@times, sH, 1 ./ sum(sH, 2));
%% counting functions
function H = count_ts(K, s)
% Accumulate the K x K matrix of transition counts for one sequence.
% s is either a 1 x n row of state indices (hard assignment) or a
% K x n matrix of per-step state probabilities (soft assignment).

if ~(isnumeric(s) && isreal(s) && ndims(s) == 2)
    error('markov_tpm:invalidarg', ...
        'Each sequence must be a real vector/matrix.');
end

nr = size(s, 1);
if nr == 1
    % hard labels: count each (s(i), s(i+1)) pair via linear indexing
    src = s(1:end-1);
    dst = s(2:end);
    idx = src + (dst - 1) * K;
    H = reshape(intcount(K * K, idx), K, K);
elseif nr == K
    % soft labels: expected transition counts
    H = s(:, 1:end-1) * s(:, 2:end)';
else
    error('markov_tpm:invalidarg', ...
        'The size of some sequence is invalid.');
end
| zzhangumd-smitoolbox | pmodels/markov/markov_tpm.m | MATLAB | mit | 3,184 |
function [mu, fmsg, bmsg] = chain_bp(LA, LB)
%CHAIN_BP Discrete Belief Propagation along a chain
%
%   [mu, fmsg, bmsg] = CHAIN_BP(LA, LB);
%
%       Performs belief propagation along a chain based on the following
%       Markov model
%
%           p(x) = (1/Z) *
%                  prod_{i=1}^n a_i(x_i) *
%                  prod_{i=1}^{n-1} b(x_i, x_{i+1})
%
%       Here, we assume the second-order potentials are time-homogeneous
%
%       Suppose there are K distinct states and n nodes of the chain.
%
%       Input arguments:
%       - LA:       The logarithm of first-order potentials [K x n]
%
%       - LB:       The logarithm of second-order potential matrix,
%
%                   Note that LB can be either a K x K matrix, or a
%                   pair in form of [lb0, lb1], where lb0 is the log-
%                   potential when x_i and x_{i+1} are the same, and
%                   lb1 is the log-potential when x_i is not equal to
%                   x_{i+1}.
%
%       Output arguments:
%
%       - mu:       The marginal distribution of states [K x n]
%       - fmsg:     The (normalized) forward messages [K x (n-1)]
%       - bmsg:     The (normalized) backward messages [K x (n-1)].
%
%   Created by Dahua Lin, on Feb 1, 2012
%

%% verify input arguments

if ~(isfloat(LA) && isreal(LA) && ~issparse(LA) && ndims(LA) == 2)
    error('chain_bp:invalidarg', 'LA should be a non-sparse real matrix.');
end
K = size(LA, 1);

if ~(isfloat(LB) && isreal(LB) && ~issparse(LB) && ...
        (isequal(size(LB), [K K]) || numel(LB) == 2))
    error('chain_bp:invalidarg', ...
        'LB should be a non-sparse real matrix of size K x K or 1 x 2.');
end

%% main

% the mex implementation requires double inputs
if ~isa(LA, 'double'); LA = double(LA); end
if ~isa(LB, 'double'); LB = double(LB); end

if size(LA, 2) == 1
    % single-node chain: no messages, marginal is the normalized
    % exponentiation of the node potential
    mu = nrmexp(LA, 1);
    fmsg = zeros(K, 0);
    bmsg = zeros(K, 0);
else
    % exponentiate the second-order potentials, shifted by the maximum
    % for numerical stability (a constant shift only rescales the
    % messages, which are normalized inside the mex routine)
    B = exp(LB - max(LB(:)));
    [mu, fmsg, bmsg] = chain_bp_cimp(LA, B);
end
| zzhangumd-smitoolbox | pmodels/markov/chain_bp.m | MATLAB | mit | 1,959 |
function v = ddentropy(P)
% Compute the entropy of discrete distribution
%
%   v = ddentropy(P);
%       computes the entropies of the discrete distributions represented
%       by columns of P.
%
%       If P is a vector (row or column), it returns the entropy of
%       the discrete distribution whose probability mass function is
%       represented by P.
%
%       If P is an m x n (non-vector) matrix, then v will be a 1 x n
%       row vector, with v(i) corresponding to P(:,i).
%
%   History
%   -------
%       - Created by Dahua Lin, on Sep 18, 2010
%

%% verify

if ~(isfloat(P) && ndims(P) == 2 && ~issparse(P))
    error('ddentropy:invalidarg', 'P should be a non-sparse numeric matrix.');
end

%% main

% H(p) = - sum_i p_i * log(p_i)
% (sum_xlogy presumably treats 0 * log(0) as 0, the standard convention)
v = -sum_xlogy(P, P);
| zzhangumd-smitoolbox | pmodels/ddistr/ddentropy.m | MATLAB | mit | 776 |
function v = ddkldiv(P, Q)
% Compute Kullback Leibler divergence
%
%   v = ddkldiv(P, Q);
%       computes the Kullback Leibler divergence between the discrete
%       distributions represented by the columns of P and Q.
%
%       If P and Q are both vectors, then it returns the K-L divergence
%       between them.
%
%       If P and Q are matrices of size m x n (with m > 1 and n > 1),
%       then v will be a row vector of size 1 x n, where v(i) is
%       the divergence between P(:,i) and Q(:,i).
%
%   Remarks
%   -------
%       - The value of K-L divergence can be infinity when p_i > 0 and
%         q_i = 0 for some i.
%
%   History
%   -------
%       - Created by Dahua Lin, on Sep 14, 2010
%       - Modified by Dahua Lin, on Mar 28, 2011
%

%% verify

% validate the inputs, consistent with the sibling function ddentropy
% (previously this function performed no checking at all)
if ~(isfloat(P) && ndims(P) == 2 && ~issparse(P))
    error('ddkldiv:invalidarg', 'P should be a non-sparse numeric matrix.');
end
if ~(isfloat(Q) && ndims(Q) == 2 && ~issparse(Q))
    error('ddkldiv:invalidarg', 'Q should be a non-sparse numeric matrix.');
end

%% main

% KL(P || Q) = sum_i p_i log(p_i) - sum_i p_i log(q_i)
v = sum_xlogy(P, P) - sum_xlogy(P, Q);
| zzhangumd-smitoolbox | pmodels/ddistr/ddkldiv.m | MATLAB | mit | 844 |
function [P, H] = ddestimate(V, w, c0)
%DDESTIMATE Estimates discrete distribution(s)
%
%   P = ddestimate(V);
%   P = ddestimate(V, w);
%
%       performs maximum likelihood estimation of discrete distribution(s)
%       given (weighted) observations.
%
%       Input arguments:
%       - V:        the observed histograms, a matrix of size K x n.
%                   Here, K is the number of classes, and n is the number of
%                   observed histograms.
%
%                   V can also be in form of a cell array as {K, z}, where
%                   z is a row vector of size 1 x n, and each element in z
%                   is an integer between 1 and K.
%
%       - w:        the weights of the histograms, a col vector of size n x 1
%                   or a matrix of size n x m (m groups of weights).
%
%       Output arguments:
%       - P:        the estimated distribution(s).
%                   if w is omitted or a row vector, then P is a column vector
%                   of size K x 1, if there are multiple groups of weights,
%                   namely m > 1, then P is of size K x m.
%
%   P = ddestimate(V, [], c0);
%   P = ddestimate(V, w, c0);
%
%       performs maximum a posteriori estimation of discrete distribution.
%
%       Here, the prior is given by c0, the prior counts, which can be
%       either a scalar, or a vector of size K x 1. If Dirichlet prior is
%       used, then c0 here equals alpha - 1.
%
%   [P, H] = ddestimate( ... );
%
%       additionally returns the accumulated counts.
%
%   Created by Dahua Lin, on Dec 25, 2011
%

%% parse input

if isnumeric(V)
    if ~(isfloat(V) && isreal(V) && ndims(V) == 2)
        error('ddestimate:invalidarg', 'V should be a real matrix.');
    end
    uhist = 1;
    % number of classes: previously K was left undefined in this branch,
    % which made the size check of a vector-valued c0 below fail with an
    % undefined-variable error
    K = size(V, 1);
    n = size(V, 2);

elseif iscell(V) && numel(V) == 2
    K = V{1};
    z = V{2};

    if ~(isscalar(K) && isreal(K) && K == fix(K) && K >= 1)
        error('ddestimate:invalidarg', 'K should be a positive integer.');
    end

    if ~(isvector(z) && isreal(z) && ~issparse(z))
        error('ddestimate:invalidarg', 'z should be a real vector.');
    end
    if size(z, 1) > 1
        z = z.';
    end
    n = size(z, 2);
    uhist = 0;
else
    error('ddestimate:invalidarg', 'The first argument is invalid.');
end

if nargin < 2 || isempty(w)
    w = [];
else
    if ~(isfloat(w) && isreal(w) && ndims(w) == 2 && size(w, 1) == n)
        error('ddestimate:invalidarg', 'w should be a matrix with n rows.');
    end
end

if nargin >= 3 && ~isempty(c0)
    if ~(isfloat(c0) && isreal(c0) && ...
            (isscalar(c0) || isequal(size(c0), [K 1])))
        error('ddestimate:invalidarg', ...
            'c0 should be either a scalar or a real vector of size K x 1.');
    end
else
    c0 = 0;
end

%% main

% accumulate the (weighted) counts into H
if uhist
    if isempty(w)
        H = sum(V, 2);
    else
        H = V * w;
    end
else
    if isempty(w)
        H = intcount(K, z).';
    else
        H = aggreg(w, K, z.', 'sum');
    end
end

% add prior counts (MAP estimation) and normalize columns to sum to one
if ~isequal(c0, 0)
    if isscalar(c0) || size(H, 2) == 1
        H = H + c0;
    else
        H = bsxfun(@plus, H, c0);
    end
end

P = bsxfun(@times, H, 1 ./ sum(H, 1));
| zzhangumd-smitoolbox | pmodels/ddistr/ddestimate.m | MATLAB | mit | 3,142 |
function Y = nrmexp(X, dim)
% Compute normalized exponentiation
%
%   Y = nrmexp(X);
%   Y = nrmexp(X, dim);
%       computes normalized exponentiation along the specified dimension.
%       If dim is omitted, the normalization is performed along the
%       first non-singleton dimension.
%
%       Given a vector x, the normalized exponentiation is a vector y
%       of the same length, which is defined to be
%
%           y_i = exp(x_i) / sum_j exp(x_j).
%
%       To reduce the risk of overflow or underflow, the input values
%       are first appropriately shifted before the computation is
%       performed. This function is useful in converting likelihood
%       to posterior probabilities.
%
%   History
%   -------
%       - Created by Dahua Lin, on Sep 13, 2010
%

%% verify input

if ~isfloat(X)
    error('nrmexp:invalidarg', 'X should be a numeric array.');
end

if nargin < 2
    dim = fnsdim(X);
else
    % also require dim to be an integer, as the error message states
    % (a fractional dim previously slipped through and failed later
    % inside size)
    if ~(isscalar(dim) && dim == fix(dim) && dim >= 1 && dim <= ndims(X))
        error('nrmexp:invalidarg', ...
            'dim should be an integer scalar with 1 <= dim <= ndims(X).');
    end
end

%% main

vlen = size(X, dim);

if vlen <= 1
    % a singleton dimension normalizes to all ones
    siz = size(X);
    siz(dim) = 1;
    Y = ones(siz, class(X));
else
    % shift by the maximum along dim for numerical stability
    mx = max(X, [], dim);
    Y = exp(bsxfun(@minus, X, mx));
    Y = bsxfun(@times, Y, 1 ./ sum(Y, dim));
end
| zzhangumd-smitoolbox | pmodels/ddistr/nrmexp.m | MATLAB | mit | 1,404 |
function Q = ddposterior(pri, lik, op)
%DDPOSTERIOR Compute posterior discrete distribution
%
%   Q = DDPOSTERIOR(pri, lik);
%   Q = DDPOSTERIOR(pri, loglik, 'LL');
%
%       Computes posterior discrete distributions from a prior and the
%       likelihood (or log-likelihood) of the observed samples.
%
%       Suppose there are K classes and n (observed) samples.
%
%       Input arguments:
%       - pri:      the prior distribution, given as either of:
%                   - []:           a uniform prior
%                   - K x 1 vector: a prior probability vector
%                   - K x n matrix: per-sample prior probabilities
%
%       - lik:      K x n matrix of sample likelihoods w.r.t. all
%                   K classes.
%
%       - loglik:   K x n matrix of sample log-likelihoods w.r.t. all
%                   K classes.
%
%       Output arguments:
%       - Q:        K x n matrix; Q(k, i) is the posterior probability
%                   of the i-th sample for the k-th class.
%

%% verify input arguments

if ~isempty(pri) && ~(isfloat(pri) && isreal(pri) && ndims(pri) == 2)
    error('ddposterior:invalidarg', ...
        'pri should be a real vector of matrix.');
end

if ~(isfloat(lik) && isreal(lik) && ndims(lik) == 2)
    error('ddposterior:invalidarg', ...
        'lik or loglik should be a real matrix.');
end

use_log = (nargin >= 3);
if use_log && ~strcmpi(op, 'll')
    error('ddposterior:invalidarg', ...
        'The third argument can only be ''LL'' if given.');
end

[K, n] = size(lik);

if ~isempty(pri) && ...
        ~(size(pri, 1) == K && (size(pri, 2) == 1 || size(pri, 2) == n))
    error('ddposterior:invalidarg', ...
        'The sizes of pri and lik are inconsistent.');
end

%% main

if K == 1
    % a single class: the posterior is trivially one everywhere
    Q = ones(1, n);
    return;
end

if use_log
    % combine prior and likelihood in the log domain, then use the
    % stabilized normalized exponentiation
    if isempty(pri)
        E = lik;
    elseif size(pri, 2) == n
        E = log(pri) + lik;
    else
        E = bsxfun(@plus, log(pri), lik);
    end
    Q = nrmexp(E, 1);
else
    % combine in the probability domain and normalize each column
    if isempty(pri)
        P = lik;
    elseif size(pri, 2) == n
        P = pri .* lik;
    else
        P = bsxfun(@times, pri, lik);
    end
    Q = bsxfun(@times, P, 1 ./ sum(P, 1));
end
| zzhangumd-smitoolbox | pmodels/ddistr/ddposterior.m | MATLAB | mit | 2,391 |
/********************************************************************
*
* ddsample_cimp.cpp
*
* The C++ mex implementation for ddsample: sampling from
* discrete distributions
*
* Created by Dahua Lin, on Nov 7, 2010
*
********************************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include <bcslib/base/block.h>
using namespace bcs;
using namespace bcs::matlab;
template<typename T>
inline int locate_interval(int n, const T *redges, T v)
{
    // The right edges form a non-decreasing sequence (a cumulative
    // distribution), so the first edge with redges[i] >= v can be found
    // by binary search instead of the original linear scan:
    // O(log n) per query rather than O(n), same result on sorted input.
    // Returns n when v exceeds every edge.
    return (int)(std::lower_bound(redges, redges + n, v) - redges);
}
/**
* K: the number of classes
* m: the number of different probability distributions
* n: the number of samples to be drawn from each distribution
* F: the cumulative distribution matrix [K x m column-major]
* V: the uniformly distributed random values [n x m column-major]
*
* samples: the obtained samples [n x m column-major]
*/
void do_ddsample(size_t K, int m, int n,
        const double *F, const double *V, int *samples)
{
    // For each distribution j (a column of F holding K cumulative
    // edges) and each of the n uniform draws for that distribution,
    // locate the first interval whose right edge reaches the value.
    // V and samples are both n x m column-major, so both advance
    // contiguously through the nested loops.
    for (int j = 0; j < m; ++j)
    {
        const double *edges = F + (size_t)j * K;

        for (int i = 0; i < n; ++i)
        {
            double v = *(V++);

            int k = 0;
            while (k < (int)K && v > edges[k]) ++k;

            *(samples++) = k;   // zero-based interval index
        }
    }
}
// Convert zero-based sample indices into an n x m MATLAB double
// matrix holding the corresponding one-based indices.
marray samples_to_matlab(int n, int m, int *samples)
{
    marray mR = create_marray<double>(n, m);
    double *dst = mR.data<double>();

    const int total = n * m;
    for (int k = 0; k < total; ++k)
    {
        dst[k] = (double)(samples[k] + 1);  // shift to one-based
    }

    return mR;
}
/**
* main entry:
*
* Input:
* [0]: F: the cumulative distribution function [K x m double]
* [1]: V: the uniformly distributed random variables [n x m double]
*
* Output:
* [0]: s: the samples [n x m double one-based]
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // take input
    // NOTE(review): nlhs/nrhs are not validated here; presumably the
    // MATLAB wrapper (ddsample.m) guarantees the argument layout —
    // confirm before calling this MEX from anywhere else.
    const_marray mF(prhs[0]);
    const_marray mV(prhs[1]);

    const double *F = mF.data<double>();
    const double *V = mV.data<double>();

    int K = (int)mF.nrows();        // number of classes (rows of F)
    int m = (int)mF.ncolumns();     // number of distributions
    int n = (int)mV.nrows();        // draws per distribution

    // do sampling (zero-based indices into a temporary block)
    scoped_block<int> s(m * n);
    do_ddsample(K, m, n, F, V, s.pbase());

    // output (converted to a one-based double matrix)
    plhs[0] = samples_to_matlab(n, m, s.pbase()).mx_ptr();
}
| zzhangumd-smitoolbox | pmodels/ddistr/private/ddsample_cimp.cpp | C++ | mit | 2,934 |
function X = ddsample(P, n)
% Draw samples from discrete distribution(s)
%
%   X = ddsample(P, n);
%       draws n samples from each of the discrete distributions in P.
%
%       Let P be an K x m matrix, then P represents m distinct discrete
%       distributions over K classes. The columns of P need not sum
%       exactly to one: each column is normalized internally.
%
%       The output X will be an n x m matrix, where X(:,j) contains the
%       samples (class indices in 1:K) drawn from the distribution
%       P(:,j).
%

%% verify input arguments

if ~(isfloat(P) && ~issparse(P) && ndims(P) == 2)
    error('ddsample:invalidarg', ...
        'P should be a non-sparse numeric matrix.');
end

if ~(isnumeric(n) && isscalar(n) && n >= 1 && n == fix(n))
    error('ddsample:invalidarg', ...
        'n should be an integer scalar.');
end
n = double(n);

%% main

F = cumsum(P, 1);

% Normalize so that the last cumulative value of each column equals
% one exactly (x / x == 1 in IEEE arithmetic). Without this step,
% accumulated floating-point error (or an unnormalized input column)
% could leave F(end,j) < 1, and a random value falling above it would
% be mapped to the out-of-range class index K+1 by the MEX routine.
F = bsxfun(@rdivide, F, F(end, :));

V = rand(n, size(P,2));
X = ddsample_cimp(F, V);
| zzhangumd-smitoolbox | pmodels/ddistr/ddsample.m | MATLAB | mit | 848 |
classdef tsuite_wishartd
    % Test suite for wishartd and wishart_sample
    %
    %   Created by Dahua Lin, on Sep 3, 2011
    %

    %% Properties

    properties
        dims = [1 2 3 5];           % matrix dimensions to sweep over
        types = {'s', 'd', 'f'};    % pdmat forms of the scale matrix
        deg = 8.5;                  % degrees of freedom
    end

    %% Test cases

    methods

        % verifies wishartd_logpdf (direct and 'inv' paths) against a
        % straightforward reference implementation
        function test_evaluation(obj)
            run_multi(obj, @tsuite_wishartd.do_test_evaluation);
        end

        % verifies wishartd_sample: the draws are symmetric positive
        % definite and their empirical moments match analytic values
        function test_sampling(obj)
            run_multi(obj, @tsuite_wishartd.do_test_sampling);
        end

    end

    %% Test implementation

    methods(Static, Access='private')

        function do_test_evaluation(d, ty, deg)
            % Compare wishartd_logpdf against calc_logpdf for all three
            % pdmat forms of the observation matrices.

            S = rand_pdmat(ty, d, 1, [0.5, 1.5]);
            Sm = pdmat_fullform(S);
            J = pdmat_inv(S);

            n = 100;
            W_s = rand_pdmat('s', d, n, [0.5 1.5]);
            W_d = rand_pdmat('d', d, n, [0.5 1.5]);
            W_f = rand_pdmat('f', d, n, [0.5 1.5]);

            Ls0 = tsuite_wishartd.calc_logpdf(Sm, deg, W_s);
            Ld0 = tsuite_wishartd.calc_logpdf(Sm, deg, W_d);
            Lf0 = tsuite_wishartd.calc_logpdf(Sm, deg, W_f);

            Ls1 = wishartd_logpdf(S, deg, W_s);
            Ld1 = wishartd_logpdf(S, deg, W_d);
            Lf1 = wishartd_logpdf(S, deg, W_f);

            Ls2 = wishartd_logpdf(J, deg, W_s, [], 'inv');
            Ld2 = wishartd_logpdf(J, deg, W_d, [], 'inv');
            Lf2 = wishartd_logpdf(J, deg, W_f, [], 'inv');

            assert(isequal(size(Ls0), [1, n]));
            assert(isequal(size(Ld0), [1, n]));
            assert(isequal(size(Lf0), [1, n]));
            assert(isequal(size(Ls1), [1, n]));
            assert(isequal(size(Ld1), [1, n]));
            assert(isequal(size(Lf1), [1, n]));

            % the 'inv' path must reproduce the direct path exactly
            assert(isequal(Ls1, Ls2));
            assert(isequal(Ld1, Ld2));
            assert(isequal(Lf1, Lf2));

            devcheck('loglik (s)', Ls0, Ls1, 1e-12);
            devcheck('loglik (d)', Ld0, Ld1, 1e-12);
            devcheck('loglik (f)', Lf0, Lf1, 1e-12);
        end


        function do_test_sampling(d, ty, deg)
            % Draw samples with a standard and a general scale matrix,
            % check symmetry/positive-definiteness of a subset, and
            % compare empirical mean/variance with analytic values.

            S = rand_pdmat(ty, d, 1, [0.5, 1.5]);
            Sm = pdmat_fullform(S);

            N0 = 50;
            N = 2e5;

            Ys = wishartd_sample(d, deg, N);
            assert(isequal(size(Ys), [d d N]));

            for i = 1 : N0
                Yi = Ys(:,:,i);
                assert(isequal(Yi, Yi'));
                % fixed: the original read all(eig(Yi)) > 0, whose
                % misplaced parenthesis only asserted that no eigenvalue
                % is exactly zero (all(...) yields a logical, and
                % logical > 0 is trivially its own value)
                assert(all(eig(Yi) > 0));
            end

            Ws = wishartd_sample(S, deg, N);
            assert(isequal(size(Ws), [d d N]));

            for i = 1 : N0
                Wi = Ws(:,:,i);
                assert(isequal(Wi, Wi'));
                % fixed: same misplaced parenthesis as above
                assert(all(eig(Wi) > 0));
            end

            % analytic moments of W ~ Wishart(S, deg):
            %   E[W] = deg * S,  Var[W_ij] = deg * (S_ij^2 + S_ii * S_jj)
            Ymean0 = deg * eye(d);
            Wmean0 = deg * Sm;

            Id = eye(d);
            Yvar0 = zeros(d, d);
            Wvar0 = zeros(d, d);

            for i = 1 : d
                for j = 1 : d
                    Yvar0(i, j) = deg * (Id(i,j)^2 + Id(i,i) * Id(j,j));
                    Wvar0(i, j) = deg * (Sm(i,j)^2 + Sm(i,i) * Sm(j,j));
                end
            end

            Ymean = mean(Ys, 3);
            Yvar = reshape(vecvar(reshape(Ys, d*d, N)), d, d);

            Wmean = mean(Ws, 3);
            Wvar = reshape(vecvar(reshape(Ws, d*d, N)), d, d);

            devcheck('sample mean (std)', Ymean, Ymean0, 5e-2);
            devcheck('sample var (std)', Yvar, Yvar0, 0.3);

            devcheck('sample mean', Wmean, Wmean0, 5e-2);
            devcheck('sample var', Wvar, Wvar0, 0.3);
        end

    end

    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % run multiple test under different settings
            %
            % fixed: the loop previously read "for it = numel(obj.types)",
            % which exercised only the last matrix form instead of all
            % of them

            for it = 1 : numel(obj.types)
                ty = obj.types{it};
                for d = obj.dims
                    tfunc(d, ty, obj.deg);
                end
            end
        end

    end

    methods(Static, Access='private')

        function L = calc_logpdf(Sm, df, W)
            % reference Wishart log-pdf: L(i) is the log-density of the
            % i-th matrix in W under Wishart(Sm, df)

            d = W.d;
            lpc = df*d*log(2)/2 + df * lndet(Sm)/2 + mvgammaln(d, df/2);

            n = W.n;
            L = zeros(1, n);

            for i = 1 : n
                Wi = pdmat_fullform(W, i);
                u1 = (df - d - 1) / 2 * lndet(Wi);
                u2 = (-1/2) * trace(Sm \ Wi);
                L(i) = u1 + u2 - lpc;
            end
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_wishartd.m | MATLAB | mit | 4,967 |
classdef tsuite_ppca
    % A test suite for PPCA functions
    %
    %   Created by Dahua Lin, on Dec 27, 2011
    %

    %% properties

    properties
        % each entry is [d q]: observation dimension and latent dimension
        dims = {[2 1], [5, 3], [10, 4]};
    end

    %% Test cases

    methods

        % verifies the model accessors: W, cov, icov, and conversion
        % to gaussd form
        function test_basics(obj)
            run_multi(obj, @tsuite_ppca.do_test_basics);
        end

        % verifies forward/backward transforms, Mahalanobis distances,
        % and log-pdf evaluation
        function test_evaluation(obj)
            run_multi(obj, @tsuite_ppca.do_test_evaluation);
        end

        % verifies that sample moments match the model moments
        function test_sampling(obj)
            run_multi(obj, @tsuite_ppca.do_test_sampling);
        end

    end

    %% Core Test functions

    methods(Static)

        function do_test_basics(d, q, zmean)
            % Test basic functionalities

            M = tsuite_ppca.rand_model(d, q, zmean);

            % check W
            W0 = tsuite_ppca.Wmat(M);
            W = ppca_W(M);
            assert(isequal(size(W), [d q]));
            devcheck('Wmat', W, W0, 1e-15);

            % check cov and icov
            C0 = tsuite_ppca.Cmat(M);
            C = ppca_cov(M);
            assert(isa(C, 'double') && isequal(size(C), [d d]));
            devcheck('Cov', C, C0, 1e-13);

            J = ppca_icov(M);
            assert(isa(J, 'double') && isequal(size(J), [d d]));
            devcheck('ICov', C * J, eye(d), 1e-12);
            devcheck('ICov', J * C, eye(d), 1e-12);

            % check conversion to gaussd
            Gm = gaussd('m', M);
            Gc = gaussd('c', M);

            assert(is_gaussd(Gm) && Gm.d == d && Gm.n == 1);
            assert(is_gaussd(Gc) && Gc.d == d && Gc.n == 1);
            assert(is_pdmat(Gm.C) && Gm.C.d == d && Gm.C.ty == 'f' && Gm.C.n == 1);
            assert(is_pdmat(Gc.J) && Gc.J.d == d && Gc.J.ty == 'f' && Gc.J.n == 1);

            assert(isequal(Gm.C.v, C));
            assert(isequal(Gc.J.v, J));

            if zmean
                assert(isequal(Gm.mu, 0));
                assert(isequal(Gc.h, 0));
            else
                assert(isequal(size(Gm.mu), [d 1]));
                assert(isequal(size(Gc.h), [d 1]));

                assert(isequal(Gm.mu, M.mu));
                h0 = Gm.C.v \ Gm.mu;
                devcheck('h-vec', Gc.h, h0, 1e-13);
            end
        end


        function do_test_evaluation(d, q, zmean)
            % Test evaluation and transform

            M = tsuite_ppca.rand_model(d, q, zmean);

            % check ft and bt

            n = 100;
            Z = randn(q, n);
            X0 = tsuite_ppca.ftr(M, Z);
            X = ppca_ft(M, Z);

            assert(isequal(size(X), [d, n]));
            devcheck('ft', X, X0, 1e-14);

            Zr = ppca_bt(M, X);
            % fixed: the original asserted size(Z), which is trivially
            % [q n] since Z was just created; the intent is to check the
            % size of the back-transformed Zr
            assert(isequal(size(Zr), [q n]));
            devcheck('bt', Zr, Z, 1e-12);

            % sqmahdist

            Gm = gaussd('m', M);
            Y = randn(d, n);

            D0 = gaussd_sqmahdist(Gm, Y);
            D = ppca_sqmahdist(M, Y);

            assert(isequal(size(D), [1 n]));
            devcheck('sqmahdist', D, D0, 1e-11);

            % logpdf

            L0 = gaussd_logpdf(Gm, Y);
            L = ppca_logpdf(M, Y);

            assert(isequal(size(L), [1 n]));
            devcheck('logpdf', L, L0, 1e-11);
        end


        function do_test_sampling(d, q, zmean)
            % Test sampling

            M = tsuite_ppca.rand_model(d, q, zmean);
            C = ppca_cov(M);

            if zmean
                mu = zeros(d, 1);
            else
                mu = M.mu;
            end

            ns = 1e6;
            X = ppca_sample(M, ns);

            Xmean = vecmean(X);
            Xcov = veccov(X);

            devcheck('sample mean', Xmean, mu, 5e-2);
            devcheck('sample cov', Xcov, C, 2e-1);
        end

    end

    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % Run a test case under multiple conditions

            ds = obj.dims;
            for i = 1 : numel(ds)
                d = ds{i}(1);
                q = ds{i}(2);

                tfunc(d, q, true);      % zero-mean model
                tfunc(d, q, false);     % general-mean model
            end
        end

    end

    methods(Static, Access='private')

        function M = rand_model(d, q, zmean)
            % construct a random PPCA model and sanity-check its fields

            U = orth(randn(d, d));
            U = U(:, 1:q);
            s = sort(rand(1, q) * 4 + 1);
            se = 0.5;

            if zmean
                mu = 0;
            else
                mu = randn(d, 1);
            end

            M = ppca_model(U, s, se, mu);

            assert(is_ppca(M));
            assert(M.d == d);
            assert(M.q == q);
            assert(isequal(M.s, s));
            assert(M.se == se);
            assert(isequal(M.B, U));
            assert(isequal(M.mu, mu));
        end

        function W = Wmat(M)
            % reference loading matrix: W = B * diag(s)
            W = bsxfun(@times, M.B, M.s);
        end

        function C = Cmat(M)
            % reference observation covariance: B diag(s)^2 B' + se^2 I
            D = diag(M.s.^2);
            C = M.B * D * M.B';
            C = adddiag(C, M.se^2);

            if ~isequal(C, C')
                C = 0.5 * (C + C');     % enforce exact symmetry
            end
        end

        function X = ftr(M, Z)
            % reference forward transform: X = W * Z + mu
            W = ppca_W(M);
            X = bsxfun(@plus, W * Z, M.mu);
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_ppca.m | MATLAB | mit | 6,035 |
classdef tsuite_dirichletd
    % Test suite for dirichlet distribution functions
    %
    %   History
    %   -------
    %     - Create by Dahua Lin, on Sep 1, 2011
    %     - Modified by Dahua Lin, on Sep 3, 2011
    %

    %% Properties

    properties
        dims = [2 5];       % sample-space dimensions to sweep over
        nums = [1 3];       % numbers of parallel distributions to sweep over
    end

    %% Test cases

    methods

        % verifies dird_cov / dird_entropy / dird_logpdf against the
        % reference implementations below
        function test_evaluation(obj)
            run_multi(obj, @tsuite_dirichletd.do_test_evaluation);
        end

        % verifies that dird_sample draws match analytic moments
        function test_sampling(obj)
            run_multi(obj, @tsuite_dirichletd.do_test_sampling);
        end

    end

    %% Test implementation

    methods(Static, Access='private')

        function do_test_evaluation(d, m, alpha)
            % Compare dird_* against reference computations.
            % alpha is either 1 x m (shared concentration, replicated to
            % d rows below) or d x m.

            N = 100;
            X = rand(d, N);
            X = bsxfun(@times, X, 1 ./ sum(X, 1));  % points on the simplex

            if d > size(alpha, 1)
                A = repmat(alpha, d, 1);    % expand shared alpha to d rows
            else
                A = alpha;
            end

            % covariance (only checked for a single distribution)
            if m == 1
                C0 = tsuite_dirichletd.calc_cov(A);
                C1 = dird_cov(alpha, d);

                assert(isequal(size(C0), [d d]));
                assert(isequal(size(C1), [d d]));
                devcheck('cov', C1, C0, 1e-15);
            end

            % entropy
            ent0 = tsuite_dirichletd.calc_entropy(A);
            ent1 = dird_entropy(alpha, d);

            assert(isequal(size(ent0), [1 m]));
            assert(isequal(size(ent1), [1 m]));
            devcheck('entropy', ent1, ent0, 1e-12);

            % log pdf
            L0 = tsuite_dirichletd.calc_logpdf(A, X);
            L1 = dird_logpdf(alpha, X);

            assert(isequal(size(L0), [m N]));
            assert(isequal(size(L1), [m N]));
            devcheck('logpdf', L1, L0, 1e-12);
        end


        function do_test_sampling(d, m, alpha)
            % Draw a large sample set and compare empirical mean and
            % covariance with analytic values (single distribution only).

            if m > 1
                return;
            end

            if d > size(alpha, 1)
                A = repmat(alpha, d, 1);
            else
                A = alpha;
            end

            mean0 = bsxfun(@times, A, 1 ./ sum(A, 1));
            cov0 = dird_cov(A);

            ns = 1e6;
            X = dird_sample(alpha, [d ns]);
            assert(isequal(size(X), [d, ns]));

            smean = vecmean(X);
            scov = veccov(X);

            devcheck('sample - mean', smean, mean0, 1e-2);
            devcheck('sample - cov', scov, cov0, 2e-2);
        end

    end


    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % run multiple test under different settings

            ds = obj.dims;
            ms = obj.nums;

            for d = ds
                for m = ms
                    % shared concentration across dimensions
                    a0 = 1.2 + rand(1, m);
                    tfunc(d, m, a0);

                    % per-dimension concentration
                    a1 = 1.2 + rand(d, m);
                    tfunc(d, m, a1);
                end
            end
        end

    end

    methods(Static, Access='private')

        function C = calc_cov(A)
            % reference Dirichlet covariance (A is d x 1 here, so a0 is
            % the scalar total concentration)

            d = size(A, 1);
            a0 = sum(A, 1);

            C = zeros(d, d);
            for i = 1 : d
                for j = 1 : d
                    ai = A(i);
                    aj = A(j);
                    if i == j
                        cv = ai * (a0 - ai) / (a0^2 * (a0 + 1));
                    else
                        cv = - ai * aj / (a0^2 * (a0 + 1));
                    end
                    C(i, j) = cv;
                end
            end
        end

        function v = calc_entropy(A)
            % reference Dirichlet entropy, one value per column of A

            logB = sum(gammaln(A), 1) - gammaln(sum(A, 1));
            d = size(A, 1);
            a0 = sum(A, 1);
            v = logB + (a0 - d) .* psi(a0) - sum((A - 1) .* psi(A), 1);
        end

        function L = calc_logpdf(A, X)
            % reference log-pdf: L(i, j) is the log-density of X(:,j)
            % under the Dirichlet with parameter A(:,i)

            m = size(A, 2);
            n = size(X, 2);
            L = zeros(m, n);

            logB = sum(gammaln(A), 1) - gammaln(sum(A, 1));

            for i = 1 : m
                a = A(:, i);
                for j = 1 : n
                    x = X(:, j);
                    L(i, j) = -logB(i) + (a-1)' * log(x);
                end
            end
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_dirichletd.m | MATLAB | mit | 4,972 |
classdef tsuite_gammad
    % Test suite for gamma distribution functions
    %
    %   History
    %   -------
    %     - Create by Dahua Lin, on Sep 1, 2011
    %     - Modified by Dahua Lin, on Sep 3, 2011
    %

    %% Properties

    properties
        % each entry is [da db d]: the row-counts of alpha and beta, and
        % the sample dimension (da/db == 1 means a shared parameter)
        dims = { [1 1 1], [1 1 3], [1 3 3], [3 1 3], [3 3 3] };
        % each entry is [ma mb]: the column-counts of alpha and beta
        nums = { [1 1], [1 5], [5 1], [5 5] };
    end

    %% Test cases

    methods

        % verifies gammad_entropy / gammad_logpdf against the reference
        % implementations below
        function test_evaluation(obj)
            run_multi(obj, @tsuite_gammad.do_test_evaluation);
        end

        % verifies that gammad_sample draws match analytic moments
        function test_sampling(obj)
            run_multi(obj, @tsuite_gammad.do_test_sampling);
        end

    end

    %% Test implementation

    methods(Static, Access='private')

        function do_test_evaluation(d, m, alpha, beta)

            N = 100;
            X = rand(d, N);

            % entropy
            ent0 = tsuite_gammad.calc_entropy(alpha, beta);
            ent = gammad_entropy(alpha, beta);

            assert(isequal(size(ent0), [1 m]));
            assert(isequal(size(ent), [1 m]));
            devcheck('entropy eval', ent, ent0, 1e-12);

            % logpdf
            L0 = tsuite_gammad.calc_logpdf(alpha, beta, X);
            L = gammad_logpdf(alpha, beta, X);

            assert(isequal(size(L0), [m N]));
            assert(isequal(size(L), [m N]));
            devcheck('logpdf eval', L, L0, 1e-12);
        end


        function do_test_sampling(d, m, alpha, beta)
            % Empirical-moment check of gammad_sample; only the
            % single-distribution case is exercised.

            if m > 1
                return;
            end

            % shape-scale parameterization: mean = a*b, var = a*b^2
            mean0 = alpha .* beta;
            var0 = alpha .* (beta.^2);

            if d > 1 && isscalar(alpha) && isscalar(beta)
                mean0 = mean0(ones(d, 1), 1);
                var0 = var0(ones(d, 1), 1);
            end

            ns = 1e5;

            X1 = gammad_sample(alpha, beta, [d ns]);
            assert(isequal(size(X1), [d, ns]));

            devcheck('sample 1 - mean', vecmean(X1), mean0, 2e-2);
            devcheck('sample 1 - var', vecvar(X1), var0, 0.15);
        end

    end


    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % run multiple test under different settings

            ds = obj.dims;
            ms = obj.nums;

            for i = 1 : numel(ds)
                for j = 1 : numel(ms)
                    d = ds{i};
                    m = ms{j};

                    da = d(1);
                    db = d(2);
                    d = d(3);

                    ma = m(1);
                    mb = m(2);
                    m = max(ma, mb);

                    alpha = rand(da, ma) + 1.5;
                    beta = rand(db, mb) + 0.5;

                    tfunc(d, m, alpha, beta);
                end
            end
        end

    end

    methods(Static, Access='private')

        function v = calc_entropy(A, B)
            % reference entropy (summed over rows/dimensions)
            v = bsxfun(@plus, A + gammaln(A) + (1 - A) .* psi(A), log(B));
            v = sum(v, 1);
        end

        function L = calc_logpdf(A, B, X)
            % reference log-pdf via gampdf (Statistics Toolbox,
            % shape-scale parameterization), summed over dimensions

            m = max(size(A, 2), size(B, 2));
            [d, n] = size(X);

            A = bsxfun(@times, ones(d, m), A);
            B = bsxfun(@times, ones(d, m), B);

            L = zeros(m, n);
            for k = 1 : m
                Pk = zeros(d, n);
                for i = 1 : d
                    a = A(i, k);
                    b = B(i, k);
                    Pk(i, :) = gampdf(X(i, :), a, b);
                end
                L(k, :) = sum(log(Pk), 1);
            end
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_gammad.m | MATLAB | mit | 4,188 |
classdef tsuite_vscatter
    % A test suite for vscatter function
    %
    %   Created by Dahua Lin, on Sep 28, 2011
    %

    %% Test cases

    methods

        % verifies the 'v' (per-dimension) form of vscatter
        function test_scatter_vec(obj) %#ok<MANU>
            tsuite_vscatter.run_multi(@tsuite_vscatter.do_test_scatter_vec);
        end

        % verifies the 'c' (full covariance) form of vscatter
        function test_scatter_mat(obj) %#ok<MANU>
            tsuite_vscatter.run_multi(@tsuite_vscatter.do_test_scatter_mat);
        end

    end

    methods(Static)

        function do_test_scatter_vec(X, U, W)
            % Compare vscatter(X, U, W, 'v') against a direct per-class
            % weighted sum of squared deviations. U == 0 stands for a
            % zero center; scalar W stands for a uniform weight.

            [d, n] = size(X);
            K = size(U, 2);

            R0 = zeros(d, K);
            for k = 1 : K
                if isequal(U, 0)
                    u = zeros(d, 1);
                else
                    u = U(:,k);
                end

                if isscalar(W)
                    w = W * ones(1, n);
                else
                    w = W(k,:);
                end

                Y = bsxfun(@minus, X, u);
                R0(:,k) = (Y.^2) * w';
            end

            R = vscatter(X, U, W, 'v');

            assert(isequal(size(R0), size(R)));
            assert(max(abs(R(:) - R0(:))) < 1e-12);
        end

        function do_test_scatter_mat(X, U, W)
            % Compare vscatter(X, U, W, 'c') against directly computed
            % (symmetrized) weighted scatter matrices.

            [d, n] = size(X);
            K = size(U, 2);

            R0 = zeros(d, d, K);
            for k = 1 : K
                if isequal(U, 0)
                    u = zeros(d, 1);
                else
                    u = U(:,k);
                end

                if isscalar(W)
                    w = W * ones(1, n);
                else
                    w = W(k,:);
                end

                Y = bsxfun(@minus, X, u);
                C = Y * diag(w) * Y';
                R0(:,:,k) = 0.5 * (C + C');     % enforce exact symmetry
            end

            R = vscatter(X, U, W, 'c');

            assert(isequal(size(R0), size(R)));
            for k = 1 : K
                C = R(:,:,k);
                assert(isequal(C, C'));     % results must be symmetric
            end
            assert(max(abs(R(:) - R0(:))) < 1e-12);
        end

        function run_multi(tfunc)
            % sweep dimensions and class counts; for K == 1 also test
            % the zero-center form with scalar and matrix weights

            n = 50;
            for d = [1 2 5]
                for K = [1 3]
                    X = rand(d, n);
                    U = rand(d, K);
                    W = rand(K, n);

                    if K == 1
                        tfunc(X, 0, 1);
                        tfunc(X, 0, 2.5);
                        tfunc(X, 0, W);
                    end

                    tfunc(X, U, 1);
                    tfunc(X, U, 2.5);
                    tfunc(X, U, W);
                end
            end
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_vscatter.m | MATLAB | mit | 2,938 |
classdef tsuite_invgammad
    % Test suite for inverse gamma distribution functions
    %
    %   History
    %   -------
    %     - Create by Dahua Lin, on Sep 1, 2011
    %     - Modified by Dahua Lin, on Sep 3, 2011
    %

    %% Properties

    properties
        % each entry is [da db d]: the row-counts of alpha and beta, and
        % the sample dimension (da/db == 1 means a shared parameter)
        dims = { [1 1 1], [1 1 3], [1 3 3], [3 1 3], [3 3 3] };
        % each entry is [ma mb]: the column-counts of alpha and beta
        nums = { [1 1], [1 5], [5 1], [5 5] };
    end

    %% Test cases

    methods

        % verifies invgammad_entropy / invgammad_logpdf against the
        % reference implementations below
        function test_evaluation(obj)
            run_multi(obj, @tsuite_invgammad.do_test_evaluation);
        end

        function test_sampling(obj)
            run_multi(obj, @tsuite_invgammad.do_test_sampling);
        end

    end

    %% Test implementation

    methods(Static, Access='private')

        function do_test_evaluation(d, m, alpha, beta)

            N = 100;
            X = rand(d, N) + 0.5;

            % entropy
            ent0 = tsuite_invgammad.calc_entropy(alpha, beta);
            ent = invgammad_entropy(alpha, beta);

            assert(isequal(size(ent0), [1 m]));
            assert(isequal(size(ent), [1 m]));
            devcheck('entropy eval', ent, ent0, 1e-12);

            % logpdf
            L0 = tsuite_invgammad.calc_logpdf(alpha, beta, X);
            L = invgammad_logpdf(alpha, beta, X);

            assert(isequal(size(L0), [m N]));
            assert(isequal(size(L), [m N]));
            devcheck('logpdf eval', L, L0, 1e-12);
        end


        function do_test_sampling(d, m, alpha, beta)
            % NOTE(review): this method appears to be copy-pasted from
            % tsuite_gammad.do_test_sampling — it samples with
            % gammad_sample (not an inverse-gamma sampler) and checks the
            % Gamma moments (mean = a*b, var = a*b^2) rather than the
            % inverse-gamma ones (mean = b/(a-1) for a > 1). TODO confirm
            % and fix; note also that the inverse-gamma variance is
            % infinite for alpha <= 2, which the alphas drawn in
            % run_multi (1.5 + rand) can produce, so a corrected variance
            % check would need larger alphas.

            if m > 1
                return;
            end

            mean0 = alpha .* beta;
            var0 = alpha .* (beta.^2);

            if d > 1 && isscalar(alpha) && isscalar(beta)
                mean0 = mean0(ones(d, 1), 1);
                var0 = var0(ones(d, 1), 1);
            end

            ns = 1e5;

            X1 = gammad_sample(alpha, beta, [d ns]);
            assert(isequal(size(X1), [d, ns]));

            devcheck('sample 1 - mean', vecmean(X1), mean0, 2e-2);
            devcheck('sample 1 - var', vecvar(X1), var0, 0.15);
        end

    end


    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % run multiple test under different settings

            ds = obj.dims;
            ms = obj.nums;

            for i = 1 : numel(ds)
                for j = 1 : numel(ms)
                    d = ds{i};
                    m = ms{j};

                    da = d(1);
                    db = d(2);
                    d = d(3);

                    ma = m(1);
                    mb = m(2);
                    m = max(ma, mb);

                    alpha = rand(da, ma) + 1.5;
                    beta = rand(db, mb) + 0.5;

                    tfunc(d, m, alpha, beta);
                end
            end
        end

    end

    methods(Static, Access='private')

        function v = calc_entropy(A, B)
            % reference inverse-gamma entropy (summed over dimensions)
            v = bsxfun(@plus, A + gammaln(A) - (1 + A) .* psi(A), log(B));
            v = sum(v, 1);
        end

        function L = calc_logpdf(A, B, X)
            % reference log-pdf of InvGamma(a, b):
            %   p(x) = b^a / Gamma(a) * x^(-a-1) * exp(-b / x)
            % summed over the d dimensions

            m = max(size(A, 2), size(B, 2));
            [d, n] = size(X);

            A = bsxfun(@times, ones(d, m), A);
            B = bsxfun(@times, ones(d, m), B);

            L = zeros(m, n);
            for k = 1 : m
                Pk = zeros(d, n);
                for i = 1 : d
                    a = A(i, k);
                    b = B(i, k);
                    x = X(i, :);
                    Pk(i, :) = b^a / gamma(a) * (x.^ (-a-1)) .* exp(-b ./ x);
                end
                L(k, :) = sum(log(Pk), 1);
            end
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_invgammad.m | MATLAB | mit | 4,280 |
classdef tsuite_gaussd
    % A test suite for gaussd class
    %
    %   History
    %   -------
    %     - Created by Dahua Lin, on Jun 19, 2010
    %     - Modified by Dahua Lin, on Aug 17, 2011
    %     - Modified by Dahua Lin, on Aug 25, 2011
    %     - Modified by Dahua Lin, on Sep 3, 2011
    %

    %% Properties

    properties
        types = {'s', 'd', 'f'};    % pdmat covariance forms to sweep over
        dims = [1 2 5];             % dimensions to sweep over
        nums = [1, 3];              % numbers of component models to sweep over
    end

    %% Test cases

    methods

        % construction and conversion between mean-form ('m') and
        % canonical-form ('c') parameterizations
        function test_construction(obj)
            run_multi(obj, @tsuite_gaussd.do_test_construction);
        end

        % the constant terms (ca, cb) and entropy
        function test_calculation(obj)
            run_multi(obj, @tsuite_gaussd.do_test_calculation);
        end

        % squared Mahalanobis distance and log-pdf evaluation
        function test_evaluation(obj)
            run_multi(obj, @tsuite_gaussd.do_test_evaluation);
        end

        % maximum-likelihood estimation (unweighted, weighted, sparse)
        function test_mle(obj)
            run_multi(obj, @tsuite_gaussd.do_test_mle);
        end

        % sample moments vs. model moments
        function test_sampling(obj)
            run_multi(obj, @tsuite_gaussd.do_test_sampling);
        end

    end

    %% Test case implementation

    methods(Static, Access='private')

        function do_test_construction(cf, d, m, is_zeromean, is_shared)
            % Build models in both parameterizations and verify the
            % round-trip conversions m->c and c->m.

            % parse settings
            if is_zeromean; assert(m == 1); end
            if is_shared; m2 = 1; else m2 = m; end

            % generate mean-param model

            if is_zeromean
                mu = 0;
            else
                mu = randn(d, m);
            end
            C = rand_pdmat(cf, d, m2, [1, 3]);

            g_m0 = gaussd('m', mu, C);
            tsuite_gaussd.verify_mp(g_m0, d, m, cf, mu, C);

            % generate canon-param model

            if is_zeromean
                h = 0;
            else
                h = pdmat_lsolve(C, mu);    % h = C \ mu
            end
            J = pdmat_inv(C);

            g_c0 = gaussd('c', h, J);
            tsuite_gaussd.verify_cp(g_c0, d, m, cf, h, J);

            % mean to canon conversion

            g_c1 = gaussd('c', g_m0);
            tsuite_gaussd.verify_cp(g_c1, d, m, cf, [], []);
            devcheck('mp->cp conversion (h)', g_c1.h, h, 1e-12);
            devcheck('mp->cp conversion (J)', g_c1.J.v, J.v, 1e-12);

            % canon to mean conversion

            g_m1 = gaussd('m', g_c0);
            tsuite_gaussd.verify_mp(g_m1, d, m, cf, [], []);
            devcheck('cp->mp conversion (mu)', g_m1.mu, mu, 1e-12);
            devcheck('cp->mp conversion (C)', g_m1.C.v, C.v, 1e-12);
        end


        function do_test_calculation(ty, d, m, is_zeromean, is_shared)
            % Verify gaussd_const and gaussd_entropy for both
            % parameterizations against the reference helpers below.

            % parse settings
            if is_zeromean; assert(m == 1); end
            if is_shared; m2 = 1; else m2 = m; end

            % generate model

            if is_zeromean;
                mu = 0;
            else
                mu = randn(d, m);
            end
            C = rand_pdmat(ty, d, m2, [1, 3]);

            g_mp = gaussd('m', mu, C);
            g_cp = gaussd('c', g_mp);

            % calculate ground-truths

            if m == 1
                if is_zeromean
                    gt.ca = 0;
                else
                    gt.ca = tsuite_gaussd.calc_ca(mu, C);
                end
            else
                gt.ca = zeros(1, m);
                for k = 1 : m
                    if C.n == 1
                        Ck = C;
                    else
                        Ck = pdmat_pick(C, k);
                    end
                    gt.ca(k) = tsuite_gaussd.calc_ca(mu(:,k), Ck);
                end
            end

            if m2 == 1
                gt.cb = tsuite_gaussd.calc_cb(C);
                gt.ent = tsuite_gaussd.calc_entropy(C);
            else
                gt.cb = zeros(1, m);
                gt.ent = zeros(1, m);
                for k = 1 : m
                    Ck = pdmat_pick(C, k);
                    gt.cb(k) = tsuite_gaussd.calc_cb(Ck);
                    gt.ent(k) = tsuite_gaussd.calc_entropy(Ck);
                end
            end

            % test ca and cb

            [ca_m, cb_m] = gaussd_const(g_mp);
            [ca_c, cb_c] = gaussd_const(g_cp);

            devcheck('ca_m', ca_m, gt.ca, 1e-12);
            devcheck('cb_m', cb_m, gt.cb, 1e-12);
            devcheck('ca_c', ca_c, gt.ca, 1e-12);
            devcheck('cb_c', cb_c, gt.cb, 1e-12);

            % test entropy

            ent_m = gaussd_entropy(g_mp);
            ent_c = gaussd_entropy(g_cp);

            devcheck('ent_m', ent_m, gt.ent, 1e-12);
            devcheck('ent_c', ent_c, gt.ent, 1e-12);
        end


        function do_test_evaluation(ty, d, m, is_zeromean, is_shared)
            % Verify gaussd_sqmahdist and gaussd_logpdf against direct
            % computations using full-matrix forms.

            % parse settings
            if is_zeromean; assert(m == 1); end
            if is_shared; m2 = 1; else m2 = m; end

            % generate models

            if is_zeromean;
                mu = 0;
            else
                mu = randn(d, m);
            end
            C = rand_pdmat(ty, d, m2, [1, 3]);

            g_mp = gaussd('m', mu, C);
            g_cp = gaussd('c', g_mp);

            % calculate ground-truths

            % generate full form of mu and C

            if isequal(mu, 0)
                mu = zeros(d, m);
            end

            if m == 1
                C2 = pdmat_fullform(g_mp.C);
            else
                C2 = zeros(d, d, m);
                if is_shared
                    C2 = pdmat_fullform(g_mp.C);
                    C2 = repmat(C2, [1, 1, m]);
                else
                    for k = 1 : m
                        C2(:,:,k) = pdmat_fullform(g_mp.C, k);
                    end
                end
            end

            ld = zeros(1, m);
            Q2 = zeros(d, d, m);
            for k = 1 : m
                ld(k) = lndet(C2(:,:,k));
                Q2(:,:,k) = inv(C2(:,:,k));
            end

            % generate sample points

            n0 = 100;
            X = randn(d, n0);

            % evaluate ground-truths

            gt.D = tsuite_gaussd.comp_sq_mahdist(mu, Q2, X);

            ldv = pdmat_lndet(C);
            gt.LP = bsxfun(@plus, -0.5 * gt.D, 0.5 * (- ldv.' - d * log(2*pi)));

            % compare with ground-truths

            D_m = gaussd_sqmahdist(g_mp, X);
            D_c = gaussd_sqmahdist(g_cp, X);

            LP_m = gaussd_logpdf(g_mp, X);
            LP_c = gaussd_logpdf(g_cp, X);

            devcheck('sqmahdist (m)', D_m, gt.D, 1e-8 * max(abs(gt.D(:))));
            devcheck('sqmahdist (c)', D_c, gt.D, 1e-8 * max(abs(gt.D(:))));

            devcheck('logpdf (m)', LP_m, gt.LP, 1e-8 * max(abs(gt.LP(:))));
            devcheck('logpdf (c)', LP_c, gt.LP, 1e-8 * max(abs(gt.LP(:))));
        end


        function do_test_mle(ty, d, m, is_zeromean, is_shared)
            % Verify gaussd_mle against the straightforward reference
            % estimator gmle, with unweighted, dense-weighted, and
            % sparse-weighted samples.

            % parse settings
            if is_zeromean
                return;
            end

            % generate data

            mu0 = randn(d, 1);
            L0 = rand(d, d);
            n = 1000;
            X = bsxfun(@plus, mu0, L0 * randn(d, n));
            w = rand(n, m);

            % sparse one-hot weights (random hard assignment)
            wsp = l2mat(m, randi(m, n, 1), 1, 'sparse');
            assert(isequal(size(wsp), [n m]));

            % perform estimation

            if m == 1
                Ge0 = gaussd_mle(X, [], ty, is_shared);
                tsuite_gaussd.verify_mp(Ge0, d, m, ty, [], []);

                Ge = gaussd_mle(X, w, ty, is_shared);
                tsuite_gaussd.verify_mp(Ge, d, m, ty, [], []);

                Ge2 = gaussd_mle(X, wsp, ty, is_shared);
                tsuite_gaussd.verify_mp(Ge2, d, m, ty, [], []);

                Gr0 = tsuite_gaussd.gmle(X, ones(n, 1), ty, is_shared);
                Gr = tsuite_gaussd.gmle(X, w, ty, is_shared);
                Gr2 = tsuite_gaussd.gmle(X, full(wsp), ty, is_shared);

                devcheck('mle (mean)', Ge0.mu, Gr0.mu, 1e-12);
                devcheck('mle (cov)', Ge0.C.v, Gr0.C.v, 1e-12);

                devcheck('w-mle (mean)', Ge.mu, Gr.mu, 1e-12);
                devcheck('w-mle (cov)', Ge.C.v, Gr.C.v, 1e-12);

                devcheck('wsp-mle (mean)', Ge2.mu, Gr2.mu, 1e-12);
                devcheck('wsp-mle (cov)', Ge2.C.v, Gr2.C.v, 1e-12);
            else
                Ge = gaussd_mle(X, w, ty, is_shared);
                tsuite_gaussd.verify_mp(Ge, d, m, ty, [], []);

                Ge2 = gaussd_mle(X, wsp, ty, is_shared);
                tsuite_gaussd.verify_mp(Ge2, d, m, ty, [], []);

                Gr = tsuite_gaussd.gmle(X, w, ty, is_shared);
                Gr2 = tsuite_gaussd.gmle(X, full(wsp), ty, is_shared);

                devcheck('w-mle (mean)', Ge.mu, Gr.mu, 1e-12);
                devcheck('w-mle (cov)', Ge.C.v, Gr.C.v, 1e-12);

                devcheck('wsp-mle (mean)', Ge2.mu, Gr2.mu, 1e-12);
                devcheck('wsp-mle (cov)', Ge2.C.v, Gr2.C.v, 1e-12);
            end
        end


        function do_test_sampling(ty, d, m, is_zeromean, is_shared)
            % Compare sample moments against the model's mean and
            % full-form covariance (single-model case only).

            % parse settings
            if m > 1
                return;
            end

            if is_zeromean; assert(m == 1); end
            if is_shared; m2 = 1; else m2 = m; end

            % generate models

            if is_zeromean;
                mu = 0;
            else
                mu = randn(d, m);
            end
            C = rand_pdmat(ty, d, m2, [1, 3]);

            g_mp = gaussd('m', mu, C);
            g_cp = gaussd('c', g_mp);

            if is_zeromean
                mu0 = zeros(d, 1);
            else
                mu0 = mu;
            end
            C0 = pdmat_fullform(C);

            % perform sampling

            n = 100000;
            X_m = gaussd_sample(g_mp, n);
            X_c = gaussd_sample(g_cp, n);

            devcheck('sample_mean (m)', vecmean(X_m), mu0, 2e-2);
            devcheck('sample_cov (m)', veccov(X_m), C0, 5e-2);

            devcheck('sample_mean (c)', vecmean(X_c), mu0, 2e-2);
            devcheck('sample_cov (c)', veccov(X_c), C0, 5e-2);
        end

    end


    %% Auxiliary functions

    methods(Access='private')

        function run_multi(obj, tfunc)
            % Run a test case under multiple conditions

            tys = obj.types;
            ds = obj.dims;
            ms = obj.nums;

            for it = 1 : numel(tys)
                ty = tys{it};
                for d = ds
                    for m = ms
                        tfunc(ty, d, m, false, false);
                        tfunc(ty, d, m, false, true);

                        % zero-mean only makes sense for a single model
                        if m == 1
                            tfunc(ty, d, m, true, false);
                            tfunc(ty, d, m, true, true);
                        end
                    end
                end
            end
        end

    end

    methods(Static, Access='private')

        function verify_mp(g, d, m, cf, mu, C)
            % check the structural invariants of a mean-form model;
            % empty mu/C skip the value comparison

            assert(strcmp(g.tag, 'gaussd'));
            assert(isequal(g.ty, 'm'));
            assert(g.d == d);
            assert(g.n == m);
            assert(isequal(g.C.ty, cf));

            if ~isempty(mu)
                assert(isequal(g.mu, mu));
            end
            if ~isempty(C)
                assert(isequal(g.C, C));
            end
        end

        function verify_cp(g, d, m, cf, h, J)
            % check the structural invariants of a canonical-form model;
            % empty h/J skip the value comparison

            assert(strcmp(g.tag, 'gaussd'));
            assert(isequal(g.ty, 'c'));
            assert(g.d == d);
            assert(g.n == m);
            assert(isequal(g.J.ty, cf));

            if ~isempty(h)
                assert(isequal(g.h, h));
            end
            if ~isempty(J)
                assert(isequal(g.J, J));
            end
        end

        function v = calc_entropy(C)
            % reference Gaussian entropy: (d (log(2 pi) + 1) + log|C|) / 2
            d = C.d;
            ldv = pdmat_lndet(C);
            v = 0.5 * (d * (log(2*pi) + 1) + ldv);
        end

        function v = calc_ca(mu, C)
            % reference quadratic constant: mu' C^{-1} mu
            C = pdmat_fullform(C);
            v = mu' * (C \ mu);
        end

        function v = calc_cb(C)
            % reference log-normalizer constant: -(d log(2 pi) + log|C|)/2
            d = C.d;
            ldv = pdmat_lndet(C);
            v = -0.5 * (d * log(2*pi) + ldv);
        end

        function D = comp_sq_mahdist(mu, icov, X)
            % compute squared Mahalanobis distance to Gaussian centers

            m = size(mu, 2);
            n = size(X, 2);

            D = zeros(m, n);
            for i = 1 : m
                A = icov(:,:,i);
                v = mu(:,i);
                D(i, :) = pwsqmahdist(v, X, A);
            end
        end

        function G = gmle(X, w, cf, cov_tied)
            % A slow (but straightforward) way to implement MLE

            mu = vecmean(X, w);
            d = size(X, 1);
            m = size(w, 2);

            if cov_tied && m > 1
                % normalized per-component total weights, used to tie
                % the covariance across components
                sw = sum(w, 1).';
                sw = sw / sum(sw);
            end

            switch cf
                case {'s', 'd'}
                    v = vecvar(X, w, mu);
                    if cf == 's'
                        v = mean(v, 1);
                    end
                    if cov_tied && m > 1
                        v = v * sw;
                    end
                    C = pdmat(cf, d, v);
                case 'f'
                    v = veccov(X, w, mu);
                    if cov_tied && m > 1
                        v = reshape(v, d * d, m) * sw;
                        v = reshape(v, d, d);
                    end
                    C = pdmat(cf, d, v);
            end

            G = gaussd('m', mu, C);
        end

    end

end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_gaussd.m | MATLAB | mit | 15,274 |
classdef tsuite_gauss_models
% The test suite for various Gauss based generative models
%
% Created by Dahua Lin, on Dec 13, 2011
%
%% configurations
properties
Jx_forms = {'s', 'd', 'f'};
dims = {1, 2, 5, [3, 2]};
Ks = [1, 3];
end
%% main test cases
methods
function test_lingen(obj)
run_multi(obj, @tsuite_gauss_models.do_test_lingen);
end
function run_multi(obj, tfunc)
% Run a test case under multiple conditions
cfs = obj.Jx_forms;
ds = obj.dims;
ks = obj.Ks;
n = 25;
for i = 1 : numel(cfs)
cf = cfs{i};
for j = 1 : numel(ds)
d = ds{j};
for k = ks
tfunc(cf, d, k, n);
end
end
end
end
end
%% core testing functions
methods(Static, Access='private')
%% gaussgn_capture
function do_test_lingen(cf, d, K, n)
% Perform the test of gauss_lingen on a specific setting
%
% cf: the form of Jx
% d: the dimensions
% K: the number of rows in w
% n: the number of observed samples
%
% parse inputs
if isscalar(d)
use_A = false;
q = d;
else
use_A = true;
q = d(2);
d = d(1);
end
% prepare model
Jx = rand_pdmat(cf, d, 1, [1 2]);
X = randn(d, n);
U = randn(q, K);
if use_A
A = randn(d, q);
else
A = [];
end
if ~use_A
g0 = gauss_lingen(d);
gm = gauss_lingen(Jx);
else
g0 = gauss_lingen(d, q);
gm = gauss_lingen(Jx, A);
end
% verify models
assert(g0.xdim == d);
assert(g0.pdim == q);
assert(isempty(g0.Gx));
assert(isempty(g0.Gx_cb));
assert(isempty(g0.Jx));
assert(isempty(g0.A));
assert(gm.xdim == d);
assert(gm.pdim == q);
assert(is_pdmat(gm.Jx) && isequal(gm.Jx, Jx));
assert(isempty(gm.A) == ~use_A);
if use_A
assert(isequal(gm.A, A));
end
assert(is_gaussd(gm.Gx) && gm.Gx.ty == 'c' && gm.Gx.n == 1);
assert(isequal(gm.Gx.h, 0));
assert(isequal(gm.Gx.J, Jx));
assert(isscalar(gm.Gx_cb));
[~, cb] = gaussd_const(gm.Gx);
devcheck('gaussgm (cb)', cb, gm.Gx_cb, 1e-15);
assert(g0.query_obs(X) == n);
assert(gm.query_obs(X) == n);
assert(g0.query_params(U) == K);
assert(gm.query_params(U) == K);
% verify loglik evaluation
LL = gm.loglik(U, X);
if ~use_A
AU = U;
else
AU = A * U;
end
LL0 = zeros(K, n);
for k = 1 : K
lv = gaussd_logpdf(gm.Gx, bsxfun(@minus, X, AU(:,k)));
LL0(k,:) = lv;
end
devcheck('simplegen (LL)', LL, LL0, 1e-12);
% verify capturing (conjugate updates)
if K == 1
[dh0_i, dJ0_i] = tsuite_gauss_models.lingen_capture_gt(X, [], Jx, A);
dG_i = gm.capture(X);
assert(isequal(size(dh0_i), [q, K]));
assert(is_pdmat(dJ0_i) && dJ0_i.d == q && dJ0_i.n == K);
assert(is_gaussd(dG_i) && dG_i.ty == 'c' && dG_i.d == q && dG_i.n == K);
assert(isequal(size(dG_i.h), [q, K]));
assert(dG_i.J.d == q && dG_i.J.n == K);
devcheck('gaussgen w/ Zi (dh)', dG_i.h, dh0_i, 1e-12);
devcheck('gaussgen w/ Zi (dJ)', dG_i.J.v, dJ0_i.v, 1e-12);
end
W = rand(n, K);
[dh0_w, dJ0_w] = tsuite_gauss_models.lingen_capture_gt(X, W, Jx, A);
dG_w = gm.capture(X, W);
assert(is_gaussd(dG_w) && dG_w.ty == 'c' && dG_w.d == q && dG_w.n == K);
assert(isequal(size(dG_w.h), [q, K]));
assert(dG_w.J.d == q && dG_w.J.n == K);
devcheck('gaussgen w/ Zw (dh)', dG_w.h, dh0_w, 1e-12);
devcheck('gaussgen w/ Zw (dJ)', dG_w.J.v, dJ0_w.v, 1e-12);
% verify MLE
if K == 1
Ui = gm.mle(X);
Ui0 = pdmat_lsolve(dJ0_i, dh0_i);
assert(isequal(size(Ui0), [q, K]));
assert(isequal(size(Ui), [q, K]));
devcheck('gaussgen (Ui)', Ui, Ui0, 1e-10);
end
Uw = gm.mle(X, W);
Uw0 = pdmat_lsolve(dJ0_w, dh0_w);
assert(isequal(size(Uw0), [q, K]));
assert(isequal(size(Uw), [q, K]));
devcheck('gaussgen (Uw)', Uw, Uw0, 1e-10);
end
function [dh, dJ] = lingen_capture_gt(X, w, Jx, A)
    % Calculate the ground-truth for gaussgm_capture
    %
    % Directly evaluates the conjugate-update terms of the linear
    % Gaussian model from their definitions:
    %
    %   dh(:,k) = A' * Jx * (X * w(:,k))
    %   dJ{k}   = sum(w(:,k)) * (A' * Jx * A)
    %
    % Input:
    %   X:  observations [d x n]
    %   w:  per-sample weights [n x K]; empty means a single
    %       component (K = 1) with unit weights
    %   Jx: measurement precision (a pdmat struct)
    %   A:  transform matrix [d x q]; empty means identity (q = d)
    %
    % Output:
    %   dh: potential-vector updates [q x K]
    %   dJ: precision updates (a pdmat struct with dJ.n == K)

    n = size(X, 2);

    % default weighting: one component covering all samples
    if isempty(w)
        K = 1;
        w = ones(n, 1);
    else
        K = size(w, 2);
    end

    % parameter dimension q depends on whether A is used
    if isempty(A)
        q = size(X, 1);
    else
        q = size(A, 2);
    end

    % dh(:,k) = A' * Jx * (weighted sum of samples)
    dh = zeros(q, K);
    for k = 1 : K
        dh_k = pdmat_mvmul(Jx, (X * w(:, k)));
        if ~isempty(A)
            dh_k = A' * dh_k;
        end
        dh(:, k) = dh_k;
    end

    % dJ: total weight per component times (A' * Jx * A)
    sw = sum(w, 1);

    if isempty(A)
        dJ = pdmat_scale(Jx, sw);
    else
        dJ = zeros(q, q, K);
        Jx = pdmat_fullform(Jx);
        for k = 1 : K
            dJ_k = sw(k) * (A' * Jx * A);
            % symmetrize to suppress numerical asymmetry
            dJ_k = 0.5 * (dJ_k + dJ_k');
            dJ(:,:,k) = dJ_k;
        end
        dJ = pdmat('f', q, dJ);
    end
end
end
end
| zzhangumd-smitoolbox | pmodels/tests/tsuite_gauss_models.m | MATLAB | mit | 7,206 |
function [W, idf] = tf_idf_weight(C, op)
%TF_IDF_WEIGHT Computes TF-IDF Weights
%
%   W = TF_IDF_WEIGHT(C);
%
%       Computes the TF-IDF weight of every (word, document) pair in
%       a corpus given by its word-count table C. With V words and n
%       documents, C has size V x n, and so does the output W:
%       W(v, d) is the tf-idf weight of word v in document d.
%
%   W = TF_IDF_WEIGHT(C, 'normalize');
%
%       First normalizes each column of C to sum to one (turning raw
%       counts into term frequencies), then computes the weights.
%
%   [W, idf] = TF_IDF_WEIGHT( ... );
%
%       Also returns the inverse-document-frequency values as a
%       V x 1 column vector.
%
%   Created by Dahua Lin, on Feb 19, 2011
%

%% verify input arguments

if ~(isfloat(C) && isreal(C) && ismatrix(C))
    error('tf_idf_weight:invalidarg', 'C should be a real matrix.');
end

to_normalize = 0;
if nargin >= 2
    if ~(ischar(op) && strcmp(op, 'normalize'))
        error('tf_idf_weight:invalidarg', ...
            'The second argument can only be ''normalize''.');
    end
    to_normalize = 1;
end

%% main

if to_normalize
    % scale each document (column) to unit total count
    C = bsxfun(@times, C, 1 ./ full(sum(C, 1)));
end

% document frequency: the number of documents containing each word
docfreq = full(sum(C > 0, 2));
ndocs = size(C, 2);

% idf = log(n / df); weight = tf * idf
% NOTE(review): a word occurring in no document yields idf = Inf
% (and NaN weights for full C) — presumably callers only pass
% corpora where every vocabulary word occurs; confirm if not.
idf = log(ndocs ./ docfreq);
W = bsxfun(@times, C, idf);
function LL = topic_loglik(Beta, C, Q)
%TOPIC_LOGLIK Document log-likelihood for Topic model
%
%   LL = TOPIC_LOGLIK(Beta, C, Q);
%
%       Computes, for every document, its log-likelihood under a
%       topic model. With K topics, n documents, and a vocabulary
%       of V words:
%
%       Input arguments:
%       - Beta:     word distributions of the topics [V x K]
%       - C:        word-count table of the corpus [V x n]
%       - Q:        per-document posterior topic distribution [K x n]
%
%       Output arguments:
%       - LL:       per-document log-likelihoods [1 x n]
%
%   Remarks
%   -------
%       - A generic helper usable for perplexity evaluation across
%         different kinds of topic models.
%
%   Created by Dahua Lin, on Feb 18, 2012.
%

%% verify input arguments

if ~(isfloat(Beta) && isreal(Beta) && ismatrix(Beta) && ~issparse(Beta))
    error('topic_loglik:invalidarg', 'Beta should be a non-sparse real matrix.');
end
[V, K] = size(Beta);

if ~(isfloat(C) && isreal(C) && ismatrix(C) && size(C,1) == V)
    error('topic_loglik:invalidarg', 'C should be a real matrix with V rows.');
end
n = size(C, 2);

if ~(isfloat(Q) && isreal(Q) && isequal(size(Q), [K n]) && ~issparse(Q))
    error('topic_loglik:invalidarg', ...
        'Q should be a non-sparse real matrix of size K x n.');
end

%% main

% pass the non-zeros of C to the C++ core in zero-based triplet form
[wi, di, cnts] = find(C);
wi = int32(wi) - 1;
di = int32(di) - 1;

% the mex function requires double inputs
if ~isa(Beta, 'double'); Beta = double(Beta); end
if ~isa(cnts, 'double'); cnts = double(cnts); end
if ~isa(Q, 'double'); Q = double(Q); end

LL = topic_loglik_cimp(Beta, wi, di, cnts, Q);
function topiclda_demo()
%TOPICLDA_DEMO A simple program to demo the use of LDA
%
%   TOPICLDA_DEMO();
%
%   Generates a synthetic corpus from a known LDA model, fits a new
%   LDA model to it with variational E-M, and compares the estimated
%   topics/prior against the ground truth (including perplexity on
%   held-out documents).

%% Configuration

V = 50;     % the number of words in the vocabulary
K = 3;      % the number of topics
n0 = 500;   % the number of training documents
n1 = 1000;  % the number of testing documents
nw = 1000;  % the number of words per document

%% Generate data

disp('Generating document data ...');

% ground-truth model and training/testing corpora
[alpha0, Beta0] = gen_cfg(V, K);
C0 = gen_corpus(alpha0, Beta0, nw, n0);
C1 = gen_corpus(alpha0, Beta0, nw, n1);

%% Train model

disp('Training LDA model ...');

eta = 0.01;
S = topiclda_em(V, eta);
S = S.initialize(C0, [], K);

opts = varinfer_options([], 'maxiters', 200, 'tol', 1e-6, 'display', 'off');
S = varinfer_drive(S, opts);

Beta = S.sol.Beta;
alpha = S.sol.alpha;

% permute topic labels
% (estimated topics are matched to ground-truth topics by minimal
%  KL divergence, so the comparison below is label-aligned)
D = zeros(K, K);
for k = 1 : K
    for l = 1 : K
        D(k, l) = ddkldiv(Beta0(:,k), Beta(:,l));
    end
end
[~, tmap] = min(D, [], 1);
Beta(:, tmap) = Beta;
D(:, tmap) = D;
alpha(tmap) = alpha;

%% Show results

disp('============================');
disp('Topic prior:');
disp('  ground-truth:  '); disp(alpha0.' / sum(alpha0));
disp('  solved-result: '); disp(alpha.' / sum(alpha));

disp('Divergence between True Topics and Solved Topics');
disp(D);

figure;
for k = 1 : K
    subplot(K,1,k);
    bar([Beta0(:,k), Beta(:,k)]);
end

disp('Evaluating Perplexity values ...');

% perplexities of the true, estimated, and a random model
ppx0_tr = calc_ppx(alpha0, Beta0, C0);
ppx0_te = calc_ppx(alpha0, Beta0, C1);

ppx1_tr = calc_ppx(alpha, Beta, C0);
ppx1_te = calc_ppx(alpha, Beta, C1);

[alpha_ram, Beta_ram] = gen_cfg(V, K);
ppxr_tr = calc_ppx(alpha_ram, Beta_ram, C0);
ppxr_te = calc_ppx(alpha_ram, Beta_ram, C1);

disp('                     True      Estimated      Random');
fprintf(' On training:  %12.4f  %12.4f %12.4f\n', ppx0_tr, ppx1_tr, ppxr_tr);
fprintf(' On testing:   %12.4f  %12.4f %12.4f\n', ppx0_te, ppx1_te, ppxr_te);
disp(' ');


%% Data generation function

% Draws a random LDA configuration: Dirichlet prior alpha [K x 1]
% and column-normalized word distributions Beta [V x K].
function [alpha, Beta] = gen_cfg(V, K)

alpha = 1 + rand(K, 1) * 2;
Beta = exp(randn(V, K) * 1.5) * 2;
Beta = bsxfun(@times, Beta, 1 ./ sum(Beta, 1));

% Samples a corpus of n documents (nw words each) from the LDA model
% given by (alpha, Beta); returns the V x n word-count table.
function C = gen_corpus(alpha, Beta, nw, n)

[V, K] = size(Beta);
T = dird_sample(alpha, n);

% draw topics
Ts = ddsample(T, nw);

% draw words
gs = intgroup(K, Ts(:));
words = zeros(nw, n);
for k = 1 : K
    cg = gs{k};
    ws = ddsample(Beta(:,k), numel(cg));
    words(cg) = ws;
end

% count words
C = zeros(V, n);
for i = 1 : n
    C(:,i) = intcount(V, words(:,i));
end

% Runs variational inference for the model (alpha, Beta) on corpus C
% and evaluates its perplexity.
function v = calc_ppx(alpha, Beta, C)

K = size(Beta, 2);
Ginit = bsxfun(@plus, alpha, sum(C, 1) / K);
Gam = topiclda_varinfer(Beta, alpha, C, [], Ginit, 500, 1e-8);
v = topiclda_ppx(Beta, C, Gam);
| zzhangumd-smitoolbox | pmodels/topics/topiclda_demo.m | MATLAB | mit | 2,694 |
function [sol, objVs] = topiclda_em_update(C, w, eta, sol, eiters, objVs)
%TOPICLDA_EM_UPDATE Performs E-M update of a Topic-LDA solution
%
%   [sol, objVs] = TOPICLDA_EM_UPDATE(C, w, eta, sol, eiters);
%
%       Performs one E-M iteration of Topic-LDA estimation:
%       an E-step of per-document variational inference, followed by
%       an M-step that re-estimates alpha and Beta.
%
%       Input arguments:
%       - C:        The word-count matrix [V x n]
%       - w:        The document weights [empty or 1 x n]
%       - eta:      The prior-count for word-distributions
%       - sol:      The topiclda solution to be updated
%       - eiters:   The maximum iterations within each E-step
%
%       Output arguments:
%       - sol:      The updated solution struct
%       - objVs:    The itemized objective values (the field
%                   lpri_beta is always set; it is 0 when eta == 0)
%
%   Remarks
%   -------
%       - This function is to help the implementation of topic-LDA,
%         and is not supposed to be directly called by end users.
%
%   Created by Dahua Lin, on Feb 18, 2012
%

%% main

alpha = sol.alpha;
Beta = sol.Beta;
Gamma = sol.Gamma;

% E-step (variational inference)

if isempty(Gamma)
    % default initialization: spread each document's total count
    % evenly over the K topics, on top of the prior
    sC = full(sum(C, 1));
    Gamma = bsxfun(@plus, alpha, sC * (1/sol.K));
end

[Gamma, W, Vs] = topiclda_varinfer(Beta, alpha, C, w, Gamma, ...
    eiters, 1e-12);

% fold per-document objective terms into (weighted) totals
if isempty(w)
    Vs = sum(Vs, 2);
else
    Vs = Vs * w';
end

objVs.ell_theta = Vs(1);
objVs.ell_z = Vs(2);
objVs.ell_w = Vs(3);
objVs.ent_theta = Vs(4);
objVs.ent_z = Vs(5);

% M-step (parameter estimation)

% update alpha (MLE of the Dirichlet prior from the expected
% log-topic-proportions of all documents)
sg = sum(Gamma, 1);
dird_sta = bsxfun(@minus, psi(Gamma), psi(sg));
alpha = dird_mle(dird_sta, w, alpha, 'input', 'stat');

% re-evaluate E[log p(theta)] under the updated alpha
et = calc_eloglik_theta(alpha, dird_sta);
objVs.ell_theta = sum(et);

% update Beta: W is K x V (from topiclda_varinfer), so Wt is V x K
Wt = W.';
if eta > 0
    W1 = Wt + eta;  % add prior pseudo-counts
else
    W1 = Wt;        % FIX: was Wt' (a second transpose), which broke
                    % the V x K layout required for Beta below
end
Beta = bsxfun(@times, W1, 1 ./ sum(W1, 1));

logB = log(Beta);
objVs.ell_w = sum(sum(Wt .* logB, 1));
if eta > 0
    objVs.lpri_beta = eta * sum(logB(:));
else
    % always provide the field, so callers (e.g. evaluate_objv)
    % can sum all itemized terms unconditionally
    objVs.lpri_beta = 0;
end

% Store updated results

sol.alpha = alpha;
sol.Beta = Beta;
sol.Gamma = Gamma;
sol.W = W;
%% Aux functions
% Expected log-likelihood of theta under Dir(alpha), given the
% per-document statistics dird_sta(k, i) = E[log theta_k] for doc i.
function v = calc_eloglik_theta(alpha, dird_sta)

log_norm = gammaln(sum(alpha)) - sum(gammaln(alpha));
v = log_norm + (alpha - 1)' * dird_sta;
| zzhangumd-smitoolbox | pmodels/topics/topiclda_em_update.m | MATLAB | mit | 2,092 |
/**********************************************************
*
* topic_loglik_cimp.cpp
*
* C++ mex implementation of topic_loglik
*
* Created by Dahua Lin, on Feb 18, 2012
*
**********************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include "specfuncs.h"
#include "corpus.h"
using namespace bcs;
using namespace bcs::matlab;
using namespace smi;
// Log-likelihood of one document under the topic model, with the
// latent topic assignment of each word marginalized out.
//
//   V, K:  vocabulary size / number of topics
//   Beta:  V x K word distributions (column-major)
//   doc:   the document (distinct words and their counts)
//   q:     length-K per-document posterior topic distribution
//   temp:  caller-provided scratch buffer of length K
double calc_loglik(int V, int K, const double *Beta,
        const Doc& doc, const double *q, double *temp)
{
    int nw = doc.nwords;
    double ll = 0;

    for (int i = 0; i < nw; ++i) // for each word
    {
        // calculate per-word posterior of topics:
        //   temp[k] proportional to q[k] * Beta(wd, k), normalized
        int32_t wd = doc.words[i];
        double c = doc.counts[i];

        double s = 0;
        for (int k = 0; k < K; ++k)
        {
            s += (temp[k] = q[k] * Beta[wd + V * k]);
        }

        double inv_s = 1.0 / s;
        for (int k = 0; k < K; ++k) temp[k] *= inv_s;

        // calculate generative probability of this word
        // with z marginalized out
        double pv = 0;
        for (int k = 0; k < K; ++k)
        {
            pv += temp[k] * Beta[wd + V * k];
        }

        // add to total loglik, weighted by the word count
        ll += c * std::log(pv);
    }

    return ll;
}
/**
* Inputs
* [0] Beta: The word distributions of topics [V x K]
* [1] I: The whole list of words [int32 zero-based]
* [2] J: The whole list of documents [int32 zero-based]
* [3] C: The whole list of word counts [double]
* [4] Q: The per-doc posterior distr. of topics [K x n]
*
* Outputs
* [0] v: The per-doc log-likelihood values [1 x n]
*/
// MEX gateway: groups the (word, doc, count) triplets into per-document
// views and evaluates calc_loglik for each document.
// See the comment block above for the exact input/output layout.
void bcsmex_main(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // take inputs
    const_marray mBeta(prhs[0]);
    const_marray mI(prhs[1]);
    const_marray mJ(prhs[2]);
    const_marray mC(prhs[3]);
    const_marray mQ(prhs[4]);

    int V = mBeta.nrows();
    int K = mBeta.ncolumns();
    int n = mQ.ncolumns();
    int len = mI.nelems();

    const double *Beta = mBeta.data<double>();
    const int32_t *I = mI.data<int32_t>();
    const int32_t *J = mJ.data<int32_t>();
    const double *C = mC.data<double>();
    const double *Q = mQ.data<double>();

    // prepare output
    marray mV = create_marray<double>(1, n);
    double *v = mV.data<double>();

    // main: one calc_loglik call per document, sharing one
    // length-K scratch buffer
    Corpus corpus(n, len, I, J, C);

    const double *q = Q;
    double *temp = new double[K];

    for (int i = 0; i < n; ++i, q += K)
    {
        const Doc& doc = corpus.doc(i);
        v[i] = calc_loglik(V, K, Beta, doc, q, temp);
    }

    delete[] temp;

    // output
    plhs[0] = mV.mx_ptr();
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/topics/private/topic_loglik_cimp.cpp | C++ | mit | 2,866 |
// The header file for the class Corpus
#ifndef SMI_CORPUS_H
#define SMI_CORPUS_H

#include <stdint.h>  // int32_t (FIX: header previously relied on its includers)

namespace smi
{

    // A read-only view of one document: its distinct words and counts.
    struct Doc
    {
        Doc()
        : nwords(0), words(0), counts(0)
        {}

        int nwords;             // number of distinct words in this document
        const int32_t *words;   // zero-based word indices [nwords]
        const double *counts;   // per-word counts [nwords]
    };


    // Partitions a corpus given in triplet form (word, doc, count) into
    // per-document views. Does NOT own I/J/C: the caller keeps those
    // arrays alive for the lifetime of the Corpus.
    class Corpus
    {
    public:
        // I and J are zero-based indices; for n > 1, J must list the
        // documents in grouped (sorted) order so each doc is one slice
        Corpus(int n, int len, const int32_t *I, const int32_t *J, const double *C)
        : m_ndocs(n), m_docs(new Doc[n > 0 ? n : 1]),
          m_words(I), m_wcounts(C), m_max_count(0)
        {
            // 1st-pass scan #words
            if (n > 1)
            {
                for (int i = 0; i < len; ++i)
                {
                    ++ m_docs[J[i]].nwords;
                }

                // 2nd pass: hand each document its slice of I / C
                int o = 0;
                for (int i = 0; i < n; ++i)
                {
                    Doc& doc = m_docs[i];
                    int nw = doc.nwords;

                    if (nw > 0)
                    {
                        doc.words = m_words + o;
                        doc.counts = m_wcounts + o;
                        o += nw;

                        if (nw > m_max_count)
                            m_max_count = nw;
                    }
                }
            }
            else if (n == 1)  // FIX: n == 0 previously fell in here and
            {                 // dereferenced an empty array
                m_docs->nwords = len;
                m_docs->words = m_words;
                m_docs->counts = m_wcounts;

                m_max_count = m_docs->nwords;
            }
        }

        ~Corpus()
        {
            delete[] m_docs;
        }

        int ndocs() const { return m_ndocs; }
        int max_count() const { return m_max_count; }

        const Doc& doc(int i) const
        {
            return m_docs[i];
        }

    private:
        // non-copyable: m_docs is an owning raw array, so copying would
        // double-delete (C++03 idiom: declared but never defined)
        Corpus(const Corpus&);
        Corpus& operator=(const Corpus&);

    private:
        int m_ndocs;
        Doc *m_docs;

        const int32_t *m_words;
        const double *m_wcounts;

        int m_max_count;

    }; // end class Corpus

} // end namespace smi

#endif
/**********************************************************
*
* topiclda_varinfer_cimp.cpp
*
* C++ mex implementation of topiclda_varinfer
*
* Created by Dahua Lin, on Feb 5, 2012
*
**********************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include "specfuncs.h"
#include <limits>
#include "corpus.h"
using namespace bcs;
using namespace bcs::matlab;
using namespace smi;
// #define TOPICLDA_VARINFER_MONITORING
// Number of doubles per document in the itemized-objective output.
// NOTE: must equal the number of fields in DocObj (see below).
const int NUM_OBJ_ITEMS = 5;

// Itemized variational lower bound for one document.
// do_infer reinterprets a raw double buffer as a DocObj array, so this
// struct must stay exactly NUM_OBJ_ITEMS contiguous doubles.
struct DocObj
{
    double ell_theta;   // E[log p(theta | alpha)]
    double ell_z;       // E[log p(z | theta)]
    double ell_w;       // E[log p(w | z, beta)]
    double ent_gamma;   // entropy of q(theta; gamma)
    double ent_phi;     // entropy of q(z; phi)
};

// Total variational objective for one document (sum of the five terms)
inline double calc_sum_objv(const DocObj& s)
{
    return s.ell_theta + s.ell_z + s.ell_w + s.ent_gamma + s.ent_phi;
}
// Wraps the fixed model parameters (Beta, alpha) and pre-computes the
// gamma-independent part of E[log p(theta | alpha)].
// Does not own Beta/alpha; they must outlive the Model.
class Model
{
public:
    Model(int V, int K, const double *Beta, const double *alpha)
    : m_V(V), m_K(K), m_Beta(Beta), m_alpha(alpha)
    {
        m_lpri_gamma_const = calc_lpri_gamma_const(K, alpha);
    }

    int V() const { return m_V; }   // vocabulary size
    int K() const { return m_K; }   // number of topics

    // Beta(v, k): probability of word v under topic k (column-major V x K)
    double beta(int v, int k) const { return m_Beta[v + m_V * k]; }

    double alpha(int k) const { return m_alpha[k]; }

    // E[log p(theta | alpha)] w.r.t. q(theta) = Dirichlet(g)
    double logpri_gamma(const double *g) const
    {
        double a = m_lpri_gamma_const;

        double sg = 0;
        for (int k = 0; k < m_K; ++k)
        {
            sg += g[k];
        }
        double digamma_sum = smi::digamma(sg);

        // add (alpha_k - 1) * E[log theta_k]
        for (int k = 0; k < m_K; ++k)
        {
            a += (m_alpha[k] - 1) * (smi::digamma(g[k]) - digamma_sum);
        }

        return a;
    }

private:
    // log normalization constant of the Dirichlet prior:
    // gammaln(sum alpha) - sum gammaln(alpha_k)
    static double calc_lpri_gamma_const(int K, const double *alpha)
    {
        double sa = 0;
        double sgl = 0;

        for (int k = 0; k < K; ++k)
        {
            sa += alpha[k];
            sgl += smi::gammaln(alpha[k]);
        }

        return smi::gammaln(sa) - sgl;
    }

private:
    int m_V;
    int m_K;
    const double *m_Beta;
    const double *m_alpha;

    double m_lpri_gamma_const;
};
// Shannon entropy of a discrete distribution p of length n.
// NOTE(review): entries are assumed strictly positive (a zero entry
// would produce 0 * log(0) = NaN) — callers pass normalized phi.
inline double calc_entropy(int n, const double *p)
{
    double neg_ent = 0;
    for (const double *q = p; q != p + n; ++q)
    {
        neg_ent += (*q) * std::log(*q);
    }
    return -neg_ent;
}
// Evaluates the itemized variational objective for one document, given
// its current gamma (length K) and Phi (K x nwords, column per word).
// temp is a caller-provided scratch buffer of length K.
void calc_objv(DocObj& obj, const Model& model, const Doc& doc,
        const double *gamma, const double *Phi, double *temp)
{
    int K = model.K();

    // pre-compute psi(gamma_k) - psi(gsum) --> temp
    // (this is E[log theta_k] under q(theta) = Dir(gamma))
    double gsum = 0;
    for (int k = 0; k < K; ++k) gsum += gamma[k];

    double psi_gsum = smi::digamma(gsum);
    for (int k = 0; k < K; ++k) temp[k] = smi::digamma(gamma[k]) - psi_gsum;

    // ell_theta: E[log p(theta | alpha)]
    obj.ell_theta = model.logpri_gamma(gamma);

    // ell_z: E[log p(z | theta)], counts-weighted over words
    int nw = doc.nwords;
    obj.ell_z = 0;

    const double *phi = Phi;
    for (int i = 0; i < nw; ++i, phi += K)
    {
        double a = 0;
        for (int k = 0; k < K; ++k)
        {
            a += phi[k] * temp[k];
        }
        obj.ell_z += doc.counts[i] * a;
    }

    // ell_w: E[log p(w | z, beta)]
    obj.ell_w = 0;

    phi = Phi;
    for (int i = 0; i < nw; ++i, phi += K)
    {
        int v = doc.words[i];

        double a = 0;
        for (int k = 0; k < K; ++k)
        {
            a += phi[k] * std::log(model.beta(v, k));
        }
        obj.ell_w += doc.counts[i] * a;
    }

    // ent_theta: entropy of q(theta; gamma)
    obj.ent_gamma = - smi::gammaln(gsum);
    for (int k = 0; k < K; ++k)
    {
        obj.ent_gamma += (smi::gammaln(gamma[k]) - (gamma[k] - 1) * temp[k]);
    }

    // ent_phi: total entropy of q(z; phi), counts-weighted over words
    phi = Phi;
    obj.ent_phi = 0;
    for (int i = 0; i < nw; ++i, phi += K)
    {
        obj.ent_phi += doc.counts[i] * calc_entropy(K, phi);
    }
}
// Perform inference over a particular document
//
// return whether converged
//
// gamma need to be pre-initialized
//
// prev_gamma, exp_psi: at least K elements
//
// Runs the fixed-point variational updates (phi / gamma) for one
// document until the L1 change of gamma falls below tol, or maxIter
// iterations elapse. gamma must be pre-initialized; Phi receives the
// per-word topic responsibilities (K x nwords). prev_gamma and
// exp_psi are caller-provided scratch buffers of at least K doubles.
// Returns whether the iteration converged.
bool infer_on_doc(const Model& model, const Doc& doc,
        double *gamma, double *Phi,
        int maxIter, double tol,
        double *prev_gamma, double *exp_psi)
{
    // FIX: removed the unused local "int V = model.V();"
    int K = model.K();
    int nw = doc.nwords;

    bool converged = false;

#ifdef TOPICLDA_VARINFER_MONITORING
    double *temp = new double[K];
    double objv = std::numeric_limits<double>::quiet_NaN();
#endif

    for (int t = 0; t < maxIter; ++t)
    {
        // store previous gamma values (for convergence test),
        // cache exp(psi(gamma_k)), and reset gamma to alpha
        for (int k = 0; k < K; ++k)
        {
            exp_psi[k] = std::exp(smi::digamma(gamma[k]));
            prev_gamma[k] = gamma[k];
            gamma[k] = model.alpha(k);
        }

        double *phi = Phi;
        for (int i = 0; i < nw; ++i, phi += K)
        {
            int32_t v = doc.words[i];
            double c = doc.counts[i];

            // calculate phi: proportional to beta(v,k) * exp(psi(gamma_k))
            double sum_phi = 0;
            for (int k = 0; k < K; ++k)
            {
                sum_phi += (phi[k] = model.beta(v, k) * exp_psi[k]);
            }

            // normalize phi
            double nrm_coeff = 1.0 / sum_phi;
            for (int k = 0; k < K; ++k) phi[k] *= nrm_coeff;

            // accumulate phi to gamma (weighted by the word count)
            for (int k = 0; k < K; ++k)
            {
                gamma[k] += c * phi[k];
            }
        }

        // decide convergence (using L1-error)
        double err = 0;
        for (int k = 0; k < K; ++k)
        {
            err += std::abs(gamma[k] - prev_gamma[k]);
        }

#ifdef TOPICLDA_VARINFER_MONITORING
        double objv_pre = objv;
        DocObj obj;
        calc_objv(obj, model, doc, gamma, Phi, temp);
        objv = calc_sum_objv(obj);

        mexPrintf("Iter %d: objv = %.6f (ch = %.4g), ch.gamma = %.4g\n",
                t, objv, objv - objv_pre, err);
#endif

        if (err < tol)
        {
            converged = true;
            break;
        }
    }

#ifdef TOPICLDA_VARINFER_MONITORING
    delete[] temp;
#endif

    return converged;
}
// Accumulates one document's (weighted) per-word topic responsibilities
// into sumPhi (K x V, column-major, one column per vocabulary word):
//
//   sumPhi[:, word(j)] += w * count(j) * Phi[:, j]   for each word j
//
inline void accum_phi(int K, double *sumPhi, const Doc& doc, double w,
        const double *Phi)
{
    for (int j = 0; j < doc.nwords; ++j)
    {
        const double *phi_j = Phi + j * K;
        double scale = w * doc.counts[j];
        double *dst = sumPhi + doc.words[j] * K;

        for (int k = 0; k < K; ++k)
        {
            dst[k] += scale * phi_j[k];
        }
    }
}
// Runs variational inference over the whole corpus: per-document
// gamma/phi updates, accumulation of sumPhi (for the Beta M-step),
// and evaluation of the itemized objective.
//
//   Gamma:     K x n, pre-initialized; updated in place column by column
//   converged: length n, per-document convergence flags
//   sumPhi:    K x V accumulator (must be zero-initialized by caller)
//   obj_vs:    NUM_OBJ_ITEMS x n raw buffer, reinterpreted as DocObj[n]
//              (relies on DocObj being exactly 5 contiguous doubles)
void do_infer(const Model& model, const Corpus& corpus, const double *w,
        int maxIter, double tol,
        double *Gamma, bool *converged, double *sumPhi, double *obj_vs)
{
    int K = model.K();
    double *gamma = Gamma;

    // allocate temporary memory
    // (Phi is sized for the largest document and reused across docs)
    double *Phi = new double[K * corpus.max_count()];
    double *prev_gamma = new double[K];
    double *exp_psi = new double[K];

    DocObj *objs = (DocObj*)obj_vs;

#ifdef TOPICLDA_VARINFER_MONITORING
    mexPrintf("Topic-LDA variational inference\n");
    mexPrintf("**********************************\n");
#endif

    for (int i = 0; i < corpus.ndocs(); ++i, gamma += K)
    {
        const Doc& doc = corpus.doc(i);

#ifdef TOPICLDA_VARINFER_MONITORING
        mexPrintf("On Document [%d / %d] with %d words\n",
                i+1, corpus.ndocs(), doc.nwords);
#endif

        // per-document inference
        bool cvg = infer_on_doc(model, doc, gamma,
                Phi, maxIter, tol, prev_gamma, exp_psi);
        converged[i] = cvg;

        // accumulate to sumPhi (weighted by the document weight)
        accum_phi(K, sumPhi, doc, w[i], Phi);

        // calc objective (reusing exp_psi as length-K scratch)
        double *temp = exp_psi;
        calc_objv(objs[i], model, doc, gamma, Phi, temp);
    }

    // release temporary memory
    delete[] Phi;
    delete[] prev_gamma;
    delete[] exp_psi;
}
/**
* Inputs
* [0] Beta: The word distributions of topics [V x K]
* [1] alpha: The Dirichlet prior parameter [scalar]
* [2] w: The document weights [vector of length n]
* [3] I: The whole list of words [int32 zero-based]
* [4] J: The whole list of documents [int32 zero-based]
* [5] C: The whole list of word counts [double]
* [6] MaxIter: The maximum number of iterations per document [double]
* [7] Tol: Tolerance of change of gamma at convergence [double]
* [8] Gamma0: The initial Gamma values [K x n]
*
* Outputs
* [0] Gamma: The gamma vectors [K x n double]
* [1] converged: Whether the variational inference converged [1 x n logical]
* [2] sumPhi: The (weighted) sum of phi vectors [K x V double]
* (for beta estimation)
* [3] ObjVs: The itemized objective function
*/
// MEX gateway: unpacks the inputs (see the comment block above for the
// exact layout), builds the Corpus view, and dispatches to do_infer.
void bcsmex_main(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    // take inputs
    const_marray mBeta(prhs[0]);
    const_marray mAlpha(prhs[1]);
    const_marray mW(prhs[2]);
    const_marray mI(prhs[3]);
    const_marray mJ(prhs[4]);
    const_marray mC(prhs[5]);
    const_marray mMaxIter(prhs[6]);
    const_marray mTol(prhs[7]);
    const mxArray *mxGamma0 = prhs[8];

    int V = mBeta.nrows();
    int K = mBeta.ncolumns();
    int n = (int)mxGetN(mxGamma0);
    int len = mI.nelems();

    const double *Beta = mBeta.data<double>();
    const double *alpha = mAlpha.data<double>();
    const double *w = mW.data<double>();
    const int32_t *I = mI.data<int32_t>();
    const int32_t *J = mJ.data<int32_t>();
    const double *C = mC.data<double>();

    int maxIter = (int)mMaxIter.get_scalar<double>();
    double tol = mTol.get_scalar<double>();

    // prepare output
    // (Gamma is updated in place on a duplicate of Gamma0;
    //  mSumPhi / mObjVs are zero-initialized by create_marray)
    mxArray *mxGamma = mxDuplicateArray(mxGamma0);
    marray mConverged = create_marray<bool>(1, n);
    marray mSumPhi = create_marray<double>(K, V);
    marray mObjVs = create_marray<double>(NUM_OBJ_ITEMS, n);

    double *Gamma = mxGetPr(mxGamma);
    bool *converged = mConverged.data<bool>();
    double *sumPhi = mSumPhi.data<double>();
    double *objvs = mObjVs.data<double>();

    // main
    Model model(V, K, Beta, alpha);
    Corpus corpus(n, len, I, J, C);

    do_infer(model, corpus, w, maxIter, tol, Gamma, converged, sumPhi, objvs);

    // output
    plhs[0] = mxGamma;
    plhs[1] = mConverged.mx_ptr();
    plhs[2] = mSumPhi.mx_ptr();
    plhs[3] = mObjVs.mx_ptr();
}
| zzhangumd-smitoolbox | pmodels/topics/private/topiclda_varinfer_cimp.cpp | C++ | mit | 10,797 |
// Some special function codes
#ifndef SMI_SPECFUNCS_H
#define SMI_SPECFUNCS_H

#include <cmath>

namespace smi
{
    // Natural log of the Gamma function, via the Lanczos approximation.
    //
    // FIX: declared inline. This header is included from multiple mex
    // translation units; a non-inline definition in a header violates
    // the one-definition rule and causes multiple-definition link errors.
    inline double gammaln(double x)
    {
        const double M_lnSqrt2PI = 0.91893853320467274178;

        // Lanczos coefficients (g = 5, 6 terms)
        const double s0 = 76.18009172947146;
        const double s1 = -86.50532032941677;
        const double s2 = 24.01409824083091;
        const double s3 = -1.231739572450155;
        const double s4 = 0.1208650973866179e-2;
        const double s5 = -0.5395239384953e-5;

        /* Lanczos method */
        double denom = x+1;
        double x1 = x + 5.5;
        double series = 1.000000000190015 +
            s0 / (x+1) +
            s1 / (x+2) +
            s2 / (x+3) +
            s3 / (x+4) +
            s4 / (x+5) +
            s5 / (x+6);

        return( M_lnSqrt2PI +
                (x+0.5)* std::log(x1) - x1 + std::log(series/x) );
    }

    // Digamma (psi) function: Taylor series for very small x,
    // recurrence to push the argument above c, then de Moivre's
    // asymptotic expansion.
    //
    // FIX: declared inline for the same ODR reason as gammaln.
    inline double digamma(double x)
    {
        const double c = 12;
        const double d1 = -0.57721566490153286;       // -Euler gamma
        const double d2 = 1.6449340668482264365;      /* pi^2/6 */
        const double s = 1e-6;
        const double s3 = 1./12;
        const double s4 = 1./120;
        const double s5 = 1./252;
        const double s6 = 1./240;
        const double s7 = 1./132;

        /* Use Taylor series if argument <= S */
        if(x <= s)
        {
            return d1 - 1/x + d2*x;
        }

        /* Reduce to digamma(X + N) where (X + N) >= c */
        double y = 0;
        while(x < c)
        {
            y -= 1.0 / x;
            x += 1.0;
        }

        /* Use de Moivre's expansion if argument >= C */
        /* This expansion can be computed in Maple via asympt(Psi(x),x) */
        if(x >= c)
        {
            double r = 1.0 / x;
            y += std::log(x) - 0.5*r;
            r *= r;

            double t = (s5 - r * (s6 - r * s7));
            y -= r * (s3 - r * (s4 - r * t));
        }

        return y;
    }
}

#endif
function v = topiclda_ppx(Beta, C, Gamma)
%TOPICLDA_PPX Evaluation of Perplexity for Topic LDA
%
%   v = topiclda_ppx(Beta, C, Gamma);
%
%       Computes the perplexity of a Topic LDA model on a corpus.
%
%       Input arguments:
%       - Beta:     the word-distributions of topics
%       - C:        the word count table of the corpus
%       - Gamma:    the per-document posterior Dirichlet parameters
%                   (as solved by topiclda_varinfer)
%
%       Output arguments:
%       - v:        the perplexity over the given corpus.
%

%% main

% geometric-mean topic distribution under q(theta; gamma):
% proportional to exp(psi(gamma_k)), normalized per document
expPsi = exp(psi(Gamma));
Q = bsxfun(@times, expPsi, 1 ./ sum(expPsi, 1));

v = topic_ppx(Beta, C, Q);
classdef topiclda_em < smi_state
    % Topic Latent Dirichlet allocation E-M algorithm state
    %
    % Drives variational E-M estimation of an LDA model: the E-step
    % runs per-document variational inference, the M-step re-estimates
    % alpha and Beta (both delegated to topiclda_em_update).
    %
    % Created by Dahua Lin, on Feb 18, 2012
    %

    %% properties

    properties(GetAccess='public', SetAccess='private')
        vsize;              % the size of vocabulary (V)
        pricount = 0;       % the prior count (eta) for topic estimation
        estep_iters = 20;   % The (maximum) iterations within each E-step
    end

    %% observations

    properties(GetAccess='public', SetAccess='private')
        ndocs;      % the number of documents (n)
        counts;     % the matrix of word counts [V x n]
        weights;    % the document weights [empty or 1 x n]
    end

    %% dynamic states

    properties
        sol;    % the current solution (with following fields)
                % K: the number of topics
                % alpha: Dirichlet prior parameters [K x 1]
                % Beta: topic-wise word distributions [V x K]
                % Gamma: per-document topic-distribution [K x n]
                % W: accumulated per-topic word counts [K x V]
                %    (as returned by topiclda_varinfer)

        objVs;  % a struct with the itemized objective values
                % ell_theta: expected log-lik of theta
                % ell_z: expected log-lik of z
                % ell_w: expected log-lik of words
                % ent_theta: entropy of theta
                % ent_z: entropy of z
                % lpri_beta: log-prior of beta
    end


    %% methods

    methods

        function obj = topiclda_em(V, eta)
            % Construct a topiclda E-M state object
            %
            %   obj = topiclda_em(V);
            %   obj = topiclda_em(V, eta);
            %
            %       V is the size of vocabulary.
            %
            %       Here, eta is the prior-word-count for each topic
            %       (by default, eta = 0)
            %

            if ~(isnumeric(V) && isreal(V) && isscalar(V) && V >= 1 && V == fix(V))
                error('topiclda_em:invalidarg', ...
                    'V should be a positive integer.');
            end
            obj.vsize = V;

            if nargin >= 2
                if ~(isfloat(eta) && isreal(eta) && isscalar(eta) && eta >= 0)
                    error('topiclda_em:invalidarg', ...
                        'eta should be a non-negative scalar.');
                end
                obj.pricount = double(eta);
            end
        end


        function obj = initialize(obj, C, w, B0, a0)
            % Initialize the state with observations and initial params
            %
            %   obj = obj.initialize(C, [], K);
            %   obj = obj.initialize(C, w, K);
            %   obj = obj.initialize(C, w, B0);
            %   obj = obj.initialize(C, w, B0, a0);
            %
            %       Inputs:
            %       - C:    The per-document word count matrix [V x n]
            %       - w:    The document weights [empty or 1 x n]
            %       - K:    The number of topics
            %       - B0:   The initial word-distributions [V x K]
            %       - a0:   The initial Dirichlet params [K x 1]
            %
            %       If B0 or a0 is not provided, they are initialized
            %       randomly.
            %

            V = obj.vsize;

            % check inputs

            if ~(isfloat(C) && isreal(C) && ismatrix(C) && size(C,1) == V)
                error('topiclda_em:invalidarg', ...
                    'C should be a real matrix with V rows.');
            end
            n = size(C, 2);
            if ~isa(C, 'double'); C = double(C); end

            if ~isempty(w)
                if ~(isfloat(w) && isreal(w) && isvector(w) && numel(w) == n)
                    error('topiclda_em:invalidarg', ...
                        'w should be a real vector of length n.');
                end
                % weights are stored as a row vector of doubles
                if size(w, 1) > 1; w = w.'; end
                if ~isa(w, 'double'); w = double(w); end
            end

            % 4th argument: either the topic number K (scalar)
            % or an initial V x K word-distribution matrix B0
            if isscalar(B0)
                K = B0;
                if ~(isnumeric(K) && isreal(K) && isscalar(K) && ...
                        K == fix(K) && K >= 1)
                    error('topiclda_em:invalidarg', ...
                        'K should be a positive integer.');
                end
                K = double(K);

                % initialize B0 (random column-normalized distributions)
                B0 = rand(V, K);
                B0 = bsxfun(@times, B0, 1 ./ sum(B0, 1));
            else
                if ~(isreal(B0) && isfloat(B0) && ismatrix(B0) && size(B0, 1) == V)
                    error('topiclda_em:invalidarg', ...
                        'B0 should be a real matrix with V rows.');
                end
                K = size(B0, 2);

                if issparse(B0)
                    B0 = full(B0);
                end
                if ~isa(B0, 'double'); B0 = double(B0); end
            end

            % 5th argument: initial Dirichlet parameters (random default)
            if nargin < 5 || isempty(a0)
                a0 = 1 + rand(K, 1);
            else
                if ~(isreal(a0) && isfloat(a0) && ~issparse(a0) && isequal(size(a0), [K 1]))
                    error('topiclda_em:invalidarg', ...
                        'a0 should be a real vector of size K x 1.');
                end
                if ~isa(a0, 'double'); a0 = double(a0); end
            end

            % set fields

            obj.ndocs = n;
            obj.counts = C;
            if ~isempty(w)
                obj.weights = w;
            end

            % Gamma and W are left empty; topiclda_em_update fills
            % them in on the first call to update()
            s.K = K;
            s.alpha = a0;
            s.Beta = B0;
            s.Gamma = [];
            s.W = [];
            obj.sol = s;
        end


        function obj = update(obj)
            % Updates the state (one E-M iteration)
            %
            %   obj = obj.update();
            %

            [obj.sol, obj.objVs] = topiclda_em_update( ...
                obj.counts, obj.weights, obj.pricount, obj.sol, ...
                obj.estep_iters, obj.objVs);
        end


        function R = output(obj)
            % Output the current solution
            %
            %   R = obj.output();
            %
            %   R is a struct with the following fields:
            %   - Beta:     word distributions for topics
            %   - alpha:    Dirichlet parameters
            %   - Gamma:    per-document topic distributions
            %

            s = obj.sol;
            R.Beta = s.Beta;
            R.alpha = s.alpha;
            R.Gamma = s.Gamma;
        end


        function b = is_ready(obj)
            % Tests whether the object is ready for running
            %
            %   b = obj.is_ready();
            %

            b = ~isempty(obj.sol);
        end


        function objv = evaluate_objv(obj)
            % Evaluate the objective function of the current state
            %
            %   objv = obj.evaluate_objv();
            %
            % Sums all itemized terms; requires obj.objVs to be fully
            % populated (i.e. update() has been called at least once),
            % including the lpri_beta field.

            o = obj.objVs;
            objv = o.ell_theta + o.ell_z + o.ell_w + ...
                o.ent_theta + o.ent_z + o.lpri_beta;
        end

    end

end
function [Gamma, W, objVs, cvg] = topiclda_varinfer(Beta, alpha, C, w, Gamma, maxIter, tol)
%TOPICLDA_VARINFER Variational Inference Step of Latent Dirichlet Allocation
%
%   [Gamma, W] = TOPICLDA_VARINFER(Beta, alpha, C, w, Gamma, maxIter, tol);
%   [Gamma, W, objVs] = TOPICLDA_VARINFER( ... );
%   [Gamma, W, objVs, cvg] = TOPICLDA_VARINFER( ... );
%
%       Performs a variational inference update step for Latent Dirichlet
%       Allocation (LDA). The per-document fixed-point iteration itself is
%       implemented in the C++ mex core (topiclda_varinfer_cimp).
%
%       Suppose there are V words in vocabulary, K topics, and n documents.
%
%       Input arguments:
%       - Beta:     The word distributions of topics [V x K].
%
%       - alpha:    The Dirichlet prior parameter [scalar or K x 1].
%
%       - C:        The word-count matrix [V x n]
%
%       - w:        The document-weights [empty or a vector of length n]
%
%       - Gamma:    The initial per-document topic distribution matrix [K x n]
%
%       - maxIter:  The maximum number of iterations (per document)
%
%       - tol:      The tolerance at convergence
%                   (in terms of L1-norm of gamma differnce)
%
%       Output arguments:
%       - Gamma:    The solved per-document topic distribution matrix [K x n]
%
%       - W:        The accumulated per-topic word counts [K x V]
%
%       - objVs:    The per-document itemized objective term values [5 x n]
%                   Specifically, objVs(:,i) corresponds to the i-th doc.
%                   1st row:    expected log-likelihood of theta
%                   2nd row:    expected total log-likelihood of z
%                   3rd row:    expected total log-likelihood of w (words)
%                   4th row:    entropy of theta (w.r.t. gamma)
%                   5th row:    entropy of z (w.r.t. phi)
%
%       - cvg:      The vector of convergence indicator. [1 x n]
%                   cvg(i) indicates whether the inference on the i-th
%                   document converges.
%
%   Created by Dahua Lin, on Feb 5, 2012
%

%% Verify inputs

if ~(isfloat(Beta) && isreal(Beta) && ndims(Beta) == 2 && ~issparse(Beta))
    error('topiclda_varinfer:invalidarg', ...
        'Beta should be a non-sparse real matrix.');
end
[V, K] = size(Beta);

if ~(isfloat(alpha) && isreal(alpha) && ...
        (isscalar(alpha) || isequal(size(alpha), [K 1])) )
    error('topiclda_varinfer:invalidarg', ...
        'alpha should be a real scalar or a K x 1 vector.');
end
% a scalar alpha is expanded to a symmetric K x 1 Dirichlet parameter
if isscalar(alpha)
    alpha = alpha(ones(K, 1));
end
if ~isa(alpha, 'double'); alpha = double(alpha); end

if ~(isfloat(C) && isreal(C) && ndims(C) == 2 && size(C, 1) == V)
    error('topiclda_varinfer:invalidarg', ...
        'C should be a real matrix with V rows.');
end
n = size(C, 2);

% default: unit weight per document
if isempty(w)
    w = ones(1, n);
else
    if ~(isfloat(w) && isreal(w) && ~issparse(w) && isvector(w) && numel(w) == n)
        error('topiclda_varinfer:invalidarg', ...
            'w should be a real vector of length n.');
    end
    if ~isa(w, 'double'); w = double(w); end
end

if ~(isfloat(Gamma) && isreal(Gamma) && isequal(size(Gamma), [K n]))
    error('topiclda_varinfer:invalidarg', ...
        'Gamma should be a matrix of size K x n.');
end

if ~(isnumeric(maxIter) && isreal(maxIter) && isscalar(maxIter) && maxIter > 1)
    error('topiclda_varinfer:invalidarg', ...
        'maxIter should be a positive integer.');
end
maxIter = double(maxIter);

if ~(isnumeric(tol) && isreal(tol) && isscalar(tol) && tol > 0)
    error('topiclda_varinfer:invalidarg', ...
        'tol should be a positive real scalar.');
end
tol = double(tol);

%% main

% convert the count matrix to zero-based triplet form for the C++ core
[I, J, wc] = find(C);
I = int32(I) - 1;
J = int32(J) - 1;
if ~isa(wc, 'double'); wc = double(wc); end
if ~isa(Gamma, 'double'); Gamma = double(Gamma); end

[Gamma, cvg, W, objVs] = ...
    topiclda_varinfer_cimp(Beta, alpha, w, I, J, wc, maxIter, tol, Gamma);
function v = topic_ppx(Beta, C, Q)
%TOPIC_PPX Corpus Perplexity for Topic model
%
%   v = TOPIC_PPX(Beta, C, Q);
%
%       Computes the perplexity of a topic model over a corpus.
%
%       With K topics, n documents, and V vocabulary words:
%
%       Input arguments:
%       - Beta:     word distributions of the topics [V x K]
%       - C:        word count table of the corpus [V x n]
%       - Q:        per-document posterior topic distribution [K x n]
%
%       The output v is the perplexity over the corpus, i.e.
%       exp(- total-log-likelihood / total-word-count).
%
%   Remarks
%   -------
%       - A generic helper usable for perplexity evaluation across
%         different kinds of topic models.
%
%   Created by Dahua Lin, on Feb 18, 2012.
%

%% main

perDocLL = topic_loglik(Beta, C, Q);
totalCount = full(sum(C(:)));
v = exp(sum((-1/totalCount) * perDocLL));
classdef gauss_lingen < genmodel_base
    % The class that implements a linear Gaussian generative model
    %
    % The linear Gaussian generative model, with parameter u, is
    % formulated as
    %
    %   x ~ N(A * u, Cx), or x ~ N(u, Cx) if A is identity.
    %
    % Here, let d be the dimension of x, and q be that of u, then
    % A should be a d x q matrix.
    %
    % Here, the observation can be captured by the conjugate update as
    %
    %   dh = sum_i w_i (A' * Jx * x_i);
    %   dJ = sum_i w_i (A' * Jx * A);
    %
    % Here, Jx is the inverse of Cx, i.e. the precision matrix.
    %
    % Created by Dahua Lin, on Dec 13, 2011
    %
    
    %% properties
    
    % basic info
    properties(GetAccess='public', SetAccess='private')
        xdim;           % the dimension of observations (d)
        pdim;           % the dimension of parameters (q)
        use_A = false;  % whether transform matrix A is used.
        Gx;             % the measurement model (gaussd struct, 'c' form)
        Gx_cb;          % a constant value (cb) for Gx
    end
    
    % hyper-parameters
    properties(GetAccess='public', SetAccess='private')
        Jx;     % the precision matrix of the measurement model (pdmat)
        A;      % the transform matrix [empty or d x q]
    end
    
    properties(GetAccess='private', SetAccess='private')
        AtJA;   % the matrix equals A' * Jx * A
    end
    
    methods
        
        function model = set_Jx(model, v)
            % Set the parameter Jx to the model
            %
            %   model = set_Jx(model, Jx);
            %
            %   Jx can be either a positive scalar (isotropic
            %   precision) or a pdmat struct with Jx.n == 1 and
            %   Jx.d == model.xdim.
            %
            
            d = model.xdim;
            
            if isscalar(v)
                if v > 0
                    model.Jx = pdmat_mat('s', d, v);
                else
                    error('gauss_lingen:invalidarg', ...
                        'The precision (Jx) should be a positive value.');
                end
            elseif is_pdmat(v)
                if ~(v.n == 1 && v.d == d)
                    error('gauss_lingen:invalidarg', ...
                        'Jx should have Jx.n == 1 and Jx.d == d.');
                end
                model.Jx = v;
            else
                error('gauss_lingen:invalidarg', ...
                    'Attempt to set Jx to an invalid value.');
            end
            
            Jx_ = model.Jx;
            
            % Rebuild the measurement model as a whole gaussd struct.
            % (Fix: the previous code assigned model.Gx.J = Jx_, which
            % yields a struct with only a J field -- not a valid gaussd
            % -- when the model was constructed from dimensions only
            % and Gx was still empty.)
            model.Gx = gaussd('c', 0, Jx_);
            model.Gx_cb = d / 2 - gaussd_entropy(Jx_, 'c');
            
            if model.use_A
                model.AtJA = gauss_lingen.calc_AtJA(Jx_, model.A);
            end
        end
        
        function model = set_A(model, v)
            % Set the parameter A to the model
            %
            %   model = set_A(model, A);
            %
            %   A should be a real matrix of size d x q.
            %
            
            d = model.xdim;
            q = model.pdim;
            
            if ~(isfloat(v) && isreal(v) && isequal(size(v), [d q]))
                error('gauss_lingen:invalidarg', ...
                    'A should be a real matrix of size d x q.');
            end
            
            model.A = v;
            model.use_A = true;
            
            if ~isempty(model.Jx)
                % Fix: the original referenced an undefined local
                % variable Jx_ here; use the stored precision matrix.
                model.AtJA = gauss_lingen.calc_AtJA(model.Jx, v);
            end
        end
        
    end
    
    
    %% methods
    
    methods
        
        %% constructor
        
        function model = gauss_lingen(Jx, A)
            % Construct a Gaussian generative model as formulated above
            %
            %   model = gauss_lingen(d);
            %   model = gauss_lingen(d, q);
            %
            %       Creates an empty model with xdim == d and pdim == q.
            %       If q is omitted, then it assumes q == d.
            %
            %       After the construction, Jx and A remains empty.
            %       One has to set Jx (and optionally A) before using
            %       the model.
            %
            %   model = gauss_lingen(Jx);
            %   model = gauss_lingen(Jx, A);
            %
            %       Creates a model with given precision matrix Jx,
            %       and (optionally) the transform matrix A.
            %
            
            if isnumeric(Jx) && isscalar(Jx)
                % dimension-only form: gauss_lingen(d) / gauss_lingen(d, q)
                d = Jx;
                if ~(isreal(d) && d == fix(d) && d > 0)
                    error('gauss_lingen:invalidarg', 'd should be a positive integer.');
                end
                
                if nargin < 2
                    q = d;
                elseif isnumeric(A) && isscalar(A)
                    q = A;
                    if ~(isreal(q) && q == fix(q) && q > 0)
                        error('gauss_lingen:invalidarg', 'q should be a positive integer.');
                    end
                else
                    error('gauss_lingen:invalidarg', 'The 2nd argument is invalid.');
                end
                
                model.xdim = double(d);
                model.pdim = double(q);
                
            else
                % parameterized form: gauss_lingen(Jx) / gauss_lingen(Jx, A)
                if ~is_pdmat(Jx)
                    error('gauss_lingen:invalidarg', 'Jx should be a pdmat struct.');
                end
                d = Jx.d;
                
                if nargin < 2
                    q = d;
                    uA = false;
                else
                    if ~(isfloat(A) && isreal(A) && ndims(A) == 2 && size(A,1) == d)
                        error('gauss_lingen:invalidarg', ...
                            'A should be a real matrix with d rows.');
                    end
                    q = size(A, 2);
                    uA = true;
                end
                
                model.xdim = d;
                model.pdim = q;
                model.use_A = uA;
                
                model.Jx = Jx;
                if uA
                    model.A = A;
                    model.AtJA = gauss_lingen.calc_AtJA(Jx, A);
                end
                
                model.Gx = gaussd('c', 0, Jx);
                model.Gx_cb = d / 2 - gaussd_entropy(Jx, 'c');
            end
        end
        
        
        %% observation query
        
        function n = query_obs(model, X)
            % Get the number of observation samples
            %
            %   n = model.query_obs(X);
            %       verifies the validity of X as an observation set,
            %       and returns the number of samples in X.
            %
            
            d = model.xdim;
            if ~(isfloat(X) && isreal(X) && ndims(X) == 2 && size(X,1) == d)
                error('gauss_lingen:invalidarg', ...
                    'The observations should be a real matrix with d rows.');
            end
            n = size(X, 2);
        end
        
        function n = query_params(model, U)
            % Get the number of parameters
            %
            %   n = model.query_params(U);
            %       verifies the validity of U as a parameter set,
            %       and returns the number of parameters in U.
            %
            
            q = model.pdim;
            if ~(isfloat(U) && isreal(U) && ndims(U) == 2 && size(U,1) == q)
                error('gauss_lingen:invalidarg', ...
                    'The observations should be a real matrix with q rows.');
            end
            n = size(U, 2);
        end
        
        function Uc = combine_params(model, varargin) %#ok<MANU>
            % Combine multiple parameters
            %
            %   Uc = model.combine_params(U1, U2, ...);
            %
            
            Uc = [varargin{:}];
        end
        
        
        %% log-likelihood evaluation
        
        function LL = loglik(model, U, X)
            % Evaluate the log-likelihood values at given samples
            %
            %   LL = loglik(model, U, X);
            %
            %       evaluates the log-likelihood at the samples given
            %       in X, with respect to the parameters given in U.
            %
            
            % verify inputs
            
            q = model.pdim;
            if ~(isfloat(U) && isreal(U) && ndims(U) == 2 && size(U, 1) == q)
                error('gauss_lingen:invalidarg', ...
                    'The params U should be a real matrix with q rows.');
            end
            
            % evaluate
            
            % map parameters into the observation space if A is used
            if model.use_A
                U = model.A * U;
            end
            
            % make a gaussd with per-parameter potential vectors
            g = model.Gx;
            g.n = size(U, 2);
            g.h = pdmat_mvmul(g.J, U);
            
            ca = sum(g.h .* U, 1);
            cb = model.Gx_cb;
            
            LL = gaussd_logpdf(g, X, {ca, cb});
        end
        
        
        %% maximum likelihood estimation
        
        function U = mle(model, X, W, I)
            % Performs maximum likelihood estimation of the parameters
            %
            %   U = model.mle(X, W);
            %   U = model.mle(X, W, I);
            %
            %       performs maximum likelihood estimation based on
            %       given (weighted) set of data
            %
            
            % verify inputs
            
            if nargin < 3
                W = [];
            end
            
            if nargin >= 4
                X = X(:, I);
                if ~isempty(W)
                    W = W(I, :);
                end
            end
            n = model.query_obs(X);
            
            % compute (weighted) means of the observations
            
            if isempty(W)
                U = sum(X, 2) * (1 / n);
            else
                sw = sum(W, 1);
                U = X * bsxfun(@times, W, 1 ./ sw);
            end
            
            % when A is used, solve (A' Jx A) \ (A' Jx mean)
            if model.use_A
                H_ = model.AtJA;
                A_ = model.A;
                U = H_ \ (A_' * pdmat_mvmul(model.Jx, U));
            end
        end
        
        
        %% conjugate update
        
        function S = capture(model, X, W, I)
            % Capture observations into conjugate updates
            %
            %   S = model.capture(X, W);
            %   S = model.capture(X, W, I);
            %       computes the conjuate updates to the canonical params
            %       of the prior based on given (weighted) set of samples.
            %
            %       Inputs:
            %       - X:        the sample matrix, size: d x n
            %       - W:        the weight matrix, empty or n x K
            %       - I:        the selected indices
            %
            %       Outputs:
            %       - S:        a gaussd struct (canonical form) that
            %                   carries the updates dh and dJ.
            %
            
            % verify inputs
            
            d = model.xdim;
            q = model.pdim;
            uA = model.use_A;
            Jx_ = model.Jx;
            
            if nargin < 3
                W = [];
            end
            
            if nargin >= 4
                X = X(:, I);
                if ~isempty(W)
                    W = W(I, :);
                end
            end
            n = model.query_obs(X);
            
            % for isotropic precision, scalar-scaling shortcuts apply
            if Jx_.ty == 's'
                Jsca = 1;
                jv = Jx_.v;
            else
                Jsca = 0;
            end
            
            % compute dh
            
            if Jsca
                if isempty(W)
                    dh = sum(X, 2) * jv;
                else
                    dh = X * (jv * W);
                end
            else
                JX = pdmat_mvmul(Jx_, X);
                if isempty(W)
                    dh = sum(JX, 2);
                else
                    dh = JX * W;
                end
            end
            
            if uA
                A_ = model.A;
                dh = A_' * dh;
            end
            
            % compute dJ
            
            if isempty(W)
                K = 1;
                tw = n;
            else
                K = size(W, 2);
                tw = sum(W, 1);
            end
            
            if ~uA
                if Jsca
                    dJ = pdmat('s', d, tw * jv);
                else
                    dJ = pdmat_scale(Jx_, tw);
                end
            else
                dJ0 = model.AtJA;
                if isscalar(tw)
                    dJ = pdmat('f', q, dJ0 * tw);
                else
                    dJ = zeros([size(dJ0), K]);
                    for k = 1 : K
                        dJ(:,:,k) = dJ0 * tw(k);
                    end
                    dJ = pdmat('f', q, dJ);
                end
            end
            
            % make S
            
            S.tag = 'gaussd';
            S.ty = 'c';
            S.n = K;
            S.d = q;
            S.h = dh;
            S.J = dJ;
        end
        
    end
    
    
    %% Auxiliary implementation
    
    methods(Static, Access='private')
        
        function H = calc_AtJA(J, A)
            % Computes A' * J * A as a symmetric numeric matrix
            if J.ty == 's'
                jv = J.v;
                H = jv * (A' * A);
            else
                H = pdmat_pwquad(J, A);
                H = 0.5 * (H + H');     % enforce exact symmetry
            end
        end
        
    end
    
end
| zzhangumd-smitoolbox | pmodels/gauss/gauss_lingen.m | MATLAB | mit | 13,623 |
function X = ppca_ft(M, Z)
% Performs forward transform w.r.t. PPCA model
%
%   X = ppca_ft(M, Z);
%       Maps latent vectors Z into the observed space under the PPCA
%       model M, i.e. x <- W * z (+ mu), with W = B * diag(s).
%
%       Here, Z should be a q x n matrix, and X is a d x n matrix.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    error('ppca_ft:invalidarg', 'M should be a PPCA struct.');
end

%% main

% scale latent coordinates by the principal scales, then project
WZ = bsxfun(@times, M.s.', Z);
X = M.B * WZ;

% shift by the mean unless it is exactly zero
if ~isequal(M.mu, 0)
    X = bsxfun(@plus, X, M.mu);
end
| zzhangumd-smitoolbox | pmodels/gauss/ppca_ft.m | MATLAB | mit | 559 |
function M = ppca_model(B, s, se, mu)
% Constructs a Probabilistic PCA model struct
%
%   M = ppca_model(B, s, se);
%   M = ppca_model(B, s, se, mu);
%
%       constructs a PPCA model struct.
%
%       Input arguments:
%
%       - B:    the basis matrix, of size d x q, where d is the
%               observed space dimension and q the latent space
%               dimension. B should be orthonormal (B' * B == I).
%
%       - s:    the vector of principal scales (length q). The
%               factor matrix W satisfies W = B * diag(s).
%
%       - se:   the noise standard deviation (a positive scalar).
%
%       - mu:   the mean vector: either a d x 1 vector or a zero
%               scalar. Defaults to zero when omitted.
%
%   History
%   -------
%       - Created by Dahua Lin, on Nov 20, 2010
%       - Modified by Dahua Lin, on Nov 3, 2011
%       - Modified by Dahua Lin, on Dec 27, 2011
%

%% verify inputs

if ~(isfloat(B) && isreal(B) && ndims(B) == 2)
    error('ppca_model:invalidarg', 'U should be a real matrix.');
end
[d, q] = size(B);

if q >= d
    error('ppca_model:invalidarg', 'q should be less than d.');
end

if ~(isfloat(s) && isreal(s) && isvector(s) && numel(s) == q)
    error('ppca_model:invalidarg', 's should be a vector of length q.');
end
s = reshape(s, 1, q);   % ensure row orientation

if ~(isfloat(se) && isreal(se) && se > 0)
    error('ppca_model:invalidarg', 'se should be a positive real scalar.');
end

if nargin < 4
    mu = 0;
elseif ~(isequal(mu, 0) || ...
        (isfloat(mu) && isreal(mu) && isequal(size(mu), [d 1])))
    error('ppca_model:invalidarg', ...
        'mu should be either zero or a d x 1 real vector.');
end

%% main

% basic fields

M.tag = 'ppca';
M.d = d;
M.q = q;
M.mu = mu;
M.B = B;
M.s = s;
M.se = se;

% auxiliary field: log-determinant of the equivalent covariance,
% whose eigenvalues are s.^2 + se^2 (first q) and se^2 (remaining d-q)

evs = zeros(1, d);
evs(1:q) = s.^2 + se^2;
evs(q+1:end) = se^2;
M.ldc = sum(log(evs));
| zzhangumd-smitoolbox | pmodels/gauss/ppca_model.m | MATLAB | mit | 2,119 |
function M = ppca_mle(X, w, q, varargin)
% Performs Maximum Likelihood estimation of PPCA from data
%
%   M = ppca_mle(X, [], q, ...);
%   M = ppca_mle(X, w, q, ...);
%
%       performs Maximum Likelihood estimation of the PPCA
%       model from data.
%
%       Input arguments:
%       - X:    The data matrix of size d x n, of which each
%               column is a sample.
%       - w:    The sample weights, a vector of length n.
%               If all samples have the same weight, it can be empty.
%       - q:    the dimension of the latent space. It should
%               have q < min(d, n).
%
%       One can specify further options to control the
%       estimation, in form of name/value pairs.
%
%       - 'method':     The method used to do the training:
%                       - 'cov':  compute the covariance matrix and
%                                 its eigenvectors (default).
%                       - 'svd':  use SVD; can be faster when n < d.
%
%       - 'mu':         Pre-computed mean vector, which can be:
%                       - []: the function computes mu itself.
%                       - a d x 1 mean vector.
%                       - a zero scalar, indicating zero mean.
%
%   History
%   -------
%       - Created by Dahua Lin, on Nov 20, 2010
%       - Modified by Dahua Lin, on Nov 3, 2011
%       - Modified by Dahua Lin, on Dec 27, 2011
%

%% verify arguments

if ~(isfloat(X) && ndims(X) == 2 && isreal(X))
    error('probpca:mle:invalidarg', ...
        'X should be a real matrix.');
end
[d, n] = size(X);

if ~isempty(w)
    if ~(isfloat(w) && isreal(w) && isvector(w) && numel(w) == n)
        error('probpca:mle:invalidarg', ...
            'w should be a vector of length n.');
    end
    if size(w, 2) > 1
        w = w.';
    end
    % Fix: compute the total weight here, unconditionally. It was
    % previously computed only inside the mean-estimation branch, so
    % calling with a non-empty w AND a pre-supplied 'mu' option hit
    % an undefined-variable error when sw was used below.
    sw = sum(w);
end

if ~(isnumeric(q) && isscalar(q) && q == fix(q) && ...
        q >= 1 && q < d && q < n)
    error('probpca:mle:invalidarg', ...
        'q should be a positive integer less than min(d, n).');
end

[method, mu] = chk_opts(d, varargin);

%% main

% mean vector (only when not supplied via the 'mu' option)

if isempty(mu)
    if isempty(w)
        mu = sum(X, 2) * (1 / n);
    else
        mu = (X * w) * (1 / sw);
    end
end

% center the data

if isequal(mu, 0)
    Z = X;
else
    Z = bsxfun(@minus, X, mu);
end

% basis and eigenvalues

switch method
    case 'cov'
        if isempty(w)
            C = (Z * Z') * (1/n);
        else
            C = (Z * bsxfun(@times, Z', w)) * (1/sw);
            C = 0.5 * (C + C');     % enforce exact symmetry
        end
        [U, evs] = eig(C);
        evs = diag(evs);

    case 'svd'
        if isempty(w)
            [U, svs] = svd(Z, 0);
            svs = diag(svs);
            evs = (svs.^2) * (1/n);
        else
            [U, svs] = svd(bsxfun(@times, Z, sqrt(w)'), 0);
            svs = diag(svs);
            evs = (svs.^2) * (1/sw);
        end

    otherwise
        error('ppca_mle:invalidarg', 'The method %s is invalid.', method);
end

% make struct

d = size(U, 1);
[evs, si] = sort(evs, 1, 'descend');

% principal eigenvalues (floored to keep them strictly positive)
qevs = evs(1:q);
qevs = max(qevs, 1e-12);
B = U(:, si(1:q));

% noise variance: average of the residual eigenvalues
se2 = sum(evs(q+1:end)) / (d-q);
se2 = max(se2, 1e-12);

M = ppca_model(B, sqrt(qevs - se2), sqrt(se2), mu);
%% sub functions
function [method, mu] = chk_opts(d, params)
% Parses the option name/value list for ppca_mle.
% Returns the chosen method ('cov' or 'svd') and the user-supplied
% mean vector (or [] when the caller should estimate it).

% defaults
method = 'cov';
mu = [];

if isempty(params)
    return;
end

onames = params(1:2:end);
ovals = params(2:2:end);

if ~(numel(onames) == numel(ovals) && iscellstr(onames))
    error('ppca_mle:invalidarg', ...
        'The name/value list for options is invalid.');
end

for i = 1 : numel(onames)
    cn = onames{i};
    cv = ovals{i};

    switch lower(cn)
        case 'method'
            if ~ischar(cv) || ~any(strcmp(cv, {'cov', 'svd'}))
                error('ppca_mle:invalidarg', ...
                    'The method should be either ''cov'' or ''svd''.');
            end
            method = cv;

        case 'mu'
            if isempty(cv)
                continue;   % empty means "estimate mu from data"
            end
            if ~( isequal(cv, 0) || (isfloat(cv) && isreal(cv) && ...
                    isequal(size(cv), [d 1])) )
                error('ppca_mle:invalidarg', ...
                    'mu should be either 0 or a d x 1 real vector.');
            end
            mu = cv;

        otherwise
            error('ppca_mle:invalidarg', ...
                'Unknown option name %s', cn);
    end
end
| zzhangumd-smitoolbox | pmodels/gauss/ppca_mle.m | MATLAB | mit | 4,833 |
function h = gaussd_ellipse(G, r, n, varargin)
% Draws an Ellipse to represent a Gaussian model
%
%   gaussd_ellipse(G, r, n, ...);
%   h = gaussd_ellipse(G, r, n, ...);
%
%       Input arguments:
%       - G:        A gaussd struct to represent the Gaussian model(s)
%       - r:        the relative radius
%       - n:        the number of points on each ellipse
%
%       Output:
%       - h:        the handle(s) to the drawn lines (one per model
%                   in G). (Fix: the handle was documented but never
%                   returned; it now is, as an optional output.)
%
%       One can also specify additional options to customize the plot
%       as when using the plot function.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~(is_gaussd(G) && G.d == 2)
    error('gaussd_ellipse:invalidarg', ...
        'G should be a gaussd struct with G.d == 2.');
end

if ~(isfloat(r) && isreal(r) && isscalar(r) && r > 0)
    error('gaussd_ellipse:invalidarg', ...
        'r should be a positive real value.');
end

if ~(isnumeric(n) && isscalar(n) && n == fix(n) && n >= 8)
    error('gaussd_ellipse:invalidarg', ...
        'n should be a positive integer with n >= 8.');
end

%% main

% unit circle sampled at n points, scaled to relative radius r
t = linspace(0, 2*pi, n);
Z = [cos(t); sin(t)];
if r ~= 1
    Z = Z * r;
end

if G.n == 1
    % map the circle through the Gaussian (mean + covariance shaping)
    X = gaussd_sample(G, [], Z);
    h = plot(X(1,:), X(2,:), varargin{:});
else
    % one ellipse per component model
    hs = cell(G.n, 1);
    for i = 1 : G.n
        g = gaussd_sub(G, i);
        X = gaussd_sample(g, [], Z);
        hs{i} = plot(X(1,:), X(2,:), varargin{:});
        hold on;
    end
    hold off;
    h = vertcat(hs{:});
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_ellipse.m | MATLAB | mit | 1,349 |
function C = ppca_cov(M)
% Get the covariance matrix of a PPCA model
%
%   C = ppca_cov(M);
%       computes the equivalent covariance matrix of the input PPCA
%       model, i.e. C = W * W' + se^2 * I with W = B * diag(s).
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    error('ppca_cov:invalidarg', 'M should be a PPCA struct.');
end

%% main

% low-rank part plus isotropic noise on the diagonal
F = ppca_W(M);
C = adddiag(F * F', M.se^2);
| zzhangumd-smitoolbox | pmodels/gauss/ppca_cov.m | MATLAB | mit | 368 |
function [ca, cb] = gaussd_const(G)
% Calculate two useful constants of Gaussian distributions
%
%   ca = gaussd_const(G);
%   [ca, cb] = gaussd_const(G);
%
%       This function calculates two useful constants for Gaussian
%       pdf evaluation:
%
%       ca = mu' * h = mu' * inv(C) * mu = h' * inv(J) * h
%       cb = -(1/2) * log((2 * pi)^d * |C|) = d/2 - entropy
%
%       If there is only one output argument, then only ca is computed.
%
%       Outputs:
%       - ca:   In general, a vector of size 1 x G.n. If G.mu or G.h
%               is a zero scalar, then ca is a zero scalar.
%
%       - cb:   A scalar if n' is 1, or a vector of size 1 x n',
%               where n' is G.C.n or G.J.n.
%
%   Created by Dahua Lin, on Dec 5, 2011
%

%% verify

if ~is_gaussd(G)
    error('gaussd_const:invalidarg', 'G must be a gaussd struct.');
end

%% main

if G.ty == 'm'
    % mean parameterization: work from (mu, C)
    ca = calc_ca(G.n, G.mu, G.C);
    if nargout >= 2
        cb = (-0.5) * (G.d * log(2 * pi) + pdmat_lndet(G.C));
    end
else
    % canonical parameterization: work from (h, J)
    ca = calc_ca(G.n, G.h, G.J);
    if nargout >= 2
        cb = (-0.5) * (G.d * log(2 * pi) - pdmat_lndet(G.J));
    end
end
%% core functions
function ca = calc_ca(n, u, S)
% Computes u' * inv(S) * u for each of the n models; returns a zero
% scalar when u is the zero scalar (zero-mean shortcut).

if isequal(u, 0)
    ca = 0;
    return;
end

h = pdmat_lsolve(S, u);
if n == 1
    ca = u' * h;
else
    ca = dot(u, h, 1);
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_const.m | MATLAB | mit | 1,453 |
function R = gaussd_mapest(G, S)
% Performs MAP estimation w.r.t. Gaussian prior
%
%   R = gaussd_mapest(G, S);
%
%       Performs MAP estimation with the Gaussian prior G and the
%       updates to the canonical params of the prior.
%
%       Input arguments:
%       - G:    the Gaussian prior (G.ty == 'c' && G.n == 1)
%       - S:    the gaussd struct that captures the updates derived
%               from the observations.
%
%       Output arguments:
%       - R:    the MAP estimate, i.e. (J_pri + dJ) \ (h_pri + dh).
%
%   Created by Dahua Lin, on Dec 14, 2011
%

%% main

% fold the observation statistics into the prior, then solve for the mode
posterior = gaussd_conjupdate(G, S);
R = pdmat_lsolve(posterior.J, posterior.h);
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_mapest.m | MATLAB | mit | 694 |
function W = ppca_W(M)
% Get the factor matrix W of a PPCA model
%
%   W = ppca_W(M);
%       computes the factor matrix of the input PPCA model,
%       i.e. W = B * diag(s).
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

% validate the input (added for consistency with the other ppca_*
% functions, which all reject non-PPCA structs)
if ~is_ppca(M)
    error('ppca_W:invalidarg', 'M should be a PPCA struct.');
end

%% main

% scale each basis column by its principal scale (s is a 1 x q row)
W = bsxfun(@times, M.B, M.s);
| zzhangumd-smitoolbox | pmodels/gauss/ppca_W.m | MATLAB | mit | 232 |
classdef ppca_gm < genmodel_base
    % The class implementing PPCA as a genmodel_base subclass
    %
    % Created by Dahua Lin, on Nov 6, 2011
    %
    
    properties(GetAccess='public', SetAccess='private')
        dim;    % the observed space dimension (d)
        ldim;   % the latent space dimension (q)
    end
    
    methods
        
        function model = ppca_gm(d, q)
            % Construct a PPCA generative model
            %
            %   model = ppca_gm(d, q);
            %       constructs a PPCA generative model.
            %
            %       - d:    the observed space dimension
            %       - q:    the latent space dimension
            %
            
            model.dim = d;
            model.ldim = q;
        end
        
        function n = query_obs(model, obs)
            % Gets the number of samples
            %
            %   n = model.query_obs(obs);
            %       validates obs as a d x n sample matrix and
            %       returns the number of samples n.
            %
            
            if ~(isfloat(obs) && ndims(obs) == 2 && isreal(obs))
                error('ppca_gm:invalidarg', ...
                    'obs should be a non-sparse real matrix.');
            end
            if size(obs,1) ~= model.dim
                error('ppca_gm:invalidarg', ...
                    'The dimension of obs is invalid.');
            end
            n = size(obs, 2);
        end
        
        function n = query_params(model, Ms)
            % Gets the number of models
            %
            %   n = model.query_params(Ms);
            %       validates Ms as an array of PPCA structs with
            %       matching dimensions and returns its count.
            %
            
            d = model.dim;
            q = model.ldim;
            
            if ~(isstruct(Ms) && is_ppca(Ms(1)) && Ms(1).d == d && Ms(1).q == q)
                error('ppca_gm:invalidarg', ...
                    'The model (params) are invalid.');
            end
            n = numel(Ms);
        end
        
        function L = loglik(model, Ms, X) %#ok<MANU>
            % Evaluate the log-likelihood of all samples w.r.t all models
            %
            %   L = model.loglik(Ms, X);
            %       returns a K x n matrix, where K = numel(Ms) and
            %       n = size(X, 2).
            %
            
            K = numel(Ms);
            if K == 1
                L = ppca_logpdf(Ms, X);
            else
                L = zeros(K, size(X,2));
                for k = 1 : K
                    L(k,:) = ppca_logpdf(Ms(k), X);
                end
            end
        end
        
        function Ms = mle(model, X, W, I)
            % Performs maximum likelihood estimation of models
            %
            %   Ms = model.mle(X, W);
            %   Ms = model.mle(X, W, I);
            %       estimates one PPCA model per column of W (or a
            %       single model when W is empty), optionally over
            %       the subset of samples selected by I.
            %
            
            if nargin < 3
                W = [];
            end
            n = model.query_obs(X);
            
            if ~isempty(W)
                if ~(isfloat(W) && isreal(W) && ndims(W) == 2 && size(W,1) == n)
                    % fix: the message used to say "n columns", while the
                    % check requires n ROWS (one weight row per sample)
                    error('ppca_gm:invalidarg', ...
                        'W should be a real matrix with n rows.');
                end
                K = size(W, 2);
            else
                W = [];
                K = 1;
            end
            
            if nargin >= 4
                X = X(:, I);
                if ~isempty(W)
                    W = W(I, :);
                end
            end
            
            q = model.ldim;
            
            if K == 1
                Ms = ppca_mle(X, W, q);
            else
                Ms = cell(1, K);
                for k = 1 : K
                    Ms{k} = ppca_mle(X, W(:,k), q);
                end
                Ms = vertcat(Ms{:});
            end
        end
        
        function capture(model, X, W) %#ok<INUSD,MANU>
            % Conjugate capture is not defined for PPCA.
            error('ppca_gm:notsupported', ...
                'The capture method is not supported by PPCA.');
        end
        
    end
    
end
| zzhangumd-smitoolbox | pmodels/gauss/ppca_gm.m | MATLAB | mit | 3,960 |
classdef gausspri < prior_base
    % Gaussian prior distribution
    %
    % This class wraps a gaussd struct (converted to canonical form)
    % behind the prior_base interface, caching the two constants used
    % by pdf evaluation.
    %
    
    properties(GetAccess='public', SetAccess='private')
        dim;        % the space dimension
        gdistr;     % the wrapped Gaussian distribution (gaussd, 'c' form)
        const_a;    % the cached Gaussian constant (a)
        const_b;    % the cached Gaussian constant (b)
    end
    
    methods
        
        function obj = gausspri(G)
            % Captures a Gaussian model as prior
            %
            %   obj = gausspri(G);
            %       G should be a gaussd struct with G.n == 1.
            %
            
            if ~(is_gaussd(G) && G.n == 1)
                error('gausspri:invalidarg', ...
                    'G should be a gaussd object with G.n == 1.');
            end
            
            Gc = gaussd('c', G);            % convert to canonical form
            [av, bv] = gaussd_const(Gc);    % pre-compute pdf constants
            
            obj.dim = Gc.d;
            obj.gdistr = Gc;
            obj.const_a = av;
            obj.const_b = bv;
        end
        
        function n = query_samples(obj, X)
            % Verify the validity of input samples and return the number
            %
            %   n = obj.query_samples(X);
            %
            
            ok = isfloat(X) && isreal(X) && ndims(X) == 2 && ...
                size(X,1) == obj.dim;
            if ~ok
                error('query_samples:invalidarg', ...
                    'X should be a real matrix with size(X,1) == d.');
            end
            n = size(X, 2);
        end
        
        function L = logpdf(obj, X)
            % Evaluate the log pdf at given samples
            %
            %   L = obj.logpdf(X);
            %
            
            L = gaussd_logpdf(obj.gdistr, X, {obj.const_a, obj.const_b});
        end
        
        function X = sample(obj, n)
            % Draws n samples from the Gaussian prior
            %
            %   X = obj.sample(n);
            %
            
            X = gaussd_sample(obj.gdistr, n);
        end
        
        function X = pos_sample(obj, S, n)
            % Draws n samples from the posterior distribution
            %
            %   X = obj.pos_sample(S, n);
            %       S is a gaussd struct capturing the observation
            %       statistics; it must have S.n == 1.
            %
            
            if S.n ~= 1
                error('gausspri:invalidarg', ...
                    'S violates the constraint: S.n == 1.');
            end
            X = gaussd_sample(gaussd_conjupdate(obj.gdistr, S), n);
        end
        
        function X = mapest(obj, S)
            % Performs MAP estimation with the stats of observations
            %
            %   X = obj.mapest(S);
            %
            
            X = gaussd_mapest(obj.gdistr, S);
        end
        
    end
    
end
| zzhangumd-smitoolbox | pmodels/gauss/gausspri.m | MATLAB | mit | 3,707 |
function G = agmrf(W, a, y)
% Constructs an attractive Gaussian MRF
%
%   An attractive Gaussian MRF is a Gaussian model p(x) = exp(E(x)) / Z
%   with energy
%
%       E(x) = 1/2 * sum_{(i,j) in E} W(i,j) (x(i) - x(j))^2
%            + 1/2 * a (x(i) - y(i))^2
%
%   or, in matrix form,
%
%       E(x) = 1/2 * (x' * L * x + (x-y)' * Da * (x-y)).
%
%   G = agmrf(W, a);
%   G = agmrf(W, a, y);
%
%       Constructs an attractive Gaussian MRF as formulated above.
%
%       Input arguments:
%       - W:    The weighted adjacency matrix [n x n]
%       - a:    The weights that link x to y: a scalar or a vector
%               of length n.
%       - y:    The y values, a vector of length n. Assumed zero
%               when omitted.
%
%       The output G is a gaussd struct with G.ty == 'c' (canonical
%       parameters).
%
%   Created by Dahua Lin, on Dec 9, 2011
%

%% verify inputs

if ~(isfloat(W) && ndims(W) == 2 && isreal(W))
    error('agmrf:invalidarg', 'W should be a real matrix.');
end
n = size(W, 1);

if ~(isfloat(a) && isreal(a) && ...
        (isscalar(a) || (isvector(a) && length(a) == n)))
    error('agmrf:invalidarg', 'a should be a real vector of length n.');
end

if nargin < 3
    y = 0;
elseif ~(isfloat(y) && isreal(y) && isvector(y) && length(y) == n)
    error('agmrf:invalidarg', 'y should be a real vector of length n.');
end

%% main

% precision: graph Laplacian plus diagonal anchor weights
J = laplacemat(W, a);

% potential vector h = Da * y (zero when either factor vanishes)
if isequal(a, 0) || isequal(y, 0)
    h = 0;
else
    y = y(:);   % force column orientation
    a = a(:);
    if isequal(a, 1)
        h = y;
    else
        h = a .* y;
    end
end

G = gaussd('c', h, J);
| zzhangumd-smitoolbox | pmodels/gauss/agmrf.m | MATLAB | mit | 1,790 |
function v = gaussd_entropy(C, op)
% Compute the entropy of a Gaussian distribution
%
%   v = gaussd_entropy(C);
%   v = gaussd_entropy(C, 'm');
%
%       computes the entropy of Gaussian distribution(s) based on
%       its covariance matrix.
%
%       C should be a pdmat struct. v will be a scalar (if C.n == 1),
%       or a 1 x C.n row vector.
%
%   v = gaussd_entropy(J, 'i');
%   v = gaussd_entropy(J, 'c');
%
%       computes the entropy based on the information matrix.
%
%   v = gaussd_entropy(G);
%
%       computes the entropy of the given Gaussian model, where G is
%       a gaussd struct.
%
%   History
%   -------
%       - Created by Dahua Lin, on Sep 1, 2011
%       - Modified by Dahua Lin, on Dec 5, 2011
%

%% verify input

if is_pdmat(C)
    if nargin < 2
        is_cov = 1;
    else
        if ~(ischar(op) && isscalar(op))
            error('gaussd_entropy:invalidarg', ...
                'The second arg to gauss_entropy should be a character.');
        end
        if op == 'm'
            is_cov = 1;
        elseif op == 'i' || op == 'c'
            is_cov = 0;
        else
            error('gaussd_entropy:invalidarg', 'The 2nd arg to gauss_entropy is invalid.');
        end
    end
    
elseif is_gaussd(C)
    if C.ty == 'm'
        C = C.C;
        is_cov = 1;
    elseif C.ty == 'c'
        C = C.J;
        is_cov = 0;
    else
        % defensive: without this branch, a malformed ty left is_cov
        % undefined and produced a confusing error downstream
        error('gaussd_entropy:invalidarg', ...
            'The gaussd struct has an invalid type tag.');
    end
    
else
    error('gaussd_entropy:invalidarg', ...
        'The first arg should be either a pdmat struct or a gaussd struct.');
end

%% main

% log(2 * pi) + 1, pre-computed to full double precision
log2pip1 = 2.837877066409345483560659472811;

% entropy = (d * (log(2*pi) + 1) +/- log|C or J|) / 2
if is_cov
    v = (C.d * log2pip1 + pdmat_lndet(C)) * 0.5;
else
    v = (C.d * log2pip1 - pdmat_lndet(C)) * 0.5;
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_entropy.m | MATLAB | mit | 1,712 |
function tf = is_gaussd(G)
% Tests whether the input argument is a gaussd struct.
%
%   tf = is_gaussd(G);
%       returns true when G is a scalar struct carrying the field
%       tag with value 'gaussd'.
%
%   Created by Dahua Lin, on Dec 5, 2011
%

tf = isstruct(G) && isscalar(G) ...
    && isfield(G, 'tag') && strcmp(G.tag, 'gaussd');
| zzhangumd-smitoolbox | pmodels/gauss/is_gaussd.m | MATLAB | mit | 241 |
classdef gaussgm < genmodel_base
    % A basic Gaussian generative model:
    %
    %   x ~ N(mu, Sigma);
    %
    % The two parameters (mean vector mu and covariance Sigma) are
    % carried together in a single gaussd struct.
    %
    
    %% properties
    
    properties(GetAccess='public', SetAccess='private')
        dim;                % the space dimension
        cov_form;           % the form of covariance ('s' | 'd' | 'f')
        tied_cov = false;   % whether the covariance is tied across components
    end
    
    %% constructor
    
    methods
        
        function model = gaussgm(d, cf, op)
            % Constructs a Gaussian generative model
            %
            %   model = gaussgm(d, cf);
            %       creates a Gaussian generative model of dimension d
            %       with covariance form cf:
            %       's' - isotropic, 'd' - diagonal, 'f' - full.
            %
            %   model = gaussgm(d, cf, 'tied-cov');
            %       additionally ties the covariance of all component
            %       models.
            %
            
            ok_d = isnumeric(d) && isscalar(d) && d == fix(d) && d >= 1;
            if ~ok_d
                error('gaussgm:invalidarg', 'd should be a positive integer.');
            end
            
            ok_cf = ischar(cf) && isscalar(cf) && any(cf == 'sdf');
            if ~ok_cf
                error('gaussgm:invalidarg', ...
                    'cf should be either of ''s'', ''d'', or ''f''.');
            end
            
            model.dim = d;
            model.cov_form = cf;
            
            if nargin >= 3
                if ~strcmpi(op, 'tied-cov')
                    error('gaussgm:invalidarg', ...
                        'The third argument is invalid.');
                end
                model.tied_cov = true;
            end
        end
        
    end
    
    %% Query and Evaluation
    
    methods
        
        function n = query_obs(model, X)
            % Get the number of samples in the input
            %
            %   n = model.query_obs(X);
            %
            
            ok = isfloat(X) && isreal(X) && ndims(X) == 2 && ...
                size(X,1) == model.dim;
            if ~ok
                error('gaussgm:invalidarg', ...
                    'The sample matrix should be a real matrix with d rows.');
            end
            n = size(X, 2);
        end
        
        function n = query_params(model, G)
            % Get the number of parameters in the input
            %
            %   n = model.query_params(G);
            %       G must use the mean parameterization ('m').
            %
            
            ok = is_gaussd(G) && G.ty == 'm' && G.d == model.dim;
            if ~ok
                error('gaussgm:invalidarg', ...
                    'The parameters G should be a gaussd struct with G.d == d.');
            end
            n = G.n;
        end
        
        function LL = loglik(model, G, X)
            % Evaluate the log-likelihood values at given samples
            %
            %   LL = loglik(model, G, X);
            %       evaluates the log-likelihood at the samples in X,
            %       with respect to the Gaussian distributions in G.
            %
            
            if ~(is_gaussd(G) && G.d == model.dim)
                error('gaussgm:invalidarg', ...
                    'The parameters G should be a gaussd struct with G.d == d.');
            end
            
            LL = gaussd_logpdf(G, X);
        end
        
    end
    
    %% Estimation
    
    methods
        
        function G = mle(model, X, W, I)
            % Performs maximum likelihood estimation of the parameters
            %
            %   G = model.mle(X, W);
            %   G = model.mle(X, W, I);
            %       performs maximum likelihood estimation based on a
            %       given (weighted) set of data, optionally restricted
            %       to the samples selected by I.
            %
            
            if nargin < 3
                W = [];
            end
            
            if nargin >= 4
                X = X(:, I);
                if ~isempty(W)
                    W = W(I, :);
                end
            end
            
            G = gaussd_mle(X, W, model.cov_form, model.tied_cov);
        end
        
        function capture(model, X, W, I) %#ok<INUSD,MANU>
            % Conjugate capture is not defined for this model.
            error('gaussgm:notsupported', ...
                'The capture method is not supported by gaussgm');
        end
        
    end
    
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussgm.m | MATLAB | mit | 4,988 |
function X = ppca_sample(M, n)
% Draws n samples from a PPCA model
%
%   X = ppca_sample(M, n);
%       draws n samples from a PPCA model M. When n is omitted it
%       defaults to 1. Each column of the d x n output X is one
%       sample: a forward-transformed latent draw plus isotropic
%       observation noise.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    error('ppca_sample:invalidarg', 'M should be a ppca struct.');
end

if nargin < 2
    n = 1;
elseif ~(isnumeric(n) && isscalar(n) && n == fix(n) && n >= 1)
    error('ppca_sample:invalidarg', 'n should be a positive integer.');
end

%% main

% draw latent coordinates first, then the observation noise
latent = randn(M.q, n);
noise = randn(M.d, n) * M.se;

X = ppca_ft(M, latent) + noise;
| zzhangumd-smitoolbox | pmodels/gauss/ppca_sample.m | MATLAB | mit | 544 |
function D = gaussd_sqmahdist(G, X, ca)
% Evaluate the squared Mahalanobis distances to Gaussian centers
%
%   D = gaussd_sqmahdist(G, X);
%   D = gaussd_sqmahdist(G, X, ca)
%
%       computes the squared Mahalanobis distance to the centers of
%       the Gaussian distributions in G.
%
%       Given a Gaussian distribution with mean mu and covariance C,
%       the squared Mahalanobis distance of x w.r.t. this model is
%
%           (x - mu)' * inv(C) * (x - mu)
%
%       Inputs:
%       - G:        a gaussd struct.
%       - X:        the matrix comprised of samples as columns: size d x n.
%       - ca:       the value of mu' * inv(C) * mu. If ca is input, the
%                   given value will be used, otherwise, the function
%                   computes it (cf. gaussd_const).
%
%       Outputs: (suppose K = G.n, and n is #samples in X)
%       - D:        a K x n matrix, where D(k, i) is the squared
%                   Mahalanobis distance of X(:,i) to the k-th model in
%                   G.
%
%   Created by Dahua Lin, on Dec 5, 2011
%

%% verify inputs

if ~is_gaussd(G)
    error('gaussd_sqmahdist:invalidarg', 'G should be a gaussd struct.');
end
d = G.d;

if ~(isfloat(X) && isreal(X) && ndims(X) == 2 && size(X,1) == d)
    error('gaussd_sqmahdist:invalidarg', ...
        'X should be a real matrix with size(X,1) == d.');
end

if nargin < 3
    ca = [];
end

%% main

% The distance is expanded as x'Jx - 2 h'x + mu'h with J = inv(C),
% h = J*mu. Below, M2 holds the quadratic term x'Jx; h (and mu) are
% produced along the way only when the center is nonzero (zm == 0).

% compute M2 (and at the same time mu or h)

ty = G.ty;

if ty == 'm'        % with mean params
    
    C = G.C;
    mu = G.mu;
    % zm flags an exactly-zero mean: the linear and constant terms vanish
    if isequal(mu, 0)
        zm = 1;
    else
        zm = 0;
    end
    
    if d == 1
        % 1-D case: C.v is a vector of variances, one per model
        r = 1 ./ C.v(:);
        M2 = r * (X.^2);
        if ~zm
            if G.n == 1
                h = r * mu;
            else
                h = bsxfun(@times, r.', mu);
            end
        end
        
    else
        % multi-dimensional case: branch on the covariance form
        switch C.ty
            case 's'
                % isotropic: scale the squared norms by 1/variance
                r = 1 ./ C.v;
                M2 = r' * sum(X.^2, 1);
                if ~zm
                    if G.n == 1
                        h = r * mu;
                    else
                        h = bsxfun(@times, r, mu);
                    end
                end
                
            case 'd'
                % diagonal: per-coordinate inverse variances
                r = 1 ./ C.v;
                M2 = r' * (X.^2);
                if ~zm
                    if C.n == G.n
                        h = r .* mu;
                    else
                        h = bsxfun(@times, r, mu);
                    end
                end
                
            case 'f'
                % full covariance: solve C \ x per model
                Cm = C.v;
                if C.n == 1
                    if zm
                        JX = Cm \ X;
                    else
                        % single factorization for both X and mu
                        [JX, h] = solve_two(Cm, X, mu);
                    end
                    M2 = dot(X, JX, 1);
                else
                    M2 = zeros(C.n, size(X,2));
                    if zm
                        for k = 1 : C.n
                            M2(k, :) = dot(X, Cm(:,:,k) \ X, 1);
                        end
                    else
                        h = zeros(d, G.n);
                        for k = 1 : C.n
                            [JX, h(:,k)] = solve_two(Cm(:,:,k), X, mu(:,k));
                            M2(k, :) = dot(X, JX, 1);
                        end
                    end
                end
        end
    end
    
elseif ty == 'c'    % with canonical params
    
    J = G.J;
    h = G.h;
    % zm flags a zero potential vector (zero mean)
    if isequal(h, 0)
        zm = 1;
    else
        zm = 0;
    end
    
    M2 = pdmat_quad(J, X);
    
    if isempty(ca) && ~zm   % need mu to calculate ca
        mu = pdmat_lsolve(J, h);
    end
end

% combine terms

if zm
    % zero center: distance is the pure quadratic term
    D = M2;
else
    K = G.n;
    
    % constant term ca = h' * mu, computed unless supplied by caller
    if isempty(ca)
        if K == 1
            ca = h' * mu;
        else
            ca = dot(h, mu, 1);
        end
    else
        if ~(isfloat(ca) && isreal(ca) && isvector(ca) && numel(ca) == K)
            error('gaussd_sqmahdist:invalidarg', ...
                'The input ca should be a vector of length K.');
        end
    end
    
    % linear term; broadcast when M1 and M2 have different row counts
    M1 = h' * X;
    
    if size(M1, 1) == size(M2, 1)
        D = M2 - 2 * M1;
    else
        D = bsxfun(@minus, M2, 2 * M1);
    end
    D = bsxfun(@plus, D, ca(:));
end

% clamp tiny negative values caused by floating-point round-off
D(D < 0) = 0;
%% auxiliary function
function [JX, h] = solve_two(C, X, mu)
% Solves C \ [X, mu] with a single factorization, then splits the
% result into the sample part JX and the center part h.

sol = C \ [X, mu];
nx = size(X, 2);
JX = sol(:, 1:nx);
h = sol(:, nx+1:end);
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_sqmahdist.m | MATLAB | mit | 4,523 |
function Z = ppca_bt(M, X)
% Performs backward transform w.r.t. PPCA model
%
%   Z = ppca_bt(M, X);
%       Transforms the observed vectors in X into the latent space
%       of the PPCA model M: project onto the basis B and divide by
%       the principal scales.
%
%       Here, X should be a d x n matrix, and Z is a q x n matrix.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    error('ppca_bt:invalidarg', 'M should be a PPCA struct.');
end

%% main

% center the data unless the mean is exactly zero
if ~isequal(M.mu, 0)
    Xc = bsxfun(@minus, X, M.mu);
else
    Xc = X;
end

% project onto the basis, then undo the principal scaling
Z = bsxfun(@times, M.B' * Xc, 1 ./ M.s.');
| zzhangumd-smitoolbox | pmodels/gauss/ppca_bt.m | MATLAB | mit | 545 |
function J = ppca_icov(M)
% Get the inverse covariance matrix of a PPCA model
%
%   J = ppca_icov(M);
%       computes the inverse covariance matrix of the input PPCA
%       model via the Woodbury identity applied to
%       C = W * W' + se^2 * I.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    % fix: the error identifier used to say 'ppca_cov:invalidarg'
    % (copy-pasted from ppca_cov); it now names this function
    error('ppca_icov:invalidarg', 'M should be a PPCA struct.');
end

%% main

% inv(C) = (1/se^2) * (I - V * V'), where the columns of V are the
% basis vectors scaled by sqrt(s^2 / (s^2 + se^2))
v = 1 / (M.se^2);
a = 1 ./ (1 ./ ((M.s.^2) .* v) + 1);
V = bsxfun(@times, M.B, sqrt(a));
J = v * (eye(M.d) - V * V');
| zzhangumd-smitoolbox | pmodels/gauss/ppca_icov.m | MATLAB | mit | 449 |
function dists = ppca_sqmahdist(M, X)
% Compute squared Mahalanobis distances to the center of PPCA model
%
%   dists = ppca_sqmahdist(M, X);
%       computes the squared Mahalanobis distances from the samples
%       in X to the center of the PPCA model M, defined by
%
%           (x - mu)' * inv(C) * (x - mu).
%
%       If X has n columns (each column is a sample), then dists is
%       a 1 x n vector.
%
% Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~is_ppca(M)
    error('ppca_sqmahdist:invalidarg', 'M should be a PPCA struct.');
end

if ~(isfloat(X) && isreal(X) && ndims(X) == 2 && size(X,1) == M.d)
    error('ppca_sqmahdist:invalidarg', ...
        'X should be a real matrix with d rows.');
end

%% main

% shift the samples to the model center
mu = M.mu;
if ~isequal(mu, 0)
    X = bsxfun(@minus, X, mu);
end

% split each sample into its principal-subspace coefficients (Y)
% and the residual part (R)
B = M.B;
Y = B' * X;
R = X - B * Y;

% accumulate the quadratic terms of the two parts
nv = M.se^2;            % noise variance
pv = M.s.^2 + nv;       % variances along the principal directions
dists = (1 ./ pv) * (Y.^2) + (1/nv) * sum(R.^2, 1);
| zzhangumd-smitoolbox | pmodels/gauss/ppca_sqmahdist.m | MATLAB | mit | 981 |
classdef gmrf_blk_gibbs
    % The class that implements a block gibbs sampler for Gaussian MRF
    %
    % Each step re-draws the variables of one block conditioned on the
    % current values of all the other variables.
    %
    %   Created by Dahua Lin, on Dec 9, 2011
    %

    %% properties

    properties(GetAccess='public', SetAccess='private')
        gmrf;       % the underlying gaussian model (gaussd, ty == 'c')
        blocks;     % the cell array of index vectors for all blocks
        Jmats;      % the cell array of within-block precision matrices
        Lmats;      % the cell array of (upper) Cholesky factors of Jmats
        Fmats;      % the cell array of block-to-rest coupling matrices
    end

    %% constructor

    methods

        function obj = gmrf_blk_gibbs(G, blocks)
            % Construct a block gibbs sampler for a Gaussian MRF
            %
            %   obj = gmrf_blk_gibbs(G, blocks);
            %
            %       Input parameters:
            %       - G:        the underlying Gaussian MRF. G should be a
            %                   gaussd object, with G.ty == 'c'.
            %
            %       - blocks:   a cell array of blocks, each cell is a vector
            %                   of indices of variables within a block.
            %

            % verify arguments

            if ~(is_gaussd(G) && G.ty == 'c' && G.n == 1)
                error('gmrf_blk_gibbs:invalidarg', ...
                    'G should be a gaussd struct with G.ty == ''c'' and G.n == 1.');
            end

            if ~iscell(blocks)
                error('gmrf_blk_gibbs:invalidarg', ...
                    'blocks should be a cell array of index vectors.');
            end

            nb = numel(blocks);
            for i = 1 : nb
                b = blocks{i};
                if ~(isvector(b) && isnumeric(b))
                    error('gmrf_blk_gibbs:invalidarg', ...
                        'blocks{%d} is invalid.', i);
                end
            end

            % pre-compute block-wise data

            Js = cell(nb, 1);
            Ls = cell(nb, 1);
            Fs = cell(nb, 1);

            J = G.J.v;

            for i = 1 : nb
                b = blocks{i};
                Ji = full(J(b, b));
                Js{i} = Ji;
                Ls{i} = chol(Ji);   % upper factor: Li' * Li == Ji

                % Fi couples block b with the remaining variables
                Fi = J(b, :);
                Fi(:, b) = [];
                Fs{i} = Fi;
            end

            % set fields

            obj.gmrf = G;
            obj.blocks = blocks;
            obj.Jmats = Js;
            obj.Lmats = Ls;
            obj.Fmats = Fs;
        end

    end

    %% sampling

    methods

        function X = update(obj, X, s)
            % Performs Gibbs updates
            %
            %   X = obj.update(X, s);
            %
            %       Updates given samples following a sequence of steps.
            %       Each step re-draws the values of a specific block from
            %       its conditional Gaussian distribution.
            %
            %       Inputs:
            %       - X:        the samples to be updated. It can be
            %                   a column vector of size d x 1, or a
            %                   matrix of size d x n (n is the number
            %                   of chains that are being simulated).
            %
            %       - s:        the sequence of blocks to be updated.
            %                   The same block can repeat in the sequence.
            %

            % take useful fields

            d = obj.gmrf.d;
            h = obj.gmrf.h;
            B = obj.blocks;
            Js = obj.Jmats;
            Ls = obj.Lmats;
            Fs = obj.Fmats;

            lopts.UT = true;    % the factors in Lmats are upper-triangular

            % verify inputs

            if ~(isfloat(X) && isreal(X) && ndims(X) == 2 && size(X,1) == d)
                error('gmrf_blk_gibbs:invalidarg', ...
                    'X should be a real matrix with size(X,1) == gmrf.d.');
            end
            n = size(X, 2);

            if ~(isvector(s) && isnumeric(s) && isreal(s))
                error('gmrf_blk_gibbs:invalidarg', ...
                    's should be sequence of block indices.');
            end
            if size(s, 1) > 1; s = s.'; end

            % Go!

            for i = s
                b = B{i};
                di = numel(b);

                Ji = Js{i};
                Li = Ls{i};
                Fi = Fs{i};

                Xr = X;
                Xr(b, :) = [];
                dH = Fi * Xr;

                % conditional potential: Hi = h(b) - Fi * Xr
                if isequal(h, 0)
                    Hi = -dH;
                else
                    if n == 1
                        Hi = h(b) - dH;
                    else
                        % bug fix: this previously was
                        % bsxfun(@minus, h(b), -dH), i.e. h(b) + dH,
                        % which contradicts the single-chain branch above
                        Hi = bsxfun(@minus, h(b), dH);
                    end
                end

                % conditional mean plus noise with covariance inv(Ji)
                Ui = Ji \ Hi;
                Z = randn(di, n);
                dX = linsolve(Li, Z, lopts);

                X(b, :) = Ui + dX;
            end

        end

    end

end
| zzhangumd-smitoolbox | pmodels/gauss/gmrf_blk_gibbs.m | MATLAB | mit | 5,279 |
function X = ellip_slice_sample(x0, llikfun, V, intv, n)
%ELLIP_SLICE_SAMPLE Elliptical Slice Sampling
%
%   x = ELLIP_SLICE_SAMPLE(x0, llikfun, v);
%
%       Draws the next sample from the posterior formulated as below
%
%           x ~ N(x | 0, Sigma) * llik( x );
%
%       Here, Sigma is the prior covariance matrix, and llik is an
%       arbitrary likelihood function, which is given.
%
%       Input arguments:
%       - x0:       The current sample (a d x 1 column vector)
%
%       - llikfun:  The log-likelihood function, which evaluates the
%                   log-likelihood of a given parameter.
%
%       - v:        A sample vector drawn from N(0, Sigma).
%
%       This statement outputs a sample obtained by one iteration of
%       the sampling procedure.
%
%   X = ELLIP_SLICE_SAMPLE(x0, llikfun, V, intv, n);
%
%       Generates n samples by running the elliptical slice sampling
%       algorithm. The function runs intv iterations before generating
%       each sample.
%
%       Note that the size of V, the matrix comprised of all pre-sampled
%       vectors from the prior, should be d x (intv x n), meaning there
%       are intv x n columns in V.
%
% Created by Dahua Lin, on Feb 25, 2012
%

%% verify input arguments

if ~(isfloat(x0) && isreal(x0) && ndims(x0) == 2 && size(x0,2) == 1)
    error('ellip_slice_sample:invalidarg', ...
        'x0 should be a real vector.');
end
d = size(x0, 1);

if ~(isa(llikfun, 'function_handle'))
    error('ellip_slice_sample:invalidarg', ...
        'llikfun should be a function handle.');
end

if ~(isfloat(V) && isreal(V) && ismatrix(V) && size(V,1) == d)
    error('ellip_slice_sample:invalidarg', ...
        'V should be a real vector or matrix with size(V,1) == d.');
end

if nargin < 4
    intv = 1;
else
    % fixed: the error identifier previously said 'ellipse_slice_sample'
    if ~(isnumeric(intv) && isscalar(intv) && intv >= 1)
        error('ellip_slice_sample:invalidarg', ...
            'intv should be a positive number.');
    end
end

if nargin < 5
    n = 1;
else
    % fixed: the error identifier previously said 'ellipse_slice_sample'
    if ~(isnumeric(n) && isscalar(n) && n >= 1)
        error('ellip_slice_sample:invalidarg', ...
            'n should be a positive number.');
    end
end

if size(V, 2) ~= intv * n
    error('ellip_slice_sample:invalidarg', ...
        'The number of columns in V is incorrect.');
end

%% main

x = x0;
likv = llikfun(x);

if n == 1
    if intv == 1
        X = run_ess(x, likv, V, llikfun);
    else
        for j = 1 : intv
            [x, likv] = run_ess(x, likv, V(:,j), llikfun);
        end
        X = x;
    end
else
    X = zeros(d, n);
    if intv == 1
        for i = 1 : n
            [x, likv] = run_ess(x, likv, V(:,i), llikfun);
            X(:,i) = x;
        end
    else
        for i = 1 : n
            for j = 1 : intv
                v = V(:, (i-1)*intv+j);
                [x, likv] = run_ess(x, likv, v, llikfun);
            end
            X(:,i) = x;
        end
    end
end
%% core function

function [x, likv] = run_ess(x, likv, v, llikfun)
% One elliptical-slice-sampling transition along the ellipse spanned
% by the current point x and the prior draw v.

% draw the log-likelihood acceptance threshold
logy = likv + log(rand());

% initial proposal angle and its shrinking bracket
t = (2*pi) * rand();
lb = t - 2*pi;
rb = t;

while true
    tx = x * cos(t) + v * sin(t);
    tlik = llikfun(tx);
    if tlik >= logy
        break;      % accepted
    end

    % shrink the bracket towards zero and re-draw the angle
    if t < 0
        lb = t;
    else
        rb = t;
    end
    t = lb + rand() * (rb - lb);
end

x = tx;
likv = tlik;
| zzhangumd-smitoolbox | pmodels/gauss/ellip_slice_sample.m | MATLAB | mit | 3,413 |
function tf = is_ppca(M)
% Tests whether the input argument is a PPCA model struct
%
%   tf = is_ppca(M);
%       returns true iff M is a scalar struct tagged 'ppca'.
%
% Created by Dahua Lin, on Dec 27, 2011
%

%% main

tf = isstruct(M) && isscalar(M) && isfield(M, 'tag') && ...
        strcmp(M.tag, 'ppca');
| zzhangumd-smitoolbox | pmodels/gauss/is_ppca.m | MATLAB | mit | 251 |
function Gpos = gaussd_conjupdate(Gpri, S)
% Computes the posterior Gaussian distribution via conjugate update
%
%   Gpos = gaussd_conjupdate(Gpri, S);
%
%       Estimates the posterior distribution via conjugate updates on
%       a given prior, by adding the canonical parameters:
%
%           Gpos.h = Gpri.h + S.h;
%           Gpos.J = Gpri.J + S.J;
%
%       Input arguments:
%       - Gpri:     the Gaussian prior (Gpri.ty == 'c' && Gpri.n == 1)
%       - S:        the gaussd struct that captures the updates
%                   (S.ty == 'c' and S.d == Gpri.d; S may carry
%                   multiple update columns in S.h)
%
%       Output arguments:
%       - Gpos:     the posterior Gaussian distribution(s)
%
% Created by Dahua Lin, on Dec 14, 2011
%

%% verify input arguments

if ~(is_gaussd(Gpri) && Gpri.ty == 'c' && Gpri.n == 1)
    error('gaussd_conjupdate:invalidarg', ...
        'G should be a gaussd struct with G.ty == ''c'' and G.n == 1.');
end
d = Gpri.d;

if ~(is_gaussd(S) && S.d == d && S.ty == 'c')
    % fixed: this message previously duplicated the one for Gpri
    error('gaussd_conjupdate:invalidarg', ...
        'S should be a gaussd struct with S.ty == ''c'' and S.d == Gpri.d.');
end

%% main

% posterior potential vector
dh = S.h;
if isequal(Gpri.h, 0)
    h = dh;
else
    % bug fix: an undefined variable `n` was used here before;
    % branch on the actual number of update columns instead
    if size(dh, 2) == 1
        h = Gpri.h + dh;
    else
        h = bsxfun(@plus, Gpri.h, dh);
    end
end

% posterior precision matrix
J = pdmat_plus(Gpri.J, S.J);

% output

Gpos.tag = 'gaussd';
Gpos.ty = 'c';
Gpos.n = size(h, 2);
Gpos.d = d;
Gpos.h = h;
Gpos.J = J;
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_conjupdate.m | MATLAB | mit | 1,251 |
function G = gaussd_mle(X, W, cform, tie_cov)
% Performs Maximum likelihood estimation of Gaussian distributions
%
%   G = gaussd_mle(X);
%   G = gaussd_mle(X, W);
%   G = gaussd_mle(X, W, cform);
%   G = gaussd_mle(X, W, cform, tie_cov);
%
%       performs maximum likelihood estimation of Gaussian models based on
%       a (weighted) set of samples given by columns of X.
%
%       Input arguments:
%       - X:        the sample matrix of size d x n. Each column is a
%                   sample.
%
%       - W:        The sample weights.
%                   It can be omitted, empty, or an n x K matrix.
%                   If omitted or empty, then all samples are assumed to
%                   have the same weight. If W is an n x K matrix, then
%                   K distributions are to be estimated, and the k-th one
%                   is estimated based on the weights given in W(:,k).
%
%       - cform:    the char indicating the form of covariance matrix.
%                   It can take either of the following values:
%                   - 's':  isotropic covariance in form of c * I
%                   - 'd':  diagonal covariance
%                   - 'f':  full covariance form
%                   If omitted, cform is assumed to be 'f'.
%
%       - tie_cov:  If multiple distributions are to be estimated, whether
%                   their covariance is tied to the same one.
%                   If omitted, tie_cov is assumed to be false.
%
%   History
%   -------
%       - Created by Dahua Lin, on Nov 14, 2010
%       - Modified by Dahua Lin, on Aug 25, 2010
%       - Modified by Dahua Lin, on Dec 6, 2010
%

%% verify input

if isfloat(X) && ndims(X) == 2
    [d, n] = size(X);
else
    error('gaussd_mle:invalidarg', 'X should be a d x n numeric matrix.');
end

if nargin < 2 || isempty(W)
    W = [];
    K = 1;
else
    if ~(isfloat(W) && isreal(W) && ...
            (isscalar(W) || (ndims(W)==2 && size(W,1) == n)))
        error('gaussd_mle:invalidarg', ...
            'W should be a scalar or a real matrix with n rows.');
    end
    K = size(W, 2);     % one model is estimated per weight column
end

if nargin < 3
    cform = 'f';
else
    if ~(ischar(cform) && isscalar(cform) && any(cform == 'sdf'))
        error('gaussd_mle:invalidarg', ...
            'cform should be either ''s'', ''d'', or ''f''.');
    end
end

if nargin < 4
    tie_cov = false;
else
    if ~(islogical(tie_cov) && isscalar(tie_cov))
        error('gaussd_mle:invalidarg', 'tie_cov should be a logical scalar.');
    end
end

%% main

% preparation

if ~isempty(W)
    % normalize the weights so that each column sums to one
    sw = sum(W, 1);
    if issparse(sw)
        sw = full(sw);
    end
    W = bsxfun(@times, W, 1 ./ sw);
    % sw becomes the relative total weight of each component (K x 1);
    % it is used below when tying the covariance across components
    sw = sw.' / sum(sw);
end

% estimate mean vectors

mu = mean_w(X, W);

% estimate variance / covariance

switch cform
    case 's'
        % isotropic: average squared norm minus squared mean norm,
        % spread evenly over the d dimensions
        if d == 1
            ex2 = mean_w(X .* X, W);
        else
            ex2 = mean_w(dot(X, X, 1), W);
        end
        v = ex2 - dot(mu, mu, 1);
        if K > 1 && tie_cov
            v = mean_w(v, sw);
        end
        if d == 1
            C = pdmat('s', 1, v);
        else
            C = pdmat('s', d, v * (1/d));
        end
        
    case 'd'
        % diagonal: per-dimension variances, var = E[x^2] - mu^2
        ex2 = mean_w(X.^2, W);
        v = ex2 - mu .^ 2;
        if K > 1 && tie_cov
            v = mean_w(v, sw);
        end
        C = pdmat('d', d, v);
        
    case 'f'
        % full covariance matrices
        if K == 1
            C = calc_cov(X, mu, W);
        else
            if tie_cov
                % weighted average of the per-component covariances
                C = zeros(d, d);
                for k = 1 : K
                    C = C + sw(k) * calc_cov(X, mu(:,k), W(:,k));
                end
            else
                C = zeros(d, d, K);
                for k = 1 : K
                    C(:,:,k) = calc_cov(X, mu(:,k), W(:,k));
                end
            end
        end
        C = pdmat('f', d, C);
end

% generate Gaussian model

G = gaussd('m', mu, C);
%% Auxiliary functions

function y = mean_w(x, w)
% Column-wise (weighted) mean: plain average when w is empty, else x * w.

if ~isempty(w)
    y = x * w;
else
    y = sum(x, 2) * (1 / size(x,2));
end
function C = calc_cov(X, mu, W)
% (Weighted) full covariance estimate: E[x x'] - mu * mu', symmetrized.

if isempty(W)
    Exx = (X * X') * (1 / size(X,2));
else
    if issparse(W)
        % restrict the computation to samples with nonzero weight
        [I, ~, w] = find(W);
        X = X(:, I);
        W = w;
    end
    Exx = X * bsxfun(@times, X', W);
end

C = Exx - mu * mu';
C = 0.5 * (C + C');     % enforce exact symmetry
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_mle.m | MATLAB | mit | 4,342 |
function LP = gaussd_logpdf(G, X, ca_cb)
% Evaluate the log-pdf of Gaussian models
%
%   LP = gaussd_logpdf(G, X);
%   LP = gaussd_logpdf(G, X, {ca, cb})
%
%       computes the log-pdf of the samples given as columns of X,
%       with respect to the Gaussian distributions in G.
%
%       Inputs:
%       - G:        A gaussd struct
%       - X:        the sample matrix: each column is a sample.
%       - ca_cb:    the two constants evaluated by gaussd_const.
%                   If not provided, the function computes them
%                   itself (via gaussd_sqmahdist / gaussd_entropy).
%
%       Outputs:
%       - LP:       the evaluated result matrix. Suppose K = G.n and
%                   there are n columns in X. Then the size of LP is
%                   K x n. In particular, LP(k, i) is the log-pdf
%                   at X(:,i) w.r.t. the k-th model in G.
%
% Created by Dahua Lin, on Dec 5, 2011
%

%% main

if nargin >= 3
    if ~(iscell(ca_cb) && numel(ca_cb) == 2)
        error('gaussd_logpdf:invalidarg', ...
            'The 3rd argument to gaussd_logpdf should be a cell array with two cells.');
    end
    ca = ca_cb{1};
    cb = ca_cb{2};
else
    ca = [];
    cb = [];
end

% squared Mahalanobis distances to the model center(s)
if isempty(ca)
    D = gaussd_sqmahdist(G, X);
else
    D = gaussd_sqmahdist(G, X, ca);
end

% log normalizing constant(s)
if isempty(cb)
    cb = G.d / 2 - gaussd_entropy(G);
end

if isscalar(cb)
    LP = cb - 0.5 * D;
else
    LP = bsxfun(@minus, cb(:), 0.5 * D);
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_logpdf.m | MATLAB | mit | 1,385 |
function G = gaussd(op, param1, param2)
% Constructs a Gauss distribution struct
%
%   G = gaussd('m', mu, C);
%
%       constructs a Gaussian distribution struct with mean and
%       covariance.
%
%       Suppose you are to construct a struct G comprised of n
%       Gaussian distributions on a d-dimensional space.
%
%       Inputs:
%       - mu:       the mean vector(s), which can be either a d x n matrix
%                   or just zero (indicating it has zero-mean)
%       - C:        C can be given in either of the following form:
%                   - a scalar s:       indicating a covariance s * I
%                   - a d x 1 vector:   indicating a diagonal covariance
%                   - a d x d full covariance matrix
%                   - a pdmat struct, with C.d == d.
%                   Here, C.n can be 1 or n. When C.n == 1, it means
%                   all distributions shared the same covariance.
%
%       Outputs:
%       - G:        a gaussd struct using mean parameters.
%
%   G = gaussd('c', h, J);
%
%       constructs a Gaussian distributin struct with potential vector
%       and precision matrix.
%
%       Suppose you are to construct a struct G comprised of n
%       Gaussian distributions on a d-dimensional space.
%
%       Inputs:
%       - h:        the potential vector(s), which can be either a d x n
%                   matrix or just zero (indicating it has zero-mean)
%       - J:        J can be given in either of the following form:
%                   - a scalar s:       indicating a matrix s * I
%                   - a d x 1 vector:   indicating a diagonal precision
%                                       matrix
%                   - a d x d full precision matrix
%                   - a pdmat struct, with J.d == d.
%                   Here, J.n can be 1 or n. When J.n == 1, it means
%                   all distributions shared the same precision matrix.
%
%       Outputs:
%       - G:        a gaussd struct using canonical parameters.
%
%   G = gaussd('m', G0);
%   G = gaussd('c', G0);
%
%       converts the model G0 to a specific gaussd struct type.
%       Here, G0 can be a gaussd struct or a ppca struct.
%
% Re-created by Dahua Lin, on Dec 5, 2011
%

%% main skeleton

if ischar(op) && isscalar(op)
    if op == 'm'
        if isnumeric(param1)
            % G = gaussd('m', mu, C): build from raw numeric inputs
            [d, n, mu, C] = verify_args(param1, param2, 'mu', 'C');
            G.tag = 'gaussd';
            G.ty = 'm';
            G.d = d;
            G.n = n;
            G.mu = mu;
            G.C = C;
        elseif is_gaussd(param1)
            % G = gaussd('m', G0): convert an existing gaussd struct
            ty = param1.ty;
            if ty == 'm'
                G = param1;
            elseif ty == 'c'
                G = cvt_c2m(param1);
            end
        elseif is_ppca(param1)
            % G = gaussd('m', ppca): wrap a PPCA model as a full Gaussian
            M = param1;
            G.tag = 'gaussd';
            G.ty = 'm';
            G.d = M.d;
            G.n = 1;
            G.mu = M.mu;
            C = ppca_cov(M);
            G.C = pdmat(C);
        else
            error('gaussd:invalidarg', 'The inputs are invalid.');
        end
    elseif op == 'c'
        if isnumeric(param1)
            % G = gaussd('c', h, J): build from raw numeric inputs
            [d, n, h, J] = verify_args(param1, param2, 'h', 'J');
            G.tag = 'gaussd';
            G.ty = 'c';
            G.d = d;
            G.n = n;
            G.h = h;
            G.J = J;
        elseif is_gaussd(param1)
            % G = gaussd('c', G0): convert an existing gaussd struct
            ty = param1.ty;
            if ty == 'c'
                G = param1;
            elseif ty == 'm'
                G = cvt_m2c(param1);
            end
        elseif is_ppca(param1)
            % G = gaussd('c', ppca): derive canonical params (J, h = J*mu)
            M = param1;
            G.tag = 'gaussd';
            G.ty = 'c';
            G.d = M.d;
            G.n = 1;
            J = ppca_icov(M);
            if isequal(M.mu, 0)
                h = 0;
            else
                h = J * M.mu;
            end
            G.h = h;
            G.J = pdmat(J);
        else
            error('gaussd:invalidarg', 'The inputs are invalid.');
        end
    else
        error('gaussd:invalidarg', ...
            'The 1st argument to gaussd can only be either ''m'' or ''c''.');
    end
else
    error('gaussd:invalidarg', ...
        'The 1st argument to gaussd can only be either ''m'' or ''c''.');
end
%% verify and parse inputs

function [d, n, a1, a2] = verify_args(a1, a2, a1_name, a2_name)
% Validates an input pair (vector(s) a1, matrix form a2), determines the
% dimension d and the number of models n, and normalizes a2 into a pdmat
% struct. a1_name / a2_name are used only to compose the error messages.

if ~(isfloat(a1) && isreal(a1) && ndims(a1) == 2)
    error('%s should be a real matrix.', a1_name);
end

if isfloat(a2) && isreal(a2)
    % a2 is given in raw numeric form: wrap it into a pdmat struct
    if isequal(a1, 0)
        % zero-mean case: the dimension is determined by a2 alone
        a2 = pdmat(a2);
        d = a2.d;
        n = 1;
    else
        [d, n] = size(a1);
        if isscalar(a2)
            % scalar a2 means an isotropic matrix a2 * I
            a2 = pdmat('s', d, a2);
        else
            a2 = pdmat(a2);
            if a2.d ~= d
                error('The size of %s and %s is inconsistent.', a1_name, a2_name);
            end
        end
    end
elseif is_pdmat(a2)
    % a2 is already a pdmat struct: only validate consistency
    if isequal(a1, 0)
        d = a2.d;
        if a2.n ~= 1
            error('%s.n must be one when %s is a zero scalar.', a2_name, a1_name);
        end
        n = 1;
    else
        [d, n] = size(a1);
        % a2 may hold one matrix per model, or a single shared one
        if ~(a2.d == d && (a2.n == n || a2.n == 1))
            error('The size of %s and %s is inconsistent.', a1_name, a2_name);
        end
    end
else
    error('The form of %s is invalid.', a2_name);
end
%% conversion functions

function G = cvt_c2m(G0)
% Convert a c-type (canonical) gaussd struct to the m-type form:
%
%   C  = inv(J);
%   mu = C * h;
%
Cm = pdmat_inv(G0.J);
if isequal(G0.h, 0)
    mv = 0;
else
    mv = pdmat_mvmul(Cm, G0.h);
end

G.tag = 'gaussd';
G.ty = 'm';
G.d = G0.d;
G.n = G0.n;
G.mu = mv;
G.C = Cm;
function G = cvt_m2c(G0)
% Convert an m-type (moment) gaussd struct to the c-type form:
%
%   J = inv(C);
%   h = J * mu;
%
Jm = pdmat_inv(G0.C);
if isequal(G0.mu, 0)
    hv = 0;
else
    hv = pdmat_mvmul(Jm, G0.mu);
end

G.tag = 'gaussd';
G.ty = 'c';
G.d = G0.d;
G.n = G0.n;
G.h = hv;
G.J = Jm;
| zzhangumd-smitoolbox | pmodels/gauss/gaussd.m | MATLAB | mit | 6,026 |
function demo_gmrf_sample(imsiz, w, n, intv)
% Demos the use of Gaussian MRF sampling
%
%   demo_gmrf_sample(imsiz, w, n, intv);
%
%       demonstrates the sampling of images from a Gaussian MRF.
%
%       Input:
%       - imsiz:    the image size, in form of [height, width]
%       - w:        the weights of the spatial links
%       - n:        the number of samples to generate
%       - intv:     the interval between two samples to be collected.
%
%   Created by Dahua Lin, on Dec 9, 2011
%

%% main

% model construction

disp('Constructing the MRF model ...');

imh = imsiz(1);
imw = imsiz(2);

r = 1;  % the neighborhood range
gr = gr_local([imh imw], r);
W = gr_wmat(gr, w);
G = agmrf(W, 0.1);  % obtain the G-MRF

% make the sampler

% partition the image lattice into bdim x bdim tiles, each tile being
% one block of the block-Gibbs sampler
bdim = 10;
y0 = 1:bdim:imh;
x0 = 1:bdim:imw;
y1 = [y0(2:end) - 1, imh];
x1 = [x0(2:end) - 1, imw];

d = imh * imw;
inds = reshape(1:d, imh, imw);

blocks = cell(numel(y0), numel(x0));
for j = 1 : numel(x0)
    for i = 1 : numel(y0)
        v = inds(y0(i):y1(i), x0(j):x1(j));
        blocks{i, j} = v(:);
    end
end
nblocks = numel(blocks);

sampler = gmrf_blk_gibbs(G, blocks);

% simulation

disp('Simulating the chain ...');

% each update call sweeps over all blocks intv times
s = repmat(1:nblocks, 1, intv);

figure;
x = randn(d, 1);
himg = imshow(get_vimage(x, imh, imw));
title('t = 0');

disp('Press any key to continue move ..');
pause;

for i = 1 : n
    x = sampler.update(x, s);
    vimg = get_vimage(x, imh, imw);
    
    set(himg, 'CData', vimg);
    title(sprintf('t = %d', i * intv));
    
    disp('Press any key to continue move ..');
    pause;
end
%% sub functions

function im = get_vimage(x, imh, imw)
% Convert the state vector x into a grayscale RGB uint8 image:
% value 0 maps to mid-gray, and values are clamped to [0, 1].

I = reshape(x, imh, imw) + 0.5;
I(I < 0) = 0;
I(I > 1) = 1;
im = repmat(im2uint8(I), [1 1 3]);
| zzhangumd-smitoolbox | pmodels/gauss/demo_gmrf_sample.m | MATLAB | mit | 1,764 |
function L = ppca_logpdf(M, X)
% Evaluate log pdf values at given samples
%
%   L = ppca_logpdf(M, X);
%       evaluates the log probability density values at the samples
%       given by X, with respect to the PPCA model M.
%
%       If X has n columns (each column is a sample), then L is a
%       vector of size 1 x n.
%
% Created by Dahua Lin, on Dec 27, 2011
%

%% main

% c0: d * log(2*pi) plus the model's stored log-determinant term M.ldc
c0 = M.d * log(2*pi) + M.ldc;
L = -0.5 * (c0 + ppca_sqmahdist(M, X));
| zzhangumd-smitoolbox | pmodels/gauss/ppca_logpdf.m | MATLAB | mit | 459 |
function Gs = gaussd_sub(G, idx)
% Get a subset of Gaussian models
%
%   Gs = gaussd_sub(G, idx);
%       Extracts the Gaussian models selected by the index (or index
%       vector) idx into a new gaussd struct of the same type.
%
% Created by Dahua Lin, on Dec 20, 2011
%

%% main

if G.ty == 'm'
    mu = G.mu(:, idx);
    Gs.tag = G.tag;
    Gs.ty = 'm';
    Gs.n = size(mu, 2);
    Gs.d = G.d;
    Gs.mu = mu;
    Gs.C = pick_mat(G.C, idx);
elseif G.ty == 'c'
    h = G.h(:, idx);
    Gs.tag = G.tag;
    Gs.ty = 'c';
    Gs.n = size(h, 2);
    Gs.d = G.d;
    Gs.h = h;
    Gs.J = pick_mat(G.J, idx);
end


%% auxiliary function

function M = pick_mat(M, idx)
% Select matrices by index; a single shared matrix is kept as-is.
if M.n ~= 1
    M = pdmat_pick(M, idx);
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_sub.m | MATLAB | mit | 729 |
function X = gaussd_sample(G, n, Z)
% Samples from (multivariate) Gaussian distributions
%
%   X = gaussd_sample(G);
%   X = gaussd_sample(G, n);
%       Draws n samples from the Gaussian distribution G (n defaults
%       to 1 when omitted).
%
%       Input arguments:
%       - G:        a gaussd struct with G.n == 1.
%       - n:        the number of samples to be acquired from the model.
%
%       Outputs:
%       - X:        a d x n matrix comprised of the generated samples
%                   as columns.
%
%   X = gaussd_sample(G, [], Z);
%
%       Transforms the samples drawn from a standard Gaussian
%       distribution, stored as columns of Z (a d x n matrix), into
%       samples from G.
%
%   History
%   -------
%       - Created by Dahua Lin, on Aug 17, 2011
%       - Modified by Dahua Lin, on Aug 25, 2011
%       - Modified by Dahua Lin, on Sep 27, 2011
%       - Modified by Dahua Lin, on Nov 30, 2011
%       - Modified by Dahua Lin, on Dec 5, 2011
%

%% verify input arguments

if ~(is_gaussd(G) && G.n == 1)
    error('gaussd_sample:invalidarg', ...
        'G should be a gaussd struct with G.n == 1.');
end

if nargin < 2
    n = 1;
end

%% main skeleton

d = G.d;

if isempty(n)
    % use the caller-supplied standard-normal samples
    if ~(isfloat(Z) && isreal(Z) && ndims(Z) == 2 && size(Z,1) == d)
        error('gaussd_sample:invalidarg', ...
            'Z should be a d x n matrix.');
    end
else
    Z = randn(d, n);
end

% dispatch on the parameterization of G
if G.ty == 'm'
    X = gsample_m(Z, G.mu, G.C);
else
    X = gsample_c(Z, G.h, G.J);
end
%% core functions

function X = gsample_m(X, mu, C)
% Transform standard-normal samples w.r.t. mean parameters (mu, C).

[d, n] = size(X);
cf = C.ty;
v = C.v;

if cf == 's' || cf == 'd'
    % scale by the standard deviation(s); v == 1 needs no scaling
    if ~isequal(v, 1)
        sd = sqrt(v);
        if isscalar(v) || n == 1
            X = X .* sd;
        else
            X = bsxfun(@times, X, sd);
        end
    end
elseif cf == 'f'
    % multiply by a Cholesky factor so that cov(X) == C.v
    L = chol(v, 'lower');
    X = L * X;
end

X = add_mu(d, n, X, mu);
X = add_mu(d, n, X, mu);
function X = gsample_c(X, h, J)
% Transform standard-normal samples w.r.t. canonical parameters (h, J).
%
% The underlying mean is mu = inv(J) * h, and the covariance is inv(J).

[d, n] = size(X);
ty = J.ty;
v = J.v;

if ty == 's' || ty == 'd'
    v = 1 ./ v;     % v now holds the variance(s)
    
    % bug fix: mu was previously computed only when v ~= 1, leaving
    % mu undefined (a runtime error) whenever J.v == 1
    if isequal(h, 0)
        mu = 0;
    else
        mu = h .* v;
    end
    
    % scale by the standard deviation(s); v == 1 needs no scaling
    if ~isequal(v, 1)
        if isscalar(v) || n == 1
            X = X .* sqrt(v);
        else
            X = bsxfun(@times, X, sqrt(v));
        end
    end
elseif ty == 'f'
    L = chol(v);    % upper factor: L' * L == J.v
    if isequal(h, 0)
        mu = 0;
        X = L \ X;
    else
        % solve for the transformed samples and the mean in one pass
        g = L' \ h;
        A = L \ [X g];
        X = A(:, 1:n);
        mu = A(:, n+1);
    end
end

X = add_mu(d, n, X, mu);


%% auxiliary functions

function X = add_mu(d, n, X, mu)
% Shift the samples by the mean vector mu (mu == 0 means zero-mean).

if ~isequal(mu, 0)
    if d == 1 || n == 1
        X = mu + X;
    else
        X = bsxfun(@plus, X, mu);
    end
end
| zzhangumd-smitoolbox | pmodels/gauss/gaussd_sample.m | MATLAB | mit | 2,827 |
function v = fmm_em_objective(pri, LL, w, c0, s)
%FMM_EM_OBJECTIVE Evaluate the objective of an FMM-EM solution
%
%   v = FMM_EM_OBJECTIVE(pri, LL, w, c0, s);
%
%       Evaluates the objective function value for EM-estimation of
%       a finite mixture model, summing: the log-prior of Pi, the
%       log-prior of the component parameters, the expected
%       log-likelihoods of the labels and the observations, and the
%       entropy of the posterior assignment.
%
% Created by Dahua Lin, on Feb 3, 2012
%

%% main

if ~isempty(w)
    w = w(:);
end

log_pi = log(s.Pi);
Q = s.Q;

% log-prior of the mixture weights Pi
if isequal(c0, 0)
    lpri_pi = 0;
else
    lpri_pi = c0 * sum(log_pi);
end

% log-prior of the component parameters
if isempty(pri)
    lpri_t = 0;
else
    lpri_t = sum(pri.logpdf(s.params));
end

% expected log-likelihood of the labels (Z)
llik_z = sum_w(log_pi' * Q, w);

% expected log-likelihood of the observations
llik_x = sum_w(sum(Q .* LL, 1), w);

% entropy of the posterior assignment
ent = sum_w(ddentropy(Q), w);

v = lpri_pi + lpri_t + llik_z + llik_x + ent;
%% auxiliary function

function v = sum_w(x, w)
% Sum the entries of the row vector x, weighted by w when non-empty.

if ~isempty(w)
    v = x * w;
else
    v = sum(x);
end
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_em_objective.m | MATLAB | mit | 924 |
function S = fmm_init(pri, gm, X, w, method, arg)
%FMM_INIT Initialize an FMM solution
%
%   S = FMM_INIT(pri, gm, X, w, 'params', A);
%
%       Initializes a finite mixture model solution with initial
%       parameters (given by A).
%
%       Other inputs:
%       - pri:      The prior object
%       - gm:       The generative model
%       - X:        The observations
%       - w:        The sample weights
%
%   S = FMM_INIT(pri, gm, X, w, 'labels', z);
%
%       Initializes a finite mixture model with initial labels.
%
%   S = FMM_INIT(pri, gm, X, w, 'Q', Q);
%
%       Initializes a finite mixture model with the initial soft
%       assignment matrix.
%
%   S = FMM_INIT(pri, gm, X, w, 'rand', K);
%
%       Randomly initializes a finite mixture model with K components.
%
% Created by Dahua Lin, on Dec 27, 2011
%

%% verify arguments

if ~(isfloat(X) && isreal(X) && ndims(X) == 2)
    error('fmm_init:invalidarg', ...
        'The observation matrix X should be a real matrix.');
end
n = size(X, 2);

if isempty(w)
    w = [];
else
    if ~(isfloat(w) && isreal(w) && isvector(w) && numel(w) == n)
        error('fmm_init:invalidarg', ...
            'w should be a real vector of length n.');
    end
    w = w(:);   % always pass the weights downstream as a column
end

if ~ischar(method)
    error('fmm_init:invalidarg', 'The method must be a string.');
end

%% main delegate

switch lower(method)
    case 'params'
        [K, params] = fmm_init_params(pri, gm, X, w, arg);
    case 'labels'
        [K, params] = fmm_init_labels(pri, gm, X, w, arg);
    case 'q'
        [K, params] = fmm_init_Q(pri, gm, X, w, arg);
    case 'rand'
        [K, params] = fmm_init_rand(pri, gm, X, w, arg);
    otherwise
        error('fmm_init:invalidarg', ...
            'Unknown method name %s', method);
end

% assemble the solution with uniform initial mixture weights
S.K = K;
S.Pi = constmat(K, 1, 1.0/K);
S.params = params;
%% core functions

function [K, params] = fmm_init_params(pri, gm, X, w, params) %#ok<INUSL>
% Initialization from explicitly given component parameters.
K = gm.query_params(params);


function [K, params] = fmm_init_labels(pri, gm, X, w, z)
% Initialization from an initial labeling z (values in 1:K).

if ~(isnumeric(z) && ~issparse(z) && isvector(z) && isreal(z))
    error('fmm_init:invalidarg', 'z should be a real vector.');
end
if size(z, 1) > 1
    z = z.';
end

K = max(z);
params = fmm_est_params(pri, gm, X, w, {K, z});


function [K, params] = fmm_init_Q(pri, gm, X, w, Q)
% Initialization from a soft assignment matrix Q (K x n).

if ~(isfloat(Q) && isreal(Q) && ndims(Q) == 2)
    error('fmm_init:invalidarg', 'Q should be a real matrix.');
end

K = size(Q, 1);
params = fmm_est_params(pri, gm, X, w, Q);


function [K, params] = fmm_init_rand(pri, gm, X, w, K)
% Random initialization: evenly partition a shuffled sample order
% into K groups, then estimate the parameters from that labeling.

n = gm.query_obs(X);
if ~(isnumeric(K) && isscalar(K) && K == fix(K) && K >= 1 && K <= n)
    error('fmm_init:invalidarg', 'K should be a positive integer with K <= n.');
end

z = ceil((K/n) * (1:n));
z(end) = K;     % guard against round-off at the last entry

% random shuffle
[~, ord] = sort(rand(1, n));
z = z(ord);

params = fmm_est_params(pri, gm, X, w, {K, z});
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_init.m | MATLAB | mit | 2,910 |
classdef fmm_std < smi_state
    % The class that implements standard finite mixture model
    %
    %   History
    %   -------
    %       - Created by Dahua Lin, on Sep 27, 2011
    %       - Modified by Dahua Lin, on Dec 26, 2011
    %
    
    %% Properties
    
    % configurations
    
    properties(GetAccess='public', SetAccess='private')
        gmodel;     % The underlying generative model
        prior;      % The parameter prior
        pricount;   % The prior count of each component
        sampling;   % whether it uses sampling (Gibbs) instead of E-M
    end
    
    % observations
    
    properties(GetAccess='public', SetAccess='private')
        obs;        % the observations
        nobs;       % the number of observations (n)
        weights;    % the weights of observations (empty or n x 1)
    end
    
    % run-time state
    
    properties
        K;      % the number of mixture components
        sol;    % the current solution (a struct with fields)
        Llik;   % the log-likelihood w.r.t. all components [K x n]
    end
    
    %% Construction
    
    methods
        
        function obj = fmm_std(method, gm, pri, c0)
            % Create a standard finite mixture model estimator
            %
            %   obj = fmm_std(method, gm, pri);
            %   obj = fmm_std(method, gm, pri, c0);
            %
            %       Creates a standard finite mixture model estimator
            %
            %       Inputs:
            %       - method:   either of the following method names:
            %                   - 'em':     standard E-M method
            %                   - 'gibbs':  Gibbs sampling
            %
            %       - gm:       the generative model object
            %
            %       - pri:      the prior model object or empty
            %
            %       - c0:       the prior count of each component
            %                   (if omitted, it is set to zero).
            %
            
            % verify input
            
            if ~ischar(method)
                error('fmm_std:invalidarg', 'method should be a char string.');
            end
            
            switch lower(method)
                case 'em'
                    samp = false;
                case 'gibbs'
                    samp = true;
                otherwise
                    error('fmm_std:invalidarg', ...
                        'Invalid method name %s', method);
            end
            
            if ~isa(gm, 'genmodel_base')
                error('fmm_std:invalidarg', ...
                    'gm should be an instance of a sub-class of genmodel_base.');
            end
            
            % Gibbs sampling requires a prior to draw parameters from;
            % E-M can run without one (plain maximum likelihood)
            if isempty(pri)
                if samp
                    error('fmm_std:invalidarg', ...
                        'pri should be provided when using sampling.');
                end
            else
                if ~isa(pri, 'prior_base')
                    error('fmm_std:invalidarg', ...
                        'pri should be an instance of a sub-class of prior_base.');
                end
            end
            
            if nargin < 4
                c0 = 0;
            else
                if ~(isfloat(c0) && isreal(c0) && isscalar(c0) && c0 >= 0)
                    error('fmm_std:invalidarg', ...
                        'c0 should be a nonnegative real value scalar.');
                end
            end
            
            % set fields
            
            obj.gmodel = gm;
            obj.prior = pri;
            obj.pricount = double(c0);
            obj.sampling = samp;
        end
        
    end
    
    %% Interface methods
    
    methods
        
        function obj = initialize(obj, X, w, method, arg)
            % Initialize the FMM estimator state
            %
            %   obj = obj.initialize(X, w, 'params', A);
            %       initialize with given initial parameters
            %
            %   obj = obj.initialize(X, w, 'labels', z);
            %       initialize with given initial labels
            %
            %   obj = obj.initialize(X, w, 'Q', Q);
            %       initialize with soft assignment matrix
            %
            %   obj = obj.initialize(X, w, 'rand', K);
            %       initialize randomly with K component mixtures.
            %
            %   Here, X is a sample matrix of size d x n, and
            %   w is either empty (all samples have a unit weight), or
            %   a vector of length n.
            %
            
            gm = obj.gmodel;
            s = fmm_init(obj.prior, gm, X, w, method, arg);
            
            % the sampling mode tracks hard labels (z); the E-M mode
            % tracks the soft assignment matrix (Q)
            if obj.sampling
                s.z = [];
            else
                s.Q = [];
            end
            
            obj.obs = X;
            obj.nobs = gm.query_obs(X);
            if ~isempty(w)
                if size(w, 2) > 1; w = w.'; end
                obj.weights = w;
            end
            
            obj.K = s.K;
            obj.sol = s;
            % per-component log-likelihoods of all observations [K x n]
            obj.Llik = gm.loglik(s.params, X);
        end
        
        
        function obj = update(obj)
            % Update the state
            %
            %   obj.update();
            %       updates the object state (perform one-step of E-M
            %       optimization or one move of Gibbs sampling)
            %
            
            pri = obj.prior;
            gm = obj.gmodel;
            X = obj.obs;
            w = obj.weights;
            c0 = obj.pricount;
            
            % dispatch to the update rule chosen at construction
            if obj.sampling
                [obj.sol, obj.Llik] = fmm_gibbs_update( ...
                    pri, gm, X, w, c0, obj.sol, obj.Llik);
            else
                [obj.sol, obj.Llik] = fmm_em_update(...
                    pri, gm, X, w, c0, obj.sol, obj.Llik);
            end
        end
        
        
        function s = output(obj)
            % Outputs a sample
            %
            %   s = obj.output();
            %
            
            s = obj.sol;
        end
        
        
        function b = is_ready(obj)
            % Tests whether the object is ready for running
            %
            %   b = obj.is_ready();
            %
            %   The object is ready once initialize() has set a solution.
            
            b = ~isempty(obj.sol);
        end
        
        
        function s = merge_samples(obj, samples)
            % Merges multiple samples into an optimal one
            %
            %   s = obj.merge_samples(samples);
            %
            
            s = fmm_merge_samples(obj.prior, obj.gmodel, obj.pricount, ...
                obj.obs, obj.weights, samples);
        end
        
    end
    
    
    %% Objective evaluation
    
    methods
        
        function objv = evaluate_objv(obj)
            % Evaluate the objective function of the current state
            %
            %   objv = obj.evaluate_objv();
            %
            %   Only meaningful for the E-M mode; raises an error in
            %   the sampling mode.
            
            if ~obj.sampling
                objv = fmm_em_objective(...
                    obj.prior, obj.Llik, obj.weights, obj.pricount, obj.sol);
            else
                error('fmm_std:invalidarg', ...
                    'evaluate_objv can only be invoked in non-sampling mode.');
            end
        end
        
    end
    
end
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_std.m | MATLAB | mit | 7,521 |
function R = glmm_demo(method, K)
% A script to demo the inference over mixture of Gaussian linear models
%
%   R = glmm_demo(method, K);
%
%       Input arguments:
%       - method:   either of the following strings
%                   - 'em':     expectation-maximization
%                   - 'gibbs':  Gibbs sampling
%
%       - K:        the number of mixture components
%
%   History
%   -------
%       - Created by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~ischar(method)
    error('glmm_demo:invalidarg', 'The method should be a string.');
end
method = lower(method);

if ~(isnumeric(K) && isscalar(K) && K >= 2)
    error('glmm_demo:invalidarg', ...
        'K should be a positive integer with K >= 2.');
end

%% prepare data

n = 1000;   % # samples / class

% draw K component centers from a broad zero-mean 2D Gaussian prior,
% then draw n noisy observations around each center
pri_sigma = 10 * sqrt(K);
g0 = gaussd('m', 0, pdmat('s', 2, pri_sigma));
Cx = pdmat('s', 2, 1);

U0 = gaussd_sample(g0, K);
Xs = cell(1, K);
for k = 1 : K
    Xs{k} = gaussd_sample(gaussd('m', U0(:,k), Cx), n);
end
X = [Xs{:}];

%% fit models

% build the mixture estimator over a Gaussian linear generative model
Jx = pdmat_inv(Cx);
glm = gauss_lingen(Jx);
gpri = gausspri(g0);
pri_count = 1;

state = fmm_std(method, glm, gpri, pri_count);
w = [];
state = state.initialize(X, w, 'rand', K);

switch method
    case 'em'
        opts = varinfer_options([], ...
            'maxiters', 200, 'tol', 1e-6, 'display', 'eval');
        R = varinfer_drive(state, opts);
        S = R.sol;
        ss = [];
    case 'gibbs'
        opts = mcmc_options([], ...
            'burnin', 10, 'nsamples', 50, 'ips', 5, 'display', 'sample');
        ss = mcmc_drive(state, opts);
        S = state.merge_samples(ss);
    otherwise
        error('The method %s is not supported', method);
end

%% Visualize

U = S.params;

% recover hard labels: argmax of Q for E-M, stored z for Gibbs
switch method
    case 'em'
        [~, Zm] = max(S.Q, [], 1);
    case 'gibbs'
        Zm = S.z;
end

visualize_results(K, X, U, Cx, Zm, ss);
%% Sub functions
function visualize_results(K, X, U, Cx, Zm, ss)
% Plot the data, the estimated component Gaussians, and the grouping.
%
%   Left pane:  all samples with 2-sigma ellipses of the K estimated
%               Gaussians; for Gibbs runs, the component means of each
%               collected sample are additionally marked with 'm+'.
%   Right pane: samples colored by their assigned component Zm.

Gs = gaussd('m', U, Cx);
gm = intgroup(K, Zm);   % gm{k}: indices of samples assigned to component k

hfig = figure;
set(hfig, 'Name', 'GMM Demo');
set(hfig, 'Position', [0 0 1024, 512]);
movegui(hfig, 'center');

% left pane: samples + estimated model ellipses
subplot('Position', [0.05, 0.1, 0.43, 0.8]);

plot(X(1,:), X(2,:), 'b.', 'MarkerSize', 5);

hold on;
gaussd_ellipse(Gs, 2, 500, 'r-', 'LineWidth', 2);
axis equal;

if ~isempty(ss)     % ss is only non-empty for Gibbs sampling runs
    for i = 1 : numel(ss)
        s = ss{i};
        u = s.params;
        hold on;
        plot(u(1, :), u(2, :), 'm+');
    end
end

% right pane: samples colored by component assignment
subplot('Position', [0.52, 0.1, 0.43, 0.8]);

colors = {'r', 'g', 'b', 'm', 'c', 'k'};
for k = 1 : K
    cr = colors{mod(k-1, numel(colors)) + 1};   % cycle colors when K > 6
    hold on;
    plot(X(1, gm{k}), X(2, gm{k}), [cr '.'], 'MarkerSize', 8);
end

axis equal;
| zzhangumd-smitoolbox | pmodels/mixmodel/glmm_demo.m | MATLAB | mit | 2,687 |
function [S, L] = fmm_em_update(pri, gm, X, w, c0, S, L)
%FMM_EM_UPDATE Expectation-Maximization Update for Finite Mixture Model
%
%   [S, L] = FMM_EM_UPDATE(pri, gm, X, w, c0, S, L);
%
%   Updates finite mixture model solution as an E-M iteration.
%
%   Input arguments:
%   - pri:      The prior object
%   - gm:       The generative model object
%   - X:        The samples
%   - w:        The sample weights
%   - c0:       The prior count of components
%
%   The arguments to be updated:
%   - S:        The finite-mixture model solution.
%   - L:        The log-likelihood matrix.
%
%   Remarks
%   -------
%       - This function is to help the implementation of various mixture
%         model estimation, and is not supposed to be directly called by
%         end users.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% main

% E-step (re-estimate marginal probabilities)

% posterior component probabilities given current Pi and log-likelihoods
Q = ddposterior(S.Pi, L, 'LL');
S.Q = Q;

% M-step (re-estimate component parameters and Pi)

% estimate component parameters

params = fmm_est_params(pri, gm, X, w, Q);
S.params = params;
L = gm.loglik(params, X);   % refresh log-likelihoods under the new parameters

% estimate Pi
S.Pi = ddestimate(Q, w, c0);
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_em_update.m | MATLAB | mit | 1,182 |
function s = fmm_merge_samples(pri, gm, c0, X, w, ss)
%FMM_MERGE_SAMPLES Merge multiple FMM solutions into a single one
%
%   s = FMM_MERGE_SAMPLES(pri, gm, c0, X, w, ss);
%
%   Creates a single FMM that best represents all input FMM samples.
%
%   Input arguments:
%   - pri:      the prior object
%   - gm:       the generative model
%   - c0:       the prior count of components
%   - X:        the observations
%   - w:        the observation weights
%   - ss:       the cell array of FMM samples
%
%   Output arguments:
%   - s:        the merged FMM sample.
%
%   Created by Dahua Lin, on Feb 3, 2011
%

%% verify input arguments

if ~iscell(ss)
    error('fmm_merge_samples:invalidarg', ...
        'ss should be a cell array of FMM samples.');
end

%% main

K = ss{1}.K;
n = numel(ss);

% get Z through majority voting

Zm = cell(n, 1);
for i = 1 : n
    cs = ss{i};
    if cs.K ~= K
        error('fmm_merge_samples:invalidarg', ...
            'The K values are inconsistent.');
    end
    Zm{i} = cs.z;
end
Zm = vertcat(Zm{:});    % one row of labels per collected sample
z = mode(Zm, 1);        % per-observation majority vote across samples

% re-estimate parameters

params = fmm_est_params(pri, gm, X, w, {K, z});
Pi = ddestimate({K, z}, w, c0);

% make merged sample

s.K = K;
s.Pi = Pi;
s.params = params;
s.z = z;
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_merge_samples.m | MATLAB | mit | 1,261 |
function params = fmm_est_params(pri, gm, X, w, Z)
% Estimate mixture component parameters given a sample assignment.
%
%   params = fmm_est_params(pri, gm, X, w, Z);
%
%   Input arguments:
%   - pri:      the prior model of parameters (may be empty, in which
%               case maximum-likelihood estimation is performed)
%   - gm:       the generative model of observations
%   - X:        the observations
%   - w:        the weights of observations (may be empty)
%   - Z:        the sample assignment, either a K x n soft assignment
%               matrix, or a cell {K, label_vector}.
%
%   Remarks
%   -------
%       - This is an internal helper for mixture model estimation and is
%         not supposed to be directly called by end users.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% main

% build the n x K weighting matrix from the assignment
if isnumeric(Z)
    W = Z.';
    if ~isempty(w)
        W = bsxfun(@times, W, w);   % fold in per-sample weights
    end
else
    W = l2mat(Z{1}, Z{2}(:), 1, 'sparse');  % hard labels -> sparse indicator
end

% MLE when no prior is supplied; otherwise MAP via captured statistics
if isempty(pri)
    params = gm.mle(X, W);
else
    params = pri.mapest(gm.capture(X, W));
end
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_est_params.m | MATLAB | mit | 1,193 |
function R = gmm_demo(cf, op)
% A script to demo the inference over Gaussian mixture model
%
%   R = gmm_demo(cf);
%
%       Runs a Gaussian mixture model demo using the specified covariance
%       matrix form, which can be either 's' (scale form),
%       'd' (diagonal form), or 'f' (full matrix form).
%
%       The output is the struct that captures inference result.
%
%   R = gmm_demo(cf, 'tied-cov');
%
%       Runs the demo under the setting that the covariance matrix
%       is shared across all mixture components.
%
%   History
%   -------
%     - Created by Dahua Lin, on Nov 14, 2010
%     - Modified by Dahua Lin, on Aug 31, 2011
%     - Modified by Dahua Lin, on Sep 28, 2011
%     - Modified by Dahua Lin, on Dec 27, 2011
%

%% verify input

if ~(ischar(cf) && isscalar(cf) && (cf == 's' || cf == 'd' || cf == 'f'))
    error('gmm_demo:invalidarg', 'Invalid cf.');
end

if nargin < 2
    c_tied = false;
    mitrs = 200;
else
    if ~(ischar(op) && strcmpi(op, 'tied-cov'))
        error('gmm_demo:invalidarg', 'The second argument is invalid.');
    end
    c_tied = true;
    mitrs = 1600;   % tied-covariance fitting needs more iterations
end

%% generate data

d = 2;
K = 3;
n = 1000;       % #samples / class
pri_sigma = 20;

% draw K component means from an isotropic prior
gpri = gaussd('m', 0, pdmat('s', 2, pri_sigma));
U0 = gaussd_sample(gpri, K);

Xs = cell(1, K);

if c_tied
    % one covariance shared by all components
    Cx = rand_pdmat(cf, d, 1, [0.5 1.5]);
    for k = 1 : K
        gx = gaussd('m', U0(:,k), Cx);
        Xs{k} = gaussd_sample(gx, n);
    end
else
    % an independent covariance per component
    for k = 1 : K
        Cx = rand_pdmat(cf, d, 1, [0.5, 1.5]);
        gx = gaussd('m', U0(:,k), Cx);
        Xs{k} = gaussd_sample(gx, n);
    end
end

X = [Xs{:}];

%% Run estimation

w = [];
R = gmm_fit(X, w, K, ...
    'cov_form', cf, 'pricount', 0, 'tied_cov', c_tied, ...
    'maxiters', mitrs, 'tol', 1e-6, 'display', 'eval');

%% Visualize

% hard assignment: the component with the highest posterior probability
[~, Zm] = max(R.Q, [], 1);
visualize_results(K, X, R.G, Zm);
%% Sub functions
function visualize_results(K, X, Gs, Zm)
% Plot samples with the fitted Gaussian ellipses (left pane) and the
% samples colored by hard component assignment Zm (right pane).

gm = intgroup(K, Zm);   % gm{k}: indices of samples assigned to component k

hfig = figure;
set(hfig, 'Name', 'GMM Demo');
set(hfig, 'Position', [0 0 1024, 512]);
movegui(hfig, 'center');

% left pane: samples + 2-sigma ellipses of the fitted Gaussians
subplot('Position', [0.05, 0.1, 0.43, 0.8]);

plot(X(1,:), X(2,:), 'b.', 'MarkerSize', 5);

hold on;
gaussd_ellipse(Gs, 2, 500, 'r-', 'LineWidth', 2);
axis equal;

% right pane: samples colored by component assignment
subplot('Position', [0.52, 0.1, 0.43, 0.8]);

colors = {'r', 'g', 'b', 'm', 'c', 'k'};
for k = 1 : K
    cr = colors{mod(k-1, numel(colors)) + 1};   % cycle colors when K > 6
    hold on;
    plot(X(1, gm{k}), X(2, gm{k}), [cr '.'], 'MarkerSize', 8);
end

axis equal;
| zzhangumd-smitoolbox | pmodels/mixmodel/gmm_demo.m | MATLAB | mit | 2,542 |
function [S, L] = fmm_gibbs_update(pri, gm, X, w, c0, S, L)
%FMM_GIBBS_UPDATE Gibbs Sampling Update for Finite Mixture Model
%
%   [S, L] = FMM_GIBBS_UPDATE(pri, gm, X, w, c0, S, L);
%
%   Updates finite mixture model solution as a Gibbs sampling iteration.
%
%   Input arguments:
%   - pri:      The prior object
%   - gm:       The generative model object
%   - X:        The samples
%   - w:        The sample weights
%   - c0:       The prior count of components
%
%   The arguments to be updated:
%   - S:        The finite-mixture model solution.
%   - L:        The log-likelihood matrix.
%
%   Remarks
%   -------
%       - This function is to help the implementation of various mixture
%         model estimation, and is not supposed to be directly called by
%         end users.
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% main

% E-step (re-sampling labels)

% draw one label per sample from the posterior over components
Q = ddposterior(S.Pi, L, 'LL');
z = ddsample(Q, 1);
S.z = z;

% M-step (re-estimate component parameters and Pi)

% re-sample parameters

K = S.K;
grps = intgroup(K, z);  % grps{k}: indices of samples currently labeled k

params = cell(1, K);
for k = 1 : K
    cap = gm.capture(X, w, grps{k});
    params{k} = pri.pos_sample(cap, 1);     % draw from posterior of component k
end
params = gm.combine_params(params{:});

S.params = params;
L = gm.loglik(params, X);

% re-sample Pi

% H: (weighted) count of samples per component
if isempty(w)
    H = intcount(K, z).';
else
    H = aggreg(w, K, z.', 'sum');
end
% draw Pi from its Dirichlet posterior; c0 acts as a per-component
% pseudo-count (concentration parameter c0 + 1)
S.Pi = dird_sample(H + (c0 + 1), 1);
| zzhangumd-smitoolbox | pmodels/mixmodel/fmm_gibbs_update.m | MATLAB | mit | 1,415 |
function [R, state] = gmm_fit(X, w, K, varargin)
%GMM_FIT Fits a Gaussian Mixture Model
%
%   R = gmm_fit(X, [], K, ...);
%   R = gmm_fit(X, w, K, ...);
%
%   [R, state] = gmm_fit( ... );
%
%       Fits a Gaussian mixture model to a (weighted) set of data.
%
%       Suppose there are n samples on a d dimensional space.
%
%       Input arguments:
%       - X:        the data matrix [d x n]
%       - w:        the sample weights, which can be either empty or
%                   a row vector of 1 x n.
%       - K:        the number of mixture components (K >= 2).
%
%       Output arguments:
%       - R:        a struct with the following fields:
%                   - K:        the number of mixture components
%                   - Pi:       the prior distribution over mixture components
%                   - G:        the gaussd struct with G.d == d and G.n == K
%                   - Q:        the soft assignment matrix [K x n]
%
%       - state:    the fmm_std state object
%
%       One can further specify the following options in form of name
%       value list:
%
%       - 'cov_form':   the form of covariance:
%                       's':    isotropic covariance
%                       'd':    diagonal covariance
%                       'f':    full form covariance
%                       (default = 'f')
%
%       - 'initL':      the initial assignment, given by a label vector
%                       of size 1 x n. (default = [], indicating that
%                       a random initialization will be used).
%
%       - 'maxiters':   the maximum number of iterations in E-M
%                       (default = 100)
%
%       - 'tol':        the tolerance of objective change upon convergence
%                       (default = 1e-6)
%
%       - 'display':    the level of displaying:
%                       'off':      no display
%                       'final':    display at the end
%                       'stage':    display per stage
%                       'eval':     display per objective evaluation
%                       'iter':     display per iteration
%                       (default = 'off')
%
%       - 'tied_cov':   whether to tie covariance across components
%                       (default = false)
%
%       - 'pricount':   the prior count of each component (default = 0)
%
%
%   Created by Dahua Lin, on Dec 27, 2011
%

%% verify input arguments

if ~(isfloat(X) && isreal(X) && ndims(X) == 2)
    error('gmm_fit:invalidarg', 'X should be a real matrix.');
end
[d, n] = size(X);

if ~isempty(w)
    if ~(isfloat(w) && isreal(w) && isvector(w) && numel(w) == n)
        error('gmm_fit:invalidarg', ...
            'w should be either empty or a real vector of length n.');
    end
    if size(w, 2) > 1
        w = w.';    % use column orientation internally
    end
else
    w = [];
end

if ~(isnumeric(K) && isscalar(K) && K == fix(K) && K >= 2)
    error('gmm_fit:invalidarg', ...
        'K should be a positive integer with K >= 2.');
end

opts = parse_options(n, K, varargin);

%% main

% construct the Gaussian generative model with the requested covariance form
if ~opts.tied_cov
    gm = gaussgm(d, opts.cov_form);
else
    gm = gaussgm(d, opts.cov_form, 'tied-cov');
end

gs = fmm_std('em', gm, [], opts.pricount);

if isempty(opts.initL)
    gs = gs.initialize(X, w, 'rand', K);
else
    gs = gs.initialize(X, w, 'labels', opts.initL);
end

vdopts = varinfer_options([], ...
    'maxiters', opts.maxiters, 'tol', opts.tol, 'display', opts.display);

ret = varinfer_drive(gs, vdopts);

% output

sol = ret.sol;

R.K = sol.K;
R.Pi = sol.Pi;
R.G = sol.params;
R.Q = sol.Q;

state = ret.state;
%% option parsing
function opts = parse_options(n, K, nvlist)
% Parse the name/value option list for gmm_fit and apply defaults.
%
%   n:      the number of samples (used to validate/construct initL)
%   K:      the number of mixture components

opts.cov_form = 'f';
opts.initL = [];
opts.maxiters = 100;
opts.tol = 1e-6;
opts.pricount = 0;
opts.tied_cov = false;
opts.display = 'off';

if ~isempty(nvlist)
    ns = nvlist(1:2:end);
    vs = nvlist(2:2:end);
    
    if ~(numel(ns) == numel(vs) && iscellstr(ns))
        error('gmm_fit:invalidarg', 'Invalid name value list.');
    end
    
    for i = 1 : numel(ns)
        cn = ns{i};
        cv = vs{i};
        
        switch lower(cn)
            case 'cov_form'
                if ~(ischar(cv) && isscalar(cv) && any(cv == 'sdf'))
                    error('gmm_fit:invalidarg', ...
                        'cov_form should be either ''s'', ''d'', or ''f''.');
                end
                opts.cov_form = cv;
                
            case 'initl'
                if ~isempty(cv)
                    if ~(isnumeric(cv) && isreal(cv) && isequal(size(cv), [1, n]))
                        error('gmm_fit:invalidarg', ...
                            'initL should be a numeric vector of size 1 x n.');
                    end
                end
                opts.initL = cv;
                
            case 'maxiters'
                if ~(isnumeric(cv) && isscalar(cv) && cv >= 1 && cv == fix(cv))
                    error('gmm_fit:invalidarg', ...
                        'maxiter should be a positive integer.');
                end
                opts.maxiters = cv;
                
            case 'tol'
                if ~(isfloat(cv) && isreal(cv) && isscalar(cv) && cv > 0)
                    error('gmm_fit:invalidarg', ...
                        'tol should be a real positive scalar.');
                end
                opts.tol = cv;
                
            case 'pricount'
                if ~(isfloat(cv) && isreal(cv) && isscalar(cv) && cv >= 0)
                    error('gmm_fit:invalidarg', ...
                        'pricount should be a real non-negative scalar.');
                end
                opts.pricount = cv;
                
            case 'tied_cov'
                if ~(islogical(cv) && isscalar(cv))
                    error('gmm_fit:invalidarg', ...
                        'tied_cov should be a logical scalar.');
                end
                opts.tied_cov = cv;
                
            case 'display'
                if ~ischar(cv)
                    error('gmm_fit:invalidarg', ...
                        'display should be a string.');
                end
                opts.display = cv;
                
            otherwise
                error('gmm_fit:invalidarg', 'Invalid option name %s', cn);
        end
    end
end

% NOTE(review): when initL is not given, it is pre-filled with random
% labels here; as a consequence the isempty(opts.initL) branch in
% gmm_fit (which would use the fmm 'rand' initialization) is never
% taken — confirm this shadowing is intended.
if isempty(opts.initL)
    opts.initL = randi(K, 1, n);
end
| zzhangumd-smitoolbox | pmodels/mixmodel/gmm_fit.m | MATLAB | mit | 6,333 |
function gsm_demo(method)
% A simple demo of Gaussian scale mixture fitting
%
%   gsm_demo(method);
%
%       Here method can be either 'ip' (interior-point) or 'em'
%       (expectation-maximization); it is forwarded to gsm_fit.
%
%   Created by Dahua Lin, on Nov 2, 2011
%

%% configure model

% ground-truth GSM: K components with these scales and weights
K = 4;
sigma = [1, 4, 12, 36].';
p = [0.4, 0.2, 0.25, 0.15].';
ext = 100;      % plotting range: [-ext, ext]

%% generate data

n = 1e5;

% draw component labels, then draw each sample from its component
z = ddsample(p, n);
grps = intgroup(K, z);

x = zeros(1, n);
for k = 1 : K
    g = grps{k};
    x(g) = randn(1, numel(g)) * sigma(k);
end

xi = linspace(-ext, ext, 5000);

%% model fitting

tol = 1e-2;
tolx = 1e-4;

[p_e, s_e] = gsm_fit(x, [], K, 'method', method, 'tol', tol, 'tolx', tolx, ...
    'display', 'iter');

% sort components by scale for a stable comparison with the ground truth
[s_e, sc] = sort(s_e, 1, 'ascend');
p_e = p_e(sc);

display(p_e);
display(s_e);

%% plotting

pdf_gt = gsm_pdf(xi, p, sigma);
pdf_es = gsm_pdf(xi, p_e, s_e);

figure;
semilogy(xi, pdf_gt, xi, pdf_es);
legend({'ground-truth', 'estimated'});

xlabel('x');
ylabel('pdf (in log-scale)');
function P = gsm_pdf(x, p, sigma)
% Evaluate the pdf of a Gaussian scale mixture at the points in x.
%
%   x:      1 x n row of evaluation points
%   p:      K x 1 component weights
%   sigma:  K x 1 component scales (standard deviations)
%
%   P:      1 x n row of pdf values

beta = 1 ./ (sigma .^ 2);               % per-component precisions
coeffs = p .* sqrt(beta / (2 * pi));    % p_k / (sigma_k * sqrt(2*pi))
P = coeffs' * exp((-0.5 * beta) * (x .^ 2));
| zzhangumd-smitoolbox | pmodels/mixmodel/gsm_demo.m | MATLAB | mit | 1,051 |
function [p, s, converged] = gsm_fit(x, w, K, varargin)
% Fits a Gaussian scale mixture to data
%
%   The pdf of a Gaussian scale mixture (GSM) model is formulated as
%
%       f(x) = \sum_{k=1}^K p_k/sqrt(2 *pi * s_k^2) *
%              exp(- x^2 / (2 * s_k^2) );
%
%   Here, p_1, ..., p_K are the prior weights of the components, and
%   s_1, ..., s_K are the scales of the components. Both are parameters
%   to be estimated.
%
%   [p, s] = gsm_fit(x, [], K, ...);
%   [p, s] = gsm_fit(x, w, K, ...);
%
%       Estimates the parameters of a Gaussian scale mixture, by fitting
%       to the data.
%
%       Input arguments:
%       - x:        a vector of observed values.
%       - w:        the weights of the values. If omitted, each sample
%                   has a weight 1.
%       - K:        the number of mixture components.
%
%       One can specify other options to control the estimation, via
%       name/value pairs:
%
%       - p0:       the initial guess of the component weights p
%
%       - s0:       the initial guess of the component scales s
%
%       - method:   which kind of method to use (default = 'ip')
%                   'ip':   interior-point algorithm
%                   'em':   expectation-maximization algorithm
%
%       - maxiter:  the maximum number of iterations
%                   default = 100
%
%       - iterlen:  the number of E-M steps in an iteration
%                   (default = 5)
%
%       - tol:      the tolerance of change of objective at convergence
%                   (default = 1e-6)
%
%       - tolx:     the tolerance of change of solution at convergence
%                   (default = 1e-6)
%
%       - display:  the level of display, which can be 'none', 'notify',
%                   'final', and 'iter'.
%
%       If p0 and s0 are not given, then this function uses its own
%       method to give an initial guess.
%
%   [p, s, converged] = gsm_fit( ... );
%
%       returns whether the optimization procedure converges within
%       tolerance.
%
%
%   Created by Dahua Lin, on Nov 2, 2011
%

%% verify input arguments

if ~(isfloat(x) && isvector(x) && isreal(x) && ~issparse(x))
    error('gsm_fit:invalidarg', 'x should be a non-sparse real vector.');
end
n = numel(x);
if size(x, 1) > 1; x = x .'; end    % turn x into a row vector

if ~isempty(w)
    if ~(isfloat(w) && isvector(w) && isreal(w) && ~issparse(w))
        error('gsm_fit:invalidarg', 'w should be a non-sparse real vector.');
    end
    if numel(w) ~= n
        error('gsm_fit:invalidarg', 'The size of w is not consistent with x.');
    end
    if size(w, 2) > 1; w = w.'; end  % turn w into a column vector
else
    w = ones(n, 1);
end

if ~(isscalar(K) && isnumeric(K) && K == fix(K) && K >= 1)
    error('gsm_fit:invalidarg', 'K should be a positive integer number.');
end

% check options

[p0,s0,method,maxiter,iterlen,tol,tolx,dispstr] = check_options(varargin);

%% main skeleton

% initialization

% the solvers work with squared values and precisions beta = 1 / s^2
x2 = x.^2;

if isempty(p0)
    p0 = constmat(K, 1, 1.0 / double(K));   % uniform initial weights
else
    p0 = p0 / sum(p0);  % ensure that p0 sums to one
end

if isempty(s0)
    beta0 = init_beta0(x2, p0);
else
    beta0 = 1 ./ (s0.^2);
end

switch method
    case 'ip'
        [p, beta, converged] = fit_ip(x2, w, p0, beta0, ...
            maxiter, tol, tolx, dispstr);
    case 'em'
        [p, beta, converged] = fit_em(x2, w, p0, beta0, ...
            maxiter, iterlen, tol, tolx, dispstr);
end

% convert the estimated precisions back to scales
s = sqrt(1 ./ beta);
%% numeric solver
function [p, beta, converged] = fit_ip(x2, w, p0, beta0, maxiter, tol, tolx, dispstr)
% Solve the GSM fitting problem directly with fmincon (interior-point).
%
%   The solution vector packs [p; beta]; p is constrained to the simplex
%   (sum to one, non-negative) and beta to be non-negative.

K = numel(beta0);
objf = @(y) direct_objfun(y, x2, w, K);

% equality constraint: the first K entries (weights) must sum to one
Aeq = [ones(1, K), zeros(1, K)];
beq = 1;
lb = zeros(2 * K, 1);

ipopts = optimset('Algorithm', 'interior-point', ...
    'MaxIter', maxiter, ...
    'TolFun', tol, ...
    'TolX', tolx, ...
    'Display', dispstr);

[y, ~, eflag] = fmincon(objf, [p0; beta0], [], [], Aeq, beq, lb, [], [], ipopts);

p = y(1:K);
beta = y(K+1:end);
converged = (eflag > 0);
% objective function
function [v, g] = direct_objfun(sol, x2, w, K)
% Negative (unnormalized) weighted log-likelihood of the GSM, with gradient
%
%   sol:    [p; beta], a 2K x 1 vector packing weights and precisions
%   x2:     1 x n row of squared sample values
%   w:      n x 1 column of sample weights
%   K:      the number of components
%
%   Note: the constant term -0.5*log(2*pi) per sample is omitted; this
%   offsets the objective by a constant and does not affect optimization.

p = sol(1:K);
beta = sol(K+1:end);

U = (-0.5 * beta) * x2;             % K x n exponent terms (outer product)
lrho = log(p) + 0.5 * log(beta);    % K x 1: log(p_k * sqrt(beta_k))
E = bsxfun(@plus, lrho, U);

% log-sum-exp over components, shifted by column maxima for stability
maxE = max(E, [], 1);
rE = bsxfun(@minus, E, maxE);
rE = exp(rE);
rE_sum = sum(rE, 1);

Lik = log(rE_sum) + maxE;   % 1 x n per-sample log-likelihood
v = Lik * w;
v = -v;     % we are to maximize (fmincon minimizes)

if nargout >= 2     % evaluate gradient
    Q = bsxfun(@times, rE, 1 ./ rE_sum);    % K x n posterior responsibilities
    sw = Q * w;                             % weighted responsibility sums
    % fix: d(loglik)/dp_k = sum_i w_i q_ki / p_k; the original omitted
    % the 1 ./ p factor (latent, as fit_ip does not enable 'GradObj')
    gp = sw ./ p;
    gbeta = 0.5 * (sw ./ beta - Q * (w .* (x2.')));
    g = [gp; gbeta];
    g = -g;
end
%% expectation maximization
function [p, beta, converged] = fit_em(x2, w, p0, beta0, maxiter, iterlen, tol, tolx, dispstr)
% Fit the GSM by expectation-maximization.
%
%   Each outer iteration runs iterlen E-M steps, then evaluates the
%   (entropy-augmented) objective and checks convergence against both
%   the objective change (tol) and the solution change (tolx).

displevel = get_displevel(dispstr);

p = p0;
beta = beta0;
wx2 = w .* x2.';    % pre-weighted squared values (column)

it = 0;
converged = false;

if displevel >= 3
    fprintf('%6s %12s %12s %12s\n', 'Iter', 'objective', 'objv-change', 'sol-change');
end

% E: K x n matrix of per-component log-terms (up to a shared constant)
E = bsxfun(@minus, log(p) + log(beta) * 0.5, (0.5 * beta) * x2);

while ~converged && it < maxiter
    
    it = it + 1;
    
    for t = 1 : iterlen
        
        % E-step: posterior responsibilities
        Q = nrmexp(E, 1);
        
        % M-step
        if it > 1
            % keep the pre-update solution for the tolx check below
            pre_p = p;
            pre_beta = beta;
        end
        
        sw = Q * w;
        beta = sw ./ (Q * wx2);
        p = sw / sum(sw);
        
        E = bsxfun(@minus, log(p) + log(beta) * 0.5, (0.5 * beta) * x2);
    end
    
    % Evaluate objective (expected log-likelihood + label entropy)
    
    ent = ddentropy(Q);
    if it > 1
        prev_objv = objv;
    end
    objv = (sum(Q .* E, 1) + ent) * w;
    
    if it == 1
        % no previous solution/objective to compare against
        ch_fun = nan;
        ch_sol = nan;
    else
        ch_fun = objv - prev_objv;
        ch_sol = max( norm(p - pre_p, inf), norm(beta - pre_beta, inf) );
    end
    converged = it > 1 && abs(ch_fun) < tol && ch_sol < tolx;
    
    % Display
    
    if displevel >= 3
        fprintf('%6d %12.5g %12.5g %12.5g\n', it, objv, ch_fun, ch_sol);
    end
end

if displevel >= 2 || (displevel >= 1 && ~converged)
    if converged
        fprintf('GSM fitting (by EM) converged (with %d iterations)\n', it);
    else
        fprintf('GSM fitting (by EM) have not converged (with %d iterations)\n', it);
    end
end
%% auxiliary functions
function beta0 = init_beta0(x2, p0)
% Initial guess of the component precisions: partition the sorted squared
% values into contiguous groups sized proportionally to the weights p0,
% and use each group's inverse mean square as its initial beta.

n = numel(x2);
sx2 = sort(x2);

ends = round(cumsum(p0) * n);
ends(end) = n;                      % guard against rounding shortfall
starts = [1; ends(1:end-1) + 1];

K = numel(p0);
beta0 = zeros(K, 1);
for k = 1 : K
    seg = sx2(starts(k):ends(k));
    beta0(k) = numel(seg) / sum(seg);
end
function [p0,s0,method,maxiter,iterlen,tol,tolx,dispstr] = check_options(nvlist)
% Parse the name/value option list for gsm_fit and apply defaults.
%
%   Defaults: p0 = [], s0 = [], method = 'ip', maxiter = 100,
%             iterlen = 5, tol = 1e-6, tolx = 1e-6, dispstr = 'none'.

p0 = [];
s0 = [];
method = 'ip';
maxiter = 100;
iterlen = 5;
tol = 1e-6;
tolx = 1e-6;
dispstr = 'none';

if ~isempty(nvlist)
    onames = nvlist(1:2:end);
    ovals = nvlist(2:2:end);
    
    if ~(numel(onames) == numel(ovals) && iscellstr(onames))
        error('gsm_fit:invalidarg', 'The name/value list is invalid.');
    end
    
    for i = 1 : numel(onames)
        cn = onames{i};
        cv = ovals{i};
        
        switch lower(cn)
            case 'p0'
                % fix: the original check referenced K, which is not in
                % scope in this subfunction (it would crash whenever p0
                % was supplied); validate the shape without K.
                if ~(isfloat(cv) && isreal(cv) && iscolumn(cv) && ~isempty(cv))
                    error('gsm_fit:invalidarg', ...
                        'p0 should be a K x 1 real valued vector.');
                end
                p0 = cv;
            case 's0'
                % fix: same out-of-scope K reference as for p0
                if ~(isfloat(cv) && isreal(cv) && iscolumn(cv) && ~isempty(cv))
                    error('gsm_fit:invalidarg', ...
                        's0 should be a K x 1 real valued vector.');
                end
                s0 = cv;
            case 'method'
                if ~(ischar(cv) && (strcmpi(cv, 'ip') || strcmpi(cv, 'em')))
                    error('gsm_fit:invalidarg', ...
                        'The method must be a string, which can be ''ip'' or ''em''.');
                end
                method = lower(cv);
            case 'maxiter'
                if ~(isnumeric(cv) && isscalar(cv) && cv >= 1)
                    error('gsm_fit:invalidarg', ...
                        'maxiter must be a positive number.');
                end
                maxiter = cv;
            case 'iterlen'
                if ~(isnumeric(cv) && isscalar(cv) && cv >= 1)
                    error('gsm_fit:invalidarg', ...
                        'iterlen must be a positive number.');
                end
                iterlen = cv;
            case 'tol'
                if ~(isfloat(cv) && isscalar(cv) && isreal(cv) && cv > 0)
                    error('gsm_fit:invalidarg', ...
                        'tol must be a positive real value.');
                end
                tol = cv;
            case 'tolx'
                if ~(isfloat(cv) && isscalar(cv) && isreal(cv) && cv > 0)
                    error('gsm_fit:invalidarg', ...
                        'tolx must be a positive real value.');
                end
                tolx = cv;
            case 'display'
                if ~(ischar(cv) && ismember(cv, {'none', 'notify', 'final', 'iter'}))
                    error('gsm_fit:invalidarg', ...
                        'The value of display is invalid.');
                end
                dispstr = cv;
            otherwise
                % fix: unknown option names used to be silently ignored;
                % raise an error, consistent with gmm_fit's option parser
                error('gsm_fit:invalidarg', ...
                    'Unknown option name %s', cn);
        end
    end
end
function displevel = get_displevel(dispstr)
% Translate a display option string into a numeric verbosity level:
% 'none' -> 0, 'notify' -> 1, 'final' -> 2, 'iter' -> 3.
% (dispstr is validated upstream by check_options.)

if strcmp(dispstr, 'none')
    displevel = 0;
elseif strcmp(dispstr, 'notify')
    displevel = 1;
elseif strcmp(dispstr, 'final')
    displevel = 2;
elseif strcmp(dispstr, 'iter')
    displevel = 3;
end
| zzhangumd-smitoolbox | pmodels/mixmodel/gsm_fit.m | MATLAB | mit | 9,730 |
function mixppca_demo(n, K)
% A program to demonstrate the use of mixture of PPCA
%
%   mixppca_demo(n, K);
%
%       Here, n is the number of sample points, K is the number of
%       mixture components to be used
%
%       By default n = 50000, and K = 8;
%
%   Created by Dahua Lin, on Nov 6, 2011
%

%% get input

if nargin < 1
    n = 50000;
end

if nargin < 2
    K = 8;
end

%% generate data

% samples on a noisy circle of radius r0 (noise std sigma0)
sigma0 = 0.1;
r0 = 2;
rgn = [-3, 3, -3, 3];   % plotting region

theta = rand(1, n) * (2 * pi);
r = r0 + randn(1, n) * sigma0;

X = [r .* cos(theta); r .* sin(theta)];

%% Do estimation

% 2-D observations, 1-D latent (principal) subspace per component
gm = ppca_gm(2, 1);
c0 = 1;
state = fmm_std('em', gm, [], c0);

% initialize labels by partitioning the circle into K angular sectors
Z0 = ceil(theta / (2 * pi) * K);
w = [];
state = state.initialize(X, w, 'labels', Z0);

opts = varinfer_options([], ...
    'maxiters', 50, ...
    'display', 'eval', ...
    'tol', 1e-6);
R = varinfer_drive(state, opts);
models = R.sol.params;

%% visualize

% models

figure;
plot(X(1,:), X(2,:), 'b.', 'MarkerSize', 3);
axis(rgn);
axis equal;

% overlay a 2-sigma ellipse for each fitted PPCA component
for k = 1 : K
    gk = gaussd('m', models(k));
    hold on;
    gaussd_ellipse(gk, 2, 500, 'r', 'LineWidth', 2);
end
| zzhangumd-smitoolbox | pmodels/mixmodel/mixppca_demo.m | MATLAB | mit | 1,099 |
function c = invgammad_const(alpha, beta)
% Compute the constant term for inverse gamma distribution logpdf
%
%   c = invgammad_const(alpha, beta);
%
%       Evaluates the following constant term for the evaluation of
%       inverse gamma distribution logpdf:
%
%           c = alpha log(beta) - gammaln(alpha)
%
%       (Note: the original help stated c = -(alpha log(beta) +
%       gammaln(alpha)), which is the constant of the *gamma*
%       distribution with scale beta; the code below computes the
%       inverse-gamma constant, with beta as a rate-like parameter.)
%
%       The sizes of alpha and beta should be compatible in the bsxfun
%       sense.
%
%       If either size(alpha, 1) > 1 or size(beta, 1) > 1, then
%       it treats the distribution as a multi-dimensional distribution,
%       and sums the per-dimension constants over rows.
%
%
%   Created by Dahua Lin, on Dec 26, 2011
%

%% main

if isscalar(alpha) || isscalar(beta)
    c = alpha * log(beta) - gammaln(alpha);
else
    c = bsxfun(@minus, bsxfun(@times, alpha, log(beta)), gammaln(alpha));
end

if size(c, 1) > 1
    c = sum(c, 1);  % independent dimensions: constants add up
end
| zzhangumd-smitoolbox | pmodels/gamma/invgammad_const.m | MATLAB | mit | 811 |
function L = gammad_logpdf(alpha, beta, X, c)
%GAMMAD_LOGPDF Evaluate log probability density of gamma distribution
%
%   log f(x; alpha, beta) =
%       (alpha - 1) log(x) - x / beta + const
%
%   with const = - (alpha log(beta) + gammaln(alpha))
%
%   L = gammad_logpdf(alpha, beta, X);
%   L = gammad_logpdf(alpha, beta, X, 0);
%   L = gammad_logpdf(alpha, beta, X, c);
%
%       evaluates the log probability density function of gamma
%       distribution(s) at given samples.
%
%       Inputs:
%       - alpha:    the shape parameter(s)
%       - beta:     the scale parameter(s)
%       - X:        the sample matrix [d x n]
%
%       - c:        the constant term in the log-pdf.
%
%                   If c is not provided if will be evaluated in the
%                   function. When this function is invoked multiple times
%                   with the same set of distributions, it is advisable
%                   to pre-compute it using gammad_const.
%
%                   One can also set c to 0, which means only computing
%                   the linear part without adding the constant.
%
%       When d > 1, this indicates a multi-dimensional gamma distribution
%       with independent components.
%
%       alpha and beta can respectively be either of the following:
%       a scalar, d x 1 vector, 1 x m vector, or d x m matrix.
%       When m > 1, it indicates there are multiple distributions,
%       the log-pdf values with respect to all distributions are evaluated
%       for each sample.
%
%       The sizes of alpha and beta need not be the same, but they have
%       to be compatible with each other in bsxfun sense.
%
%       Outputs:
%       - L:        the log-pdf value matrix, of size m x n.
%                   Particularly, L(k, i) is the log-pdf at the i-th sample
%                   with respect to the k-th distribution.
%
%
%
%   Created by Dahua Lin, on Dec 26, 2011
%

%% verify input arguments

if ~(isfloat(X) && isreal(X) && ndims(X) == 2)
    error('gammad_logpdf:invalidarg', 'X should be a real matrix.');
end

if ~(isfloat(alpha) && isreal(alpha) && ndims(alpha) == 2)
    error('gammad_logpdf:invalidarg', 'alpha should be a real matrix.');
end

if ~(isfloat(beta) && isreal(beta) && ndims(beta) == 2)
    error('gammad_logpdf:invalidarg', 'beta should be a real matrix.');
end

dx = size(X, 1);
[da, ma] = size(alpha);
[db, mb] = size(beta);

if ~( (da == 1 || da == dx) && (db == 1 || db == dx) && ...
        (ma == 1 || mb == 1 || ma == mb))
    error('gammad_logpdf:invalidarg', 'The size of alpha or beta is invalid.');
end
m = max(ma, mb);    % the number of distributions

if nargin >= 4
    if ~( isequal(c, 0) || ...
            (isfloat(c) && isreal(c) && isequal(size(c), [1 m])) )
        error('gammad_logpdf:invalidarg', ...
            'c should be either zero or a 1 x m real vector.');
    end
    calc_c = 0;
else
    calc_c = 1;     % no constant supplied: compute it below
end

%% Evaluate

% first term: (alpha - 1) log(x)

if da == dx
    T1 = (alpha - 1)' * log(X);
else
    % scalar alpha per distribution: factor through sum over dimensions
    T1 = (alpha - 1)' * sum(log(X), 1);
end

% second term: x / beta

if db == dx
    T2 = (1 ./ beta)' * X;
else
    T2 = (1 ./ beta)' * sum(X, 1);
end

% combine terms

if size(T1, 1) == size(T2, 1)
    L = T1 - T2;
else
    L = bsxfun(@minus, T1, T2);     % one of ma/mb is 1: broadcast
end

% add constants

if calc_c
    c = gammad_const(alpha, beta);
    if da < dx && db < dx
        % both parameters are scalar per distribution: the per-dimension
        % constant repeats dx times
        c = c * dx;
    end
end

if ~isequal(c, 0)
    if m == 1
        L = L + c;
    else
        L = bsxfun(@plus, L, c.');
    end
end
| zzhangumd-smitoolbox | pmodels/gamma/gammad_logpdf.m | MATLAB | mit | 3,474 |
function v = dird_entropy(alpha, d)
%DIRD_ENTROPY Evaluates the entropy of Dirichlet distribution
%
%   v = dird_entropy(alpha, d);
%
%       Evaluates the entropy of Dirichlet distribution(s).
%
%       Here, alpha should be a matrix of size d x n, and in the output,
%       the size of v is 1 x n. Particularly, v(i) corresponds to
%       the parameter given by alpha(:,i).
%
%       When alpha is a row vector of size 1 x n, each alpha(i) is
%       taken as the concentration of a symmetric d-dimensional
%       Dirichlet distribution (equivalent to passing
%       repmat(alpha(i), d, 1)); in this case d must be supplied.
%
%       When d is omitted, it defaults to size(alpha, 1).
%
%   Created by Dahua Lin, on Dec 26, 2011
%

%% main

if nargin < 2
    d = size(alpha, 1);
end

% entropy = log B(alpha) + (alpha0 - d) psi(alpha0)
%           - sum_j (alpha_j - 1) psi(alpha_j),  with alpha0 = sum_j alpha_j
logB = mvbetaln(alpha, d);

a = alpha - 1;
if size(alpha, 1) == 1
    % symmetric case: alpha0 = d * alpha, and the sum has d equal terms
    v = logB + (a * d) .* (psi(alpha * d) - psi(alpha));
else
    salpha = sum(alpha, 1);
    v = logB + sum(a) .* psi(salpha) - sum(a .* psi(alpha));
end
| zzhangumd-smitoolbox | pmodels/gamma/dird_entropy.m | MATLAB | mit | 1,029 |
function [alpha, objv] = dird_mle(X, w, alpha0, varargin)
%DIRD_MLE Maximum-likelihood Estimation of Dirichlet Distribution
%
%   [alpha, objv] = DIRD_MLE(X, w, alpha0);
%   [alpha, objv] = DIRD_MLE(X, w, alpha0, ...);
%
%       Performs maximum-likelihood estimation of Dirichlet distribution.
%
%       Input arguments
%       ---------------
%       X:          The observed samples or their statistics [d x n]
%       w:          The sample weights [empty or 1 x n]
%       alpha0:     The initial guess of alpha [empty or d x 1]
%                   If empty, alpha0 will be initialized to
%                   1 + rand(d, 1).
%
%       If the 4th argument is 'normal', then X is treated as normal
%       samples drawn from the Dirichlet distribution.
%
%       If the 4th argument is 'stat', then X is considered to be
%       (the expectation) of log(samples). This syntax can be
%       useful in various context, e.g. the E-M algorithm for latent
%       Dirichlet allocation.
%
%       If the 4th argument is not given, it is set to 'normal'.
%
%       Output arguments
%       ----------------
%       alpha:      The solved alpha vectors
%       objv:       The objective value of the solution.
%
%       One can input other parameters to control the optimization,
%       in form of name/value pairs
%
%       - input:    'normal' or 'stat':
%                   'normal':   X are samples from the Distribution
%                   'stat':     X are expectation of log(samples)
%                   default = 'normal'.
%
%       - maxiter:  The maximum number of iterations {100}
%       - tolfun:   The tolerance of function value changes {1e-9}
%       - tolx:     The tolerance of solution changes {1e-8}
%       - display:  the level of display {'off'}|'notify'|'final'|'iter'
%
%       Note: fmincon will be used to optimize the function, and these
%       options will be passed to it via optimset.
%

%% parse inputs

if ~(isfloat(X) && isreal(X) && ismatrix(X))
    error('dird_mle:invalidarg', 'X should be a real matrix.');
end
[d, n] = size(X);

if ~isempty(w)
    if ~(isfloat(w) && isreal(w) && isvector(w) && numel(w) == n)
        error('dird_mle:invalidarg', 'w should be a vector of length n.');
    end
end

if isempty(alpha0)
    alpha0 = 1 + rand(d, 1);
else
    if ~(isfloat(alpha0) && isreal(alpha0) && isequal(size(alpha0), [d 1]))
        error('dird_mle:invalidarg', 'alpha0 should be a d x 1 real vector.');
    end
end

[is_normal, opts] = parse_options(varargin);

%% main

% compute stats

% v: the (weighted) mean of log(samples); tw: the total weight
if is_normal
    v = log(X);
else
    v = X;
end

if n > 1
    if isempty(w)
        tw = n;
        v = sum(v, 2) * (1 / n);
    else
        tw = sum(w);
        v = v * (w(:) / tw);
    end
else
    if isempty(w)
        tw = 1;
    else
        tw = w;
    end
end

% optimize

% minimize the per-sample negative log-likelihood over alpha >= 0
objfun = @(a) dird_mle_objfun(a, v);
[alpha, fv] = fmincon(objfun, alpha0, [], [], [], [], ...
    zeros(d, 1), [], [], opts);

objv = (-fv * tw);  % scale back to the total-weight log-likelihood
%% objective function
function [v, g, H] = dird_mle_objfun(a, t)
% Per-sample negative log-likelihood of a Dirichlet with parameter a,
% given t = mean of log(samples); with gradient and Hessian.

asum = sum(a);
v = sum(gammaln(a) - (a - 1) .* t, 1) - gammaln(asum);

if nargout > 1
    g = psi(a) - (psi(asum) + t);
end

if nargout > 2
    H = diag(psi(1, a)) - psi(1, asum);
end
%% Function parsing
function [is_normal, opts] = parse_options(params)
% Parse the name/value options for dird_mle and build the fmincon
% options struct.
%
%   is_normal:  1 when X are raw samples, 0 when X are log-statistics
%   opts:       options struct produced by optimset

is_normal = 1;
maxiter = 100;
tolfun = 1e-9;
tolx = 1e-8;
display = 'off';

if ~isempty(params)
    names = params(1:2:end);
    vals = params(2:2:end);
    
    if ~(numel(names) == numel(vals) && iscellstr(names))
        error('dird_mle:invalidarg', 'Invalid name/value list.');
    end
    
    for i = 1 : numel(names)
        cn = names{i};
        v = vals{i};
        
        switch lower(cn)
            case 'input'
                if strcmp(v, 'normal')
                    is_normal = 1;
                elseif strcmp(v, 'stat')
                    is_normal = 0;
                else
                    error('dird_mle:invalidarg', ...
                        'The value of option input should be ''normal'' or ''stat''.');
                end
                
            case 'maxiter'
                if ~(isnumeric(v) && isscalar(v) && v >= 1)
                    % fix: corrected "maxtter" typo in the message
                    error('dird_mle:invalidarg', ...
                        'maxiter should be a positive integer scalar.');
                end
                maxiter = v;
                
            case 'tolfun'
                if ~(isfloat(v) && isscalar(v) && v > 0)
                    error('dird_mle:invalidarg', ...
                        'tolfun should be a positive scalar.');
                end
                tolfun = v;
                
            case 'tolx'
                if ~(isfloat(v) && isscalar(v) && v > 0)
                    error('dird_mle:invalidarg', ...
                        'tolx should be a positive scalar.');
                end
                tolx = v;
                
            case 'display'
                % fix: the original used ||, so any char value passed the
                % check and invalid display strings slipped through
                if ~(ischar(v) && ...
                        ismember(v, {'off', 'notify', 'final', 'iter'}))
                    error('dird_mle:invalidarg', ...
                        'The value of display is invalid.');
                end
                display = v;
                
            otherwise
                error('dird_mle:invalidarg', ...
                    'The option name %s is unknown.', cn);
        end
    end
end

opts = optimset( ...
    'MaxIter', maxiter, ...
    'TolFun', tolfun, ...
    'TolX', tolx, ...
    'Display', display, ...
    'GradObj', 'on', ...
    'Hessian', 'user-supplied');
| zzhangumd-smitoolbox | pmodels/gamma/dird_mle.m | MATLAB | mit | 5,728 |
function L = wishartd_logpdf(Sigma, df, V, c, op)
%WISHARTD_LOGPDF Evaluates the log pdf of Wishart distribution
%
%   L = wishartd_logpdf(Sigma, df, V);
%   L = wishartd_logpdf(Sigma, df, V, 0);
%   L = wishartd_logpdf(Sigma, df, V, c);
%
%       Evaluates the log pdf at the matrices given in V, w.r.t. the
%       Wishart distribution.
%
%       Input arguments:
%       - Sigma:    the scale matrix (pdmat struct), Sigma.n == 1
%       - df:       the degree of freedom
%       - V:        the sample matrices (pdmat struct)
%       - c:        the constant term in log-pdf evaluation. If
%                   not supplied, the function will evaluate it.
%                   One can set c to zero to ignore this term.
%
%       Output arguments:
%       - L:        the result values in form of a row vector of
%                   size 1 x V.n.
%
%   L = wishartd_logpdf(InvSigma, df, V, [], 'inv');
%   L = wishartd_logpdf(InvSigma, df, V, 0, 'inv');
%   L = wishartd_logpdf(InvSigma, df, V, c, 'inv');
%
%       Here, InvSigma is the inverse scale matrix; the internal
%       matrix inversion is then skipped.
%
%   Created by Dahua Lin, on Dec 26, 2011
%

%% verify inputs

if ~(is_pdmat(Sigma) && Sigma.n == 1)
    % fixed message: the condition checks Sigma.n (it previously
    % claimed Sigma.d == 1)
    error('wishartd_logpdf:invalidarg', ...
        'Sigma should be a pdmat struct with Sigma.n == 1.');
end
d = Sigma.d;

if ~(isfloat(df) && isscalar(df) && df > d - 1)
    error('wishartd_logpdf:invalidarg', ...
        'df should be a numeric scalar with df > d - 1.');
end

if ~(is_pdmat(V) && V.d == d)
    error('wishartd_logpdf:invalidarg', ...
        'V should be a pdmat struct with V.d == d.');
end

if nargin < 4 || isempty(c)
    calc_c = 1;
else
    if ~(isfloat(c) && isscalar(c) && isreal(c))
        error('wishartd_logpdf:invalidarg', 'c should be a real scalar.');
    end
    calc_c = 0;
end

if nargin < 5
    is_inv = 0;
else
    if ~(ischar(op) && strcmpi(op, 'inv'))
        error('wishartd_logpdf:invalidarg', ...
            'The 5th argument can only be ''inv''.');
    end
    is_inv = 1;
end

%% main

% obtain the inverse scale matrix J = inv(Sigma)
if is_inv
    J = Sigma;
else
    J = pdmat_inv(Sigma);
end

% log-pdf kernel: ((df-d-1)/2) log|V| - (1/2) tr(J V)
L = (0.5) * ((df - d - 1) * pdmat_lndet(V) - pdmat_dot(J, V));

if calc_c
    c = wishartd_const(J, df);
end

if c ~= 0
    L = L + c;
end
| zzhangumd-smitoolbox | pmodels/gamma/wishartd_logpdf.m | MATLAB | mit | 2,266 |
function h = visdird3(alpha, form)
% Visualize a Dirichlet distribution with K == 3
%
%   visdird3(alpha);
%   visdird3(alpha, 'image');
%   visdird3(alpha, 'mesh');
%
%       visualizes a Dirichlet distribution with K = 3 as a colored
%       image or mesh. (image by default);
%
%       alpha is either a scalar or a vector of length 3.
%
%   h = visdird3(alpha);
%   h = visdird3(alpha, 'image');
%   h = visdird3(alpha, 'mesh');
%
%       This statement returns the handle to the figure.
%
%   Created by Dahua Lin, on Sep 17, 2011
%

%% verify inputs

if ~(isfloat(alpha) && isreal(alpha) && ...
        (isscalar(alpha) || (isvector(alpha) && numel(alpha) == 3)))
    error('visdird3:invalidarg', ...
        'alpha should be either a real scalar or a real vector of length 3.');
end

% normalize alpha to a column vector
if size(alpha, 2) > 1
    alpha = alpha.';
end

if nargin < 2
    form = 'image';
else
    if ~(ischar(form) && (strcmpi(form, 'image') || strcmpi(form, 'mesh')))
        error('visdird3:invalidarg', ...
            'The 2nd argument should be either ''image'' or ''mesh''.');
    end
    form = lower(form);
end

%% main

% gather the points at which pdf is calculated

% homogeneous coordinates of the triangle vertices: columns are
% [x; y; 1] for the simplex corners (-1,0), (0,sqrt(3)), (1,0)
tarref = [-1 0 1; 0 sqrt(3) 1; 1 0 1]';

% grid resolution (finer for image rendering than for mesh)
switch form
    case 'image'
        nx = 480;
        ny = 300;
    case 'mesh'
        nx = 96;
        ny = 60;
end

% grid points strictly inside the triangle's bounding box
tx = (1 : nx) / (nx + 1) * 2 - 1;
ty = (1 : ny) / (ny + 1) * sqrt(3);

[xx, yy] = meshgrid(tx, ty);
tar = [xx(:) yy(:)]';
np = size(tar, 2);

% solve for barycentric coordinates s of each grid point w.r.t. the
% three vertices, then renormalize columns to sum exactly to one
s = tarref \ [tar; ones(1, np)];
s = bsxfun(@times, s, 1 ./ sum(s, 1));

% keep only points safely inside the simplex (avoids the density
% blowing up on the boundary for alpha < 1)
is_valid = all(s > 1e-3, 1);

% compute pdf
% NOTE(review): dird_logpdf by its name returns log-density values,
% while the color/axis limits below start at 0 -- confirm its output
% range; points outside the simplex are left at value 0.
v = zeros(1, np);
v(is_valid) = dird_logpdf(alpha, s(:, is_valid));
vv = reshape(v, size(xx));

% visualize
switch form
    case 'image'
        h = imagesc(tx, ty', vv, [0, max(v)]);
        axis xy;
        axis([-1 1 0 sqrt(3)]);
        axis equal;
        colorbar;
        set(gca, 'XTick', [], 'YTick', []);

    case 'mesh'
        h = mesh(xx, yy, vv);
        axis([-1 1 0 sqrt(3) 0 max(v)]);
        set(gca, 'XTick', [], 'YTick', [], 'ZTick', []);
end
| zzhangumd-smitoolbox | pmodels/gamma/visdird3.m | MATLAB | mit | 2,091 |
function c = wishartd_const(J, df)
% Compute the constant term for Wishart distribution log-pdf
%
%   c = wishartd_const(J, df);
%
%   Evaluates the log normalizing constant shared by the Wishart and
%   inverse Wishart log-pdfs:
%
%       c = (df/2) log|J| - (df*d/2) log 2 - log Gamma_d(df/2)
%
%   Inputs:
%   - J:    inverse scale matrix in form of pdmat struct (J.n == 1)
%   - df:   the degree of freedom (a real scalar > d - 1)
%
%   Note that the constants for Wishart and inverse Wishart
%   distributions with the same inverse scale matrix are the same.
%

%% verify input

if ~(is_pdmat(J) && J.n == 1)
    error('wishartd_const:invalidarg', ...
        'J should be a pdmat struct with J.n == 1.');
end
d = J.d;

if ~(isfloat(df) && isreal(df) && isscalar(df) && df > d - 1)
    error('wishartd_const:invalidarg', ...
        'df should be a real scalar greater than d - 1.');
end

%% main

half_df = df / 2;
c = half_df * pdmat_lndet(J) - (half_df * d) * log(2) - mvgammaln(d, half_df);
| zzhangumd-smitoolbox | pmodels/gamma/wishartd_const.m | MATLAB | mit | 974 |
function v = gammad_entropy(alpha, beta)
% Compute the entropy for gamma distribution
%
%   v = gammad_entropy(alpha, beta);
%
%   Evaluates the entropy of a gamma distribution with shape alpha
%   and scale beta:
%
%       H = alpha + gammaln(alpha) + (1 - alpha) * psi(alpha) + log(beta)
%
%   The sizes of alpha and beta should be compatible in the bsxfun
%   sense. If either size(alpha, 1) > 1 or size(beta, 1) > 1, the
%   distribution is treated as multi-dimensional and the per-dimension
%   entropies are summed along the first dimension.
%

%% main

% shape-dependent part of the entropy
base = alpha + gammaln(alpha) + (1 - alpha) .* psi(alpha);

% add the scale-dependent term log(beta)
if isscalar(base) || isscalar(beta)
    v = base + log(beta);
else
    v = bsxfun(@plus, base, log(beta));
end

% multi-dimensional case: total entropy is the sum over dimensions
if size(v, 1) > 1
    v = sum(v, 1);
end
| zzhangumd-smitoolbox | pmodels/gamma/gammad_entropy.m | MATLAB | mit | 665 |
function X = dird_sample(alpha, n)
%DIRD_SAMPLE Samples from a Dirichlet distribution
%
%   X = dird_sample(alpha);
%   X = dird_sample(alpha, n);
%   X = dird_sample(alpha, [d, n]);
%
%   Draws n independent samples from a Dirichlet distribution over
%   the (d-1)-dimensional probability simplex with parameter alpha.
%
%   Input arguments:
%   - alpha:    a d x 1 column vector, or a scalar (for a symmetric
%               Dirichlet distribution)
%   - d:        the sample dimension (each sample sums to 1)
%   - n:        the number of samples to draw
%
%   The samples are returned as the columns of the d x n matrix X.
%

%% verify input arguments

alpha_ok = isfloat(alpha) && isreal(alpha) && ...
    (isscalar(alpha) || (ndims(alpha) == 2 && size(alpha, 2) == 1));
if ~alpha_ok
    error('dird_sample:invalidarg', ...
        'alpha should be a real scalar or a real column vector.');
end
da = size(alpha, 1);

if nargin < 2
    d = da;
    n = 1;
else
    if ~(isnumeric(n) && (isscalar(n) || numel(n) == 2))
        error('dird_sample:invalidarg', ...
            'n should be a non-negative integer scalar or a numeric pair.');
    end
    if isscalar(n)
        d = da;
    else
        d = n(1);
        n = n(2);
        if ~(da == 1 || da == d)
            error('dird_sample:invalidarg', 'Inconsistent dimensions.');
        end
    end
end

%% main

if d == 1
    % the 0-dimensional simplex has a single point
    X = ones(1, n);
    return;
end

% draw independent gamma variates with shape alpha ...
if isscalar(alpha)
    G = randg(alpha, d, n);
elseif n == 1
    G = randg(alpha);
else
    G = randg(repmat(alpha, 1, n));
end

% ... and normalize each column to sum to one
X = bsxfun(@times, G, 1 ./ sum(G, 1));
| zzhangumd-smitoolbox | pmodels/gamma/dird_sample.m | MATLAB | mit | 1,818 |
function X = gammad_sample(alpha, beta, n)
%GAMMAD_SAMPLE Samples from a Gamma distribution
%
%   X = gammad_sample(alpha, beta);
%   X = gammad_sample(alpha, beta, n);
%
%   draws n samples from a gamma distribution with shape parameter
%   alpha and scale parameter beta.
%
%   Input arguments:
%   - alpha:    can be a scalar or a d x 1 column vector.
%   - beta:     can be a scalar or a d x 1 column vector.
%   - n:        the number of samples to draw
%
%   If n is omitted, it is assumed to be 1, and the dimension d is
%   derived from the sizes of alpha and beta.
%
%   X = gammad_sample(alpha, beta, [d, n]);
%
%   Additionally specifies the dimension of the samples as d. This
%   syntax is particularly useful when sampling a multi-dimensional
%   gamma distribution whose alpha and beta are both scalars.
%
%   The samples are returned as the columns of the d x n matrix X.
%
%   Created by Dahua Lin, on Sep 1, 2011
%   Modified by Dahua Lin, on Dec 26, 2011

%% verify input

if ~(isfloat(alpha) && isreal(alpha) && ndims(alpha) == 2 && size(alpha,2) == 1)
    error('gammad_sample:invalidarg', ...
        'alpha should be a real scalar or a column vector.');
end

if ~(isfloat(beta) && isreal(beta) && ndims(beta) == 2 && size(beta,2) == 1)
    % fixed message: the condition above accepts a column vector too;
    % the old text claimed only a positive scalar was valid
    error('gammad_sample:invalidarg', ...
        'beta should be a real scalar or a column vector.');
end

d1 = size(alpha, 1);
d2 = size(beta, 1);
if ~(d1 == 1 || d2 == 1 || d1 == d2)
    error('gammad_sample:invalidarg', ...
        'Inconsistent dimensions between alpha and beta.');
end
d_ = max(d1, d2);

if nargin < 3
    n = 1;
    d = d_;
else
    if ~(isnumeric(n) && (isscalar(n) || numel(n) == 2))
        error('gammad_sample:invalidarg', ...
            'n should be a numeric scalar or pair.');
    end
    if isscalar(n)
        d = d_;
    else
        d = n(1);
        n = n(2);
        if ~(d_ == 1 || d_ == d)
            error('gammad_sample:invalidarg', ...
                'The sample dimension is inconsistent.');
        end
    end
end

%% main

% draw standard (unit-scale) gamma variates with shape alpha
if d == 1
    X = randg(alpha, 1, n);
else
    if d1 == 1
        X = randg(alpha, d, n);     % scalar alpha, replicated over dims
    else
        X = randg(alpha(:, ones(1, n)));
    end
end

% scale the variates by beta
if isscalar(beta)
    if beta ~= 1
        X = X * beta;
    end
else
    if n == 1
        X = X .* beta;
    else
        X = bsxfun(@times, X, beta);
    end
end
| zzhangumd-smitoolbox | pmodels/gamma/gammad_sample.m | MATLAB | mit | 2,351 |
function L = invwishartd_logpdf(Phi, df, V, c)
%INVWISHARTD_LOGPDF Evaluates the log pdf of Inverse Wishart distribution
%
%   L = invwishartd_logpdf(Phi, df, V);
%   L = invwishartd_logpdf(Phi, df, V, 0);
%   L = invwishartd_logpdf(Phi, df, V, c);
%
%   Evaluates the log pdf at the matrices given in V, w.r.t. the
%   inverse Wishart distribution:
%
%       log p(V) = -((df+d+1)/2) log|V| - (1/2) tr(Phi * inv(V)) + c
%
%   where c is the log normalizing constant, which is shared with the
%   Wishart distribution of the same inverse scale matrix.
%
%   Input arguments:
%   - Phi:      the inverse scale matrix (pdmat struct), Phi.n == 1
%   - df:       the degree of freedom
%   - V:        the sample matrices (pdmat struct)
%   - c:        the constant term in log-pdf evaluation. If
%               not supplied, the function will evaluate it.
%               One can set c to zero to ignore this term.
%
%   Output arguments:
%   - L:        the result values in form of a row vector of
%               size 1 x V.n.
%
%   Created by Dahua Lin, on Dec 26, 2011
%

%% verify inputs

if ~(is_pdmat(Phi) && Phi.n == 1)
    % fixed message: refer to Phi (the actual parameter) and the n field
    error('invwishartd_logpdf:invalidarg', ...
        'Phi should be a pdmat struct with Phi.n == 1.');
end
d = Phi.d;      % fixed: was Sigma.d, an undefined variable

if ~(isfloat(df) && isscalar(df) && df > d - 1)
    error('invwishartd_logpdf:invalidarg', ...
        'df should be a numeric scalar with df > d - 1.');
end

if ~(is_pdmat(V) && V.d == d)
    error('invwishartd_logpdf:invalidarg', ...
        'V should be a pdmat struct with V.d == d.');
end

if nargin < 4 || isempty(c)
    calc_c = 1;
else
    if ~(isfloat(c) && isscalar(c) && isreal(c))
        error('invwishartd_logpdf:invalidarg', 'c should be a real scalar.');
    end
    calc_c = 0;
end

%% main

% log-pdf kernel: both the log-det term and the trace term enter with
% a negative sign (fixed: the trace term previously had the wrong sign)
IV = pdmat_inv(V);
L = (-0.5) * ((df + d + 1) * pdmat_lndet(V) + pdmat_dot(Phi, IV));

if calc_c
    c = wishartd_const(Phi, df);
end

if c ~= 0
    L = L + c;
end
| zzhangumd-smitoolbox | pmodels/gamma/invwishartd_logpdf.m | MATLAB | mit | 1,822 |
function v = invgammad_entropy(alpha, beta)
% Compute the entropy for inverse gamma distribution
%
%   v = invgammad_entropy(alpha, beta);
%
%   Evaluates the entropy of an inverse gamma distribution with shape
%   alpha and scale beta:
%
%       H = alpha + gammaln(alpha) - (1 + alpha) * psi(alpha) + log(beta)
%
%   The sizes of alpha and beta should be compatible in the bsxfun
%   sense. If either size(alpha, 1) > 1 or size(beta, 1) > 1, the
%   distribution is treated as multi-dimensional and the per-dimension
%   entropies are summed along the first dimension.
%

%% main

% shape-dependent part of the entropy
base = alpha + gammaln(alpha) - (1 + alpha) .* psi(alpha);

% add the scale-dependent term log(beta)
if isscalar(base) || isscalar(beta)
    v = base + log(beta);
else
    v = bsxfun(@plus, base, log(beta));
end

% multi-dimensional case: total entropy is the sum over dimensions
if size(v, 1) > 1
    v = sum(v, 1);
end
| zzhangumd-smitoolbox | pmodels/gamma/invgammad_entropy.m | MATLAB | mit | 687 |
function v = mvbetaln(alpha, d)
%MVBETALN Logarithm of multivariate beta function
%
%   The multivariate beta function is defined as
%
%       B(a) = prod(gamma(a)) / gamma(sum(a)).
%
%   v = mvbetaln(alpha);
%
%       Evaluates log B for each column of the d x n matrix alpha.
%       The output v is 1 x n, with v(i) corresponding to alpha(:,i).
%
%   v = mvbetaln(alpha, d);
%
%       Evaluates the log of the d-dimensional beta function. When
%       alpha is a 1 x n row vector, column i is treated as the
%       symmetric parameter repmat(alpha(i), d, 1); when alpha is a
%       d x n matrix, this is the same as mvbetaln(alpha).
%

%% verify input

if ~(isfloat(alpha) && isreal(alpha) && ndims(alpha) == 2)
    error('mvbetaln:invalidarg', 'alpha should be a real matrix.');
end
da = size(alpha, 1);

if nargin >= 2
    if ~(isscalar(d) && isnumeric(d))
        error('mvbetaln:invalidarg', 'd should be a numeric scalar.');
    end
    if ~(da == 1 || da == d)
        error('mvbetaln:invalidarg', 'd is inconsistent with alpha.');
    end
else
    d = da;
end

%% main

if d == 1
    % one-dimensional case: B(a) = gamma(a) / gamma(a) = 1
    v = zeros(1, size(alpha, 2));
elseif da == 1
    % symmetric case: d equal components per column
    v = d * gammaln(alpha) - gammaln(d * alpha);
else
    v = sum(gammaln(alpha), 1) - gammaln(sum(alpha, 1));
end
| zzhangumd-smitoolbox | pmodels/gamma/mvbetaln.m | MATLAB | mit | 1,478 |
function V = mvgammaln(d, X)
% Evaluate Multivariate Log-Gamma function
%
%   The d-dimensional multivariate gamma function is
%
%       Gamma_d(x) = pi^(d*(d-1)/4) * prod_{j=1}^d Gamma(x + (1-j)/2),
%
%   so its logarithm is
%
%       (d*(d-1)/4) * log(pi) + sum_{j=1}^d gammaln(x + (1-j)/2).
%
%   For d == 1, Gamma_d(x) reduces to Gamma(x).
%
%   V = mvgammaln(d, X);
%
%   computes the multivariate log-gamma function with dimension d on
%   X, which may be an array of any size; V has the same size as X.
%

%% verify input

if ~(isnumeric(d) && isscalar(d) && d == fix(d) && d >= 1)
    error('mvgammaln:invalidarg', 'd should be a positive integer scalar.');
end
if ~(isfloat(X) && isreal(X))
    error('mvgammaln:invalidarg', 'X should be a real-valued array.');
end

%% main

if d == 1
    V = gammaln(X);
    return;
end

% flatten X into a row vector, remembering whether to reshape back
need_reshape = ~(ndims(X) == 2 && size(X, 1) == 1);
if need_reshape
    x = reshape(X, 1, numel(X));
else
    x = X;
end

% constant term d*(d-1)/4 * log(pi); the literal below is log(pi)/4
const_term = d * (d - 1) * 0.286182471462350043535856837838;

% add the offsets (1-j)/2 for j = 1..d to every entry of x, then sum
% the gammaln values along the offset dimension
A = bsxfun(@plus, (1 - (1:d)') / 2, x);
vals = const_term + sum(gammaln(A), 1);

% restore the original shape of X
if need_reshape
    V = reshape(vals, size(X));
else
    V = vals;
end
| zzhangumd-smitoolbox | pmodels/gamma/mvgammaln.m | MATLAB | mit | 1,496 |
function c = gammad_const(alpha, beta)
% Compute the constant term for gamma distribution logpdf
%
%   c = gammad_const(alpha, beta);
%
%   Evaluates the parameter-only constant term of the gamma
%   distribution log-pdf:
%
%       c = -(alpha * log(beta) + gammaln(alpha))
%
%   The sizes of alpha and beta should be compatible in the bsxfun
%   sense. If either size(alpha, 1) > 1 or size(beta, 1) > 1, the
%   distribution is treated as multi-dimensional and the terms are
%   summed along the first dimension.
%

%% main

if isscalar(alpha) || isscalar(beta)
    c = -(alpha .* log(beta) + gammaln(alpha));
else
    al_lb = bsxfun(@times, alpha, log(beta));
    c = -bsxfun(@plus, al_lb, gammaln(alpha));
end

% multi-dimensional case: sum the terms over dimensions
if size(c, 1) > 1
    c = sum(c, 1);
end
| zzhangumd-smitoolbox | pmodels/gamma/gammad_const.m | MATLAB | mit | 802 |
/********************************************************************
*
* wishartd_sample_cimp.cpp
*
* The C++ mex implementation for pieces
*
* Created by Dahua Lin, on Mar 22, 2011
*
********************************************************************/
#include <bcslib/matlab/bcs_mex.h>
#include <cmath>
using namespace bcs;
using namespace bcs::matlab;
// core function
// Fill the d x d (column-major) matrix R for one Wishart sample:
// the diagonal gets sqrt of the chi-squared draws css, and the strictly
// lower triangle gets the standard-normal draws nrms (consumed row by
// row). Entries above the diagonal are NOT written -- the caller must
// supply a zero-initialized buffer (the mex entry allocates one).
void wishartd_gen(int d, const double *css, const double *nrms, double *R)
{
    // diagonal entries live at column-major offsets 0, d+1, 2(d+1), ...
    for (int j = 0; j < d; ++j)
    {
        R[j * (d + 1)] = std::sqrt(css[j]);
    }

    // strictly lower triangle, rows 1..d-1, columns 0..row-1
    int k = 0;
    for (int r = 1; r < d; ++r)
    {
        for (int c = 0; c < r; ++c)
        {
            R[r + c * d] = nrms[k++];
        }
    }
}
/**
* The main entry
*
* Input:
* [1] css: the chi-squared random numbers [d x n double]
* [2] nrms: the standard normal random numbers [d(d-1)/2 x n double]
*
* Output:
* [0] R: the output [d x d x n]
*
* Note:
* when d == 2 or d == 3, R store the sample matrix,
*
*/
void bcsmex_main(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
// take input
const_marray mCss(prhs[0]);
const_marray mNrms(prhs[1]);
int d = (int)mCss.nrows();
int n = (int)mCss.ncolumns();
const double *css = mCss.data<double>();
const double *nrms = mNrms.data<double>();
// prepare output
mxArray *mxR = 0;
if (n == 1)
{
mxR = mxCreateDoubleMatrix((mwSize)d, (mwSize)d, mxREAL);
}
else
{
mwSize dims[3];
dims[0] = (mwSize)d;
dims[1] = (mwSize)d;
dims[2] = (mwSize)n;
mxR = mxCreateNumericArray(3, dims, mxDOUBLE_CLASS, mxREAL);
}
double *R = mxGetPr(mxR);
// main
if (n == 1)
{
wishartd_gen(d, css, nrms, R);
}
else
{
int dn = d * (d - 1) / 2;
int dr = d * d;
for (int i = 0; i < n; ++i)
{
wishartd_gen(d, css + d * i, nrms + dn * i, R + dr * i);
}
}
// output
plhs[0] = mxR;
}
BCSMEX_MAINDEF
| zzhangumd-smitoolbox | pmodels/gamma/private/wishartd_sample_cimp.cpp | C++ | mit | 2,226 |