repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.dimension | def dimension(self):
"""Compute the dimension of the sampling space and identify the slices
belonging to each stochastic.
"""
self.dim = 0
self._slices = {}
for stochastic in self.stochastics:
if isinstance(stochastic.value, np.matrix):
p_len = len(stochastic.value.A.ravel())
elif isinstance(stochastic.value, np.ndarray):
p_len = len(stochastic.value.ravel())
else:
p_len = 1
self._slices[stochastic] = slice(self.dim, self.dim + p_len)
self.dim += p_len | python | def dimension(self):
"""Compute the dimension of the sampling space and identify the slices
belonging to each stochastic.
"""
self.dim = 0
self._slices = {}
for stochastic in self.stochastics:
if isinstance(stochastic.value, np.matrix):
p_len = len(stochastic.value.A.ravel())
elif isinstance(stochastic.value, np.ndarray):
p_len = len(stochastic.value.ravel())
else:
p_len = 1
self._slices[stochastic] = slice(self.dim, self.dim + p_len)
self.dim += p_len | [
"def",
"dimension",
"(",
"self",
")",
":",
"self",
".",
"dim",
"=",
"0",
"self",
".",
"_slices",
"=",
"{",
"}",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"if",
"isinstance",
"(",
"stochastic",
".",
"value",
",",
"np",
".",
"matrix",... | Compute the dimension of the sampling space and identify the slices
belonging to each stochastic. | [
"Compute",
"the",
"dimension",
"of",
"the",
"sampling",
"space",
"and",
"identify",
"the",
"slices",
"belonging",
"to",
"each",
"stochastic",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1216-L1230 | train | 220,100 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.update_cov | def update_cov(self):
"""Recursively compute the covariance matrix for the multivariate normal
proposal distribution.
This method is called every self.interval once self.delay iterations
have been performed.
"""
scaling = (2.4) ** 2 / self.dim # Gelman et al. 1996.
epsilon = 1.0e-5
chain = np.asarray(self._trace)
# Recursively compute the chain mean
self.C, self.chain_mean = self.recursive_cov(self.C, self._trace_count,
self.chain_mean, chain, scaling=scaling, epsilon=epsilon)
# Shrink covariance if acceptance rate is too small
acc_rate = self.accepted / (self.accepted + self.rejected)
if self.shrink_if_necessary:
if acc_rate < .001:
self.C *= .01
elif acc_rate < .01:
self.C *= .25
if self.verbose > 1:
if acc_rate < .01:
print_(
'\tAcceptance rate was',
acc_rate,
'shrinking covariance')
self.accepted = 0.
self.rejected = 0.
if self.verbose > 1:
print_("\tUpdating covariance ...\n", self.C)
print_("\tUpdating mean ... ", self.chain_mean)
# Update state
adjustmentwarning = '\n' +\
'Covariance was not positive definite and proposal_sd cannot be computed by \n' + \
'Cholesky decomposition. The next jumps will be based on the last \n' + \
'valid covariance matrix. This situation may have arisen because no \n' + \
'jumps were accepted during the last `interval`. One solution is to \n' + \
'increase the interval, or specify an initial covariance matrix with \n' + \
'a smaller variance. For this simulation, each time a similar error \n' + \
'occurs, proposal_sd will be reduced by a factor .9 to reduce the \n' + \
'jumps and increase the likelihood of accepted jumps.'
try:
self.updateproposal_sd()
except np.linalg.LinAlgError:
warnings.warn(adjustmentwarning)
self.covariance_adjustment(.9)
self._trace_count += len(self._trace)
self._trace = [] | python | def update_cov(self):
"""Recursively compute the covariance matrix for the multivariate normal
proposal distribution.
This method is called every self.interval once self.delay iterations
have been performed.
"""
scaling = (2.4) ** 2 / self.dim # Gelman et al. 1996.
epsilon = 1.0e-5
chain = np.asarray(self._trace)
# Recursively compute the chain mean
self.C, self.chain_mean = self.recursive_cov(self.C, self._trace_count,
self.chain_mean, chain, scaling=scaling, epsilon=epsilon)
# Shrink covariance if acceptance rate is too small
acc_rate = self.accepted / (self.accepted + self.rejected)
if self.shrink_if_necessary:
if acc_rate < .001:
self.C *= .01
elif acc_rate < .01:
self.C *= .25
if self.verbose > 1:
if acc_rate < .01:
print_(
'\tAcceptance rate was',
acc_rate,
'shrinking covariance')
self.accepted = 0.
self.rejected = 0.
if self.verbose > 1:
print_("\tUpdating covariance ...\n", self.C)
print_("\tUpdating mean ... ", self.chain_mean)
# Update state
adjustmentwarning = '\n' +\
'Covariance was not positive definite and proposal_sd cannot be computed by \n' + \
'Cholesky decomposition. The next jumps will be based on the last \n' + \
'valid covariance matrix. This situation may have arisen because no \n' + \
'jumps were accepted during the last `interval`. One solution is to \n' + \
'increase the interval, or specify an initial covariance matrix with \n' + \
'a smaller variance. For this simulation, each time a similar error \n' + \
'occurs, proposal_sd will be reduced by a factor .9 to reduce the \n' + \
'jumps and increase the likelihood of accepted jumps.'
try:
self.updateproposal_sd()
except np.linalg.LinAlgError:
warnings.warn(adjustmentwarning)
self.covariance_adjustment(.9)
self._trace_count += len(self._trace)
self._trace = [] | [
"def",
"update_cov",
"(",
"self",
")",
":",
"scaling",
"=",
"(",
"2.4",
")",
"**",
"2",
"/",
"self",
".",
"dim",
"# Gelman et al. 1996.",
"epsilon",
"=",
"1.0e-5",
"chain",
"=",
"np",
".",
"asarray",
"(",
"self",
".",
"_trace",
")",
"# Recursively comput... | Recursively compute the covariance matrix for the multivariate normal
proposal distribution.
This method is called every self.interval once self.delay iterations
have been performed. | [
"Recursively",
"compute",
"the",
"covariance",
"matrix",
"for",
"the",
"multivariate",
"normal",
"proposal",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1232-L1286 | train | 220,101 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.recursive_cov | def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices.
"""
n = length + len(chain)
k = length
new_mean = self.recursive_mean(mean, length, chain)
t0 = k * np.outer(mean, mean)
t1 = np.dot(chain.T, chain)
t2 = n * np.outer(new_mean, new_mean)
t3 = epsilon * np.eye(cov.shape[0])
new_cov = (
k - 1) / (
n - 1.) * cov + scaling / (
n - 1.) * (
t0 + t1 - t2 + t3)
return new_cov, new_mean | python | def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices.
"""
n = length + len(chain)
k = length
new_mean = self.recursive_mean(mean, length, chain)
t0 = k * np.outer(mean, mean)
t1 = np.dot(chain.T, chain)
t2 = n * np.outer(new_mean, new_mean)
t3 = epsilon * np.eye(cov.shape[0])
new_cov = (
k - 1) / (
n - 1.) * cov + scaling / (
n - 1.) * (
t0 + t1 - t2 + t3)
return new_cov, new_mean | [
"def",
"recursive_cov",
"(",
"self",
",",
"cov",
",",
"length",
",",
"mean",
",",
"chain",
",",
"scaling",
"=",
"1",
",",
"epsilon",
"=",
"0",
")",
":",
"n",
"=",
"length",
"+",
"len",
"(",
"chain",
")",
"k",
"=",
"length",
"new_mean",
"=",
"self... | r"""Compute the covariance recursively.
Return the new covariance and the new mean.
.. math::
C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T)
C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
& = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T)
:Parameters:
- cov : matrix
Previous covariance matrix.
- length : int
Length of chain used to compute the previous covariance.
- mean : array
Previous mean.
- chain : array
Sample used to update covariance.
- scaling : float
Scaling parameter
- epsilon : float
Set to a small value to avoid singular matrices. | [
"r",
"Compute",
"the",
"covariance",
"recursively",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1297-L1335 | train | 220,102 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.recursive_mean | def recursive_mean(self, mean, length, chain):
r"""Compute the chain mean recursively.
Instead of computing the mean :math:`\bar{x_n}` of the entire chain,
use the last computed mean :math:`bar{x_j}` and the tail of the chain
to recursively estimate the mean.
.. math::
\bar{x_n} & = \frac{1}{n} \sum_{i=1}^n x_i
& = \frac{1}{n} (\sum_{i=1}^j x_i + \sum_{i=j+1}^n x_i)
& = \frac{j\bar{x_j}}{n} + \frac{\sum_{i=j+1}^n x_i}{n}
:Parameters:
- mean : array
Previous mean.
- length : int
Length of chain used to compute the previous mean.
- chain : array
Sample used to update mean.
"""
n = length + len(chain)
return length * mean / n + chain.sum(0) / n | python | def recursive_mean(self, mean, length, chain):
r"""Compute the chain mean recursively.
Instead of computing the mean :math:`\bar{x_n}` of the entire chain,
use the last computed mean :math:`bar{x_j}` and the tail of the chain
to recursively estimate the mean.
.. math::
\bar{x_n} & = \frac{1}{n} \sum_{i=1}^n x_i
& = \frac{1}{n} (\sum_{i=1}^j x_i + \sum_{i=j+1}^n x_i)
& = \frac{j\bar{x_j}}{n} + \frac{\sum_{i=j+1}^n x_i}{n}
:Parameters:
- mean : array
Previous mean.
- length : int
Length of chain used to compute the previous mean.
- chain : array
Sample used to update mean.
"""
n = length + len(chain)
return length * mean / n + chain.sum(0) / n | [
"def",
"recursive_mean",
"(",
"self",
",",
"mean",
",",
"length",
",",
"chain",
")",
":",
"n",
"=",
"length",
"+",
"len",
"(",
"chain",
")",
"return",
"length",
"*",
"mean",
"/",
"n",
"+",
"chain",
".",
"sum",
"(",
"0",
")",
"/",
"n"
] | r"""Compute the chain mean recursively.
Instead of computing the mean :math:`\bar{x_n}` of the entire chain,
use the last computed mean :math:`bar{x_j}` and the tail of the chain
to recursively estimate the mean.
.. math::
\bar{x_n} & = \frac{1}{n} \sum_{i=1}^n x_i
& = \frac{1}{n} (\sum_{i=1}^j x_i + \sum_{i=j+1}^n x_i)
& = \frac{j\bar{x_j}}{n} + \frac{\sum_{i=j+1}^n x_i}{n}
:Parameters:
- mean : array
Previous mean.
- length : int
Length of chain used to compute the previous mean.
- chain : array
Sample used to update mean. | [
"r",
"Compute",
"the",
"chain",
"mean",
"recursively",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1337-L1358 | train | 220,103 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.propose | def propose(self):
"""
This method proposes values for stochastics based on the empirical
covariance of the values sampled so far.
The proposal jumps are drawn from a multivariate normal distribution.
"""
arrayjump = np.dot(
self.proposal_sd,
np.random.normal(
size=self.proposal_sd.shape[
0]))
if self.verbose > 2:
print_('Jump :', arrayjump)
# Update each stochastic individually.
for stochastic in self.stochastics:
jump = arrayjump[self._slices[stochastic]].squeeze()
if np.iterable(stochastic.value):
jump = np.reshape(
arrayjump[
self._slices[
stochastic]],
np.shape(
stochastic.value))
if self.isdiscrete[stochastic]:
jump = round_array(jump)
stochastic.value = stochastic.value + jump | python | def propose(self):
"""
This method proposes values for stochastics based on the empirical
covariance of the values sampled so far.
The proposal jumps are drawn from a multivariate normal distribution.
"""
arrayjump = np.dot(
self.proposal_sd,
np.random.normal(
size=self.proposal_sd.shape[
0]))
if self.verbose > 2:
print_('Jump :', arrayjump)
# Update each stochastic individually.
for stochastic in self.stochastics:
jump = arrayjump[self._slices[stochastic]].squeeze()
if np.iterable(stochastic.value):
jump = np.reshape(
arrayjump[
self._slices[
stochastic]],
np.shape(
stochastic.value))
if self.isdiscrete[stochastic]:
jump = round_array(jump)
stochastic.value = stochastic.value + jump | [
"def",
"propose",
"(",
"self",
")",
":",
"arrayjump",
"=",
"np",
".",
"dot",
"(",
"self",
".",
"proposal_sd",
",",
"np",
".",
"random",
".",
"normal",
"(",
"size",
"=",
"self",
".",
"proposal_sd",
".",
"shape",
"[",
"0",
"]",
")",
")",
"if",
"sel... | This method proposes values for stochastics based on the empirical
covariance of the values sampled so far.
The proposal jumps are drawn from a multivariate normal distribution. | [
"This",
"method",
"proposes",
"values",
"for",
"stochastics",
"based",
"on",
"the",
"empirical",
"covariance",
"of",
"the",
"values",
"sampled",
"so",
"far",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1360-L1388 | train | 220,104 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.step | def step(self):
"""
Perform a Metropolis step.
Stochastic parameters are block-updated using a multivariate normal
distribution whose covariance is updated every self.interval once
self.delay steps have been performed.
The AM instance keeps a local copy of the stochastic parameter's trace.
This trace is used to computed the empirical covariance, and is
completely independent from the Database backend.
If self.greedy is True and the number of iterations is smaller than
self.delay, only accepted jumps are stored in the internal
trace to avoid computing singular covariance matrices.
"""
# Probability and likelihood for stochastic's current value:
logp = self.logp_plus_loglike
if self.verbose > 1:
print_('Current value: ', self.stoch2array())
print_('Current likelihood: ', logp)
# Sample a candidate value
self.propose()
# Metropolis acception/rejection test
accept = False
try:
# Probability and likelihood for stochastic's proposed value:
logp_p = self.logp_plus_loglike
if self.verbose > 2:
print_('Current value: ', self.stoch2array())
print_('Current likelihood: ', logp_p)
if np.log(random()) < logp_p - logp:
accept = True
self.accepted += 1
if self.verbose > 2:
print_('Accepted')
else:
self.rejected += 1
if self.verbose > 2:
print_('Rejected')
except ZeroProbability:
self.rejected += 1
logp_p = None
if self.verbose > 2:
print_('Rejected with ZeroProbability Error.')
if (not self._current_iter % self.interval) and self.verbose > 1:
print_("Step ", self._current_iter)
print_("\tLogprobability (current, proposed): ", logp, logp_p)
for stochastic in self.stochastics:
print_(
"\t",
stochastic.__name__,
stochastic.last_value,
stochastic.value)
if accept:
print_("\tAccepted\t*******\n")
else:
print_("\tRejected\n")
print_(
"\tAcceptance ratio: ",
self.accepted / (
self.accepted + self.rejected))
if self._current_iter == self.delay:
self.greedy = False
if not accept:
self.reject()
if accept or not self.greedy:
self.internal_tally()
if self._current_iter > self.delay and self._current_iter % self.interval == 0:
self.update_cov()
self._current_iter += 1 | python | def step(self):
"""
Perform a Metropolis step.
Stochastic parameters are block-updated using a multivariate normal
distribution whose covariance is updated every self.interval once
self.delay steps have been performed.
The AM instance keeps a local copy of the stochastic parameter's trace.
This trace is used to computed the empirical covariance, and is
completely independent from the Database backend.
If self.greedy is True and the number of iterations is smaller than
self.delay, only accepted jumps are stored in the internal
trace to avoid computing singular covariance matrices.
"""
# Probability and likelihood for stochastic's current value:
logp = self.logp_plus_loglike
if self.verbose > 1:
print_('Current value: ', self.stoch2array())
print_('Current likelihood: ', logp)
# Sample a candidate value
self.propose()
# Metropolis acception/rejection test
accept = False
try:
# Probability and likelihood for stochastic's proposed value:
logp_p = self.logp_plus_loglike
if self.verbose > 2:
print_('Current value: ', self.stoch2array())
print_('Current likelihood: ', logp_p)
if np.log(random()) < logp_p - logp:
accept = True
self.accepted += 1
if self.verbose > 2:
print_('Accepted')
else:
self.rejected += 1
if self.verbose > 2:
print_('Rejected')
except ZeroProbability:
self.rejected += 1
logp_p = None
if self.verbose > 2:
print_('Rejected with ZeroProbability Error.')
if (not self._current_iter % self.interval) and self.verbose > 1:
print_("Step ", self._current_iter)
print_("\tLogprobability (current, proposed): ", logp, logp_p)
for stochastic in self.stochastics:
print_(
"\t",
stochastic.__name__,
stochastic.last_value,
stochastic.value)
if accept:
print_("\tAccepted\t*******\n")
else:
print_("\tRejected\n")
print_(
"\tAcceptance ratio: ",
self.accepted / (
self.accepted + self.rejected))
if self._current_iter == self.delay:
self.greedy = False
if not accept:
self.reject()
if accept or not self.greedy:
self.internal_tally()
if self._current_iter > self.delay and self._current_iter % self.interval == 0:
self.update_cov()
self._current_iter += 1 | [
"def",
"step",
"(",
"self",
")",
":",
"# Probability and likelihood for stochastic's current value:",
"logp",
"=",
"self",
".",
"logp_plus_loglike",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
"(",
"'Current value: '",
",",
"self",
".",
"stoch2array",
"... | Perform a Metropolis step.
Stochastic parameters are block-updated using a multivariate normal
distribution whose covariance is updated every self.interval once
self.delay steps have been performed.
The AM instance keeps a local copy of the stochastic parameter's trace.
This trace is used to computed the empirical covariance, and is
completely independent from the Database backend.
If self.greedy is True and the number of iterations is smaller than
self.delay, only accepted jumps are stored in the internal
trace to avoid computing singular covariance matrices. | [
"Perform",
"a",
"Metropolis",
"step",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1390-L1470 | train | 220,105 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.internal_tally | def internal_tally(self):
"""Store the trace of stochastics for the computation of the covariance.
This trace is completely independent from the backend used by the
sampler to store the samples."""
chain = []
for stochastic in self.stochastics:
chain.append(np.ravel(stochastic.value))
self._trace.append(np.concatenate(chain)) | python | def internal_tally(self):
"""Store the trace of stochastics for the computation of the covariance.
This trace is completely independent from the backend used by the
sampler to store the samples."""
chain = []
for stochastic in self.stochastics:
chain.append(np.ravel(stochastic.value))
self._trace.append(np.concatenate(chain)) | [
"def",
"internal_tally",
"(",
"self",
")",
":",
"chain",
"=",
"[",
"]",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"chain",
".",
"append",
"(",
"np",
".",
"ravel",
"(",
"stochastic",
".",
"value",
")",
")",
"self",
".",
"_trace",
"."... | Store the trace of stochastics for the computation of the covariance.
This trace is completely independent from the backend used by the
sampler to store the samples. | [
"Store",
"the",
"trace",
"of",
"stochastics",
"for",
"the",
"computation",
"of",
"the",
"covariance",
".",
"This",
"trace",
"is",
"completely",
"independent",
"from",
"the",
"backend",
"used",
"by",
"the",
"sampler",
"to",
"store",
"the",
"samples",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1479-L1486 | train | 220,106 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.trace2array | def trace2array(self, sl):
"""Return an array with the trace of all stochastics, sliced by sl."""
chain = []
for stochastic in self.stochastics:
tr = stochastic.trace.gettrace(slicing=sl)
if tr is None:
raise AttributeError
chain.append(tr)
return np.hstack(chain) | python | def trace2array(self, sl):
"""Return an array with the trace of all stochastics, sliced by sl."""
chain = []
for stochastic in self.stochastics:
tr = stochastic.trace.gettrace(slicing=sl)
if tr is None:
raise AttributeError
chain.append(tr)
return np.hstack(chain) | [
"def",
"trace2array",
"(",
"self",
",",
"sl",
")",
":",
"chain",
"=",
"[",
"]",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"tr",
"=",
"stochastic",
".",
"trace",
".",
"gettrace",
"(",
"slicing",
"=",
"sl",
")",
"if",
"tr",
"is",
"N... | Return an array with the trace of all stochastics, sliced by sl. | [
"Return",
"an",
"array",
"with",
"the",
"trace",
"of",
"all",
"stochastics",
"sliced",
"by",
"sl",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1488-L1496 | train | 220,107 |
pymc-devs/pymc | pymc/StepMethods.py | AdaptiveMetropolis.stoch2array | def stoch2array(self):
"""Return the stochastic objects as an array."""
a = np.empty(self.dim)
for stochastic in self.stochastics:
a[self._slices[stochastic]] = stochastic.value
return a | python | def stoch2array(self):
"""Return the stochastic objects as an array."""
a = np.empty(self.dim)
for stochastic in self.stochastics:
a[self._slices[stochastic]] = stochastic.value
return a | [
"def",
"stoch2array",
"(",
"self",
")",
":",
"a",
"=",
"np",
".",
"empty",
"(",
"self",
".",
"dim",
")",
"for",
"stochastic",
"in",
"self",
".",
"stochastics",
":",
"a",
"[",
"self",
".",
"_slices",
"[",
"stochastic",
"]",
"]",
"=",
"stochastic",
"... | Return the stochastic objects as an array. | [
"Return",
"the",
"stochastic",
"objects",
"as",
"an",
"array",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1498-L1503 | train | 220,108 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk.walk | def walk(self):
"""Walk proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Walk proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = 0.0 | python | def walk(self):
"""Walk proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Walk proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.walk_theta
u = random(len(phi))
z = (theta / (1 + theta)) * (theta * u ** 2 + 2 * u - 1)
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = x + phi * (x - xp) * z
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = 0.0 | [
"def",
"walk",
"(",
"self",
")",
":",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Running Walk proposal kernel'",
")",
"# Mask for values to move",
"phi",
"=",
"self",
".",
"phi",
"theta",
"=",
"s... | Walk proposal kernel | [
"Walk",
"proposal",
"kernel"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1631-L1661 | train | 220,109 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk.traverse | def traverse(self):
"""Traverse proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Traverse proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.traverse_theta
# Calculate beta
if (random() < (theta - 1) / (2 * theta)):
beta = exp(1 / (theta + 1) * log(random()))
else:
beta = exp(1 / (1 - theta) * log(random()))
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = (xp + beta * (xp - x)) * phi + x * (phi == False)
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = (sum(phi) - 2) * log(beta) | python | def traverse(self):
"""Traverse proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Traverse proposal kernel')
# Mask for values to move
phi = self.phi
theta = self.traverse_theta
# Calculate beta
if (random() < (theta - 1) / (2 * theta)):
beta = exp(1 / (theta + 1) * log(random()))
else:
beta = exp(1 / (1 - theta) * log(random()))
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value = ' + str(x))
x = (xp + beta * (xp - x)) * phi + x * (phi == False)
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.stochastic.value = x
# Set proposal adjustment factor
self.hastings_factor = (sum(phi) - 2) * log(beta) | [
"def",
"traverse",
"(",
"self",
")",
":",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Running Traverse proposal kernel'",
")",
"# Mask for values to move",
"phi",
"=",
"self",
".",
"phi",
"theta",
"... | Traverse proposal kernel | [
"Traverse",
"proposal",
"kernel"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1663-L1696 | train | 220,110 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk.blow | def blow(self):
"""Blow proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Blow proposal kernel')
# Mask for values to move
phi = self.phi
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value ' + str(x))
sigma = max(phi * abs(xp - x))
x = x + phi * sigma * rnormal()
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.hastings_factor = self._g(
x,
xp,
sigma) - self._g(
self.stochastic.value,
xp,
sigma)
self.stochastic.value = x | python | def blow(self):
"""Blow proposal kernel"""
if self.verbose > 1:
print_('\t' + self._id + ' Running Blow proposal kernel')
# Mask for values to move
phi = self.phi
if self._prime:
xp, x = self.values
else:
x, xp = self.values
if self.verbose > 1:
print_('\t' + 'Current value ' + str(x))
sigma = max(phi * abs(xp - x))
x = x + phi * sigma * rnormal()
if self.verbose > 1:
print_('\t' + 'Proposed value = ' + str(x))
self.hastings_factor = self._g(
x,
xp,
sigma) - self._g(
self.stochastic.value,
xp,
sigma)
self.stochastic.value = x | [
"def",
"blow",
"(",
"self",
")",
":",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"_id",
"+",
"' Running Blow proposal kernel'",
")",
"# Mask for values to move",
"phi",
"=",
"self",
".",
"phi",
"if",
"self",
".... | Blow proposal kernel | [
"Blow",
"proposal",
"kernel"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1698-L1730 | train | 220,111 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk._g | def _g(self, h, xp, s):
"""Density function for blow and hop moves"""
nphi = sum(self.phi)
return (nphi / 2.0) * log(2 * pi) + nphi * \
log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2) | python | def _g(self, h, xp, s):
"""Density function for blow and hop moves"""
nphi = sum(self.phi)
return (nphi / 2.0) * log(2 * pi) + nphi * \
log(s) + 0.5 * sum((h - xp) ** 2) / (s ** 2) | [
"def",
"_g",
"(",
"self",
",",
"h",
",",
"xp",
",",
"s",
")",
":",
"nphi",
"=",
"sum",
"(",
"self",
".",
"phi",
")",
"return",
"(",
"nphi",
"/",
"2.0",
")",
"*",
"log",
"(",
"2",
"*",
"pi",
")",
"+",
"nphi",
"*",
"log",
"(",
"s",
")",
"... | Density function for blow and hop moves | [
"Density",
"function",
"for",
"blow",
"and",
"hop",
"moves"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1732-L1738 | train | 220,112 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk.reject | def reject(self):
"""Sets current s value to the last accepted value"""
self.stochastic.revert()
# Increment rejected count
self.rejected[self.current_kernel] += 1
if self.verbose > 1:
print_(
self._id,
"rejected, reverting to value =",
self.stochastic.value) | python | def reject(self):
"""Sets current s value to the last accepted value"""
self.stochastic.revert()
# Increment rejected count
self.rejected[self.current_kernel] += 1
if self.verbose > 1:
print_(
self._id,
"rejected, reverting to value =",
self.stochastic.value) | [
"def",
"reject",
"(",
"self",
")",
":",
"self",
".",
"stochastic",
".",
"revert",
"(",
")",
"# Increment rejected count",
"self",
".",
"rejected",
"[",
"self",
".",
"current_kernel",
"]",
"+=",
"1",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
... | Sets current s value to the last accepted value | [
"Sets",
"current",
"s",
"value",
"to",
"the",
"last",
"accepted",
"value"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1774-L1785 | train | 220,113 |
pymc-devs/pymc | pymc/StepMethods.py | TWalk.step | def step(self):
"""Single iteration of t-walk algorithm"""
valid_proposal = False
# Use x or xprime as pivot
self._prime = (random() < 0.5)
if self.verbose > 1:
print_("\n\nUsing x%s as pivot" % (" prime" * self._prime or ""))
if self._prime:
# Set the value of the stochastic to the auxiliary
self.stochastic.value = self.values[1]
if self.verbose > 1:
print_(
self._id,
"setting value to auxiliary",
self.stochastic.value)
# Current log-probability
logp = self.logp_plus_loglike
if self.verbose > 1:
print_("Current logp", logp)
try:
# Propose new value
while not valid_proposal:
self.propose()
# Check that proposed value lies in support
valid_proposal = self._support(self.stochastic.value)
if not sum(self.phi):
raise ZeroProbability
# Proposed log-probability
logp_p = self.logp_plus_loglike
if self.verbose > 1:
print_("Proposed logp", logp_p)
except ZeroProbability:
# Reject proposal
if self.verbose > 1:
print_(self._id + ' rejecting due to ZeroProbability.')
self.reject()
if self._prime:
# Update value list
self.values[1] = self.stochastic.value
# Revert to stochastic's value for next iteration
self.stochastic.value = self.values[0]
if self.verbose > 1:
print_(
self._id,
"reverting stochastic to primary value",
self.stochastic.value)
else:
# Update value list
self.values[0] = self.stochastic.value
if self.verbose > 1:
print_(self._id + ' returning.')
return
if self.verbose > 1:
print_('logp_p - logp: ', logp_p - logp)
# Evaluate acceptance ratio
if log(random()) > (logp_p - logp + self.hastings_factor):
# Revert s if fail
self.reject()
else:
# Increment accepted count
self.accepted[self.current_kernel] += 1
if self.verbose > 1:
print_(self._id + ' accepting')
if self._prime:
# Update value list
self.values[1] = self.stochastic.value
# Revert to stochastic's value for next iteration
self.stochastic.value = self.values[0]
if self.verbose > 1:
print_(
self._id,
"reverting stochastic to primary value",
self.stochastic.value)
else:
# Update value list
self.values[0] = self.stochastic.value | python | def step(self):
"""Single iteration of t-walk algorithm"""
valid_proposal = False
# Use x or xprime as pivot
self._prime = (random() < 0.5)
if self.verbose > 1:
print_("\n\nUsing x%s as pivot" % (" prime" * self._prime or ""))
if self._prime:
# Set the value of the stochastic to the auxiliary
self.stochastic.value = self.values[1]
if self.verbose > 1:
print_(
self._id,
"setting value to auxiliary",
self.stochastic.value)
# Current log-probability
logp = self.logp_plus_loglike
if self.verbose > 1:
print_("Current logp", logp)
try:
# Propose new value
while not valid_proposal:
self.propose()
# Check that proposed value lies in support
valid_proposal = self._support(self.stochastic.value)
if not sum(self.phi):
raise ZeroProbability
# Proposed log-probability
logp_p = self.logp_plus_loglike
if self.verbose > 1:
print_("Proposed logp", logp_p)
except ZeroProbability:
# Reject proposal
if self.verbose > 1:
print_(self._id + ' rejecting due to ZeroProbability.')
self.reject()
if self._prime:
# Update value list
self.values[1] = self.stochastic.value
# Revert to stochastic's value for next iteration
self.stochastic.value = self.values[0]
if self.verbose > 1:
print_(
self._id,
"reverting stochastic to primary value",
self.stochastic.value)
else:
# Update value list
self.values[0] = self.stochastic.value
if self.verbose > 1:
print_(self._id + ' returning.')
return
if self.verbose > 1:
print_('logp_p - logp: ', logp_p - logp)
# Evaluate acceptance ratio
if log(random()) > (logp_p - logp + self.hastings_factor):
# Revert s if fail
self.reject()
else:
# Increment accepted count
self.accepted[self.current_kernel] += 1
if self.verbose > 1:
print_(self._id + ' accepting')
if self._prime:
# Update value list
self.values[1] = self.stochastic.value
# Revert to stochastic's value for next iteration
self.stochastic.value = self.values[0]
if self.verbose > 1:
print_(
self._id,
"reverting stochastic to primary value",
self.stochastic.value)
else:
# Update value list
self.values[0] = self.stochastic.value | [
"def",
"step",
"(",
"self",
")",
":",
"valid_proposal",
"=",
"False",
"# Use x or xprime as pivot",
"self",
".",
"_prime",
"=",
"(",
"random",
"(",
")",
"<",
"0.5",
")",
"if",
"self",
".",
"verbose",
">",
"1",
":",
"print_",
"(",
"\"\\n\\nUsing x%s as pivo... | Single iteration of t-walk algorithm | [
"Single",
"iteration",
"of",
"t",
"-",
"walk",
"algorithm"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1800-L1896 | train | 220,114 |
pymc-devs/pymc | pymc/StepMethods.py | Slicer.step | def step(self):
"""
Slice step method
From Neal 2003 (doi:10.1214/aos/1056562461)
"""
logy = self.loglike - rexponential(1)
L = self.stochastic.value - runiform(0, self.w)
R = L + self.w
if self.doubling:
# Doubling procedure
K = self.m
while (K and (logy < self.fll(L) or logy < self.fll(R))):
if random() < 0.5:
L -= R - L
else:
R += R - L
K -= 1
else:
# Stepping out procedure
J = np.floor(runiform(0, self.m))
K = (self.m - 1) - J
while(J > 0 and logy < self.fll(L)):
L -= self.w
J -= 1
while(K > 0 and logy < self.fll(R)):
R += self.w
K -= 1
# Shrinkage procedure
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty
while(logy_new < logy):
if (self.stochastic.value < self.stochastic.last_value):
L = float(self.stochastic.value)
else:
R = float(self.stochastic.value)
self.stochastic.revert()
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty | python | def step(self):
"""
Slice step method
From Neal 2003 (doi:10.1214/aos/1056562461)
"""
logy = self.loglike - rexponential(1)
L = self.stochastic.value - runiform(0, self.w)
R = L + self.w
if self.doubling:
# Doubling procedure
K = self.m
while (K and (logy < self.fll(L) or logy < self.fll(R))):
if random() < 0.5:
L -= R - L
else:
R += R - L
K -= 1
else:
# Stepping out procedure
J = np.floor(runiform(0, self.m))
K = (self.m - 1) - J
while(J > 0 and logy < self.fll(L)):
L -= self.w
J -= 1
while(K > 0 and logy < self.fll(R)):
R += self.w
K -= 1
# Shrinkage procedure
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty
while(logy_new < logy):
if (self.stochastic.value < self.stochastic.last_value):
L = float(self.stochastic.value)
else:
R = float(self.stochastic.value)
self.stochastic.revert()
self.stochastic.value = runiform(L, R)
try:
logy_new = self.loglike
except ZeroProbability:
logy_new = -np.infty | [
"def",
"step",
"(",
"self",
")",
":",
"logy",
"=",
"self",
".",
"loglike",
"-",
"rexponential",
"(",
"1",
")",
"L",
"=",
"self",
".",
"stochastic",
".",
"value",
"-",
"runiform",
"(",
"0",
",",
"self",
".",
"w",
")",
"R",
"=",
"L",
"+",
"self",... | Slice step method
From Neal 2003 (doi:10.1214/aos/1056562461) | [
"Slice",
"step",
"method"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L1960-L2007 | train | 220,115 |
pymc-devs/pymc | pymc/StepMethods.py | Slicer.fll | def fll(self, value):
"""
Returns loglike of value
"""
self.stochastic.value = value
try:
ll = self.loglike
except ZeroProbability:
ll = -np.infty
self.stochastic.revert()
return ll | python | def fll(self, value):
"""
Returns loglike of value
"""
self.stochastic.value = value
try:
ll = self.loglike
except ZeroProbability:
ll = -np.infty
self.stochastic.revert()
return ll | [
"def",
"fll",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"stochastic",
".",
"value",
"=",
"value",
"try",
":",
"ll",
"=",
"self",
".",
"loglike",
"except",
"ZeroProbability",
":",
"ll",
"=",
"-",
"np",
".",
"infty",
"self",
".",
"stochastic",
... | Returns loglike of value | [
"Returns",
"loglike",
"of",
"value"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L2009-L2019 | train | 220,116 |
pymc-devs/pymc | pymc/StepMethods.py | Slicer.tune | def tune(self, verbose=None):
"""
Tuning initial slice width parameter
"""
if not self._tune:
return False
else:
self.w_tune.append(
abs(self.stochastic.last_value - self.stochastic.value))
self.w = 2 * (sum(self.w_tune) / len(self.w_tune))
return True | python | def tune(self, verbose=None):
"""
Tuning initial slice width parameter
"""
if not self._tune:
return False
else:
self.w_tune.append(
abs(self.stochastic.last_value - self.stochastic.value))
self.w = 2 * (sum(self.w_tune) / len(self.w_tune))
return True | [
"def",
"tune",
"(",
"self",
",",
"verbose",
"=",
"None",
")",
":",
"if",
"not",
"self",
".",
"_tune",
":",
"return",
"False",
"else",
":",
"self",
".",
"w_tune",
".",
"append",
"(",
"abs",
"(",
"self",
".",
"stochastic",
".",
"last_value",
"-",
"se... | Tuning initial slice width parameter | [
"Tuning",
"initial",
"slice",
"width",
"parameter"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/StepMethods.py#L2021-L2031 | train | 220,117 |
pymc-devs/pymc | pymc/database/ram.py | Trace.tally | def tally(self, chain):
"""Store the object's current value to a chain.
:Parameters:
chain : integer
Chain index.
"""
value = self._getfunc()
try:
self._trace[chain][self._index[chain]] = value.copy()
except AttributeError:
self._trace[chain][self._index[chain]] = value
self._index[chain] += 1 | python | def tally(self, chain):
"""Store the object's current value to a chain.
:Parameters:
chain : integer
Chain index.
"""
value = self._getfunc()
try:
self._trace[chain][self._index[chain]] = value.copy()
except AttributeError:
self._trace[chain][self._index[chain]] = value
self._index[chain] += 1 | [
"def",
"tally",
"(",
"self",
",",
"chain",
")",
":",
"value",
"=",
"self",
".",
"_getfunc",
"(",
")",
"try",
":",
"self",
".",
"_trace",
"[",
"chain",
"]",
"[",
"self",
".",
"_index",
"[",
"chain",
"]",
"]",
"=",
"value",
".",
"copy",
"(",
")",... | Store the object's current value to a chain.
:Parameters:
chain : integer
Chain index. | [
"Store",
"the",
"object",
"s",
"current",
"value",
"to",
"a",
"chain",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/ram.py#L86-L100 | train | 220,118 |
pymc-devs/pymc | pymc/database/ram.py | Trace.truncate | def truncate(self, index, chain):
"""
Truncate the trace array to some index.
:Parameters:
index : int
The index within the chain after which all values will be removed.
chain : int
The chain index (>=0).
"""
self._trace[chain] = self._trace[chain][:index] | python | def truncate(self, index, chain):
"""
Truncate the trace array to some index.
:Parameters:
index : int
The index within the chain after which all values will be removed.
chain : int
The chain index (>=0).
"""
self._trace[chain] = self._trace[chain][:index] | [
"def",
"truncate",
"(",
"self",
",",
"index",
",",
"chain",
")",
":",
"self",
".",
"_trace",
"[",
"chain",
"]",
"=",
"self",
".",
"_trace",
"[",
"chain",
"]",
"[",
":",
"index",
"]"
] | Truncate the trace array to some index.
:Parameters:
index : int
The index within the chain after which all values will be removed.
chain : int
The chain index (>=0). | [
"Truncate",
"the",
"trace",
"array",
"to",
"some",
"index",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/ram.py#L102-L112 | train | 220,119 |
pymc-devs/pymc | pymc/database/ram.py | Trace.gettrace | def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace.
:Stochastics:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all chains.
- slicing: A slice, overriding burn and thin assignement.
"""
if slicing is None:
slicing = slice(burn, None, thin)
if chain is not None:
if chain < 0:
chain = range(self.db.chains)[chain]
return self._trace[chain][slicing]
else:
return concatenate(list(self._trace.values()))[slicing] | python | def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace.
:Stochastics:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all chains.
- slicing: A slice, overriding burn and thin assignement.
"""
if slicing is None:
slicing = slice(burn, None, thin)
if chain is not None:
if chain < 0:
chain = range(self.db.chains)[chain]
return self._trace[chain][slicing]
else:
return concatenate(list(self._trace.values()))[slicing] | [
"def",
"gettrace",
"(",
"self",
",",
"burn",
"=",
"0",
",",
"thin",
"=",
"1",
",",
"chain",
"=",
"-",
"1",
",",
"slicing",
"=",
"None",
")",
":",
"if",
"slicing",
"is",
"None",
":",
"slicing",
"=",
"slice",
"(",
"burn",
",",
"None",
",",
"thin"... | Return the trace.
:Stochastics:
- burn (int): The number of transient steps to skip.
- thin (int): Keep one in thin.
- chain (int): The index of the chain to fetch. If None, return all chains.
- slicing: A slice, overriding burn and thin assignement. | [
"Return",
"the",
"trace",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/ram.py#L114-L130 | train | 220,120 |
pymc-devs/pymc | pymc/database/hdf5ea.py | load | def load(dbname, dbmode='a'):
"""Load an existing hdf5 database.
Return a Database instance.
:Parameters:
filename : string
Name of the hdf5 database to open.
mode : 'a', 'r'
File mode : 'a': append, 'r': read-only.
"""
if dbmode == 'w':
raise AttributeError("dbmode='w' not allowed for load.")
db = Database(dbname, dbmode=dbmode)
return db | python | def load(dbname, dbmode='a'):
"""Load an existing hdf5 database.
Return a Database instance.
:Parameters:
filename : string
Name of the hdf5 database to open.
mode : 'a', 'r'
File mode : 'a': append, 'r': read-only.
"""
if dbmode == 'w':
raise AttributeError("dbmode='w' not allowed for load.")
db = Database(dbname, dbmode=dbmode)
return db | [
"def",
"load",
"(",
"dbname",
",",
"dbmode",
"=",
"'a'",
")",
":",
"if",
"dbmode",
"==",
"'w'",
":",
"raise",
"AttributeError",
"(",
"\"dbmode='w' not allowed for load.\"",
")",
"db",
"=",
"Database",
"(",
"dbname",
",",
"dbmode",
"=",
"dbmode",
")",
"retu... | Load an existing hdf5 database.
Return a Database instance.
:Parameters:
filename : string
Name of the hdf5 database to open.
mode : 'a', 'r'
File mode : 'a': append, 'r': read-only. | [
"Load",
"an",
"existing",
"hdf5",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5ea.py#L300-L315 | train | 220,121 |
pymc-devs/pymc | pymc/database/txt.py | load | def load(dirname):
"""Create a Database instance from the data stored in the directory."""
if not os.path.exists(dirname):
raise AttributeError('No txt database named %s' % dirname)
db = Database(dirname, dbmode='a')
chain_folders = [os.path.join(dirname, c) for c in db.get_chains()]
db.chains = len(chain_folders)
data = {}
for chain, folder in enumerate(chain_folders):
files = os.listdir(folder)
funnames = funname(files)
db.trace_names.append(funnames)
for file in files:
name = funname(file)
if name not in data:
data[
name] = {
} # This could be simplified using "collections.defaultdict(dict)". New in Python 2.5
# Read the shape information
with open(os.path.join(folder, file)) as f:
f.readline()
shape = eval(f.readline()[16:])
data[
name][
chain] = np.loadtxt(
os.path.join(
folder,
file),
delimiter=',').reshape(
shape)
f.close()
# Create the Traces.
for name, values in six.iteritems(data):
db._traces[name] = Trace(name=name, value=values, db=db)
setattr(db, name, db._traces[name])
# Load the state.
statefile = os.path.join(dirname, 'state.txt')
if os.path.exists(statefile):
with open(statefile, 'r') as f:
db._state_ = eval(f.read())
else:
db._state_ = {}
return db | python | def load(dirname):
"""Create a Database instance from the data stored in the directory."""
if not os.path.exists(dirname):
raise AttributeError('No txt database named %s' % dirname)
db = Database(dirname, dbmode='a')
chain_folders = [os.path.join(dirname, c) for c in db.get_chains()]
db.chains = len(chain_folders)
data = {}
for chain, folder in enumerate(chain_folders):
files = os.listdir(folder)
funnames = funname(files)
db.trace_names.append(funnames)
for file in files:
name = funname(file)
if name not in data:
data[
name] = {
} # This could be simplified using "collections.defaultdict(dict)". New in Python 2.5
# Read the shape information
with open(os.path.join(folder, file)) as f:
f.readline()
shape = eval(f.readline()[16:])
data[
name][
chain] = np.loadtxt(
os.path.join(
folder,
file),
delimiter=',').reshape(
shape)
f.close()
# Create the Traces.
for name, values in six.iteritems(data):
db._traces[name] = Trace(name=name, value=values, db=db)
setattr(db, name, db._traces[name])
# Load the state.
statefile = os.path.join(dirname, 'state.txt')
if os.path.exists(statefile):
with open(statefile, 'r') as f:
db._state_ = eval(f.read())
else:
db._state_ = {}
return db | [
"def",
"load",
"(",
"dirname",
")",
":",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"dirname",
")",
":",
"raise",
"AttributeError",
"(",
"'No txt database named %s'",
"%",
"dirname",
")",
"db",
"=",
"Database",
"(",
"dirname",
",",
"dbmode",
"="... | Create a Database instance from the data stored in the directory. | [
"Create",
"a",
"Database",
"instance",
"from",
"the",
"data",
"stored",
"in",
"the",
"directory",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/txt.py#L148-L195 | train | 220,122 |
pymc-devs/pymc | pymc/database/txt.py | funname | def funname(file):
"""Return variable names from file names."""
if isinstance(file, str):
files = [file]
else:
files = file
bases = [os.path.basename(f) for f in files]
names = [os.path.splitext(b)[0] for b in bases]
if isinstance(file, str):
return names[0]
else:
return names | python | def funname(file):
"""Return variable names from file names."""
if isinstance(file, str):
files = [file]
else:
files = file
bases = [os.path.basename(f) for f in files]
names = [os.path.splitext(b)[0] for b in bases]
if isinstance(file, str):
return names[0]
else:
return names | [
"def",
"funname",
"(",
"file",
")",
":",
"if",
"isinstance",
"(",
"file",
",",
"str",
")",
":",
"files",
"=",
"[",
"file",
"]",
"else",
":",
"files",
"=",
"file",
"bases",
"=",
"[",
"os",
".",
"path",
".",
"basename",
"(",
"f",
")",
"for",
"f",... | Return variable names from file names. | [
"Return",
"variable",
"names",
"from",
"file",
"names",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/txt.py#L198-L209 | train | 220,123 |
pymc-devs/pymc | pymc/database/txt.py | Trace._finalize | def _finalize(self, chain):
"""Write the trace to an ASCII file.
:Parameter:
chain : int
The chain index.
"""
path = os.path.join(
self.db._directory,
self.db.get_chains()[chain],
self.name + '.txt')
arr = self.gettrace(chain=chain)
# Following numpy's example.
if six.PY3:
mode = 'wb'
else:
mode = 'w'
with open(path, mode) as f:
f.write(six.b('# Variable: %s\n' % self.name))
f.write(six.b('# Sample shape: %s\n' % str(arr.shape)))
f.write(six.b('# Date: %s\n' % datetime.datetime.now()))
np.savetxt(f, arr.reshape((-1, arr[0].size)), delimiter=',') | python | def _finalize(self, chain):
"""Write the trace to an ASCII file.
:Parameter:
chain : int
The chain index.
"""
path = os.path.join(
self.db._directory,
self.db.get_chains()[chain],
self.name + '.txt')
arr = self.gettrace(chain=chain)
# Following numpy's example.
if six.PY3:
mode = 'wb'
else:
mode = 'w'
with open(path, mode) as f:
f.write(six.b('# Variable: %s\n' % self.name))
f.write(six.b('# Sample shape: %s\n' % str(arr.shape)))
f.write(six.b('# Date: %s\n' % datetime.datetime.now()))
np.savetxt(f, arr.reshape((-1, arr[0].size)), delimiter=',') | [
"def",
"_finalize",
"(",
"self",
",",
"chain",
")",
":",
"path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"db",
".",
"_directory",
",",
"self",
".",
"db",
".",
"get_chains",
"(",
")",
"[",
"chain",
"]",
",",
"self",
".",
"name",
"... | Write the trace to an ASCII file.
:Parameter:
chain : int
The chain index. | [
"Write",
"the",
"trace",
"to",
"an",
"ASCII",
"file",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/txt.py#L59-L81 | train | 220,124 |
pymc-devs/pymc | pymc/database/txt.py | Database._initialize | def _initialize(self, funs_to_tally, length):
"""Create folder to store simulation results."""
dir = os.path.join(self._directory, CHAIN_NAME % self.chains)
os.mkdir(dir)
base.Database._initialize(self, funs_to_tally, length) | python | def _initialize(self, funs_to_tally, length):
"""Create folder to store simulation results."""
dir = os.path.join(self._directory, CHAIN_NAME % self.chains)
os.mkdir(dir)
base.Database._initialize(self, funs_to_tally, length) | [
"def",
"_initialize",
"(",
"self",
",",
"funs_to_tally",
",",
"length",
")",
":",
"dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"_directory",
",",
"CHAIN_NAME",
"%",
"self",
".",
"chains",
")",
"os",
".",
"mkdir",
"(",
"dir",
")",
"... | Create folder to store simulation results. | [
"Create",
"folder",
"to",
"store",
"simulation",
"results",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/txt.py#L129-L135 | train | 220,125 |
pymc-devs/pymc | pymc/database/txt.py | Database.savestate | def savestate(self, state):
"""Save the sampler's state in a state.txt file."""
oldstate = np.get_printoptions()
np.set_printoptions(threshold=1e6)
try:
with open(os.path.join(self._directory, 'state.txt'), 'w') as f:
print_(state, file=f)
finally:
np.set_printoptions(**oldstate) | python | def savestate(self, state):
"""Save the sampler's state in a state.txt file."""
oldstate = np.get_printoptions()
np.set_printoptions(threshold=1e6)
try:
with open(os.path.join(self._directory, 'state.txt'), 'w') as f:
print_(state, file=f)
finally:
np.set_printoptions(**oldstate) | [
"def",
"savestate",
"(",
"self",
",",
"state",
")",
":",
"oldstate",
"=",
"np",
".",
"get_printoptions",
"(",
")",
"np",
".",
"set_printoptions",
"(",
"threshold",
"=",
"1e6",
")",
"try",
":",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",... | Save the sampler's state in a state.txt file. | [
"Save",
"the",
"sampler",
"s",
"state",
"in",
"a",
"state",
".",
"txt",
"file",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/txt.py#L137-L145 | train | 220,126 |
pymc-devs/pymc | pymc/examples/disaster_model_missing.py | rate | def rate(s=switch, e=early_mean, l=late_mean):
"""Allocate appropriate mean to time series"""
out = np.empty(len(disasters_array))
# Early mean prior to switchpoint
out[:s] = e
# Late mean following switchpoint
out[s:] = l
return out | python | def rate(s=switch, e=early_mean, l=late_mean):
"""Allocate appropriate mean to time series"""
out = np.empty(len(disasters_array))
# Early mean prior to switchpoint
out[:s] = e
# Late mean following switchpoint
out[s:] = l
return out | [
"def",
"rate",
"(",
"s",
"=",
"switch",
",",
"e",
"=",
"early_mean",
",",
"l",
"=",
"late_mean",
")",
":",
"out",
"=",
"np",
".",
"empty",
"(",
"len",
"(",
"disasters_array",
")",
")",
"# Early mean prior to switchpoint",
"out",
"[",
":",
"s",
"]",
"... | Allocate appropriate mean to time series | [
"Allocate",
"appropriate",
"mean",
"to",
"time",
"series"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/disaster_model_missing.py#L36-L43 | train | 220,127 |
pymc-devs/pymc | pymc/gp/GPutils.py | fast_matrix_copy | def fast_matrix_copy(f, t=None, n_threads=1):
"""
Not any faster than a serial copy so far.
"""
if not f.flags['F_CONTIGUOUS']:
raise RuntimeError(
'This will not be fast unless input array f is Fortran-contiguous.')
if t is None:
t = asmatrix(empty(f.shape, order='F'))
elif not t.flags['F_CONTIGUOUS']:
raise RuntimeError(
'This will not be fast unless input array t is Fortran-contiguous.')
# Figure out how to divide job up between threads.
dcopy_wrap(ravel(asarray(f.T)), ravel(asarray(t.T)))
return t | python | def fast_matrix_copy(f, t=None, n_threads=1):
"""
Not any faster than a serial copy so far.
"""
if not f.flags['F_CONTIGUOUS']:
raise RuntimeError(
'This will not be fast unless input array f is Fortran-contiguous.')
if t is None:
t = asmatrix(empty(f.shape, order='F'))
elif not t.flags['F_CONTIGUOUS']:
raise RuntimeError(
'This will not be fast unless input array t is Fortran-contiguous.')
# Figure out how to divide job up between threads.
dcopy_wrap(ravel(asarray(f.T)), ravel(asarray(t.T)))
return t | [
"def",
"fast_matrix_copy",
"(",
"f",
",",
"t",
"=",
"None",
",",
"n_threads",
"=",
"1",
")",
":",
"if",
"not",
"f",
".",
"flags",
"[",
"'F_CONTIGUOUS'",
"]",
":",
"raise",
"RuntimeError",
"(",
"'This will not be fast unless input array f is Fortran-contiguous.'",
... | Not any faster than a serial copy so far. | [
"Not",
"any",
"faster",
"than",
"a",
"serial",
"copy",
"so",
"far",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/GPutils.py#L34-L50 | train | 220,128 |
pymc-devs/pymc | pymc/gp/GPutils.py | vecs_to_datmesh | def vecs_to_datmesh(x, y):
"""
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
"""
x, y = meshgrid(x, y)
out = zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
return out | python | def vecs_to_datmesh(x, y):
"""
Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations.
"""
x, y = meshgrid(x, y)
out = zeros(x.shape + (2,), dtype=float)
out[:, :, 0] = x
out[:, :, 1] = y
return out | [
"def",
"vecs_to_datmesh",
"(",
"x",
",",
"y",
")",
":",
"x",
",",
"y",
"=",
"meshgrid",
"(",
"x",
",",
"y",
")",
"out",
"=",
"zeros",
"(",
"x",
".",
"shape",
"+",
"(",
"2",
",",
")",
",",
"dtype",
"=",
"float",
")",
"out",
"[",
":",
",",
... | Converts input arguments x and y to a 2d meshgrid,
suitable for calling Means, Covariances and Realizations. | [
"Converts",
"input",
"arguments",
"x",
"and",
"y",
"to",
"a",
"2d",
"meshgrid",
"suitable",
"for",
"calling",
"Means",
"Covariances",
"and",
"Realizations",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/GPutils.py#L150-L159 | train | 220,129 |
pymc-devs/pymc | pymc/database/base.py | batchsd | def batchsd(trace, batches=5):
"""
Calculates the simulation standard error, accounting for non-independent
samples. The trace is divided into batches, and the standard deviation of
the batch means is calculated.
"""
if len(np.shape(trace)) > 1:
dims = np.shape(trace)
# ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
ttrace = np.transpose([t.ravel() for t in trace])
return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])
else:
if batches == 1:
return np.std(trace) / np.sqrt(len(trace))
try:
batched_traces = np.resize(trace, (batches, int(len(trace) / batches)))
except ValueError:
# If batches do not divide evenly, trim excess samples
resid = len(trace) % batches
batched_traces = np.resize(trace[:-resid],
(batches, len(trace[:-resid]) / batches))
means = np.mean(batched_traces, 1)
return np.std(means) / np.sqrt(batches) | python | def batchsd(trace, batches=5):
"""
Calculates the simulation standard error, accounting for non-independent
samples. The trace is divided into batches, and the standard deviation of
the batch means is calculated.
"""
if len(np.shape(trace)) > 1:
dims = np.shape(trace)
# ttrace = np.transpose(np.reshape(trace, (dims[0], sum(dims[1:]))))
ttrace = np.transpose([t.ravel() for t in trace])
return np.reshape([batchsd(t, batches) for t in ttrace], dims[1:])
else:
if batches == 1:
return np.std(trace) / np.sqrt(len(trace))
try:
batched_traces = np.resize(trace, (batches, int(len(trace) / batches)))
except ValueError:
# If batches do not divide evenly, trim excess samples
resid = len(trace) % batches
batched_traces = np.resize(trace[:-resid],
(batches, len(trace[:-resid]) / batches))
means = np.mean(batched_traces, 1)
return np.std(means) / np.sqrt(batches) | [
"def",
"batchsd",
"(",
"trace",
",",
"batches",
"=",
"5",
")",
":",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"trace",
")",
")",
">",
"1",
":",
"dims",
"=",
"np",
".",
"shape",
"(",
"trace",
")",
"# ttrace = np.transpose(np.reshape(trace, (dims[0], sum(... | Calculates the simulation standard error, accounting for non-independent
samples. The trace is divided into batches, and the standard deviation of
the batch means is calculated. | [
"Calculates",
"the",
"simulation",
"standard",
"error",
"accounting",
"for",
"non",
"-",
"independent",
"samples",
".",
"The",
"trace",
"is",
"divided",
"into",
"batches",
"and",
"the",
"standard",
"deviation",
"of",
"the",
"batch",
"means",
"is",
"calculated",
... | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L395-L424 | train | 220,130 |
pymc-devs/pymc | pymc/database/base.py | Trace._initialize | def _initialize(self, chain, length):
"""Prepare for tallying. Create a new chain."""
# If this db was loaded from the disk, it may not have its
# tallied step methods' getfuncs yet.
if self._getfunc is None:
self._getfunc = self.db.model._funs_to_tally[self.name] | python | def _initialize(self, chain, length):
"""Prepare for tallying. Create a new chain."""
# If this db was loaded from the disk, it may not have its
# tallied step methods' getfuncs yet.
if self._getfunc is None:
self._getfunc = self.db.model._funs_to_tally[self.name] | [
"def",
"_initialize",
"(",
"self",
",",
"chain",
",",
"length",
")",
":",
"# If this db was loaded from the disk, it may not have its",
"# tallied step methods' getfuncs yet.",
"if",
"self",
".",
"_getfunc",
"is",
"None",
":",
"self",
".",
"_getfunc",
"=",
"self",
"."... | Prepare for tallying. Create a new chain. | [
"Prepare",
"for",
"tallying",
".",
"Create",
"a",
"new",
"chain",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L88-L93 | train | 220,131 |
pymc-devs/pymc | pymc/database/base.py | Database._initialize | def _initialize(self, funs_to_tally, length=None):
"""Initialize the tallyable objects.
Makes sure a Trace object exists for each variable and then initialize
the Traces.
:Parameters:
funs_to_tally : dict
Name- function pairs.
length : int
The expected length of the chain. Some database may need the argument
to preallocate memory.
"""
for name, fun in six.iteritems(funs_to_tally):
if name not in self._traces:
self._traces[
name] = self.__Trace__(
name=name,
getfunc=fun,
db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(list(funs_to_tally.keys()))
self.chains += 1 | python | def _initialize(self, funs_to_tally, length=None):
"""Initialize the tallyable objects.
Makes sure a Trace object exists for each variable and then initialize
the Traces.
:Parameters:
funs_to_tally : dict
Name- function pairs.
length : int
The expected length of the chain. Some database may need the argument
to preallocate memory.
"""
for name, fun in six.iteritems(funs_to_tally):
if name not in self._traces:
self._traces[
name] = self.__Trace__(
name=name,
getfunc=fun,
db=self)
self._traces[name]._initialize(self.chains, length)
self.trace_names.append(list(funs_to_tally.keys()))
self.chains += 1 | [
"def",
"_initialize",
"(",
"self",
",",
"funs_to_tally",
",",
"length",
"=",
"None",
")",
":",
"for",
"name",
",",
"fun",
"in",
"six",
".",
"iteritems",
"(",
"funs_to_tally",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"_traces",
":",
"self",
"... | Initialize the tallyable objects.
Makes sure a Trace object exists for each variable and then initialize
the Traces.
:Parameters:
funs_to_tally : dict
Name- function pairs.
length : int
The expected length of the chain. Some database may need the argument
to preallocate memory. | [
"Initialize",
"the",
"tallyable",
"objects",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L232-L258 | train | 220,132 |
pymc-devs/pymc | pymc/database/base.py | Database._finalize | def _finalize(self, chain=-1):
"""Finalize the chain for all tallyable objects."""
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name]._finalize(chain)
self.commit() | python | def _finalize(self, chain=-1):
"""Finalize the chain for all tallyable objects."""
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name]._finalize(chain)
self.commit() | [
"def",
"_finalize",
"(",
"self",
",",
"chain",
"=",
"-",
"1",
")",
":",
"chain",
"=",
"range",
"(",
"self",
".",
"chains",
")",
"[",
"chain",
"]",
"for",
"name",
"in",
"self",
".",
"trace_names",
"[",
"chain",
"]",
":",
"self",
".",
"_traces",
"[... | Finalize the chain for all tallyable objects. | [
"Finalize",
"the",
"chain",
"for",
"all",
"tallyable",
"objects",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L332-L337 | train | 220,133 |
pymc-devs/pymc | pymc/database/base.py | Database.truncate | def truncate(self, index, chain=-1):
"""Tell the traces to truncate themselves at the given index."""
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name].truncate(index, chain) | python | def truncate(self, index, chain=-1):
"""Tell the traces to truncate themselves at the given index."""
chain = range(self.chains)[chain]
for name in self.trace_names[chain]:
self._traces[name].truncate(index, chain) | [
"def",
"truncate",
"(",
"self",
",",
"index",
",",
"chain",
"=",
"-",
"1",
")",
":",
"chain",
"=",
"range",
"(",
"self",
".",
"chains",
")",
"[",
"chain",
"]",
"for",
"name",
"in",
"self",
".",
"trace_names",
"[",
"chain",
"]",
":",
"self",
".",
... | Tell the traces to truncate themselves at the given index. | [
"Tell",
"the",
"traces",
"to",
"truncate",
"themselves",
"at",
"the",
"given",
"index",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/base.py#L339-L343 | train | 220,134 |
pymc-devs/pymc | pymc/gp/Mean.py | Mean.observe | def observe(self, C, obs_mesh_new, obs_vals_new, mean_under=None):
"""
Synchronizes self's observation status with C's.
Values of observation are given by obs_vals.
obs_mesh_new and obs_vals_new should already have
been sliced, as Covariance.observe(..., output_type='o') does.
"""
self.C = C
self.obs_mesh = C.obs_mesh
self.obs_len = C.obs_len
self.Uo = C.Uo
# Evaluate the underlying mean function on the new observation mesh.
if mean_under is None:
mean_under_new = C._mean_under_new(self, obs_mesh_new)
else:
mean_under_new = mean_under
# If self hasn't been observed yet:
if not self.observed:
self.dev = (obs_vals_new - mean_under_new)
self.reg_mat = C._unobs_reg(self)
# If self has been observed already:
elif len(obs_vals_new) > 0:
# Rank of old observations.
m_old = len(self.dev)
# Deviation of new observation from mean without regard to old
# observations.
dev_new = (obs_vals_new - mean_under_new)
# Again, basis covariances get special treatment.
self.reg_mat = C._obs_reg(self, dev_new, m_old)
# Stack deviations of old and new observations from unobserved
# mean.
self.dev = hstack((self.dev, dev_new))
self.observed = True | python | def observe(self, C, obs_mesh_new, obs_vals_new, mean_under=None):
"""
Synchronizes self's observation status with C's.
Values of observation are given by obs_vals.
obs_mesh_new and obs_vals_new should already have
been sliced, as Covariance.observe(..., output_type='o') does.
"""
self.C = C
self.obs_mesh = C.obs_mesh
self.obs_len = C.obs_len
self.Uo = C.Uo
# Evaluate the underlying mean function on the new observation mesh.
if mean_under is None:
mean_under_new = C._mean_under_new(self, obs_mesh_new)
else:
mean_under_new = mean_under
# If self hasn't been observed yet:
if not self.observed:
self.dev = (obs_vals_new - mean_under_new)
self.reg_mat = C._unobs_reg(self)
# If self has been observed already:
elif len(obs_vals_new) > 0:
# Rank of old observations.
m_old = len(self.dev)
# Deviation of new observation from mean without regard to old
# observations.
dev_new = (obs_vals_new - mean_under_new)
# Again, basis covariances get special treatment.
self.reg_mat = C._obs_reg(self, dev_new, m_old)
# Stack deviations of old and new observations from unobserved
# mean.
self.dev = hstack((self.dev, dev_new))
self.observed = True | [
"def",
"observe",
"(",
"self",
",",
"C",
",",
"obs_mesh_new",
",",
"obs_vals_new",
",",
"mean_under",
"=",
"None",
")",
":",
"self",
".",
"C",
"=",
"C",
"self",
".",
"obs_mesh",
"=",
"C",
".",
"obs_mesh",
"self",
".",
"obs_len",
"=",
"C",
".",
"obs... | Synchronizes self's observation status with C's.
Values of observation are given by obs_vals.
obs_mesh_new and obs_vals_new should already have
been sliced, as Covariance.observe(..., output_type='o') does. | [
"Synchronizes",
"self",
"s",
"observation",
"status",
"with",
"C",
"s",
".",
"Values",
"of",
"observation",
"are",
"given",
"by",
"obs_vals",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/Mean.py#L49-L93 | train | 220,135 |
pymc-devs/pymc | pymc/InstantiationDecorators.py | _extract | def _extract(__func__, kwds, keys, classname, probe=True):
"""
Used by decorators stochastic and deterministic to inspect declarations
"""
# Add docs and name
kwds['doc'] = __func__.__doc__
if not 'name' in kwds:
kwds['name'] = __func__.__name__
# kwds.update({'doc':__func__.__doc__, 'name':__func__.__name__})
# Instanitate dictionary of parents
parents = {}
# This gets used by stochastic to check for long-format logp and random:
if probe:
cur_status = check_special_methods()
disable_special_methods()
# Define global tracing function (I assume this is for debugging??)
# No, it's to get out the logp and random functions, if they're in
# there.
def probeFunc(frame, event, arg):
if event == 'return':
locals = frame.f_locals
kwds.update(dict((k, locals.get(k)) for k in keys))
sys.settrace(None)
return probeFunc
sys.settrace(probeFunc)
# Get the functions logp and random (complete interface).
# Disable special methods to prevent the formation of a hurricane of
# Deterministics
try:
__func__()
except:
if 'logp' in keys:
kwds['logp'] = __func__
else:
kwds['eval'] = __func__
# Reenable special methods.
if cur_status:
enable_special_methods()
for key in keys:
if not key in kwds:
kwds[key] = None
for key in ['logp', 'eval']:
if key in keys:
if kwds[key] is None:
kwds[key] = __func__
# Build parents dictionary by parsing the __func__tion's arguments.
(args, defaults) = get_signature(__func__)
if defaults is None:
defaults = ()
# Make sure all parents were defined
arg_deficit = (len(args) - ('value' in args)) - len(defaults)
if arg_deficit > 0:
err_str = classname + ' ' + __func__.__name__ + \
': no parent provided for the following labels:'
for i in range(arg_deficit):
err_str += " " + args[i + ('value' in args)]
if i < arg_deficit - 1:
err_str += ','
raise ValueError(err_str)
# Fill in parent dictionary
try:
parents.update(dict(zip(args[-len(defaults):], defaults)))
except TypeError:
pass
value = parents.pop('value', None)
return (value, parents) | python | def _extract(__func__, kwds, keys, classname, probe=True):
"""
Used by decorators stochastic and deterministic to inspect declarations
"""
# Add docs and name
kwds['doc'] = __func__.__doc__
if not 'name' in kwds:
kwds['name'] = __func__.__name__
# kwds.update({'doc':__func__.__doc__, 'name':__func__.__name__})
# Instanitate dictionary of parents
parents = {}
# This gets used by stochastic to check for long-format logp and random:
if probe:
cur_status = check_special_methods()
disable_special_methods()
# Define global tracing function (I assume this is for debugging??)
# No, it's to get out the logp and random functions, if they're in
# there.
def probeFunc(frame, event, arg):
if event == 'return':
locals = frame.f_locals
kwds.update(dict((k, locals.get(k)) for k in keys))
sys.settrace(None)
return probeFunc
sys.settrace(probeFunc)
# Get the functions logp and random (complete interface).
# Disable special methods to prevent the formation of a hurricane of
# Deterministics
try:
__func__()
except:
if 'logp' in keys:
kwds['logp'] = __func__
else:
kwds['eval'] = __func__
# Reenable special methods.
if cur_status:
enable_special_methods()
for key in keys:
if not key in kwds:
kwds[key] = None
for key in ['logp', 'eval']:
if key in keys:
if kwds[key] is None:
kwds[key] = __func__
# Build parents dictionary by parsing the __func__tion's arguments.
(args, defaults) = get_signature(__func__)
if defaults is None:
defaults = ()
# Make sure all parents were defined
arg_deficit = (len(args) - ('value' in args)) - len(defaults)
if arg_deficit > 0:
err_str = classname + ' ' + __func__.__name__ + \
': no parent provided for the following labels:'
for i in range(arg_deficit):
err_str += " " + args[i + ('value' in args)]
if i < arg_deficit - 1:
err_str += ','
raise ValueError(err_str)
# Fill in parent dictionary
try:
parents.update(dict(zip(args[-len(defaults):], defaults)))
except TypeError:
pass
value = parents.pop('value', None)
return (value, parents) | [
"def",
"_extract",
"(",
"__func__",
",",
"kwds",
",",
"keys",
",",
"classname",
",",
"probe",
"=",
"True",
")",
":",
"# Add docs and name",
"kwds",
"[",
"'doc'",
"]",
"=",
"__func__",
".",
"__doc__",
"if",
"not",
"'name'",
"in",
"kwds",
":",
"kwds",
"[... | Used by decorators stochastic and deterministic to inspect declarations | [
"Used",
"by",
"decorators",
"stochastic",
"and",
"deterministic",
"to",
"inspect",
"declarations"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/InstantiationDecorators.py#L47-L126 | train | 220,136 |
pymc-devs/pymc | pymc/InstantiationDecorators.py | observed | def observed(obj=None, **kwds):
"""
Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions
"""
if obj is not None:
if isinstance(obj, Stochastic):
obj._observed = True
return obj
else:
p = stochastic(__func__=obj, observed=True, **kwds)
return p
kwds['observed'] = True
def instantiate_observed(func):
return stochastic(func, **kwds)
return instantiate_observed | python | def observed(obj=None, **kwds):
"""
Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions
"""
if obj is not None:
if isinstance(obj, Stochastic):
obj._observed = True
return obj
else:
p = stochastic(__func__=obj, observed=True, **kwds)
return p
kwds['observed'] = True
def instantiate_observed(func):
return stochastic(func, **kwds)
return instantiate_observed | [
"def",
"observed",
"(",
"obj",
"=",
"None",
",",
"*",
"*",
"kwds",
")",
":",
"if",
"obj",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"obj",
",",
"Stochastic",
")",
":",
"obj",
".",
"_observed",
"=",
"True",
"return",
"obj",
"else",
":",
"... | Decorator function to instantiate data objects.
If given a Stochastic, sets a the observed flag to True.
Can be used as
@observed
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
or as
@stochastic(observed=True)
def A(value = ., parent_name = ., ...):
return foo(value, parent_name, ...)
:SeeAlso:
stochastic, Stochastic, dtrm, Deterministic, potential, Potential, Model,
distributions | [
"Decorator",
"function",
"to",
"instantiate",
"data",
"objects",
".",
"If",
"given",
"a",
"Stochastic",
"sets",
"a",
"the",
"observed",
"flag",
"to",
"True",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/InstantiationDecorators.py#L259-L295 | train | 220,137 |
pymc-devs/pymc | pymc/InstantiationDecorators.py | robust_init | def robust_init(stochclass, tries, *args, **kwds):
"""Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True)
"""
# Find the direct parents
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
# Find the extended parents
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
# Select the parents with a random method.
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc) | python | def robust_init(stochclass, tries, *args, **kwds):
"""Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True)
"""
# Find the direct parents
stochs = [arg for arg in (list(args) + list(kwds.values()))
if isinstance(arg.__class__, StochasticMeta)]
# Find the extended parents
parents = stochs
for s in stochs:
parents.extend(s.extended_parents)
extended_parents = set(parents)
# Select the parents with a random method.
random_parents = [
p for p in extended_parents if p.rseed is True and hasattr(
p,
'random')]
for i in range(tries):
try:
return stochclass(*args, **kwds)
except ZeroProbability:
exc = sys.exc_info()
for parent in random_parents:
try:
parent.random()
except:
six.reraise(*exc)
six.reraise(*exc) | [
"def",
"robust_init",
"(",
"stochclass",
",",
"tries",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"# Find the direct parents",
"stochs",
"=",
"[",
"arg",
"for",
"arg",
"in",
"(",
"list",
"(",
"args",
")",
"+",
"list",
"(",
"kwds",
".",
"value... | Robust initialization of a Stochastic.
If the evaluation of the log-probability returns a ZeroProbability
error, due for example to a parent being outside of the support for
this Stochastic, the values of parents are randomly sampled until
a valid log-probability is obtained.
If the log-probability is still not valid after `tries` attempts, the
original ZeroProbability error is raised.
:Parameters:
stochclass : Stochastic, eg. Normal, Uniform, ...
The Stochastic distribution to instantiate.
tries : int
Maximum number of times parents will be sampled.
*args, **kwds
Positional and keyword arguments to declare the Stochastic variable.
:Example:
>>> lower = pymc.Uniform('lower', 0., 2., value=1.5, rseed=True)
>>> pymc.robust_init(pymc.Uniform, 100, 'data', lower=lower, upper=5, value=[1,2,3,4], observed=True) | [
"Robust",
"initialization",
"of",
"a",
"Stochastic",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/InstantiationDecorators.py#L300-L351 | train | 220,138 |
pymc-devs/pymc | pymc/examples/gp/more_examples/MKMsalmon/salmon.py | salmon.plot | def plot(self):
"""
Plot posterior from simple nonstochetric regression.
"""
figure()
plot_envelope(self.M, self.C, self.xplot)
for i in range(3):
f = Realization(self.M, self.C)
plot(self.xplot,f(self.xplot))
plot(self.abundance, self.frye, 'k.', markersize=4)
xlabel('Female abundance')
ylabel('Frye density')
title(self.name)
axis('tight') | python | def plot(self):
"""
Plot posterior from simple nonstochetric regression.
"""
figure()
plot_envelope(self.M, self.C, self.xplot)
for i in range(3):
f = Realization(self.M, self.C)
plot(self.xplot,f(self.xplot))
plot(self.abundance, self.frye, 'k.', markersize=4)
xlabel('Female abundance')
ylabel('Frye density')
title(self.name)
axis('tight') | [
"def",
"plot",
"(",
"self",
")",
":",
"figure",
"(",
")",
"plot_envelope",
"(",
"self",
".",
"M",
",",
"self",
".",
"C",
",",
"self",
".",
"xplot",
")",
"for",
"i",
"in",
"range",
"(",
"3",
")",
":",
"f",
"=",
"Realization",
"(",
"self",
".",
... | Plot posterior from simple nonstochetric regression. | [
"Plot",
"posterior",
"from",
"simple",
"nonstochetric",
"regression",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/gp/more_examples/MKMsalmon/salmon.py#L54-L68 | train | 220,139 |
pymc-devs/pymc | pymc/examples/melanoma.py | survival | def survival(value=t, lam=lam, f=failure):
"""Exponential survival likelihood, accounting for censoring"""
return sum(f * log(lam) - lam * value) | python | def survival(value=t, lam=lam, f=failure):
"""Exponential survival likelihood, accounting for censoring"""
return sum(f * log(lam) - lam * value) | [
"def",
"survival",
"(",
"value",
"=",
"t",
",",
"lam",
"=",
"lam",
",",
"f",
"=",
"failure",
")",
":",
"return",
"sum",
"(",
"f",
"*",
"log",
"(",
"lam",
")",
"-",
"lam",
"*",
"value",
")"
] | Exponential survival likelihood, accounting for censoring | [
"Exponential",
"survival",
"likelihood",
"accounting",
"for",
"censoring"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/melanoma.py#L43-L45 | train | 220,140 |
pymc-devs/pymc | pymc/examples/disaster_model_gof.py | disasters_sim | def disasters_sim(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Coal mining disasters sampled from the posterior predictive distribution"""
return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
late_mean, size=n - switchpoint))) | python | def disasters_sim(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Coal mining disasters sampled from the posterior predictive distribution"""
return concatenate((pm.rpoisson(early_mean, size=switchpoint), pm.rpoisson(
late_mean, size=n - switchpoint))) | [
"def",
"disasters_sim",
"(",
"early_mean",
"=",
"early_mean",
",",
"late_mean",
"=",
"late_mean",
",",
"switchpoint",
"=",
"switchpoint",
")",
":",
"return",
"concatenate",
"(",
"(",
"pm",
".",
"rpoisson",
"(",
"early_mean",
",",
"size",
"=",
"switchpoint",
... | Coal mining disasters sampled from the posterior predictive distribution | [
"Coal",
"mining",
"disasters",
"sampled",
"from",
"the",
"posterior",
"predictive",
"distribution"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/disaster_model_gof.py#L50-L55 | train | 220,141 |
pymc-devs/pymc | pymc/examples/disaster_model_gof.py | expected_values | def expected_values(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Discrepancy measure for GOF using the Freeman-Tukey statistic"""
# Sample size
n = len(disasters_array)
# Expected values
return concatenate(
(ones(switchpoint) * early_mean, ones(n - switchpoint) * late_mean)) | python | def expected_values(early_mean=early_mean,
late_mean=late_mean,
switchpoint=switchpoint):
"""Discrepancy measure for GOF using the Freeman-Tukey statistic"""
# Sample size
n = len(disasters_array)
# Expected values
return concatenate(
(ones(switchpoint) * early_mean, ones(n - switchpoint) * late_mean)) | [
"def",
"expected_values",
"(",
"early_mean",
"=",
"early_mean",
",",
"late_mean",
"=",
"late_mean",
",",
"switchpoint",
"=",
"switchpoint",
")",
":",
"# Sample size",
"n",
"=",
"len",
"(",
"disasters_array",
")",
"# Expected values",
"return",
"concatenate",
"(",
... | Discrepancy measure for GOF using the Freeman-Tukey statistic | [
"Discrepancy",
"measure",
"for",
"GOF",
"using",
"the",
"Freeman",
"-",
"Tukey",
"statistic"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/disaster_model_gof.py#L59-L68 | train | 220,142 |
pymc-devs/pymc | pymc/database/pickle.py | load | def load(filename):
"""Load a pickled database.
Return a Database instance.
"""
file = open(filename, 'rb')
container = std_pickle.load(file)
file.close()
db = Database(file.name)
chains = 0
funs = set()
for k, v in six.iteritems(container):
if k == '_state_':
db._state_ = v
else:
db._traces[k] = Trace(name=k, value=v, db=db)
setattr(db, k, db._traces[k])
chains = max(chains, len(v))
funs.add(k)
db.chains = chains
db.trace_names = chains * [list(funs)]
return db | python | def load(filename):
"""Load a pickled database.
Return a Database instance.
"""
file = open(filename, 'rb')
container = std_pickle.load(file)
file.close()
db = Database(file.name)
chains = 0
funs = set()
for k, v in six.iteritems(container):
if k == '_state_':
db._state_ = v
else:
db._traces[k] = Trace(name=k, value=v, db=db)
setattr(db, k, db._traces[k])
chains = max(chains, len(v))
funs.add(k)
db.chains = chains
db.trace_names = chains * [list(funs)]
return db | [
"def",
"load",
"(",
"filename",
")",
":",
"file",
"=",
"open",
"(",
"filename",
",",
"'rb'",
")",
"container",
"=",
"std_pickle",
".",
"load",
"(",
"file",
")",
"file",
".",
"close",
"(",
")",
"db",
"=",
"Database",
"(",
"file",
".",
"name",
")",
... | Load a pickled database.
Return a Database instance. | [
"Load",
"a",
"pickled",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/pickle.py#L77-L100 | train | 220,143 |
pymc-devs/pymc | pymc/database/pickle.py | Database._finalize | def _finalize(self):
"""Dump traces using cPickle."""
container = {}
try:
for name in self._traces:
container[name] = self._traces[name]._trace
container['_state_'] = self._state_
file = open(self.filename, 'w+b')
std_pickle.dump(container, file)
file.close()
except AttributeError:
pass | python | def _finalize(self):
"""Dump traces using cPickle."""
container = {}
try:
for name in self._traces:
container[name] = self._traces[name]._trace
container['_state_'] = self._state_
file = open(self.filename, 'w+b')
std_pickle.dump(container, file)
file.close()
except AttributeError:
pass | [
"def",
"_finalize",
"(",
"self",
")",
":",
"container",
"=",
"{",
"}",
"try",
":",
"for",
"name",
"in",
"self",
".",
"_traces",
":",
"container",
"[",
"name",
"]",
"=",
"self",
".",
"_traces",
"[",
"name",
"]",
".",
"_trace",
"container",
"[",
"'_s... | Dump traces using cPickle. | [
"Dump",
"traces",
"using",
"cPickle",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/pickle.py#L62-L74 | train | 220,144 |
pymc-devs/pymc | pymc/diagnostics.py | geweke | def geweke(x, first=.1, last=.5, intervals=20, maxlag=20):
"""Return z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of
series. x is divided into a number of segments for which this difference is
computed. If the series is converged, this score should oscillate between
-1 and 1.
Parameters
----------
x : array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
maxlag : int
Maximum autocorrelation lag for estimation of spectral variance
Returns
-------
scores : list [[]]
Return a list of [i, score], where i is the starting index for each
interval and score the Geweke score on the interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
Geweke (1992)
"""
if not has_sm:
print("statsmodels not available. Geweke diagnostic cannot be calculated.")
return
if np.ndim(x) > 1:
return [geweke(y, first, last, intervals) for y in np.transpose(x)]
# Filter out invalid intervals
if first + last >= 1:
raise ValueError(
"Invalid intervals for Geweke convergence analysis",
(first, last))
# Initialize list of z-scores
zscores = [None] * intervals
# Starting points for calculations
starts = np.linspace(0, int(len(x)*(1.-last)), intervals).astype(int)
# Loop over start indices
for i,s in enumerate(starts):
# Size of remaining array
x_trunc = x[s:]
n = len(x_trunc)
# Calculate slices
first_slice = x_trunc[:int(first * n)]
last_slice = x_trunc[int(last * n):]
z = (first_slice.mean() - last_slice.mean())
z /= np.sqrt(spec(first_slice)/len(first_slice) +
spec(last_slice)/len(last_slice))
zscores[i] = len(x) - n, z
return zscores | python | def geweke(x, first=.1, last=.5, intervals=20, maxlag=20):
"""Return z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of
series. x is divided into a number of segments for which this difference is
computed. If the series is converged, this score should oscillate between
-1 and 1.
Parameters
----------
x : array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
maxlag : int
Maximum autocorrelation lag for estimation of spectral variance
Returns
-------
scores : list [[]]
Return a list of [i, score], where i is the starting index for each
interval and score the Geweke score on the interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
Geweke (1992)
"""
if not has_sm:
print("statsmodels not available. Geweke diagnostic cannot be calculated.")
return
if np.ndim(x) > 1:
return [geweke(y, first, last, intervals) for y in np.transpose(x)]
# Filter out invalid intervals
if first + last >= 1:
raise ValueError(
"Invalid intervals for Geweke convergence analysis",
(first, last))
# Initialize list of z-scores
zscores = [None] * intervals
# Starting points for calculations
starts = np.linspace(0, int(len(x)*(1.-last)), intervals).astype(int)
# Loop over start indices
for i,s in enumerate(starts):
# Size of remaining array
x_trunc = x[s:]
n = len(x_trunc)
# Calculate slices
first_slice = x_trunc[:int(first * n)]
last_slice = x_trunc[int(last * n):]
z = (first_slice.mean() - last_slice.mean())
z /= np.sqrt(spec(first_slice)/len(first_slice) +
spec(last_slice)/len(last_slice))
zscores[i] = len(x) - n, z
return zscores | [
"def",
"geweke",
"(",
"x",
",",
"first",
"=",
".1",
",",
"last",
"=",
".5",
",",
"intervals",
"=",
"20",
",",
"maxlag",
"=",
"20",
")",
":",
"if",
"not",
"has_sm",
":",
"print",
"(",
"\"statsmodels not available. Geweke diagnostic cannot be calculated.\"",
"... | Return z-scores for convergence diagnostics.
Compare the mean of the first % of series with the mean of the last % of
series. x is divided into a number of segments for which this difference is
computed. If the series is converged, this score should oscillate between
-1 and 1.
Parameters
----------
x : array-like
The trace of some stochastic parameter.
first : float
The fraction of series at the beginning of the trace.
last : float
The fraction of series at the end to be compared with the section
at the beginning.
intervals : int
The number of segments.
maxlag : int
Maximum autocorrelation lag for estimation of spectral variance
Returns
-------
scores : list [[]]
Return a list of [i, score], where i is the starting index for each
interval and score the Geweke score on the interval.
Notes
-----
The Geweke score on some series x is computed by:
.. math:: \frac{E[x_s] - E[x_e]}{\sqrt{V[x_s] + V[x_e]}}
where :math:`E` stands for the mean, :math:`V` the variance,
:math:`x_s` a section at the start of the series and
:math:`x_e` a section at the end of the series.
References
----------
Geweke (1992) | [
"Return",
"z",
"-",
"scores",
"for",
"convergence",
"diagnostics",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L236-L315 | train | 220,145 |
def raftery_lewis(x, q, r, s=.95, epsilon=.001, verbose=1):
    """Estimate how many MCMC iterations are needed to reach a given
    precision for the q-quantile of the sampled series.

    :Parameters:
      x : sequence
          Sampled series.
      q : float
          Quantile of interest.
      r : float
          Accuracy requested for the quantile.
      s (optional) : float
          Probability of attaining the requested accuracy (defaults to 0.95).
      epsilon (optional) : float
          Half width of the tolerance interval required for the q-quantile
          (defaults to 0.001).
      verbose (optional) : int
          Verbosity level for printed output (defaults to 1).

    :Return:
      nmin : int
          Minimum number of independent iterates required to achieve
          the specified accuracy for the q-quantile.
      kthin : int
          Skip parameter sufficient to produce a first-order Markov chain.
      nburn : int
          Number of burn-in iterations to discard at the start.
      nprec : int
          Post-burn-in iterations needed to attain the precision given
          by q, r and s.
      kmind : int
          Minimum skip parameter sufficient to produce an independence
          chain.

    :Example:
      >>> raftery_lewis(x, q=.025, r=.005)

    :Reference:
      Raftery, A.E. and Lewis, S.M. (1995). The number of iterations,
      convergence diagnostics and generic Metropolis algorithms. In
      Practical Markov Chain Monte Carlo (W.R. Gilks, D.J. Spiegelhalter
      and S. Richardson, eds.). London, U.K.: Chapman and Hall.

      See the fortran source file `gibbsit.f` for more details and
      references.
    """
    # Multidimensional input: diagnose each component series separately.
    if np.ndim(x) > 1:
        return [raftery_lewis(series, q, r, s, epsilon, verbose)
                for series in np.transpose(x)]

    # The actual computation is delegated to the compiled gibbsit routine.
    result = nmin, kthin, nburn, nprec, kmind = flib.gibbmain(
        x, q, r, s, epsilon)

    if verbose:
        print_("\n========================")
        print_("Raftery-Lewis Diagnostic")
        print_("========================")
        print_()
        print_(
            "%s iterations required (assuming independence) to achieve %s accuracy with %i percent probability." %
            (nmin, r, 100 * s))
        print_()
        print_(
            "Thinning factor of %i required to produce a first-order Markov chain." %
            kthin)
        print_()
        print_(
            "%i iterations to be discarded at the beginning of the simulation (burn-in)." %
            nburn)
        print_()
        print_("%s subsequent iterations required." % nprec)
        print_()
        print_(
            "Thinning factor of %i required to produce an independence chain." %
            kmind)

    return result
"""
Return the number of iterations needed to achieve a given
precision.
:Parameters:
x : sequence
Sampled series.
q : float
Quantile.
r : float
Accuracy requested for quantile.
s (optional) : float
Probability of attaining the requested accuracy (defaults to 0.95).
epsilon (optional) : float
Half width of the tolerance interval required for the q-quantile (defaults to 0.001).
verbose (optional) : int
Verbosity level for output (defaults to 1).
:Return:
nmin : int
Minimum number of independent iterates required to achieve
the specified accuracy for the q-quantile.
kthin : int
Skip parameter sufficient to produce a first-order Markov
chain.
nburn : int
Number of iterations to be discarded at the beginning of the
simulation, i.e. the number of burn-in iterations.
nprec : int
Number of iterations not including the burn-in iterations which
need to be obtained in order to attain the precision specified
by the values of the q, r and s input parameters.
kmind : int
Minimum skip parameter sufficient to produce an independence
chain.
:Example:
>>> raftery_lewis(x, q=.025, r=.005)
:Reference:
Raftery, A.E. and Lewis, S.M. (1995). The number of iterations,
convergence diagnostics and generic Metropolis algorithms. In
Practical Markov Chain Monte Carlo (W.R. Gilks, D.J. Spiegelhalter
and S. Richardson, eds.). London, U.K.: Chapman and Hall.
See the fortran source file `gibbsit.f` for more details and references.
"""
if np.ndim(x) > 1:
return [raftery_lewis(y, q, r, s, epsilon, verbose)
for y in np.transpose(x)]
output = nmin, kthin, nburn, nprec, kmind = flib.gibbmain(
x, q, r, s, epsilon)
if verbose:
print_("\n========================")
print_("Raftery-Lewis Diagnostic")
print_("========================")
print_()
print_(
"%s iterations required (assuming independence) to achieve %s accuracy with %i percent probability." %
(nmin, r, 100 * s))
print_()
print_(
"Thinning factor of %i required to produce a first-order Markov chain." %
kthin)
print_()
print_(
"%i iterations to be discarded at the beginning of the simulation (burn-in)." %
nburn)
print_()
print_("%s subsequent iterations required." % nprec)
print_()
print_(
"Thinning factor of %i required to produce an independence chain." %
kmind)
return output | [
"def",
"raftery_lewis",
"(",
"x",
",",
"q",
",",
"r",
",",
"s",
"=",
".95",
",",
"epsilon",
"=",
".001",
",",
"verbose",
"=",
"1",
")",
":",
"if",
"np",
".",
"ndim",
"(",
"x",
")",
">",
"1",
":",
"return",
"[",
"raftery_lewis",
"(",
"y",
",",... | Return the number of iterations needed to achieve a given
precision.
:Parameters:
x : sequence
Sampled series.
q : float
Quantile.
r : float
Accuracy requested for quantile.
s (optional) : float
Probability of attaining the requested accuracy (defaults to 0.95).
epsilon (optional) : float
Half width of the tolerance interval required for the q-quantile (defaults to 0.001).
verbose (optional) : int
Verbosity level for output (defaults to 1).
:Return:
nmin : int
Minimum number of independent iterates required to achieve
the specified accuracy for the q-quantile.
kthin : int
Skip parameter sufficient to produce a first-order Markov
chain.
nburn : int
Number of iterations to be discarded at the beginning of the
simulation, i.e. the number of burn-in iterations.
nprec : int
Number of iterations not including the burn-in iterations which
need to be obtained in order to attain the precision specified
by the values of the q, r and s input parameters.
kmind : int
Minimum skip parameter sufficient to produce an independence
chain.
:Example:
>>> raftery_lewis(x, q=.025, r=.005)
:Reference:
Raftery, A.E. and Lewis, S.M. (1995). The number of iterations,
convergence diagnostics and generic Metropolis algorithms. In
Practical Markov Chain Monte Carlo (W.R. Gilks, D.J. Spiegelhalter
and S. Richardson, eds.). London, U.K.: Chapman and Hall.
See the fortran source file `gibbsit.f` for more details and references. | [
"Return",
"the",
"number",
"of",
"iterations",
"needed",
"to",
"achieve",
"a",
"given",
"precision",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L321-L400 | train | 220,146 |
def effective_n(x):
    r"""Return an estimate of the effective sample size of a set of traces.

    Parameters
    ----------
    x : array-like
        An array containing the 2 or more traces of a stochastic parameter.
        That is, an array of dimension m x n x k, where m is the number of
        traces, n the number of samples, and k the dimension of the
        stochastic.

    Returns
    -------
    n_eff : int
        The effective sample size, :math:`\hat{n}_{eff}`.

    Notes
    -----
    The diagnostic is computed by:

    .. math:: \hat{n}_{eff} = \frac{mn}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}

    where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t,
    and T is the first odd positive integer for which the sum
    :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+2}` is negative.

    (Fixed: the docstring was previously a non-raw string -- so ``\f`` in
    ``\frac`` was a form-feed character -- had an unbalanced brace in the
    n_eff formula, and repeated ``T+1`` where the second term is ``T+2``.)

    References
    ----------
    Gelman et al. (2014)
    """
    # Need at least a 1-D collection of chains to proceed.
    if np.shape(x) < (2,):
        raise ValueError(
            'Calculation of effective sample size requires multiple chains of the same length.')

    try:
        m, n = np.shape(x)
    except ValueError:
        # x is m x n x k: diagnose each of the k dimensions separately.
        return [effective_n(np.transpose(y)) for y in np.transpose(x)]

    # Marginal posterior variance estimate from the Gelman-Rubin machinery.
    s2 = gelman_rubin(x, return_var=True)

    negative_autocorr = False
    t = 1

    # Variogram at lag t, averaged over all chains.
    variogram = lambda t: (sum(sum((x[j][i] - x[j][i - t]) ** 2
                                   for i in range(t, n)) for j in range(m))
                           / (m * (n - t)))

    rho = np.ones(n)
    # Iterate until the sum of consecutive estimates of autocorrelation is
    # negative (initial positive sequence estimator).
    while not negative_autocorr and (t < n):
        rho[t] = 1. - variogram(t) / (2. * s2)
        if not t % 2:
            negative_autocorr = sum(rho[t - 1:t + 1]) < 0
        t += 1

    return int(m * n / (1 + 2 * rho[1:t].sum()))
""" Returns estimate of the effective sample size of a set of traces.
Parameters
----------
x : array-like
An array containing the 2 or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
Returns
-------
n_eff : float
Return the effective sample size, :math:`\hat{n}_{eff}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{n}_{eff} = \frac{mn}}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}
where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t, and T
is the first odd positive integer for which the sum :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+1}`
is negative.
References
----------
Gelman et al. (2014)"""
if np.shape(x) < (2,):
raise ValueError(
'Calculation of effective sample size requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
return [effective_n(np.transpose(y)) for y in np.transpose(x)]
s2 = gelman_rubin(x, return_var=True)
negative_autocorr = False
t = 1
variogram = lambda t: (sum(sum((x[j][i] - x[j][i-t])**2 for i in range(t,n)) for j in range(m))
/ (m*(n - t)))
rho = np.ones(n)
# Iterate until the sum of consecutive estimates of autocorrelation is negative
while not negative_autocorr and (t < n):
rho[t] = 1. - variogram(t)/(2.*s2)
if not t % 2:
negative_autocorr = sum(rho[t-1:t+1]) < 0
t += 1
return int(m*n / (1 + 2*rho[1:t].sum())) | [
"def",
"effective_n",
"(",
"x",
")",
":",
"if",
"np",
".",
"shape",
"(",
"x",
")",
"<",
"(",
"2",
",",
")",
":",
"raise",
"ValueError",
"(",
"'Calculation of effective sample size requires multiple chains of the same length.'",
")",
"try",
":",
"m",
",",
"n",
... | Returns estimate of the effective sample size of a set of traces.
Parameters
----------
x : array-like
An array containing the 2 or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
Returns
-------
n_eff : float
Return the effective sample size, :math:`\hat{n}_{eff}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{n}_{eff} = \frac{mn}}{1 + 2 \sum_{t=1}^T \hat{\rho}_t}
where :math:`\hat{\rho}_t` is the estimated autocorrelation at lag t, and T
is the first odd positive integer for which the sum :math:`\hat{\rho}_{T+1} + \hat{\rho}_{T+1}`
is negative.
References
----------
Gelman et al. (2014) | [
"Returns",
"estimate",
"of",
"the",
"effective",
"sample",
"size",
"of",
"a",
"set",
"of",
"traces",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L497-L552 | train | 220,147 |
def gelman_rubin(x, return_var=False):
    r"""Return the estimate of R-hat for a set of traces.

    The Gelman-Rubin diagnostic tests for lack of convergence by comparing
    the variance between multiple chains to the variance within each chain.
    If convergence has been achieved, the between-chain and within-chain
    variances should be identical. To be most effective in detecting evidence
    for nonconvergence, each chain should have been initialized to starting
    values that are dispersed relative to the target distribution.

    Parameters
    ----------
    x : array-like
        An array containing the 2 or more traces of a stochastic parameter.
        That is, an array of dimension m x n x k, where m is the number of
        traces, n the number of samples, and k the dimension of the
        stochastic.
    return_var : bool
        Flag for returning the marginal posterior variance instead of R-hat
        (defaults to False).

    Returns
    -------
    Rhat : float
        The potential scale reduction factor, :math:`\hat{R}`.

    Notes
    -----
    The diagnostic is computed by:

    .. math:: \hat{R} = \sqrt{\frac{\hat{V}}{W}}

    where :math:`W` is the within-chain variance and :math:`\hat{V}` is
    the posterior variance estimate for the pooled traces. This is the
    potential scale reduction factor, which converges to unity when each
    of the traces is a sample from the target posterior. Values greater
    than one indicate that one or more chains have not yet converged.

    References
    ----------
    Brooks and Gelman (1998)
    Gelman and Rubin (1992)
    """
    if np.shape(x) < (2,):
        raise ValueError(
            'Gelman-Rubin diagnostic requires multiple chains of the same length.')

    try:
        m, n = np.shape(x)
    except ValueError:
        # x is m x n x k: diagnose each of the k dimensions separately.
        # BUG FIX: return_var was previously dropped by the recursion, so a
        # multivariate call could never return the pooled variance.
        return [gelman_rubin(np.transpose(y), return_var=return_var)
                for y in np.transpose(x)]

    # Between-chain variance (already divided by n).
    B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)

    # Pooled within-chain variance.
    W = np.sum(
        [(x[i] - xbar) ** 2 for i,
         xbar in enumerate(np.mean(x,
                                   1))]) / (m * (n - 1))

    # (Over-)estimate of the marginal posterior variance.
    s2 = W * (n - 1) / n + B_over_n

    if return_var:
        return s2

    # Pooled posterior variance estimate.
    V = s2 + B_over_n / m

    # Potential scale reduction factor.
    R = V / W

    return np.sqrt(R)
""" Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
x : array-like
An array containing the 2 or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
return_var : bool
Flag for returning the marginal posterior variance instead of R-hat (defaults of False).
Returns
-------
Rhat : float
Return the potential scale reduction factor, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \sqrt{\frac{\hat{V}}{W}}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992)"""
if np.shape(x) < (2,):
raise ValueError(
'Gelman-Rubin diagnostic requires multiple chains of the same length.')
try:
m, n = np.shape(x)
except ValueError:
return [gelman_rubin(np.transpose(y)) for y in np.transpose(x)]
# Calculate between-chain variance
B_over_n = np.sum((np.mean(x, 1) - np.mean(x)) ** 2) / (m - 1)
# Calculate within-chain variances
W = np.sum(
[(x[i] - xbar) ** 2 for i,
xbar in enumerate(np.mean(x,
1))]) / (m * (n - 1))
# (over) estimate of variance
s2 = W * (n - 1) / n + B_over_n
if return_var:
return s2
# Pooled posterior variance estimate
V = s2 + B_over_n / m
# Calculate PSRF
R = V / W
return np.sqrt(R) | [
"def",
"gelman_rubin",
"(",
"x",
",",
"return_var",
"=",
"False",
")",
":",
"if",
"np",
".",
"shape",
"(",
"x",
")",
"<",
"(",
"2",
",",
")",
":",
"raise",
"ValueError",
"(",
"'Gelman-Rubin diagnostic requires multiple chains of the same length.'",
")",
"try",... | Returns estimate of R for a set of traces.
The Gelman-Rubin diagnostic tests for lack of convergence by comparing
the variance between multiple chains to the variance within each chain.
If convergence has been achieved, the between-chain and within-chain
variances should be identical. To be most effective in detecting evidence
for nonconvergence, each chain should have been initialized to starting
values that are dispersed relative to the target distribution.
Parameters
----------
x : array-like
An array containing the 2 or more traces of a stochastic parameter. That is, an array of dimension m x n x k, where m is the number of traces, n the number of samples, and k the dimension of the stochastic.
return_var : bool
Flag for returning the marginal posterior variance instead of R-hat (defaults of False).
Returns
-------
Rhat : float
Return the potential scale reduction factor, :math:`\hat{R}`
Notes
-----
The diagnostic is computed by:
.. math:: \hat{R} = \sqrt{\frac{\hat{V}}{W}}
where :math:`W` is the within-chain variance and :math:`\hat{V}` is
the posterior variance estimate for the pooled traces. This is the
potential scale reduction factor, which converges to unity when each
of the traces is a sample from the target posterior. Values greater
than one indicate that one or more chains have not yet converged.
References
----------
Brooks and Gelman (1998)
Gelman and Rubin (1992) | [
"Returns",
"estimate",
"of",
"R",
"for",
"a",
"set",
"of",
"traces",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L556-L627 | train | 220,148 |
def _find_max_lag(x, rho_limit=0.05, maxmaxlag=20000, verbose=0):
    """Automatically find an appropriate maximum lag to calculate the
    integrated autocorrelation time (IAT) of ``x``.

    Parameters
    ----------
    x : array-like
        Sampled series.
    rho_limit : float, optional
        Autocorrelation threshold below which the search stops
        (defaults to 0.05).
    maxmaxlag : int, optional
        Hard upper bound on the returned lag (defaults to 20000).
    verbose : int, optional
        If nonzero, report the chosen lag (defaults to 0).

    Returns
    -------
    maxlag : int
        Maximum lag to use when estimating the IAT (at least 10).
    """
    # Fetch autocovariance matrix
    acv = autocov(x)
    # Calculate rho (lag-1 autocorrelation)
    rho = acv[0, 1] / acv[0, 0]
    lam = -1. / np.log(abs(rho))
    # Initial guess at 1.5 times lambda (i.e. 3 times mean life)
    maxlag = int(np.floor(3. * lam)) + 1
    # Jump forward 1% of lambda to look for rholimit threshold
    jump = int(np.ceil(0.01 * lam)) + 1
    T = len(x)

    # `and` instead of the original bitwise `&`: both operands are plain
    # boolean conditions, so short-circuit evaluation is the right idiom.
    while (abs(rho) > rho_limit) and (maxlag < min(T / 2, maxmaxlag)):
        acv = autocov(x, maxlag)
        rho = acv[0, 1] / acv[0, 0]
        maxlag += jump

    # Add 30% for good measure
    maxlag = int(np.floor(1.3 * maxlag))

    if maxlag >= min(T / 2, maxmaxlag):
        maxlag = min(min(T / 2, maxlag), maxmaxlag)
        # BUG FIX: the original built this message as a bare expression
        # statement without ever printing it; actually report the clamp.
        if verbose:
            print_("maxlag fixed to %d" % maxlag)
        return maxlag

    if maxlag <= 1:
        print_("maxlag = %d, fixing value to 10" % maxlag)
        return 10

    if verbose:
        print_("maxlag = %d" % maxlag)

    return maxlag
"""Automatically find an appropriate maximum lag to calculate IAT"""
# Fetch autocovariance matrix
acv = autocov(x)
# Calculate rho
rho = acv[0, 1] / acv[0, 0]
lam = -1. / np.log(abs(rho))
# Initial guess at 1.5 times lambda (i.e. 3 times mean life)
maxlag = int(np.floor(3. * lam)) + 1
# Jump forward 1% of lambda to look for rholimit threshold
jump = int(np.ceil(0.01 * lam)) + 1
T = len(x)
while ((abs(rho) > rho_limit) & (maxlag < min(T / 2, maxmaxlag))):
acv = autocov(x, maxlag)
rho = acv[0, 1] / acv[0, 0]
maxlag += jump
# Add 30% for good measure
maxlag = int(np.floor(1.3 * maxlag))
if maxlag >= min(T / 2, maxmaxlag):
maxlag = min(min(T / 2, maxlag), maxmaxlag)
"maxlag fixed to %d" % maxlag
return maxlag
if maxlag <= 1:
print_("maxlag = %d, fixing value to 10" % maxlag)
return 10
if verbose:
print_("maxlag = %d" % maxlag)
return maxlag | [
"def",
"_find_max_lag",
"(",
"x",
",",
"rho_limit",
"=",
"0.05",
",",
"maxmaxlag",
"=",
"20000",
",",
"verbose",
"=",
"0",
")",
":",
"# Fetch autocovariance matrix",
"acv",
"=",
"autocov",
"(",
"x",
")",
"# Calculate rho",
"rho",
"=",
"acv",
"[",
"0",
",... | Automatically find an appropriate maximum lag to calculate IAT | [
"Automatically",
"find",
"an",
"appropriate",
"maximum",
"lag",
"to",
"calculate",
"IAT"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L630-L668 | train | 220,149 |
def ppp_value(simdata, trueval, round=3):
    r"""
    Calculate the posterior predictive p-value of data simulated from the
    posterior predictive distribution, returning the quantile of the
    observed data relative to simulated.

    The posterior predictive p-value is computed by:

    .. math:: Pr(T(y^{\text{sim}}) > T(y) | y)

    where T is a test statistic of interest and :math:`y^{\text{sim}}` is
    the simulated data.

    :Arguments:
        simdata: array or PyMC object
            Trace of simulated data or the PyMC stochastic object containing trace.
        trueval: numeric
            True (observed) value of the data
        round: int
            Rounding of returned quantile (defaults to 3)
    """
    # BUG FIX: the original tested `ndim(simdata == 2)` -- the ndim of a
    # boolean array -- and recursed into an undefined `post_pred_checks`.
    if np.ndim(trueval) == 1 and np.ndim(simdata) == 2:
        # Iterate over more than one set of data, one column per observation.
        return [ppp_value(simdata[:, i], trueval[i], round)
                for i in range(len(trueval))]

    # Proportion of simulated values exceeding the observed value, rounded
    # as requested (the `round` argument was previously unused).
    return np.round((simdata > trueval).mean(), round)
"""
Calculates posterior predictive p-values on data simulated from the posterior
predictive distribution, returning the quantile of the observed data relative to
simulated.
The posterior predictive p-value is computed by:
.. math:: Pr(T(y^{\text{sim}} > T(y) | y)
where T is a test statistic of interest and :math:`y^{\text{sim}}` is the simulated
data.
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
round: int
Rounding of returned quantile (defaults to 3)
"""
if ndim(trueval) == 1 and ndim(simdata == 2):
# Iterate over more than one set of data
return [post_pred_checks(simdata[:, i], trueval[i])
for i in range(len(trueval))]
return (simdata > trueval).mean() | [
"def",
"ppp_value",
"(",
"simdata",
",",
"trueval",
",",
"round",
"=",
"3",
")",
":",
"if",
"ndim",
"(",
"trueval",
")",
"==",
"1",
"and",
"ndim",
"(",
"simdata",
"==",
"2",
")",
":",
"# Iterate over more than one set of data",
"return",
"[",
"post_pred_ch... | Calculates posterior predictive p-values on data simulated from the posterior
predictive distribution, returning the quantile of the observed data relative to
simulated.
The posterior predictive p-value is computed by:
.. math:: Pr(T(y^{\text{sim}} > T(y) | y)
where T is a test statistic of interest and :math:`y^{\text{sim}}` is the simulated
data.
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
round: int
Rounding of returned quantile (defaults to 3) | [
"Calculates",
"posterior",
"predictive",
"p",
"-",
"values",
"on",
"data",
"simulated",
"from",
"the",
"posterior",
"predictive",
"distribution",
"returning",
"the",
"quantile",
"of",
"the",
"observed",
"data",
"relative",
"to",
"simulated",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/diagnostics.py#L705-L735 | train | 220,150 |
def check_valid_object_name(sequence):
    """Raise a ValueError if any two objects in `sequence` share a __name__."""
    seen = set()
    for obj in sequence:
        name = obj.__name__
        if name in seen:
            raise ValueError(
                'A tallyable PyMC object called %s already exists. This will cause problems for some database backends.' %
                name)
        seen.add(name)
"""Check that the names of the objects are all different."""
names = []
for o in sequence:
if o.__name__ in names:
raise ValueError(
'A tallyable PyMC object called %s already exists. This will cause problems for some database backends.' %
o.__name__)
else:
names.append(o.__name__) | [
"def",
"check_valid_object_name",
"(",
"sequence",
")",
":",
"names",
"=",
"[",
"]",
"for",
"o",
"in",
"sequence",
":",
"if",
"o",
".",
"__name__",
"in",
"names",
":",
"raise",
"ValueError",
"(",
"'A tallyable PyMC object called %s already exists. This will cause pr... | Check that the names of the objects are all different. | [
"Check",
"that",
"the",
"names",
"of",
"the",
"objects",
"are",
"all",
"different",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L847-L856 | train | 220,151 |
def seed(self):
    """
    Draw new initial values for the stochastics.

    Visits the stochastics generation by generation; any stochastic that
    cannot be resampled (e.g. `rseed` unset, or `random` unavailable or
    failing) is silently left at its current value.
    """
    for generation in self.generations:
        for s in generation:
            try:
                if s.rseed is not None:
                    # random() resets the stochastic's value as a side
                    # effect; the return value itself is not needed (the
                    # original bound it to an unused local).
                    s.random(**s.parents.value)
            except Exception:
                # Best-effort, as in the original -- but catch Exception
                # rather than a bare `except:`, so KeyboardInterrupt and
                # SystemExit still propagate.
                pass
"""
Seed new initial values for the stochastics.
"""
for generation in self.generations:
for s in generation:
try:
if s.rseed is not None:
value = s.random(**s.parents.value)
except:
pass | [
"def",
"seed",
"(",
"self",
")",
":",
"for",
"generation",
"in",
"self",
".",
"generations",
":",
"for",
"s",
"in",
"generation",
":",
"try",
":",
"if",
"s",
".",
"rseed",
"is",
"not",
"None",
":",
"value",
"=",
"s",
".",
"random",
"(",
"*",
"*",... | Seed new initial values for the stochastics. | [
"Seed",
"new",
"initial",
"values",
"for",
"the",
"stochastics",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L114-L125 | train | 220,152 |
def get_node(self, node_name):
    """Return the node whose __name__ equals `node_name`, or None if absent."""
    matches = (node for node in self.nodes if node.__name__ == node_name)
    return next(matches, None)
"""Retrieve node with passed name"""
for node in self.nodes:
if node.__name__ == node_name:
return node | [
"def",
"get_node",
"(",
"self",
",",
"node_name",
")",
":",
"for",
"node",
"in",
"self",
".",
"nodes",
":",
"if",
"node",
".",
"__name__",
"==",
"node_name",
":",
"return",
"node"
] | Retrieve node with passed name | [
"Retrieve",
"node",
"with",
"passed",
"name"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L127-L131 | train | 220,153 |
def sample(self, iter, length=None, verbose=0):
    """
    Draw `iter` samples from the posterior.

    :Parameters:
      iter : int
          Number of iterations to run.
      length : int, optional
          Storage length to allocate in the trace database
          (defaults to `iter`).
      verbose : int, optional
          Verbosity level (defaults to 0).
    """
    # Reset the trace cursor and record the requested run length.
    self._cur_trace_index = 0
    self.max_trace_length = iter
    self._iter = iter
    self.verbose = verbose or 0
    self.seed()

    # Assign Trace instances to tallyable objects.
    self.db.connect_model(self)

    # Initialize database -> initialize traces.
    self.db._initialize(self._funs_to_tally,
                        iter if length is None else length)

    # Attach each freshly created trace to its variable.
    for variable in self._variables_to_tally:
        variable.trace = self.db._traces[variable.__name__]

    # Run the sampling loop, then clean up.
    self._current_iter = 0
    self._loop()
    self._finalize()
"""
Draws iter samples from the posterior.
"""
self._cur_trace_index = 0
self.max_trace_length = iter
self._iter = iter
self.verbose = verbose or 0
self.seed()
# Assign Trace instances to tallyable objects.
self.db.connect_model(self)
# Initialize database -> initialize traces.
if length is None:
length = iter
self.db._initialize(self._funs_to_tally, length)
# Put traces on objects
for v in self._variables_to_tally:
v.trace = self.db._traces[v.__name__]
# Loop
self._current_iter = 0
self._loop()
self._finalize() | [
"def",
"sample",
"(",
"self",
",",
"iter",
",",
"length",
"=",
"None",
",",
"verbose",
"=",
"0",
")",
":",
"self",
".",
"_cur_trace_index",
"=",
"0",
"self",
".",
"max_trace_length",
"=",
"iter",
"self",
".",
"_iter",
"=",
"iter",
"self",
".",
"verbo... | Draws iter samples from the posterior. | [
"Draws",
"iter",
"samples",
"from",
"the",
"posterior",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L221-L246 | train | 220,154 |
pymc-devs/pymc | pymc/Model.py | Sampler._finalize | def _finalize(self):
"""Reset the status and tell the database to finalize the traces."""
if self.status in ['running', 'halt']:
if self.verbose > 0:
print_('\nSampling finished normally.')
self.status = 'ready'
self.save_state()
self.db._finalize() | python | def _finalize(self):
"""Reset the status and tell the database to finalize the traces."""
if self.status in ['running', 'halt']:
if self.verbose > 0:
print_('\nSampling finished normally.')
self.status = 'ready'
self.save_state()
self.db._finalize() | [
"def",
"_finalize",
"(",
"self",
")",
":",
"if",
"self",
".",
"status",
"in",
"[",
"'running'",
",",
"'halt'",
"]",
":",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print_",
"(",
"'\\nSampling finished normally.'",
")",
"self",
".",
"status",
"=",
"'... | Reset the status and tell the database to finalize the traces. | [
"Reset",
"the",
"status",
"and",
"tell",
"the",
"database",
"to",
"finalize",
"the",
"traces",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L248-L256 | train | 220,155 |
def stats(self, variables=None, alpha=0.05, start=0,
          batches=100, chain=None, quantiles=(2.5, 25, 50, 75, 97.5)):
    """
    Generate posterior summary statistics for tallied variables.

    :Parameters:
    variables : iterable
        List or array of variables for which statistics are to be
        generated. If it is not specified, all the tallied variables
        are summarized.
    alpha : float
        The alpha level for generating posterior intervals. Defaults to
        0.05.
    start : int
        The starting index from which to summarize (each) chain. Defaults
        to zero.
    batches : int
        Batch size for calculating standard deviation for non-independent
        samples. Defaults to 100.
    chain : int
        The index for which chain to summarize. Defaults to None (all
        chains).
    quantiles : tuple
        Quantiles (in percent) to include in the summary.
    """
    # Default to every tallied variable; otherwise keep only the model
    # variables whose names were requested.
    if variables is None:
        selected = self._variables_to_tally
    else:
        selected = [v for v in self.variables if v.__name__ in variables]

    # Delegate the per-variable computation to each variable's trace.
    summaries = {}
    for variable in selected:
        name = variable.__name__
        summaries[name] = self.trace(name).stats(
            alpha=alpha, start=start, batches=batches,
            chain=chain, quantiles=quantiles)
    return summaries
batches=100, chain=None, quantiles=(2.5, 25, 50, 75, 97.5)):
"""
Statistical output for variables.
:Parameters:
variables : iterable
List or array of variables for which statistics are to be
generated. If it is not specified, all the tallied variables
are summarized.
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
"""
# If no names provided, run them all
if variables is None:
variables = self._variables_to_tally
else:
variables = [v for v in self.variables if v.__name__ in variables]
stat_dict = {}
# Loop over nodes
for variable in variables:
# Plot object
stat_dict[variable.__name__] = self.trace(
variable.__name__).stats(alpha=alpha, start=start,
batches=batches, chain=chain, quantiles=quantiles)
return stat_dict | [
"def",
"stats",
"(",
"self",
",",
"variables",
"=",
"None",
",",
"alpha",
"=",
"0.05",
",",
"start",
"=",
"0",
",",
"batches",
"=",
"100",
",",
"chain",
"=",
"None",
",",
"quantiles",
"=",
"(",
"2.5",
",",
"25",
",",
"50",
",",
"75",
",",
"97.5... | Statistical output for variables.
:Parameters:
variables : iterable
List or array of variables for which statistics are to be
generated. If it is not specified, all the tallied variables
are summarized.
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains). | [
"Statistical",
"output",
"for",
"variables",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L304-L347 | train | 220,156 |
def write_csv(
        self, filename, variables=None, alpha=0.05, start=0, batches=100,
        chain=None, quantiles=(2.5, 25, 50, 75, 97.5)):
    """
    Save summary statistics to a csv table.

    :Parameters:
    filename : string
        Filename to save output.
    variables : iterable
        List or array of variables for which statistics are to be
        generated. If it is not specified, all the tallied variables
        are summarized.
    alpha : float
        The alpha level for generating posterior intervals. Defaults to
        0.05.
    start : int
        The starting index from which to summarize (each) chain. Defaults
        to zero.
    batches : int
        Batch size for calculating standard deviation for non-independent
        samples. Defaults to 100.
    chain : int
        The index for which chain to summarize. Defaults to None (all
        chains).
    quantiles : tuple
        Quantiles (in percent) to include in the output columns.
    """
    # Append 'csv' suffix if there is no suffix on the filename
    if filename.find('.') == -1:
        filename += '.csv'

    # Compute summaries before touching the file so a failure here does
    # not leave a truncated file behind.
    stats = self.stats(
        variables=variables,
        alpha=alpha,
        start=start,
        batches=batches,
        chain=chain,
        quantiles=quantiles)

    if variables is None:
        variables = sorted(stats.keys())

    # BUG FIX: the header previously hard-coded "95% HPD" even when a
    # different alpha was requested; derive the interval from alpha
    # (identical output for the default alpha=0.05).
    interval = 100 * (1 - alpha)
    header = 'Parameter, Mean, SD, MC Error, Lower %g%% HPD, Upper %g%% HPD, ' % (
        interval, interval)
    header += ', '.join(['q%s' % i for i in quantiles])

    rows = str()
    for param in variables:
        values = stats[param]
        try:
            # Multivariate node: one row per element index.
            shape = values['mean'].shape
            indices = list(itertools.product(*[range(i) for i in shape]))
            for i in indices:
                rows += self._csv_str(param, values, quantiles, i)
        except AttributeError:
            # Scalar node
            rows += self._csv_str(param, values, quantiles)

    # BUG FIX: use a context manager so the file is closed even if a
    # write fails (the original left the handle open on error).
    with open(filename, 'w') as outfile:
        outfile.write(header + '\n')
        outfile.write(rows)
self, filename, variables=None, alpha=0.05, start=0, batches=100,
chain=None, quantiles=(2.5, 25, 50, 75, 97.5)):
"""
Save summary statistics to a csv table.
:Parameters:
filename : string
Filename to save output.
variables : iterable
List or array of variables for which statistics are to be
generated. If it is not specified, all the tallied variables
are summarized.
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
"""
# Append 'csv' suffix if there is no suffix on the filename
if filename.find('.') == -1:
filename += '.csv'
outfile = open(filename, 'w')
# Write header to file
header = 'Parameter, Mean, SD, MC Error, Lower 95% HPD, Upper 95% HPD, '
header += ', '.join(['q%s' % i for i in quantiles])
outfile.write(header + '\n')
stats = self.stats(
variables=variables,
alpha=alpha,
start=start,
batches=batches,
chain=chain,
quantiles=quantiles)
if variables is None:
variables = sorted(stats.keys())
buffer = str()
for param in variables:
values = stats[param]
try:
# Multivariate node
shape = values['mean'].shape
indices = list(itertools.product(*[range(i) for i in shape]))
for i in indices:
buffer += self._csv_str(param, values, quantiles, i)
except AttributeError:
# Scalar node
buffer += self._csv_str(param, values, quantiles)
outfile.write(buffer)
outfile.close() | [
"def",
"write_csv",
"(",
"self",
",",
"filename",
",",
"variables",
"=",
"None",
",",
"alpha",
"=",
"0.05",
",",
"start",
"=",
"0",
",",
"batches",
"=",
"100",
",",
"chain",
"=",
"None",
",",
"quantiles",
"=",
"(",
"2.5",
",",
"25",
",",
"50",
",... | Save summary statistics to a csv table.
:Parameters:
filename : string
Filename to save output.
variables : iterable
List or array of variables for which statistics are to be
generated. If it is not specified, all the tallied variables
are summarized.
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains). | [
"Save",
"summary",
"statistics",
"to",
"a",
"csv",
"table",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L349-L423 | train | 220,157 |
pymc-devs/pymc | pymc/Model.py | Sampler._csv_str | def _csv_str(self, param, stats, quantiles, index=None):
"""Support function for write_csv"""
buffer = param
if not index:
buffer += ', '
else:
buffer += '_' + '_'.join([str(i) for i in index]) + ', '
for stat in ('mean', 'standard deviation', 'mc error'):
buffer += str(stats[stat][index]) + ', '
# Index to interval label
iindex = [key.split()[-1] for key in stats.keys()].index('interval')
interval = list(stats.keys())[iindex]
buffer += ', '.join(stats[interval].T[index].astype(str))
# Process quantiles
qvalues = stats['quantiles']
for q in quantiles:
buffer += ', ' + str(qvalues[q][index])
return buffer + '\n' | python | def _csv_str(self, param, stats, quantiles, index=None):
"""Support function for write_csv"""
buffer = param
if not index:
buffer += ', '
else:
buffer += '_' + '_'.join([str(i) for i in index]) + ', '
for stat in ('mean', 'standard deviation', 'mc error'):
buffer += str(stats[stat][index]) + ', '
# Index to interval label
iindex = [key.split()[-1] for key in stats.keys()].index('interval')
interval = list(stats.keys())[iindex]
buffer += ', '.join(stats[interval].T[index].astype(str))
# Process quantiles
qvalues = stats['quantiles']
for q in quantiles:
buffer += ', ' + str(qvalues[q][index])
return buffer + '\n' | [
"def",
"_csv_str",
"(",
"self",
",",
"param",
",",
"stats",
",",
"quantiles",
",",
"index",
"=",
"None",
")",
":",
"buffer",
"=",
"param",
"if",
"not",
"index",
":",
"buffer",
"+=",
"', '",
"else",
":",
"buffer",
"+=",
"'_'",
"+",
"'_'",
".",
"join... | Support function for write_csv | [
"Support",
"function",
"for",
"write_csv"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L425-L447 | train | 220,158 |
pymc-devs/pymc | pymc/Model.py | Sampler.summary | def summary(self, variables=None, alpha=0.05, start=0, batches=100,
chain=None, roundto=3):
"""
Generate a pretty-printed summary of the model's variables.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
quantiles : tuple or list
The desired quantiles to be calculated. Defaults to (2.5, 25, 50, 75, 97.5).
"""
# If no names provided, run them all
if variables is None:
variables = self._variables_to_tally
else:
variables = [
self.__dict__[
i] for i in variables if self.__dict__[
i] in self._variables_to_tally]
# Loop over nodes
for variable in variables:
variable.summary(
alpha=alpha, start=start, batches=batches, chain=chain,
roundto=roundto) | python | def summary(self, variables=None, alpha=0.05, start=0, batches=100,
chain=None, roundto=3):
"""
Generate a pretty-printed summary of the model's variables.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
quantiles : tuple or list
The desired quantiles to be calculated. Defaults to (2.5, 25, 50, 75, 97.5).
"""
# If no names provided, run them all
if variables is None:
variables = self._variables_to_tally
else:
variables = [
self.__dict__[
i] for i in variables if self.__dict__[
i] in self._variables_to_tally]
# Loop over nodes
for variable in variables:
variable.summary(
alpha=alpha, start=start, batches=batches, chain=chain,
roundto=roundto) | [
"def",
"summary",
"(",
"self",
",",
"variables",
"=",
"None",
",",
"alpha",
"=",
"0.05",
",",
"start",
"=",
"0",
",",
"batches",
"=",
"100",
",",
"chain",
"=",
"None",
",",
"roundto",
"=",
"3",
")",
":",
"# If no names provided, run them all",
"if",
"v... | Generate a pretty-printed summary of the model's variables.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
quantiles : tuple or list
The desired quantiles to be calculated. Defaults to (2.5, 25, 50, 75, 97.5). | [
"Generate",
"a",
"pretty",
"-",
"printed",
"summary",
"of",
"the",
"model",
"s",
"variables",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L449-L491 | train | 220,159 |
pymc-devs/pymc | pymc/Model.py | Sampler._assign_database_backend | def _assign_database_backend(self, db):
"""Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
"""
# Objects that are not to be tallied are assigned a no_trace.Trace
# Tallyable objects are listed in the _nodes_to_tally set.
no_trace = getattr(database, 'no_trace')
self._variables_to_tally = set()
for object in self.stochastics | self.deterministics:
if object.keep_trace:
self._variables_to_tally.add(object)
try:
if object.mask is None:
# Standard stochastic
self._funs_to_tally[object.__name__] = object.get_value
else:
# Has missing values, so only fetch stochastic elements
# using mask
self._funs_to_tally[
object.__name__] = object.get_stoch_value
except AttributeError:
# Not a stochastic object, so no mask
self._funs_to_tally[object.__name__] = object.get_value
else:
object.trace = no_trace.Trace(object.__name__)
check_valid_object_name(self._variables_to_tally)
# If not already done, load the trace backend from the database
# module, and assign a database instance to Model.
if isinstance(db, str):
if db in dir(database):
module = getattr(database, db)
# Assign a default name for the database output file.
if self._db_args.get('dbname') is None:
self._db_args['dbname'] = self.__name__ + '.' + db
self.db = module.Database(**self._db_args)
elif db in database.__modules__:
raise ImportError(
'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
else:
raise AttributeError(
'Database backend `%s` is not defined in pymc.database.' % db)
elif isinstance(db, database.base.Database):
self.db = db
self.restore_sampler_state()
else: # What is this for? DH.
self.db = db.Database(**self._db_args) | python | def _assign_database_backend(self, db):
"""Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file.
"""
# Objects that are not to be tallied are assigned a no_trace.Trace
# Tallyable objects are listed in the _nodes_to_tally set.
no_trace = getattr(database, 'no_trace')
self._variables_to_tally = set()
for object in self.stochastics | self.deterministics:
if object.keep_trace:
self._variables_to_tally.add(object)
try:
if object.mask is None:
# Standard stochastic
self._funs_to_tally[object.__name__] = object.get_value
else:
# Has missing values, so only fetch stochastic elements
# using mask
self._funs_to_tally[
object.__name__] = object.get_stoch_value
except AttributeError:
# Not a stochastic object, so no mask
self._funs_to_tally[object.__name__] = object.get_value
else:
object.trace = no_trace.Trace(object.__name__)
check_valid_object_name(self._variables_to_tally)
# If not already done, load the trace backend from the database
# module, and assign a database instance to Model.
if isinstance(db, str):
if db in dir(database):
module = getattr(database, db)
# Assign a default name for the database output file.
if self._db_args.get('dbname') is None:
self._db_args['dbname'] = self.__name__ + '.' + db
self.db = module.Database(**self._db_args)
elif db in database.__modules__:
raise ImportError(
'Database backend `%s` is not properly installed. Please see the documentation for instructions.' % db)
else:
raise AttributeError(
'Database backend `%s` is not defined in pymc.database.' % db)
elif isinstance(db, database.base.Database):
self.db = db
self.restore_sampler_state()
else: # What is this for? DH.
self.db = db.Database(**self._db_args) | [
"def",
"_assign_database_backend",
"(",
"self",
",",
"db",
")",
":",
"# Objects that are not to be tallied are assigned a no_trace.Trace",
"# Tallyable objects are listed in the _nodes_to_tally set.",
"no_trace",
"=",
"getattr",
"(",
"database",
",",
"'no_trace'",
")",
"self",
... | Assign Trace instance to stochastics and deterministics and Database instance
to self.
:Parameters:
- `db` : string, Database instance
The name of the database module (see below), or a Database instance.
Available databases:
- `no_trace` : Traces are not stored at all.
- `ram` : Traces stored in memory.
- `txt` : Traces stored in memory and saved in txt files at end of
sampling.
- `sqlite` : Traces stored in sqlite database.
- `hdf5` : Traces stored in an HDF5 file. | [
"Assign",
"Trace",
"instance",
"to",
"stochastics",
"and",
"deterministics",
"and",
"Database",
"instance",
"to",
"self",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L516-L579 | train | 220,160 |
pymc-devs/pymc | pymc/Model.py | Sampler.pause | def pause(self):
"""Pause the sampler. Sampling can be resumed by calling `icontinue`.
"""
self.status = 'paused'
# The _loop method will react to 'paused' status and stop looping.
if hasattr(
self, '_sampling_thread') and self._sampling_thread.isAlive():
print_('Waiting for current iteration to finish...')
while self._sampling_thread.isAlive():
sleep(.1) | python | def pause(self):
"""Pause the sampler. Sampling can be resumed by calling `icontinue`.
"""
self.status = 'paused'
# The _loop method will react to 'paused' status and stop looping.
if hasattr(
self, '_sampling_thread') and self._sampling_thread.isAlive():
print_('Waiting for current iteration to finish...')
while self._sampling_thread.isAlive():
sleep(.1) | [
"def",
"pause",
"(",
"self",
")",
":",
"self",
".",
"status",
"=",
"'paused'",
"# The _loop method will react to 'paused' status and stop looping.",
"if",
"hasattr",
"(",
"self",
",",
"'_sampling_thread'",
")",
"and",
"self",
".",
"_sampling_thread",
".",
"isAlive",
... | Pause the sampler. Sampling can be resumed by calling `icontinue`. | [
"Pause",
"the",
"sampler",
".",
"Sampling",
"can",
"be",
"resumed",
"by",
"calling",
"icontinue",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L581-L590 | train | 220,161 |
pymc-devs/pymc | pymc/Model.py | Sampler.halt | def halt(self):
"""Halt a sampling running in another thread."""
self.status = 'halt'
# The _halt method is called by _loop.
if hasattr(
self, '_sampling_thread') and self._sampling_thread.isAlive():
print_('Waiting for current iteration to finish...')
while self._sampling_thread.isAlive():
sleep(.1) | python | def halt(self):
"""Halt a sampling running in another thread."""
self.status = 'halt'
# The _halt method is called by _loop.
if hasattr(
self, '_sampling_thread') and self._sampling_thread.isAlive():
print_('Waiting for current iteration to finish...')
while self._sampling_thread.isAlive():
sleep(.1) | [
"def",
"halt",
"(",
"self",
")",
":",
"self",
".",
"status",
"=",
"'halt'",
"# The _halt method is called by _loop.",
"if",
"hasattr",
"(",
"self",
",",
"'_sampling_thread'",
")",
"and",
"self",
".",
"_sampling_thread",
".",
"isAlive",
"(",
")",
":",
"print_",... | Halt a sampling running in another thread. | [
"Halt",
"a",
"sampling",
"running",
"in",
"another",
"thread",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L592-L600 | train | 220,162 |
pymc-devs/pymc | pymc/Model.py | Sampler.isample | def isample(self, *args, **kwds):
"""
Samples in interactive mode. Main thread of control stays in this function.
"""
self._exc_info = None
out = kwds.pop('out', sys.stdout)
kwds['progress_bar'] = False
def samp_targ(*args, **kwds):
try:
self.sample(*args, **kwds)
except:
self._exc_info = sys.exc_info()
self._sampling_thread = Thread(
target=samp_targ,
args=args,
kwargs=kwds)
self.status = 'running'
self._sampling_thread.start()
self.iprompt(out=out) | python | def isample(self, *args, **kwds):
"""
Samples in interactive mode. Main thread of control stays in this function.
"""
self._exc_info = None
out = kwds.pop('out', sys.stdout)
kwds['progress_bar'] = False
def samp_targ(*args, **kwds):
try:
self.sample(*args, **kwds)
except:
self._exc_info = sys.exc_info()
self._sampling_thread = Thread(
target=samp_targ,
args=args,
kwargs=kwds)
self.status = 'running'
self._sampling_thread.start()
self.iprompt(out=out) | [
"def",
"isample",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"self",
".",
"_exc_info",
"=",
"None",
"out",
"=",
"kwds",
".",
"pop",
"(",
"'out'",
",",
"sys",
".",
"stdout",
")",
"kwds",
"[",
"'progress_bar'",
"]",
"=",
"False... | Samples in interactive mode. Main thread of control stays in this function. | [
"Samples",
"in",
"interactive",
"mode",
".",
"Main",
"thread",
"of",
"control",
"stays",
"in",
"this",
"function",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L632-L652 | train | 220,163 |
pymc-devs/pymc | pymc/Model.py | Sampler.icontinue | def icontinue(self):
"""
Restarts thread in interactive mode
"""
if self.status != 'paused':
print_(
"No sampling to continue. Please initiate sampling with isample.")
return
def sample_and_finalize():
self._loop()
self._finalize()
self._sampling_thread = Thread(target=sample_and_finalize)
self.status = 'running'
self._sampling_thread.start()
self.iprompt() | python | def icontinue(self):
"""
Restarts thread in interactive mode
"""
if self.status != 'paused':
print_(
"No sampling to continue. Please initiate sampling with isample.")
return
def sample_and_finalize():
self._loop()
self._finalize()
self._sampling_thread = Thread(target=sample_and_finalize)
self.status = 'running'
self._sampling_thread.start()
self.iprompt() | [
"def",
"icontinue",
"(",
"self",
")",
":",
"if",
"self",
".",
"status",
"!=",
"'paused'",
":",
"print_",
"(",
"\"No sampling to continue. Please initiate sampling with isample.\"",
")",
"return",
"def",
"sample_and_finalize",
"(",
")",
":",
"self",
".",
"_loop",
"... | Restarts thread in interactive mode | [
"Restarts",
"thread",
"in",
"interactive",
"mode"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L654-L670 | train | 220,164 |
pymc-devs/pymc | pymc/Model.py | Sampler.get_state | def get_state(self):
"""
Return the sampler's current state in order to
restart sampling at a later time.
"""
state = dict(sampler={}, stochastics={})
# The state of the sampler itself.
for s in self._state:
state['sampler'][s] = getattr(self, s)
# The state of each stochastic parameter
for s in self.stochastics:
state['stochastics'][s.__name__] = s.value
return state | python | def get_state(self):
"""
Return the sampler's current state in order to
restart sampling at a later time.
"""
state = dict(sampler={}, stochastics={})
# The state of the sampler itself.
for s in self._state:
state['sampler'][s] = getattr(self, s)
# The state of each stochastic parameter
for s in self.stochastics:
state['stochastics'][s.__name__] = s.value
return state | [
"def",
"get_state",
"(",
"self",
")",
":",
"state",
"=",
"dict",
"(",
"sampler",
"=",
"{",
"}",
",",
"stochastics",
"=",
"{",
"}",
")",
"# The state of the sampler itself.",
"for",
"s",
"in",
"self",
".",
"_state",
":",
"state",
"[",
"'sampler'",
"]",
... | Return the sampler's current state in order to
restart sampling at a later time. | [
"Return",
"the",
"sampler",
"s",
"current",
"state",
"in",
"order",
"to",
"restart",
"sampling",
"at",
"a",
"later",
"time",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L751-L764 | train | 220,165 |
pymc-devs/pymc | pymc/Model.py | Sampler.save_state | def save_state(self):
"""
Tell the database to save the current state of the sampler.
"""
try:
self.db.savestate(self.get_state())
except:
print_('Warning, unable to save state.')
print_('Error message:')
traceback.print_exc() | python | def save_state(self):
"""
Tell the database to save the current state of the sampler.
"""
try:
self.db.savestate(self.get_state())
except:
print_('Warning, unable to save state.')
print_('Error message:')
traceback.print_exc() | [
"def",
"save_state",
"(",
"self",
")",
":",
"try",
":",
"self",
".",
"db",
".",
"savestate",
"(",
"self",
".",
"get_state",
"(",
")",
")",
"except",
":",
"print_",
"(",
"'Warning, unable to save state.'",
")",
"print_",
"(",
"'Error message:'",
")",
"trace... | Tell the database to save the current state of the sampler. | [
"Tell",
"the",
"database",
"to",
"save",
"the",
"current",
"state",
"of",
"the",
"sampler",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L766-L775 | train | 220,166 |
pymc-devs/pymc | pymc/Model.py | Sampler.restore_sampler_state | def restore_sampler_state(self):
"""
Restore the state of the sampler and to
the state stored in the database.
"""
state = self.db.getstate() or {}
# Restore sampler's state
sampler_state = state.get('sampler', {})
self.__dict__.update(sampler_state)
# Restore stochastic parameters state
stoch_state = state.get('stochastics', {})
for sm in self.stochastics:
try:
sm.value = stoch_state[sm.__name__]
except:
warnings.warn(
'Failed to restore state of stochastic %s from %s backend' %
(sm.__name__, self.db.__name__)) | python | def restore_sampler_state(self):
"""
Restore the state of the sampler and to
the state stored in the database.
"""
state = self.db.getstate() or {}
# Restore sampler's state
sampler_state = state.get('sampler', {})
self.__dict__.update(sampler_state)
# Restore stochastic parameters state
stoch_state = state.get('stochastics', {})
for sm in self.stochastics:
try:
sm.value = stoch_state[sm.__name__]
except:
warnings.warn(
'Failed to restore state of stochastic %s from %s backend' %
(sm.__name__, self.db.__name__)) | [
"def",
"restore_sampler_state",
"(",
"self",
")",
":",
"state",
"=",
"self",
".",
"db",
".",
"getstate",
"(",
")",
"or",
"{",
"}",
"# Restore sampler's state",
"sampler_state",
"=",
"state",
".",
"get",
"(",
"'sampler'",
",",
"{",
"}",
")",
"self",
".",
... | Restore the state of the sampler and to
the state stored in the database. | [
"Restore",
"the",
"state",
"of",
"the",
"sampler",
"and",
"to",
"the",
"state",
"stored",
"in",
"the",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Model.py#L777-L797 | train | 220,167 |
pymc-devs/pymc | pymc/utils.py | normcdf | def normcdf(x, log=False):
"""Normal cumulative density function."""
y = np.atleast_1d(x).copy()
flib.normcdf(y)
if log:
if (y>0).all():
return np.log(y)
return -np.inf
return y | python | def normcdf(x, log=False):
"""Normal cumulative density function."""
y = np.atleast_1d(x).copy()
flib.normcdf(y)
if log:
if (y>0).all():
return np.log(y)
return -np.inf
return y | [
"def",
"normcdf",
"(",
"x",
",",
"log",
"=",
"False",
")",
":",
"y",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
".",
"copy",
"(",
")",
"flib",
".",
"normcdf",
"(",
"y",
")",
"if",
"log",
":",
"if",
"(",
"y",
">",
"0",
")",
".",
"all",
"... | Normal cumulative density function. | [
"Normal",
"cumulative",
"density",
"function",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L453-L461 | train | 220,168 |
pymc-devs/pymc | pymc/utils.py | lognormcdf | def lognormcdf(x, mu, tau):
"""Log-normal cumulative density function"""
x = np.atleast_1d(x)
return np.array(
[0.5 * (1 - flib.derf(-(np.sqrt(tau / 2)) * (np.log(y) - mu))) for y in x]) | python | def lognormcdf(x, mu, tau):
"""Log-normal cumulative density function"""
x = np.atleast_1d(x)
return np.array(
[0.5 * (1 - flib.derf(-(np.sqrt(tau / 2)) * (np.log(y) - mu))) for y in x]) | [
"def",
"lognormcdf",
"(",
"x",
",",
"mu",
",",
"tau",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"return",
"np",
".",
"array",
"(",
"[",
"0.5",
"*",
"(",
"1",
"-",
"flib",
".",
"derf",
"(",
"-",
"(",
"np",
".",
"sqrt",
"(",... | Log-normal cumulative density function | [
"Log",
"-",
"normal",
"cumulative",
"density",
"function"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L464-L468 | train | 220,169 |
pymc-devs/pymc | pymc/utils.py | invcdf | def invcdf(x):
"""Inverse of normal cumulative density function."""
x_flat = np.ravel(x)
x_trans = np.array([flib.ppnd16(y, 1) for y in x_flat])
return np.reshape(x_trans, np.shape(x)) | python | def invcdf(x):
"""Inverse of normal cumulative density function."""
x_flat = np.ravel(x)
x_trans = np.array([flib.ppnd16(y, 1) for y in x_flat])
return np.reshape(x_trans, np.shape(x)) | [
"def",
"invcdf",
"(",
"x",
")",
":",
"x_flat",
"=",
"np",
".",
"ravel",
"(",
"x",
")",
"x_trans",
"=",
"np",
".",
"array",
"(",
"[",
"flib",
".",
"ppnd16",
"(",
"y",
",",
"1",
")",
"for",
"y",
"in",
"x_flat",
"]",
")",
"return",
"np",
".",
... | Inverse of normal cumulative density function. | [
"Inverse",
"of",
"normal",
"cumulative",
"density",
"function",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L471-L475 | train | 220,170 |
pymc-devs/pymc | pymc/utils.py | trace_generator | def trace_generator(trace, start=0, stop=None, step=1):
"""Return a generator returning values from the object's trace.
Ex:
T = trace_generator(theta.trace)
T.next()
for t in T:...
"""
i = start
stop = stop or np.inf
size = min(trace.length(), stop)
while i < size:
index = slice(i, i + 1)
yield trace.gettrace(slicing=index)[0]
i += step | python | def trace_generator(trace, start=0, stop=None, step=1):
"""Return a generator returning values from the object's trace.
Ex:
T = trace_generator(theta.trace)
T.next()
for t in T:...
"""
i = start
stop = stop or np.inf
size = min(trace.length(), stop)
while i < size:
index = slice(i, i + 1)
yield trace.gettrace(slicing=index)[0]
i += step | [
"def",
"trace_generator",
"(",
"trace",
",",
"start",
"=",
"0",
",",
"stop",
"=",
"None",
",",
"step",
"=",
"1",
")",
":",
"i",
"=",
"start",
"stop",
"=",
"stop",
"or",
"np",
".",
"inf",
"size",
"=",
"min",
"(",
"trace",
".",
"length",
"(",
")"... | Return a generator returning values from the object's trace.
Ex:
T = trace_generator(theta.trace)
T.next()
for t in T:... | [
"Return",
"a",
"generator",
"returning",
"values",
"from",
"the",
"object",
"s",
"trace",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L557-L571 | train | 220,171 |
pymc-devs/pymc | pymc/utils.py | draw_random | def draw_random(obj, **kwds):
"""Draw random variates from obj.random method.
If the object has parents whose value must be updated, use
parent_name=trace_generator_function.
Ex:
R = draw_random(theta, beta=pymc.utils.trace_generator(beta.trace))
R.next()
"""
while True:
for k, v in six.iteritems(kwds):
obj.parents[k] = v.next()
yield obj.random() | python | def draw_random(obj, **kwds):
"""Draw random variates from obj.random method.
If the object has parents whose value must be updated, use
parent_name=trace_generator_function.
Ex:
R = draw_random(theta, beta=pymc.utils.trace_generator(beta.trace))
R.next()
"""
while True:
for k, v in six.iteritems(kwds):
obj.parents[k] = v.next()
yield obj.random() | [
"def",
"draw_random",
"(",
"obj",
",",
"*",
"*",
"kwds",
")",
":",
"while",
"True",
":",
"for",
"k",
",",
"v",
"in",
"six",
".",
"iteritems",
"(",
"kwds",
")",
":",
"obj",
".",
"parents",
"[",
"k",
"]",
"=",
"v",
".",
"next",
"(",
")",
"yield... | Draw random variates from obj.random method.
If the object has parents whose value must be updated, use
parent_name=trace_generator_function.
Ex:
R = draw_random(theta, beta=pymc.utils.trace_generator(beta.trace))
R.next() | [
"Draw",
"random",
"variates",
"from",
"obj",
".",
"random",
"method",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L574-L587 | train | 220,172 |
pymc-devs/pymc | pymc/utils.py | rec_setattr | def rec_setattr(obj, attr, value):
"""Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2
"""
attrs = attr.split('.')
setattr(reduce(getattr, attrs[:-1], obj), attrs[-1], value) | python | def rec_setattr(obj, attr, value):
"""Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2
"""
attrs = attr.split('.')
setattr(reduce(getattr, attrs[:-1], obj), attrs[-1], value) | [
"def",
"rec_setattr",
"(",
"obj",
",",
"attr",
",",
"value",
")",
":",
"attrs",
"=",
"attr",
".",
"split",
"(",
"'.'",
")",
"setattr",
"(",
"reduce",
"(",
"getattr",
",",
"attrs",
"[",
":",
"-",
"1",
"]",
",",
"obj",
")",
",",
"attrs",
"[",
"-"... | Set object's attribute. May use dot notation.
>>> class C(object): pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> rec_setattr(a, 'b.c', 2)
>>> a.b.c
2 | [
"Set",
"object",
"s",
"attribute",
".",
"May",
"use",
"dot",
"notation",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L603-L615 | train | 220,173 |
pymc-devs/pymc | pymc/utils.py | calc_min_interval | def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of
a given width
Assumes that x is sorted numpy array.
"""
n = len(x)
cred_mass = 1.0 - alpha
interval_idx_inc = int(np.floor(cred_mass * n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
if len(interval_width) == 0:
print_('Too few elements for interval calculation')
return [None, None]
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx + interval_idx_inc]
return [hdi_min, hdi_max] | python | def calc_min_interval(x, alpha):
"""Internal method to determine the minimum interval of
a given width
Assumes that x is sorted numpy array.
"""
n = len(x)
cred_mass = 1.0 - alpha
interval_idx_inc = int(np.floor(cred_mass * n))
n_intervals = n - interval_idx_inc
interval_width = x[interval_idx_inc:] - x[:n_intervals]
if len(interval_width) == 0:
print_('Too few elements for interval calculation')
return [None, None]
min_idx = np.argmin(interval_width)
hdi_min = x[min_idx]
hdi_max = x[min_idx + interval_idx_inc]
return [hdi_min, hdi_max] | [
"def",
"calc_min_interval",
"(",
"x",
",",
"alpha",
")",
":",
"n",
"=",
"len",
"(",
"x",
")",
"cred_mass",
"=",
"1.0",
"-",
"alpha",
"interval_idx_inc",
"=",
"int",
"(",
"np",
".",
"floor",
"(",
"cred_mass",
"*",
"n",
")",
")",
"n_intervals",
"=",
... | Internal method to determine the minimum interval of
a given width
Assumes that x is sorted numpy array. | [
"Internal",
"method",
"to",
"determine",
"the",
"minimum",
"interval",
"of",
"a",
"given",
"width"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L694-L715 | train | 220,174 |
pymc-devs/pymc | pymc/utils.py | quantiles | def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
"""Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort, then transpose back
sx = sort(x.T).T
else:
# Sort univariate node
sx = sort(x)
try:
# Generate specified quantiles
quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print_("Too few elements for quantile calculation") | python | def quantiles(x, qlist=(2.5, 25, 50, 75, 97.5)):
"""Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5))
"""
# Make a copy of trace
x = x.copy()
# For multivariate node
if x.ndim > 1:
# Transpose first, then sort, then transpose back
sx = sort(x.T).T
else:
# Sort univariate node
sx = sort(x)
try:
# Generate specified quantiles
quants = [sx[int(len(sx) * q / 100.0)] for q in qlist]
return dict(zip(qlist, quants))
except IndexError:
print_("Too few elements for quantile calculation") | [
"def",
"quantiles",
"(",
"x",
",",
"qlist",
"=",
"(",
"2.5",
",",
"25",
",",
"50",
",",
"75",
",",
"97.5",
")",
")",
":",
"# Make a copy of trace",
"x",
"=",
"x",
".",
"copy",
"(",
")",
"# For multivariate node",
"if",
"x",
".",
"ndim",
">",
"1",
... | Returns a dictionary of requested quantiles from array
:Arguments:
x : Numpy array
An array containing MCMC samples
qlist : tuple or list
A list of desired quantiles (defaults to (2.5, 25, 50, 75, 97.5)) | [
"Returns",
"a",
"dictionary",
"of",
"requested",
"quantiles",
"from",
"array"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L718-L747 | train | 220,175 |
pymc-devs/pymc | pymc/utils.py | coda_output | def coda_output(pymc_object, name=None, chain=-1):
"""Generate output files that are compatible with CODA
:Arguments:
pymc_object : Model or Node
A PyMC object containing MCMC output.
"""
print_()
print_("Generating CODA output")
print_('=' * 50)
if name is None:
name = pymc_object.__name__
# Open trace file
trace_file = open(name + '_coda.out', 'w')
# Open index file
index_file = open(name + '_coda.ind', 'w')
variables = [pymc_object]
if hasattr(pymc_object, 'stochastics'):
variables = pymc_object.stochastics
# Initialize index
index = 1
# Loop over all parameters
for v in variables:
vname = v.__name__
print_("Processing", vname)
try:
index = _process_trace(
trace_file,
index_file,
v.trace(chain=chain),
vname,
index)
except TypeError:
pass
# Close files
trace_file.close()
index_file.close() | python | def coda_output(pymc_object, name=None, chain=-1):
"""Generate output files that are compatible with CODA
:Arguments:
pymc_object : Model or Node
A PyMC object containing MCMC output.
"""
print_()
print_("Generating CODA output")
print_('=' * 50)
if name is None:
name = pymc_object.__name__
# Open trace file
trace_file = open(name + '_coda.out', 'w')
# Open index file
index_file = open(name + '_coda.ind', 'w')
variables = [pymc_object]
if hasattr(pymc_object, 'stochastics'):
variables = pymc_object.stochastics
# Initialize index
index = 1
# Loop over all parameters
for v in variables:
vname = v.__name__
print_("Processing", vname)
try:
index = _process_trace(
trace_file,
index_file,
v.trace(chain=chain),
vname,
index)
except TypeError:
pass
# Close files
trace_file.close()
index_file.close() | [
"def",
"coda_output",
"(",
"pymc_object",
",",
"name",
"=",
"None",
",",
"chain",
"=",
"-",
"1",
")",
":",
"print_",
"(",
")",
"print_",
"(",
"\"Generating CODA output\"",
")",
"print_",
"(",
"'='",
"*",
"50",
")",
"if",
"name",
"is",
"None",
":",
"n... | Generate output files that are compatible with CODA
:Arguments:
pymc_object : Model or Node
A PyMC object containing MCMC output. | [
"Generate",
"output",
"files",
"that",
"are",
"compatible",
"with",
"CODA"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L750-L796 | train | 220,176 |
pymc-devs/pymc | pymc/utils.py | getInput | def getInput():
"""Read the input buffer without blocking the system."""
input = ''
if sys.platform == 'win32':
import msvcrt
if msvcrt.kbhit(): # Check for a keyboard hit.
input += msvcrt.getch()
print_(input)
else:
time.sleep(.1)
else: # Other platforms
# Posix will work with sys.stdin or sys.stdin.fileno()
# Mac needs the file descriptor.
# This solution does not work for windows since select
# expects a socket, and I have no idea how to create a
# socket from standard input.
sock = sys.stdin.fileno()
# select(rlist, wlist, xlist, timeout)
while len(select.select([sock], [], [], 0.1)[0]) > 0:
input += decode(os.read(sock, 4096))
return input | python | def getInput():
"""Read the input buffer without blocking the system."""
input = ''
if sys.platform == 'win32':
import msvcrt
if msvcrt.kbhit(): # Check for a keyboard hit.
input += msvcrt.getch()
print_(input)
else:
time.sleep(.1)
else: # Other platforms
# Posix will work with sys.stdin or sys.stdin.fileno()
# Mac needs the file descriptor.
# This solution does not work for windows since select
# expects a socket, and I have no idea how to create a
# socket from standard input.
sock = sys.stdin.fileno()
# select(rlist, wlist, xlist, timeout)
while len(select.select([sock], [], [], 0.1)[0]) > 0:
input += decode(os.read(sock, 4096))
return input | [
"def",
"getInput",
"(",
")",
":",
"input",
"=",
"''",
"if",
"sys",
".",
"platform",
"==",
"'win32'",
":",
"import",
"msvcrt",
"if",
"msvcrt",
".",
"kbhit",
"(",
")",
":",
"# Check for a keyboard hit.",
"input",
"+=",
"msvcrt",
".",
"getch",
"(",
")",
"... | Read the input buffer without blocking the system. | [
"Read",
"the",
"input",
"buffer",
"without",
"blocking",
"the",
"system",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L834-L858 | train | 220,177 |
pymc-devs/pymc | pymc/utils.py | find_generations | def find_generations(container, with_data=False):
"""
A generation is the set of stochastic variables that only has parents in
previous generations.
"""
generations = []
# Find root generation
generations.append(set())
all_children = set()
if with_data:
stochastics_to_iterate = container.stochastics | container.observed_stochastics
else:
stochastics_to_iterate = container.stochastics
for s in stochastics_to_iterate:
all_children.update(s.extended_children & stochastics_to_iterate)
generations[0] = stochastics_to_iterate - all_children
# Find subsequent _generations
children_remaining = True
gen_num = 0
while children_remaining:
gen_num += 1
# Find children of last generation
generations.append(set())
for s in generations[gen_num - 1]:
generations[gen_num].update(
s.extended_children & stochastics_to_iterate)
# Take away stochastics that have parents in the current generation.
thisgen_children = set()
for s in generations[gen_num]:
thisgen_children.update(
s.extended_children & stochastics_to_iterate)
generations[gen_num] -= thisgen_children
# Stop when no subsequent _generations remain
if len(thisgen_children) == 0:
children_remaining = False
return generations | python | def find_generations(container, with_data=False):
"""
A generation is the set of stochastic variables that only has parents in
previous generations.
"""
generations = []
# Find root generation
generations.append(set())
all_children = set()
if with_data:
stochastics_to_iterate = container.stochastics | container.observed_stochastics
else:
stochastics_to_iterate = container.stochastics
for s in stochastics_to_iterate:
all_children.update(s.extended_children & stochastics_to_iterate)
generations[0] = stochastics_to_iterate - all_children
# Find subsequent _generations
children_remaining = True
gen_num = 0
while children_remaining:
gen_num += 1
# Find children of last generation
generations.append(set())
for s in generations[gen_num - 1]:
generations[gen_num].update(
s.extended_children & stochastics_to_iterate)
# Take away stochastics that have parents in the current generation.
thisgen_children = set()
for s in generations[gen_num]:
thisgen_children.update(
s.extended_children & stochastics_to_iterate)
generations[gen_num] -= thisgen_children
# Stop when no subsequent _generations remain
if len(thisgen_children) == 0:
children_remaining = False
return generations | [
"def",
"find_generations",
"(",
"container",
",",
"with_data",
"=",
"False",
")",
":",
"generations",
"=",
"[",
"]",
"# Find root generation",
"generations",
".",
"append",
"(",
"set",
"(",
")",
")",
"all_children",
"=",
"set",
"(",
")",
"if",
"with_data",
... | A generation is the set of stochastic variables that only has parents in
previous generations. | [
"A",
"generation",
"is",
"the",
"set",
"of",
"stochastic",
"variables",
"that",
"only",
"has",
"parents",
"in",
"previous",
"generations",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L884-L925 | train | 220,178 |
pymc-devs/pymc | pymc/utils.py | append | def append(nodelist, node, label=None, sep='_'):
"""
Append function to automate the naming of list elements in Containers.
:Arguments:
- `nodelist` : List containing nodes for Container.
- `node` : Node to be added to list.
- `label` : Label to be appended to list (If not passed,
defaults to element number).
- `sep` : Separator character for label (defaults to underscore).
:Return:
- `nodelist` : Passed list with node added.
"""
nname = node.__name__
# Determine label
label = label or len(nodelist)
# Look for separator at the end of name
ind = nname.rfind(sep)
# If there is no separator, we will remove last character and
# replace with label.
node.__name__ = nname[:ind] + sep + str(label)
nodelist.append(node)
return nodelist | python | def append(nodelist, node, label=None, sep='_'):
"""
Append function to automate the naming of list elements in Containers.
:Arguments:
- `nodelist` : List containing nodes for Container.
- `node` : Node to be added to list.
- `label` : Label to be appended to list (If not passed,
defaults to element number).
- `sep` : Separator character for label (defaults to underscore).
:Return:
- `nodelist` : Passed list with node added.
"""
nname = node.__name__
# Determine label
label = label or len(nodelist)
# Look for separator at the end of name
ind = nname.rfind(sep)
# If there is no separator, we will remove last character and
# replace with label.
node.__name__ = nname[:ind] + sep + str(label)
nodelist.append(node)
return nodelist | [
"def",
"append",
"(",
"nodelist",
",",
"node",
",",
"label",
"=",
"None",
",",
"sep",
"=",
"'_'",
")",
":",
"nname",
"=",
"node",
".",
"__name__",
"# Determine label",
"label",
"=",
"label",
"or",
"len",
"(",
"nodelist",
")",
"# Look for separator at the e... | Append function to automate the naming of list elements in Containers.
:Arguments:
- `nodelist` : List containing nodes for Container.
- `node` : Node to be added to list.
- `label` : Label to be appended to list (If not passed,
defaults to element number).
- `sep` : Separator character for label (defaults to underscore).
:Return:
- `nodelist` : Passed list with node added. | [
"Append",
"function",
"to",
"automate",
"the",
"naming",
"of",
"list",
"elements",
"in",
"Containers",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/utils.py#L928-L958 | train | 220,179 |
pymc-devs/pymc | pymc/PyMCObjects.py | Deterministic.logp_partial_gradient | def logp_partial_gradient(self, variable, calculation_set=None):
"""
gets the logp gradient of this deterministic with respect to variable
"""
if self.verbose > 0:
print_('\t' + self.__name__ + ': logp_partial_gradient accessed.')
if not (datatypes.is_continuous(variable)
and datatypes.is_continuous(self)):
return zeros(shape(variable.value))
# loop through all the parameters and add up all the gradients of log p
# with respect to the approrpiate variable
gradient = builtins.sum(
[child.logp_partial_gradient(self,
calculation_set) for child in self.children])
totalGradient = 0
for parameter, value in six.iteritems(self.parents):
if value is variable:
totalGradient += self.apply_jacobian(
parameter, variable, gradient)
return np.reshape(totalGradient, shape(variable.value)) | python | def logp_partial_gradient(self, variable, calculation_set=None):
"""
gets the logp gradient of this deterministic with respect to variable
"""
if self.verbose > 0:
print_('\t' + self.__name__ + ': logp_partial_gradient accessed.')
if not (datatypes.is_continuous(variable)
and datatypes.is_continuous(self)):
return zeros(shape(variable.value))
# loop through all the parameters and add up all the gradients of log p
# with respect to the approrpiate variable
gradient = builtins.sum(
[child.logp_partial_gradient(self,
calculation_set) for child in self.children])
totalGradient = 0
for parameter, value in six.iteritems(self.parents):
if value is variable:
totalGradient += self.apply_jacobian(
parameter, variable, gradient)
return np.reshape(totalGradient, shape(variable.value)) | [
"def",
"logp_partial_gradient",
"(",
"self",
",",
"variable",
",",
"calculation_set",
"=",
"None",
")",
":",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print_",
"(",
"'\\t'",
"+",
"self",
".",
"__name__",
"+",
"': logp_partial_gradient accessed.'",
")",
"... | gets the logp gradient of this deterministic with respect to variable | [
"gets",
"the",
"logp",
"gradient",
"of",
"this",
"deterministic",
"with",
"respect",
"to",
"variable"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L503-L527 | train | 220,180 |
pymc-devs/pymc | pymc/PyMCObjects.py | Stochastic.gen_lazy_function | def gen_lazy_function(self):
"""
Will be called by Node at instantiation.
"""
# If value argument to __init__ was None, draw value from random
# method.
if self._value is None:
# Use random function if provided
if self._random is not None:
self.value = self._random(**self._parents.value)
# Otherwise leave initial value at None and warn.
else:
raise ValueError(
'Stochastic ' +
self.__name__ +
"'s value initialized to None; no initial value or random method provided.")
arguments = {}
arguments.update(self.parents)
arguments['value'] = self
arguments = DictContainer(arguments)
self._logp = LazyFunction(fun=self._logp_fun,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
self._logp.force_compute()
self._logp_partial_gradients = {}
for parameter, function in six.iteritems(self._logp_partial_gradient_functions):
lazy_logp_partial_gradient = LazyFunction(fun=function,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
# lazy_logp_partial_gradient.force_compute()
self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient | python | def gen_lazy_function(self):
"""
Will be called by Node at instantiation.
"""
# If value argument to __init__ was None, draw value from random
# method.
if self._value is None:
# Use random function if provided
if self._random is not None:
self.value = self._random(**self._parents.value)
# Otherwise leave initial value at None and warn.
else:
raise ValueError(
'Stochastic ' +
self.__name__ +
"'s value initialized to None; no initial value or random method provided.")
arguments = {}
arguments.update(self.parents)
arguments['value'] = self
arguments = DictContainer(arguments)
self._logp = LazyFunction(fun=self._logp_fun,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
self._logp.force_compute()
self._logp_partial_gradients = {}
for parameter, function in six.iteritems(self._logp_partial_gradient_functions):
lazy_logp_partial_gradient = LazyFunction(fun=function,
arguments=arguments,
ultimate_args=self.extended_parents | set(
[self]),
cache_depth=self._cache_depth)
# lazy_logp_partial_gradient.force_compute()
self._logp_partial_gradients[parameter] = lazy_logp_partial_gradient | [
"def",
"gen_lazy_function",
"(",
"self",
")",
":",
"# If value argument to __init__ was None, draw value from random",
"# method.",
"if",
"self",
".",
"_value",
"is",
"None",
":",
"# Use random function if provided",
"if",
"self",
".",
"_random",
"is",
"not",
"None",
":... | Will be called by Node at instantiation. | [
"Will",
"be",
"called",
"by",
"Node",
"at",
"instantiation",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L776-L817 | train | 220,181 |
pymc-devs/pymc | pymc/PyMCObjects.py | Stochastic.logp_gradient_contribution | def logp_gradient_contribution(self, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
# NEED some sort of check to see if the log p calculation has recently
# failed, in which case not to continue
return self.logp_partial_gradient(self, calculation_set) + builtins.sum(
[child.logp_partial_gradient(self, calculation_set) for child in self.children]) | python | def logp_gradient_contribution(self, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
# NEED some sort of check to see if the log p calculation has recently
# failed, in which case not to continue
return self.logp_partial_gradient(self, calculation_set) + builtins.sum(
[child.logp_partial_gradient(self, calculation_set) for child in self.children]) | [
"def",
"logp_gradient_contribution",
"(",
"self",
",",
"calculation_set",
"=",
"None",
")",
":",
"# NEED some sort of check to see if the log p calculation has recently",
"# failed, in which case not to continue",
"return",
"self",
".",
"logp_partial_gradient",
"(",
"self",
",",
... | Calculates the gradient of the joint log posterior with respect to self.
Calculation of the log posterior is restricted to the variables in calculation_set. | [
"Calculates",
"the",
"gradient",
"of",
"the",
"joint",
"log",
"posterior",
"with",
"respect",
"to",
"self",
".",
"Calculation",
"of",
"the",
"log",
"posterior",
"is",
"restricted",
"to",
"the",
"variables",
"in",
"calculation_set",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L940-L949 | train | 220,182 |
pymc-devs/pymc | pymc/PyMCObjects.py | Stochastic.logp_partial_gradient | def logp_partial_gradient(self, variable, calculation_set=None):
"""
Calculates the partial gradient of the posterior of self with respect to variable.
Returns zero if self is not in calculation_set.
"""
if (calculation_set is None) or (self in calculation_set):
if not datatypes.is_continuous(variable):
return zeros(shape(variable.value))
if variable is self:
try:
gradient_func = self._logp_partial_gradients['value']
except KeyError:
raise NotImplementedError(
repr(
self) +
" has no gradient function for 'value'")
gradient = np.reshape(
gradient_func.get(
),
np.shape(
variable.value))
else:
gradient = builtins.sum(
[self._pgradient(variable,
parameter,
value) for parameter,
value in six.iteritems(self.parents)])
return gradient
else:
return 0 | python | def logp_partial_gradient(self, variable, calculation_set=None):
"""
Calculates the partial gradient of the posterior of self with respect to variable.
Returns zero if self is not in calculation_set.
"""
if (calculation_set is None) or (self in calculation_set):
if not datatypes.is_continuous(variable):
return zeros(shape(variable.value))
if variable is self:
try:
gradient_func = self._logp_partial_gradients['value']
except KeyError:
raise NotImplementedError(
repr(
self) +
" has no gradient function for 'value'")
gradient = np.reshape(
gradient_func.get(
),
np.shape(
variable.value))
else:
gradient = builtins.sum(
[self._pgradient(variable,
parameter,
value) for parameter,
value in six.iteritems(self.parents)])
return gradient
else:
return 0 | [
"def",
"logp_partial_gradient",
"(",
"self",
",",
"variable",
",",
"calculation_set",
"=",
"None",
")",
":",
"if",
"(",
"calculation_set",
"is",
"None",
")",
"or",
"(",
"self",
"in",
"calculation_set",
")",
":",
"if",
"not",
"datatypes",
".",
"is_continuous"... | Calculates the partial gradient of the posterior of self with respect to variable.
Returns zero if self is not in calculation_set. | [
"Calculates",
"the",
"partial",
"gradient",
"of",
"the",
"posterior",
"of",
"self",
"with",
"respect",
"to",
"variable",
".",
"Returns",
"zero",
"if",
"self",
"is",
"not",
"in",
"calculation_set",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L951-L985 | train | 220,183 |
pymc-devs/pymc | pymc/PyMCObjects.py | Stochastic.random | def random(self):
"""
Draws a new value for a stoch conditional on its parents
and returns it.
Raises an error if no 'random' argument was passed to __init__.
"""
if self._random:
# Get current values of parents for use as arguments for _random()
r = self._random(**self.parents.value)
else:
raise AttributeError(
'Stochastic ' +
self.__name__ +
' does not know how to draw its value, see documentation')
if self.shape:
r = np.reshape(r, self.shape)
# Set Stochastic's value to drawn value
if not self.observed:
self.value = r
return r | python | def random(self):
"""
Draws a new value for a stoch conditional on its parents
and returns it.
Raises an error if no 'random' argument was passed to __init__.
"""
if self._random:
# Get current values of parents for use as arguments for _random()
r = self._random(**self.parents.value)
else:
raise AttributeError(
'Stochastic ' +
self.__name__ +
' does not know how to draw its value, see documentation')
if self.shape:
r = np.reshape(r, self.shape)
# Set Stochastic's value to drawn value
if not self.observed:
self.value = r
return r | [
"def",
"random",
"(",
"self",
")",
":",
"if",
"self",
".",
"_random",
":",
"# Get current values of parents for use as arguments for _random()",
"r",
"=",
"self",
".",
"_random",
"(",
"*",
"*",
"self",
".",
"parents",
".",
"value",
")",
"else",
":",
"raise",
... | Draws a new value for a stoch conditional on its parents
and returns it.
Raises an error if no 'random' argument was passed to __init__. | [
"Draws",
"a",
"new",
"value",
"for",
"a",
"stoch",
"conditional",
"on",
"its",
"parents",
"and",
"returns",
"it",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/PyMCObjects.py#L1002-L1026 | train | 220,184 |
pymc-devs/pymc | pymc/database/hdf5.py | save_sampler | def save_sampler(sampler):
"""
Dumps a sampler into its hdf5 database.
"""
db = sampler.db
fnode = tables.filenode.newnode(db._h5file, where='/', name='__sampler__')
import pickle
pickle.dump(sampler, fnode) | python | def save_sampler(sampler):
"""
Dumps a sampler into its hdf5 database.
"""
db = sampler.db
fnode = tables.filenode.newnode(db._h5file, where='/', name='__sampler__')
import pickle
pickle.dump(sampler, fnode) | [
"def",
"save_sampler",
"(",
"sampler",
")",
":",
"db",
"=",
"sampler",
".",
"db",
"fnode",
"=",
"tables",
".",
"filenode",
".",
"newnode",
"(",
"db",
".",
"_h5file",
",",
"where",
"=",
"'/'",
",",
"name",
"=",
"'__sampler__'",
")",
"import",
"pickle",
... | Dumps a sampler into its hdf5 database. | [
"Dumps",
"a",
"sampler",
"into",
"its",
"hdf5",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L605-L612 | train | 220,185 |
pymc-devs/pymc | pymc/database/hdf5.py | restore_sampler | def restore_sampler(fname):
"""
Creates a new sampler from an hdf5 database.
"""
hf = tables.open_file(fname)
fnode = hf.root.__sampler__
import pickle
sampler = pickle.load(fnode)
return sampler | python | def restore_sampler(fname):
"""
Creates a new sampler from an hdf5 database.
"""
hf = tables.open_file(fname)
fnode = hf.root.__sampler__
import pickle
sampler = pickle.load(fnode)
return sampler | [
"def",
"restore_sampler",
"(",
"fname",
")",
":",
"hf",
"=",
"tables",
".",
"open_file",
"(",
"fname",
")",
"fnode",
"=",
"hf",
".",
"root",
".",
"__sampler__",
"import",
"pickle",
"sampler",
"=",
"pickle",
".",
"load",
"(",
"fnode",
")",
"return",
"sa... | Creates a new sampler from an hdf5 database. | [
"Creates",
"a",
"new",
"sampler",
"from",
"an",
"hdf5",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L615-L623 | train | 220,186 |
pymc-devs/pymc | pymc/database/hdf5.py | Trace.tally | def tally(self, chain):
"""Adds current value to trace"""
self.db._rows[chain][self.name] = self._getfunc() | python | def tally(self, chain):
"""Adds current value to trace"""
self.db._rows[chain][self.name] = self._getfunc() | [
"def",
"tally",
"(",
"self",
",",
"chain",
")",
":",
"self",
".",
"db",
".",
"_rows",
"[",
"chain",
"]",
"[",
"self",
".",
"name",
"]",
"=",
"self",
".",
"_getfunc",
"(",
")"
] | Adds current value to trace | [
"Adds",
"current",
"value",
"to",
"trace"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L141-L143 | train | 220,187 |
pymc-devs/pymc | pymc/database/hdf5.py | Trace.hdf5_col | def hdf5_col(self, chain=-1):
"""Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend.
"""
return self.db._tables[chain].colinstances[self.name] | python | def hdf5_col(self, chain=-1):
"""Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend.
"""
return self.db._tables[chain].colinstances[self.name] | [
"def",
"hdf5_col",
"(",
"self",
",",
"chain",
"=",
"-",
"1",
")",
":",
"return",
"self",
".",
"db",
".",
"_tables",
"[",
"chain",
"]",
".",
"colinstances",
"[",
"self",
".",
"name",
"]"
] | Return a pytables column object.
:Parameters:
chain : integer
The index of the chain.
.. note::
This method is specific to the ``hdf5`` backend. | [
"Return",
"a",
"pytables",
"column",
"object",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L195-L205 | train | 220,188 |
pymc-devs/pymc | pymc/database/hdf5.py | Database.savestate | def savestate(self, state, chain=-1):
"""Store a dictionnary containing the state of the Model and its
StepMethods."""
cur_chain = self._chains[chain]
if hasattr(cur_chain, '_state_'):
cur_chain._state_[0] = state
else:
s = self._h5file.create_vlarray(
cur_chain,
'_state_',
tables.ObjectAtom(),
title='The saved state of the sampler',
filters=self.filter)
s.append(state)
self._h5file.flush() | python | def savestate(self, state, chain=-1):
"""Store a dictionnary containing the state of the Model and its
StepMethods."""
cur_chain = self._chains[chain]
if hasattr(cur_chain, '_state_'):
cur_chain._state_[0] = state
else:
s = self._h5file.create_vlarray(
cur_chain,
'_state_',
tables.ObjectAtom(),
title='The saved state of the sampler',
filters=self.filter)
s.append(state)
self._h5file.flush() | [
"def",
"savestate",
"(",
"self",
",",
"state",
",",
"chain",
"=",
"-",
"1",
")",
":",
"cur_chain",
"=",
"self",
".",
"_chains",
"[",
"chain",
"]",
"if",
"hasattr",
"(",
"cur_chain",
",",
"'_state_'",
")",
":",
"cur_chain",
".",
"_state_",
"[",
"0",
... | Store a dictionnary containing the state of the Model and its
StepMethods. | [
"Store",
"a",
"dictionnary",
"containing",
"the",
"state",
"of",
"the",
"Model",
"and",
"its",
"StepMethods",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L485-L499 | train | 220,189 |
pymc-devs/pymc | pymc/database/hdf5.py | Database._model_trace_description | def _model_trace_description(self):
"""Return a description of the table and the ObjectAtoms to be created.
:Returns:
table_description : dict
A Description of the pyTables table.
ObjectAtomsn : dict
A
in terms of PyTables
columns, and a"""
D = {}
for name, fun in six.iteritems(self.model._funs_to_tally):
arr = asarray(fun())
D[name] = tables.Col.from_dtype(dtype((arr.dtype, arr.shape)))
return D, {} | python | def _model_trace_description(self):
"""Return a description of the table and the ObjectAtoms to be created.
:Returns:
table_description : dict
A Description of the pyTables table.
ObjectAtomsn : dict
A
in terms of PyTables
columns, and a"""
D = {}
for name, fun in six.iteritems(self.model._funs_to_tally):
arr = asarray(fun())
D[name] = tables.Col.from_dtype(dtype((arr.dtype, arr.shape)))
return D, {} | [
"def",
"_model_trace_description",
"(",
"self",
")",
":",
"D",
"=",
"{",
"}",
"for",
"name",
",",
"fun",
"in",
"six",
".",
"iteritems",
"(",
"self",
".",
"model",
".",
"_funs_to_tally",
")",
":",
"arr",
"=",
"asarray",
"(",
"fun",
"(",
")",
")",
"D... | Return a description of the table and the ObjectAtoms to be created.
:Returns:
table_description : dict
A Description of the pyTables table.
ObjectAtomsn : dict
A
in terms of PyTables
columns, and a | [
"Return",
"a",
"description",
"of",
"the",
"table",
"and",
"the",
"ObjectAtoms",
"to",
"be",
"created",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L512-L526 | train | 220,190 |
pymc-devs/pymc | pymc/database/hdf5.py | Database._check_compatibility | def _check_compatibility(self):
"""Make sure the next objects to be tallied are compatible with the
stored trace."""
stored_descr = self._file_trace_description()
try:
for k, v in self._model_trace_description():
assert(stored_descr[k][0] == v[0])
except:
raise ValueError(
"The objects to tally are incompatible with the objects stored in the file.") | python | def _check_compatibility(self):
"""Make sure the next objects to be tallied are compatible with the
stored trace."""
stored_descr = self._file_trace_description()
try:
for k, v in self._model_trace_description():
assert(stored_descr[k][0] == v[0])
except:
raise ValueError(
"The objects to tally are incompatible with the objects stored in the file.") | [
"def",
"_check_compatibility",
"(",
"self",
")",
":",
"stored_descr",
"=",
"self",
".",
"_file_trace_description",
"(",
")",
"try",
":",
"for",
"k",
",",
"v",
"in",
"self",
".",
"_model_trace_description",
"(",
")",
":",
"assert",
"(",
"stored_descr",
"[",
... | Make sure the next objects to be tallied are compatible with the
stored trace. | [
"Make",
"sure",
"the",
"next",
"objects",
"to",
"be",
"tallied",
"are",
"compatible",
"with",
"the",
"stored",
"trace",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L533-L542 | train | 220,191 |
pymc-devs/pymc | pymc/database/hdf5.py | Database._gettables | def _gettables(self):
"""Return a list of hdf5 tables name PyMCsamples.
"""
groups = self._h5file.list_nodes("/")
if len(groups) == 0:
return []
else:
return [
gr.PyMCsamples for gr in groups if gr._v_name[:5] == 'chain'] | python | def _gettables(self):
"""Return a list of hdf5 tables name PyMCsamples.
"""
groups = self._h5file.list_nodes("/")
if len(groups) == 0:
return []
else:
return [
gr.PyMCsamples for gr in groups if gr._v_name[:5] == 'chain'] | [
"def",
"_gettables",
"(",
"self",
")",
":",
"groups",
"=",
"self",
".",
"_h5file",
".",
"list_nodes",
"(",
"\"/\"",
")",
"if",
"len",
"(",
"groups",
")",
"==",
"0",
":",
"return",
"[",
"]",
"else",
":",
"return",
"[",
"gr",
".",
"PyMCsamples",
"for... | Return a list of hdf5 tables name PyMCsamples. | [
"Return",
"a",
"list",
"of",
"hdf5",
"tables",
"name",
"PyMCsamples",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L544-L553 | train | 220,192 |
pymc-devs/pymc | pymc/database/hdf5.py | Database.add_attr | def add_attr(self, name, object, description='', chain=-1, array=False):
"""Add an attribute to the chain.
description may not be supported for every date type.
if array is true, create an Array object.
"""
if not np.isscalar(chain):
raise TypeError("chain must be a scalar integer.")
table = self._tables[chain]
if array is False:
table.set_attr(name, object)
obj = getattr(table.attrs, name)
else:
# Create an array in the group
if description == '':
description = name
group = table._g_getparent()
self._h5file.create_array(group, name, object, description)
obj = getattr(group, name)
setattr(self, name, obj) | python | def add_attr(self, name, object, description='', chain=-1, array=False):
"""Add an attribute to the chain.
description may not be supported for every date type.
if array is true, create an Array object.
"""
if not np.isscalar(chain):
raise TypeError("chain must be a scalar integer.")
table = self._tables[chain]
if array is False:
table.set_attr(name, object)
obj = getattr(table.attrs, name)
else:
# Create an array in the group
if description == '':
description = name
group = table._g_getparent()
self._h5file.create_array(group, name, object, description)
obj = getattr(group, name)
setattr(self, name, obj) | [
"def",
"add_attr",
"(",
"self",
",",
"name",
",",
"object",
",",
"description",
"=",
"''",
",",
"chain",
"=",
"-",
"1",
",",
"array",
"=",
"False",
")",
":",
"if",
"not",
"np",
".",
"isscalar",
"(",
"chain",
")",
":",
"raise",
"TypeError",
"(",
"... | Add an attribute to the chain.
description may not be supported for every date type.
if array is true, create an Array object. | [
"Add",
"an",
"attribute",
"to",
"the",
"chain",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/hdf5.py#L558-L581 | train | 220,193 |
pymc-devs/pymc | pymc/examples/disaster_model.py | rate | def rate(s=switchpoint, e=early_mean, l=late_mean):
''' Concatenate Poisson means '''
out = empty(len(disasters_array))
out[:s] = e
out[s:] = l
return out | python | def rate(s=switchpoint, e=early_mean, l=late_mean):
''' Concatenate Poisson means '''
out = empty(len(disasters_array))
out[:s] = e
out[s:] = l
return out | [
"def",
"rate",
"(",
"s",
"=",
"switchpoint",
",",
"e",
"=",
"early_mean",
",",
"l",
"=",
"late_mean",
")",
":",
"out",
"=",
"empty",
"(",
"len",
"(",
"disasters_array",
")",
")",
"out",
"[",
":",
"s",
"]",
"=",
"e",
"out",
"[",
"s",
":",
"]",
... | Concatenate Poisson means | [
"Concatenate",
"Poisson",
"means"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/disaster_model.py#L43-L48 | train | 220,194 |
pymc-devs/pymc | pymc/gp/cov_funs/cov_utils.py | regularize_array | def regularize_array(A):
"""
Takes an np.ndarray as an input.
- If the array is one-dimensional, it's assumed to be an array of input values.
- If the array is more than one-dimensional, its last index is assumed to curse
over spatial dimension.
Either way, the return value is at least two dimensional. A.shape[-1] gives the
number of spatial dimensions.
"""
if not isinstance(A,np.ndarray):
A = np.array(A, dtype=float)
else:
A = np.asarray(A, dtype=float)
if len(A.shape) <= 1:
return A.reshape(-1,1)
elif A.shape[-1]>1:
return A.reshape(-1, A.shape[-1])
else:
return A | python | def regularize_array(A):
"""
Takes an np.ndarray as an input.
- If the array is one-dimensional, it's assumed to be an array of input values.
- If the array is more than one-dimensional, its last index is assumed to curse
over spatial dimension.
Either way, the return value is at least two dimensional. A.shape[-1] gives the
number of spatial dimensions.
"""
if not isinstance(A,np.ndarray):
A = np.array(A, dtype=float)
else:
A = np.asarray(A, dtype=float)
if len(A.shape) <= 1:
return A.reshape(-1,1)
elif A.shape[-1]>1:
return A.reshape(-1, A.shape[-1])
else:
return A | [
"def",
"regularize_array",
"(",
"A",
")",
":",
"if",
"not",
"isinstance",
"(",
"A",
",",
"np",
".",
"ndarray",
")",
":",
"A",
"=",
"np",
".",
"array",
"(",
"A",
",",
"dtype",
"=",
"float",
")",
"else",
":",
"A",
"=",
"np",
".",
"asarray",
"(",
... | Takes an np.ndarray as an input.
- If the array is one-dimensional, it's assumed to be an array of input values.
- If the array is more than one-dimensional, its last index is assumed to curse
over spatial dimension.
Either way, the return value is at least two dimensional. A.shape[-1] gives the
number of spatial dimensions. | [
"Takes",
"an",
"np",
".",
"ndarray",
"as",
"an",
"input",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/cov_funs/cov_utils.py#L23-L49 | train | 220,195 |
pymc-devs/pymc | pymc/gp/cov_funs/cov_utils.py | import_item | def import_item(name):
"""
Useful for importing nested modules such as pymc.gp.cov_funs.isotropic_cov_funs.
Updated with code copied from IPython under a BSD license.
"""
package = '.'.join(name.split('.')[0:-1])
obj = name.split('.')[-1]
if package:
module = __import__(package,fromlist=[obj])
return module.__dict__[obj]
else:
return __import__(obj) | python | def import_item(name):
"""
Useful for importing nested modules such as pymc.gp.cov_funs.isotropic_cov_funs.
Updated with code copied from IPython under a BSD license.
"""
package = '.'.join(name.split('.')[0:-1])
obj = name.split('.')[-1]
if package:
module = __import__(package,fromlist=[obj])
return module.__dict__[obj]
else:
return __import__(obj) | [
"def",
"import_item",
"(",
"name",
")",
":",
"package",
"=",
"'.'",
".",
"join",
"(",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
":",
"-",
"1",
"]",
")",
"obj",
"=",
"name",
".",
"split",
"(",
"'.'",
")",
"[",
"-",
"1",
"]",
"if",
"pa... | Useful for importing nested modules such as pymc.gp.cov_funs.isotropic_cov_funs.
Updated with code copied from IPython under a BSD license. | [
"Useful",
"for",
"importing",
"nested",
"modules",
"such",
"as",
"pymc",
".",
"gp",
".",
"cov_funs",
".",
"isotropic_cov_funs",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/cov_funs/cov_utils.py#L51-L64 | train | 220,196 |
pymc-devs/pymc | pymc/gp/cov_funs/cov_utils.py | covariance_function_bundle.add_distance_metric | def add_distance_metric(self, distance_fun_name, distance_fun_module, with_x):
"""
Takes a function that computes a distance matrix for
points in some coordinate system and returns self's
covariance function wrapped to use that distance function.
Uses function apply_distance, which was used to produce
self.euclidean and self.geographic and their docstrings.
:Parameters:
- `distance_fun`: Creates a distance matrix from two
np.arrays of points, where the first index iterates
over separate points and the second over coordinates.
In addition to the arrays x and y, distance_fun should
take an argument called symm which indicates whether
x and y are the same array.
:SeeAlso:
- `apply_distance()`
"""
if self.ampsq_is_diag:
kls = covariance_wrapper_with_diag
else:
kls = covariance_wrapper
new_fun = kls(self.cov_fun_name, self.cov_fun_module, self.extra_cov_params, distance_fun_name, distance_fun_module, with_x=with_x)
self.wrappers.append(new_fun)
# try:
setattr(self, distance_fun_name, new_fun)
# except:
# pass
return new_fun | python | def add_distance_metric(self, distance_fun_name, distance_fun_module, with_x):
"""
Takes a function that computes a distance matrix for
points in some coordinate system and returns self's
covariance function wrapped to use that distance function.
Uses function apply_distance, which was used to produce
self.euclidean and self.geographic and their docstrings.
:Parameters:
- `distance_fun`: Creates a distance matrix from two
np.arrays of points, where the first index iterates
over separate points and the second over coordinates.
In addition to the arrays x and y, distance_fun should
take an argument called symm which indicates whether
x and y are the same array.
:SeeAlso:
- `apply_distance()`
"""
if self.ampsq_is_diag:
kls = covariance_wrapper_with_diag
else:
kls = covariance_wrapper
new_fun = kls(self.cov_fun_name, self.cov_fun_module, self.extra_cov_params, distance_fun_name, distance_fun_module, with_x=with_x)
self.wrappers.append(new_fun)
# try:
setattr(self, distance_fun_name, new_fun)
# except:
# pass
return new_fun | [
"def",
"add_distance_metric",
"(",
"self",
",",
"distance_fun_name",
",",
"distance_fun_module",
",",
"with_x",
")",
":",
"if",
"self",
".",
"ampsq_is_diag",
":",
"kls",
"=",
"covariance_wrapper_with_diag",
"else",
":",
"kls",
"=",
"covariance_wrapper",
"new_fun",
... | Takes a function that computes a distance matrix for
points in some coordinate system and returns self's
covariance function wrapped to use that distance function.
Uses function apply_distance, which was used to produce
self.euclidean and self.geographic and their docstrings.
:Parameters:
- `distance_fun`: Creates a distance matrix from two
np.arrays of points, where the first index iterates
over separate points and the second over coordinates.
In addition to the arrays x and y, distance_fun should
take an argument called symm which indicates whether
x and y are the same array.
:SeeAlso:
- `apply_distance()` | [
"Takes",
"a",
"function",
"that",
"computes",
"a",
"distance",
"matrix",
"for",
"points",
"in",
"some",
"coordinate",
"system",
"and",
"returns",
"self",
"s",
"covariance",
"function",
"wrapped",
"to",
"use",
"that",
"distance",
"function",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/gp/cov_funs/cov_utils.py#L271-L307 | train | 220,197 |
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.func | def func(self, p):
"""
The function that gets passed to the optimizers.
"""
self._set_stochastics(p)
try:
return -1. * self.logp
except ZeroProbability:
return Inf | python | def func(self, p):
"""
The function that gets passed to the optimizers.
"""
self._set_stochastics(p)
try:
return -1. * self.logp
except ZeroProbability:
return Inf | [
"def",
"func",
"(",
"self",
",",
"p",
")",
":",
"self",
".",
"_set_stochastics",
"(",
"p",
")",
"try",
":",
"return",
"-",
"1.",
"*",
"self",
".",
"logp",
"except",
"ZeroProbability",
":",
"return",
"Inf"
] | The function that gets passed to the optimizers. | [
"The",
"function",
"that",
"gets",
"passed",
"to",
"the",
"optimizers",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L387-L395 | train | 220,198 |
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.gradfunc | def gradfunc(self, p):
"""
The gradient-computing function that gets passed to the optimizers,
if needed.
"""
self._set_stochastics(p)
for i in xrange(self.len):
self.grad[i] = self.diff(i)
return -1 * self.grad | python | def gradfunc(self, p):
"""
The gradient-computing function that gets passed to the optimizers,
if needed.
"""
self._set_stochastics(p)
for i in xrange(self.len):
self.grad[i] = self.diff(i)
return -1 * self.grad | [
"def",
"gradfunc",
"(",
"self",
",",
"p",
")",
":",
"self",
".",
"_set_stochastics",
"(",
"p",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"len",
")",
":",
"self",
".",
"grad",
"[",
"i",
"]",
"=",
"self",
".",
"diff",
"(",
"i",
")",
"r... | The gradient-computing function that gets passed to the optimizers,
if needed. | [
"The",
"gradient",
"-",
"computing",
"function",
"that",
"gets",
"passed",
"to",
"the",
"optimizers",
"if",
"needed",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L397-L406 | train | 220,199 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.