repo stringlengths 7 55 | path stringlengths 4 127 | func_name stringlengths 1 88 | original_string stringlengths 75 19.8k | language stringclasses 1 value | code stringlengths 75 19.8k | code_tokens listlengths 20 707 | docstring stringlengths 3 17.3k | docstring_tokens listlengths 3 222 | sha stringlengths 40 40 | url stringlengths 87 242 | partition stringclasses 1 value | idx int64 0 252k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.i_logp | def i_logp(self, index):
"""
Evaluates the log-probability of the Markov blanket of
a stochastic owning a particular index.
"""
all_relevant_stochastics = set()
p, i = self.stochastic_indices[index]
try:
return p.logp + logp_of_set(p.extended_children)
except ZeroProbability:
return -Inf | python | def i_logp(self, index):
"""
Evaluates the log-probability of the Markov blanket of
a stochastic owning a particular index.
"""
all_relevant_stochastics = set()
p, i = self.stochastic_indices[index]
try:
return p.logp + logp_of_set(p.extended_children)
except ZeroProbability:
return -Inf | [
"def",
"i_logp",
"(",
"self",
",",
"index",
")",
":",
"all_relevant_stochastics",
"=",
"set",
"(",
")",
"p",
",",
"i",
"=",
"self",
".",
"stochastic_indices",
"[",
"index",
"]",
"try",
":",
"return",
"p",
".",
"logp",
"+",
"logp_of_set",
"(",
"p",
".... | Evaluates the log-probability of the Markov blanket of
a stochastic owning a particular index. | [
"Evaluates",
"the",
"log",
"-",
"probability",
"of",
"the",
"Markov",
"blanket",
"of",
"a",
"stochastic",
"owning",
"a",
"particular",
"index",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L430-L440 | train | 220,200 |
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.grad_and_hess | def grad_and_hess(self):
"""
Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin.
"""
for i in xrange(self.len):
di = self.diff(i)
self.grad[i] = di
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij | python | def grad_and_hess(self):
"""
Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin.
"""
for i in xrange(self.len):
di = self.diff(i)
self.grad[i] = di
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij | [
"def",
"grad_and_hess",
"(",
"self",
")",
":",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"len",
")",
":",
"di",
"=",
"self",
".",
"diff",
"(",
"i",
")",
"self",
".",
"grad",
"[",
"i",
"]",
"=",
"di",
"self",
".",
"hess",
"[",
"i",
",",
... | Computes self's gradient and Hessian. Used if the
optimization method for a NormApprox doesn't
use gradients and hessians, for instance fmin. | [
"Computes",
"self",
"s",
"gradient",
"and",
"Hessian",
".",
"Used",
"if",
"the",
"optimization",
"method",
"for",
"a",
"NormApprox",
"doesn",
"t",
"use",
"gradients",
"and",
"hessians",
"for",
"instance",
"fmin",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L487-L505 | train | 220,201 |
pymc-devs/pymc | pymc/NormalApproximation.py | MAP.hessfunc | def hessfunc(self, p):
"""
The Hessian function that will be passed to the optimizer,
if needed.
"""
self._set_stochastics(p)
for i in xrange(self.len):
di = self.diff(i)
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij
return -1. * self.hess | python | def hessfunc(self, p):
"""
The Hessian function that will be passed to the optimizer,
if needed.
"""
self._set_stochastics(p)
for i in xrange(self.len):
di = self.diff(i)
self.hess[i, i] = self.diff(i, 2)
if i < self.len - 1:
for j in xrange(i + 1, self.len):
dij = self.diff2(i, j)
self.hess[i, j] = dij
self.hess[j, i] = dij
return -1. * self.hess | [
"def",
"hessfunc",
"(",
"self",
",",
"p",
")",
":",
"self",
".",
"_set_stochastics",
"(",
"p",
")",
"for",
"i",
"in",
"xrange",
"(",
"self",
".",
"len",
")",
":",
"di",
"=",
"self",
".",
"diff",
"(",
"i",
")",
"self",
".",
"hess",
"[",
"i",
"... | The Hessian function that will be passed to the optimizer,
if needed. | [
"The",
"Hessian",
"function",
"that",
"will",
"be",
"passed",
"to",
"the",
"optimizer",
"if",
"needed",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/NormalApproximation.py#L507-L526 | train | 220,202 |
pymc-devs/pymc | pymc/threadpool.py | makeRequests | def makeRequests(callable_, args_list, callback=None,
exc_callback=_handle_thread_exception):
"""Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``.
"""
requests = []
for item in args_list:
if isinstance(item, tuple):
requests.append(
WorkRequest(callable_, item[0], item[1], callback=callback,
exc_callback=exc_callback)
)
else:
requests.append(
WorkRequest(callable_, [item], None, callback=callback,
exc_callback=exc_callback)
)
return requests | python | def makeRequests(callable_, args_list, callback=None,
exc_callback=_handle_thread_exception):
"""Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``.
"""
requests = []
for item in args_list:
if isinstance(item, tuple):
requests.append(
WorkRequest(callable_, item[0], item[1], callback=callback,
exc_callback=exc_callback)
)
else:
requests.append(
WorkRequest(callable_, [item], None, callback=callback,
exc_callback=exc_callback)
)
return requests | [
"def",
"makeRequests",
"(",
"callable_",
",",
"args_list",
",",
"callback",
"=",
"None",
",",
"exc_callback",
"=",
"_handle_thread_exception",
")",
":",
"requests",
"=",
"[",
"]",
"for",
"item",
"in",
"args_list",
":",
"if",
"isinstance",
"(",
"item",
",",
... | Create several work requests for same callable with different arguments.
Convenience function for creating several work requests for the same
callable where each invocation of the callable receives different values
for its arguments.
``args_list`` contains the parameters for each invocation of callable.
Each item in ``args_list`` should be either a 2-item tuple of the list of
positional arguments and a dictionary of keyword arguments or a single,
non-tuple argument.
See docstring for ``WorkRequest`` for info on ``callback`` and
``exc_callback``. | [
"Create",
"several",
"work",
"requests",
"for",
"same",
"callable",
"with",
"different",
"arguments",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L95-L124 | train | 220,203 |
pymc-devs/pymc | pymc/threadpool.py | thread_partition_array | def thread_partition_array(x):
"Partition work arrays for multithreaded addition and multiplication"
n_threads = get_threadpool_size()
if len(x.shape) > 1:
maxind = x.shape[1]
else:
maxind = x.shape[0]
bounds = np.array(np.linspace(0, maxind, n_threads + 1), dtype='int')
cmin = bounds[:-1]
cmax = bounds[1:]
return cmin, cmax | python | def thread_partition_array(x):
"Partition work arrays for multithreaded addition and multiplication"
n_threads = get_threadpool_size()
if len(x.shape) > 1:
maxind = x.shape[1]
else:
maxind = x.shape[0]
bounds = np.array(np.linspace(0, maxind, n_threads + 1), dtype='int')
cmin = bounds[:-1]
cmax = bounds[1:]
return cmin, cmax | [
"def",
"thread_partition_array",
"(",
"x",
")",
":",
"n_threads",
"=",
"get_threadpool_size",
"(",
")",
"if",
"len",
"(",
"x",
".",
"shape",
")",
">",
"1",
":",
"maxind",
"=",
"x",
".",
"shape",
"[",
"1",
"]",
"else",
":",
"maxind",
"=",
"x",
".",
... | Partition work arrays for multithreaded addition and multiplication | [
"Partition",
"work",
"arrays",
"for",
"multithreaded",
"addition",
"and",
"multiplication"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L402-L412 | train | 220,204 |
pymc-devs/pymc | pymc/threadpool.py | WorkerThread.run | def run(self):
"""Repeatedly process the job queue until told to exit."""
while True:
if self._dismissed.isSet():
# we are dismissed, break out of loop
break
# get next work request.
request = self._requests_queue.get()
# print 'Worker thread %s running request %s' %(self, request)
if self._dismissed.isSet():
# we are dismissed, put back request in queue and exit loop
self._requests_queue.put(request)
break
try:
result = request.callable(*request.args, **request.kwds)
if request.callback:
request.callback(request, result)
del result
self._requests_queue.task_done()
except:
request.exception = True
if request.exc_callback:
request.exc_callback(request)
self._requests_queue.task_done()
finally:
request.self_destruct() | python | def run(self):
"""Repeatedly process the job queue until told to exit."""
while True:
if self._dismissed.isSet():
# we are dismissed, break out of loop
break
# get next work request.
request = self._requests_queue.get()
# print 'Worker thread %s running request %s' %(self, request)
if self._dismissed.isSet():
# we are dismissed, put back request in queue and exit loop
self._requests_queue.put(request)
break
try:
result = request.callable(*request.args, **request.kwds)
if request.callback:
request.callback(request, result)
del result
self._requests_queue.task_done()
except:
request.exception = True
if request.exc_callback:
request.exc_callback(request)
self._requests_queue.task_done()
finally:
request.self_destruct() | [
"def",
"run",
"(",
"self",
")",
":",
"while",
"True",
":",
"if",
"self",
".",
"_dismissed",
".",
"isSet",
"(",
")",
":",
"# we are dismissed, break out of loop",
"break",
"# get next work request.",
"request",
"=",
"self",
".",
"_requests_queue",
".",
"get",
"... | Repeatedly process the job queue until told to exit. | [
"Repeatedly",
"process",
"the",
"job",
"queue",
"until",
"told",
"to",
"exit",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L152-L179 | train | 220,205 |
pymc-devs/pymc | pymc/threadpool.py | ThreadPool.createWorkers | def createWorkers(self, num_workers):
"""Add num_workers worker threads to the pool.
``poll_timout`` sets the interval in seconds (int or float) for how
ofte threads should check whether they are dismissed, while waiting for
requests.
"""
for i in range(num_workers):
self.workers.append(WorkerThread(self._requests_queue)) | python | def createWorkers(self, num_workers):
"""Add num_workers worker threads to the pool.
``poll_timout`` sets the interval in seconds (int or float) for how
ofte threads should check whether they are dismissed, while waiting for
requests.
"""
for i in range(num_workers):
self.workers.append(WorkerThread(self._requests_queue)) | [
"def",
"createWorkers",
"(",
"self",
",",
"num_workers",
")",
":",
"for",
"i",
"in",
"range",
"(",
"num_workers",
")",
":",
"self",
".",
"workers",
".",
"append",
"(",
"WorkerThread",
"(",
"self",
".",
"_requests_queue",
")",
")"
] | Add num_workers worker threads to the pool.
``poll_timout`` sets the interval in seconds (int or float) for how
ofte threads should check whether they are dismissed, while waiting for
requests. | [
"Add",
"num_workers",
"worker",
"threads",
"to",
"the",
"pool",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L287-L296 | train | 220,206 |
pymc-devs/pymc | pymc/threadpool.py | ThreadPool.dismissWorkers | def dismissWorkers(self, num_workers):
"""Tell num_workers worker threads to quit after their current task."""
for i in range(min(num_workers, len(self.workers))):
worker = self.workers.pop()
worker.dismiss() | python | def dismissWorkers(self, num_workers):
"""Tell num_workers worker threads to quit after their current task."""
for i in range(min(num_workers, len(self.workers))):
worker = self.workers.pop()
worker.dismiss() | [
"def",
"dismissWorkers",
"(",
"self",
",",
"num_workers",
")",
":",
"for",
"i",
"in",
"range",
"(",
"min",
"(",
"num_workers",
",",
"len",
"(",
"self",
".",
"workers",
")",
")",
")",
":",
"worker",
"=",
"self",
".",
"workers",
".",
"pop",
"(",
")",... | Tell num_workers worker threads to quit after their current task. | [
"Tell",
"num_workers",
"worker",
"threads",
"to",
"quit",
"after",
"their",
"current",
"task",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L298-L302 | train | 220,207 |
pymc-devs/pymc | pymc/threadpool.py | ThreadPool.setNumWorkers | def setNumWorkers(self, num_workers):
"""Set number of worker threads to num_workers"""
cur_num = len(self.workers)
if cur_num > num_workers:
self.dismissWorkers(cur_num - num_workers)
else:
self.createWorkers(num_workers - cur_num) | python | def setNumWorkers(self, num_workers):
"""Set number of worker threads to num_workers"""
cur_num = len(self.workers)
if cur_num > num_workers:
self.dismissWorkers(cur_num - num_workers)
else:
self.createWorkers(num_workers - cur_num) | [
"def",
"setNumWorkers",
"(",
"self",
",",
"num_workers",
")",
":",
"cur_num",
"=",
"len",
"(",
"self",
".",
"workers",
")",
"if",
"cur_num",
">",
"num_workers",
":",
"self",
".",
"dismissWorkers",
"(",
"cur_num",
"-",
"num_workers",
")",
"else",
":",
"se... | Set number of worker threads to num_workers | [
"Set",
"number",
"of",
"worker",
"threads",
"to",
"num_workers"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L304-L310 | train | 220,208 |
pymc-devs/pymc | pymc/threadpool.py | ThreadPool.putRequest | def putRequest(self, request, block=True, timeout=0):
"""Put work request into work queue and save its id for later."""
# don't reuse old work requests
# print '\tthread pool putting work request %s'%request
self._requests_queue.put(request, block, timeout)
self.workRequests[request.requestID] = request | python | def putRequest(self, request, block=True, timeout=0):
"""Put work request into work queue and save its id for later."""
# don't reuse old work requests
# print '\tthread pool putting work request %s'%request
self._requests_queue.put(request, block, timeout)
self.workRequests[request.requestID] = request | [
"def",
"putRequest",
"(",
"self",
",",
"request",
",",
"block",
"=",
"True",
",",
"timeout",
"=",
"0",
")",
":",
"# don't reuse old work requests",
"# print '\\tthread pool putting work request %s'%request",
"self",
".",
"_requests_queue",
".",
"put",
"(",
"request",
... | Put work request into work queue and save its id for later. | [
"Put",
"work",
"request",
"into",
"work",
"queue",
"and",
"save",
"its",
"id",
"for",
"later",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/threadpool.py#L312-L317 | train | 220,209 |
pymc-devs/pymc | pymc/examples/zip.py | zip | def zip(value=data, mu=mu, psi=psi):
""" Zero-inflated Poisson likelihood """
# Initialize likeihood
like = 0.0
# Loop over data
for x in value:
if not x:
# Zero values
like += np.log((1. - psi) + psi * np.exp(-mu))
else:
# Non-zero values
like += np.log(psi) + poisson_like(x, mu)
return like | python | def zip(value=data, mu=mu, psi=psi):
""" Zero-inflated Poisson likelihood """
# Initialize likeihood
like = 0.0
# Loop over data
for x in value:
if not x:
# Zero values
like += np.log((1. - psi) + psi * np.exp(-mu))
else:
# Non-zero values
like += np.log(psi) + poisson_like(x, mu)
return like | [
"def",
"zip",
"(",
"value",
"=",
"data",
",",
"mu",
"=",
"mu",
",",
"psi",
"=",
"psi",
")",
":",
"# Initialize likeihood",
"like",
"=",
"0.0",
"# Loop over data",
"for",
"x",
"in",
"value",
":",
"if",
"not",
"x",
":",
"# Zero values",
"like",
"+=",
"... | Zero-inflated Poisson likelihood | [
"Zero",
"-",
"inflated",
"Poisson",
"likelihood"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/examples/zip.py#L26-L43 | train | 220,210 |
pymc-devs/pymc | pymc/Matplot.py | plot | def plot(
data, name, format='png', suffix='', path='./', common_scale=True, datarange=(None, None), fontmap=None, verbose=1,
new=True, last=True, rows=1, num=1):
"""
Generates summary plots for nodes of a given PyMC object.
:Arguments:
data: PyMC object, trace or array
A trace from an MCMC sample or a PyMC object with one or more traces.
name: string
The name of the object.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
common_scale (optional): bool
Specifies whether plots of multivariate nodes should be on the same scale
(defaults to True).
datarange (optional): tuple or list
Range of data to plot (defaults to empirical range of data)
fontmap (optional): dict
Dictionary containing the font map for the labels of the graphic.
verbose (optional): int
Verbosity level for output (defaults to 1).
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# If there is only one data array, go ahead and plot it ...
if ndim(data) == 1:
if verbose > 0:
print_('Plotting', name)
# If new plot, generate new frame
if new:
figure(figsize=(10, 6))
# Call trace
trace(data, name, datarange=datarange, rows=rows * 2, columns=2,
num=num + 3 * (num - 1), last=last, fontmap=fontmap)
# Call autocorrelation
autocorrelation(data, name, rows=rows * 2, columns=2,
num=num+3*(num-1)+2, last=last, fontmap=fontmap)
# Call histogram
histogram(data, name, datarange=datarange, rows=rows, columns=2,
num=num*2, last=last, fontmap=fontmap)
if last:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
savefig("%s%s%s.%s" % (path, name, suffix, format))
else:
# ... otherwise plot recursively
tdata = swapaxes(data, 0, 1)
datarange = (None, None)
# Determine common range for plots
if common_scale:
datarange = (nmin(tdata), nmax(tdata))
# How many rows?
_rows = min(4, len(tdata))
for i in range(len(tdata)):
# New plot or adding to existing?
_new = not i % _rows
# Current subplot number
_num = i % _rows + 1
# Final subplot of current figure?
_last = (_num == _rows) or (i == len(tdata) - 1)
plot(tdata[i], name + '_' + str(i), format=format, path=path,
common_scale=common_scale, datarange=datarange,
suffix=suffix, new=_new, last=_last, rows=_rows,
num=_num, fontmap=fontmap, verbose=verbose) | python | def plot(
data, name, format='png', suffix='', path='./', common_scale=True, datarange=(None, None), fontmap=None, verbose=1,
new=True, last=True, rows=1, num=1):
"""
Generates summary plots for nodes of a given PyMC object.
:Arguments:
data: PyMC object, trace or array
A trace from an MCMC sample or a PyMC object with one or more traces.
name: string
The name of the object.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
common_scale (optional): bool
Specifies whether plots of multivariate nodes should be on the same scale
(defaults to True).
datarange (optional): tuple or list
Range of data to plot (defaults to empirical range of data)
fontmap (optional): dict
Dictionary containing the font map for the labels of the graphic.
verbose (optional): int
Verbosity level for output (defaults to 1).
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# If there is only one data array, go ahead and plot it ...
if ndim(data) == 1:
if verbose > 0:
print_('Plotting', name)
# If new plot, generate new frame
if new:
figure(figsize=(10, 6))
# Call trace
trace(data, name, datarange=datarange, rows=rows * 2, columns=2,
num=num + 3 * (num - 1), last=last, fontmap=fontmap)
# Call autocorrelation
autocorrelation(data, name, rows=rows * 2, columns=2,
num=num+3*(num-1)+2, last=last, fontmap=fontmap)
# Call histogram
histogram(data, name, datarange=datarange, rows=rows, columns=2,
num=num*2, last=last, fontmap=fontmap)
if last:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
savefig("%s%s%s.%s" % (path, name, suffix, format))
else:
# ... otherwise plot recursively
tdata = swapaxes(data, 0, 1)
datarange = (None, None)
# Determine common range for plots
if common_scale:
datarange = (nmin(tdata), nmax(tdata))
# How many rows?
_rows = min(4, len(tdata))
for i in range(len(tdata)):
# New plot or adding to existing?
_new = not i % _rows
# Current subplot number
_num = i % _rows + 1
# Final subplot of current figure?
_last = (_num == _rows) or (i == len(tdata) - 1)
plot(tdata[i], name + '_' + str(i), format=format, path=path,
common_scale=common_scale, datarange=datarange,
suffix=suffix, new=_new, last=_last, rows=_rows,
num=_num, fontmap=fontmap, verbose=verbose) | [
"def",
"plot",
"(",
"data",
",",
"name",
",",
"format",
"=",
"'png'",
",",
"suffix",
"=",
"''",
",",
"path",
"=",
"'./'",
",",
"common_scale",
"=",
"True",
",",
"datarange",
"=",
"(",
"None",
",",
"None",
")",
",",
"fontmap",
"=",
"None",
",",
"v... | Generates summary plots for nodes of a given PyMC object.
:Arguments:
data: PyMC object, trace or array
A trace from an MCMC sample or a PyMC object with one or more traces.
name: string
The name of the object.
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
common_scale (optional): bool
Specifies whether plots of multivariate nodes should be on the same scale
(defaults to True).
datarange (optional): tuple or list
Range of data to plot (defaults to empirical range of data)
fontmap (optional): dict
Dictionary containing the font map for the labels of the graphic.
verbose (optional): int
Verbosity level for output (defaults to 1). | [
"Generates",
"summary",
"plots",
"for",
"nodes",
"of",
"a",
"given",
"PyMC",
"object",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L385-L475 | train | 220,211 |
pymc-devs/pymc | pymc/Matplot.py | histogram | def histogram(
data, name, bins='sturges', datarange=(None, None), format='png', suffix='', path='./', rows=1,
columns=1, num=1, last=True, fontmap = None, verbose=1):
"""
Generates histogram from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the histogram.
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
datarange: tuple or list
Preferred range of histogram (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
# Internal histogram specification for handling nested arrays
try:
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Generating histogram of', name)
figure()
subplot(rows, columns, num)
# Specify number of bins
uniquevals = len(unique(data))
if isinstance(bins, int):
pass
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(data))
elif bins == 'doanes':
bins = uniquevals * (uniquevals <= 25) or _doanes(data, len(data))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(data))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in histogram')
if isnan(bins):
bins = uniquevals * (uniquevals <= 25) or int(
4 + 1.5 * log(len(data)))
print_(
'Bins could not be calculated using selected method. Setting bins to %i.' %
bins)
# Generate histogram
hist(data.tolist(), bins, histtype='stepfilled')
xlim(datarange)
# Plot options
title('\n\n %s hist' % name, x=0., y=1., ha='left', va='top',
fontsize='medium')
ylabel("Frequency", fontsize='x-small')
# Plot vertical lines for median and 95% HPD interval
quant = calc_quantiles(data)
axvline(x=quant[50], linewidth=2, color='black')
for q in calc_hpd(data, 0.05):
axvline(x=q, linewidth=2, color='grey', linestyle='dotted')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format))
# close()
except OverflowError:
print_('... cannot generate histogram') | python | def histogram(
data, name, bins='sturges', datarange=(None, None), format='png', suffix='', path='./', rows=1,
columns=1, num=1, last=True, fontmap = None, verbose=1):
"""
Generates histogram from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the histogram.
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
datarange: tuple or list
Preferred range of histogram (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
# Internal histogram specification for handling nested arrays
try:
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Generating histogram of', name)
figure()
subplot(rows, columns, num)
# Specify number of bins
uniquevals = len(unique(data))
if isinstance(bins, int):
pass
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(data))
elif bins == 'doanes':
bins = uniquevals * (uniquevals <= 25) or _doanes(data, len(data))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(data))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in histogram')
if isnan(bins):
bins = uniquevals * (uniquevals <= 25) or int(
4 + 1.5 * log(len(data)))
print_(
'Bins could not be calculated using selected method. Setting bins to %i.' %
bins)
# Generate histogram
hist(data.tolist(), bins, histtype='stepfilled')
xlim(datarange)
# Plot options
title('\n\n %s hist' % name, x=0., y=1., ha='left', va='top',
fontsize='medium')
ylabel("Frequency", fontsize='x-small')
# Plot vertical lines for median and 95% HPD interval
quant = calc_quantiles(data)
axvline(x=quant[50], linewidth=2, color='black')
for q in calc_hpd(data, 0.05):
axvline(x=q, linewidth=2, color='grey', linestyle='dotted')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[rows])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format))
# close()
except OverflowError:
print_('... cannot generate histogram') | [
"def",
"histogram",
"(",
"data",
",",
"name",
",",
"bins",
"=",
"'sturges'",
",",
"datarange",
"=",
"(",
"None",
",",
"None",
")",
",",
"format",
"=",
"'png'",
",",
"suffix",
"=",
"''",
",",
"path",
"=",
"'./'",
",",
"rows",
"=",
"1",
",",
"colum... | Generates histogram from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the histogram.
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
datarange: tuple or list
Preferred range of histogram (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. | [
"Generates",
"histogram",
"from",
"an",
"array",
"of",
"data",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L486-L589 | train | 220,212 |
pymc-devs/pymc | pymc/Matplot.py | trace | def trace(
data, name, format='png', datarange=(None, None), suffix='', path='./', rows=1, columns=1,
num=1, last=True, fontmap = None, verbose=1):
"""
Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Plotting', name)
figure()
subplot(rows, columns, num)
pyplot(data.tolist())
ylim(datarange)
# Plot options
title('\n\n %s trace' % name, x=0., y=1., ha='left', va='top',
fontsize='small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format)) | python | def trace(
data, name, format='png', datarange=(None, None), suffix='', path='./', rows=1, columns=1,
num=1, last=True, fontmap = None, verbose=1):
"""
Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
# Stand-alone plot or subplot?
standalone = rows == 1 and columns == 1 and num == 1
if standalone:
if verbose > 0:
print_('Plotting', name)
figure()
subplot(rows, columns, num)
pyplot(data.tolist())
ylim(datarange)
# Plot options
title('\n\n %s trace' % name, x=0., y=1., ha='left', va='top',
fontsize='small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[max(rows / 2, 1)])
if standalone:
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name, suffix, format)) | [
"def",
"trace",
"(",
"data",
",",
"name",
",",
"format",
"=",
"'png'",
",",
"datarange",
"=",
"(",
"None",
",",
"None",
")",
",",
"suffix",
"=",
"''",
",",
"path",
"=",
"'./'",
",",
"rows",
"=",
"1",
",",
"columns",
"=",
"1",
",",
"num",
"=",
... | Generates trace plot from an array of data.
:Arguments:
data: array or list
Usually a trace from an MCMC sample.
name: string
The name of the trace.
datarange: tuple or list
Preferred y-range of trace (defaults to (None,None)).
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. | [
"Generates",
"trace",
"plot",
"from",
"an",
"array",
"of",
"data",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L593-L655 | train | 220,213 |
pymc-devs/pymc | pymc/Matplot.py | gof_plot | def gof_plot(
simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./',
fontmap=None, verbose=0):
"""
Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if not isinstance(simdata, ndarray):
## Can't just try and catch because ndarray objects also have
## `trace` method.
simdata = simdata.trace()
if ndim(trueval) == 1 and ndim(simdata == 2):
# Iterate over more than one set of data
for i in range(len(trueval)):
n = name or 'MCMC'
gof_plot(
simdata[
:,
i],
trueval[
i],
'%s[%i]' % (
n,
i),
bins=bins,
format=format,
suffix=suffix,
path=path,
fontmap=fontmap,
verbose=verbose)
return
if verbose > 0:
print_('Plotting', (name or 'MCMC') + suffix)
figure()
# Specify number of bins
if bins is None:
bins = 'sqrt'
uniquevals = len(unique(simdata))
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata))
elif bins == 'doanes':
bins = uniquevals * (
uniquevals <= 25) or _doanes(simdata,
len(simdata))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in gof_plot')
# Generate histogram
hist(simdata, bins)
# Plot options
xlabel(name or 'Value', fontsize='x-small')
ylabel("Frequency", fontsize='x-small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
# Plot vertical line at location of true data value
axvline(x=trueval, linewidth=2, color='r', linestyle='dotted')
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format)) | python | def gof_plot(
simdata, trueval, name=None, bins=None, format='png', suffix='-gof', path='./',
fontmap=None, verbose=0):
"""
Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot.
"""
if fontmap is None:
fontmap = {1: 10, 2: 8, 3: 6, 4: 5, 5: 4}
if not isinstance(simdata, ndarray):
## Can't just try and catch because ndarray objects also have
## `trace` method.
simdata = simdata.trace()
if ndim(trueval) == 1 and ndim(simdata == 2):
# Iterate over more than one set of data
for i in range(len(trueval)):
n = name or 'MCMC'
gof_plot(
simdata[
:,
i],
trueval[
i],
'%s[%i]' % (
n,
i),
bins=bins,
format=format,
suffix=suffix,
path=path,
fontmap=fontmap,
verbose=verbose)
return
if verbose > 0:
print_('Plotting', (name or 'MCMC') + suffix)
figure()
# Specify number of bins
if bins is None:
bins = 'sqrt'
uniquevals = len(unique(simdata))
if bins == 'sturges':
bins = uniquevals * (uniquevals <= 25) or _sturges(len(simdata))
elif bins == 'doanes':
bins = uniquevals * (
uniquevals <= 25) or _doanes(simdata,
len(simdata))
elif bins == 'sqrt':
bins = uniquevals * (uniquevals <= 25) or _sqrt_choice(len(simdata))
elif isinstance(bins, int):
bins = bins
else:
raise ValueError('Invalid bins argument in gof_plot')
# Generate histogram
hist(simdata, bins)
# Plot options
xlabel(name or 'Value', fontsize='x-small')
ylabel("Frequency", fontsize='x-small')
# Smaller tick labels
tlabels = gca().get_xticklabels()
setp(tlabels, 'fontsize', fontmap[1])
tlabels = gca().get_yticklabels()
setp(tlabels, 'fontsize', fontmap[1])
# Plot vertical line at location of true data value
axvline(x=trueval, linewidth=2, color='r', linestyle='dotted')
if not os.path.exists(path):
os.mkdir(path)
if not path.endswith('/'):
path += '/'
# Save to file
savefig("%s%s%s.%s" % (path, name or 'MCMC', suffix, format)) | [
"def",
"gof_plot",
"(",
"simdata",
",",
"trueval",
",",
"name",
"=",
"None",
",",
"bins",
"=",
"None",
",",
"format",
"=",
"'png'",
",",
"suffix",
"=",
"'-gof'",
",",
"path",
"=",
"'./'",
",",
"fontmap",
"=",
"None",
",",
"verbose",
"=",
"0",
")",
... | Plots histogram of replicated data, indicating the location of the observed data
:Arguments:
simdata: array or PyMC object
Trace of simulated data or the PyMC stochastic object containing trace.
trueval: numeric
True (observed) value of the data
bins: int or string
The number of bins, or a preferred binning method. Available methods include
'doanes', 'sturges' and 'sqrt' (defaults to 'doanes').
format (optional): string
Graphic output format (defaults to png).
suffix (optional): string
Filename suffix.
path (optional): string
Specifies location for saving plots (defaults to local directory).
fontmap (optional): dict
Font map for plot. | [
"Plots",
"histogram",
"of",
"replicated",
"data",
"indicating",
"the",
"location",
"of",
"the",
"observed",
"data"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Matplot.py#L790-L894 | train | 220,214 |
pymc-devs/pymc | pymc/database/sqlite.py | load | def load(dbname):
"""Load an existing SQLite database.
Return a Database instance.
"""
db = Database(dbname)
# Get the name of the objects
tables = get_table_list(db.cur)
# Create a Trace instance for each object
chains = 0
for name in tables:
db._traces[name] = Trace(name=name, db=db)
db._traces[name]._shape = get_shape(db.cur, name)
setattr(db, name, db._traces[name])
db.cur.execute('SELECT MAX(trace) FROM [%s]' % name)
chains = max(chains, db.cur.fetchall()[0][0] + 1)
db.chains = chains
db.trace_names = chains * [tables, ]
db._state_ = {}
return db | python | def load(dbname):
"""Load an existing SQLite database.
Return a Database instance.
"""
db = Database(dbname)
# Get the name of the objects
tables = get_table_list(db.cur)
# Create a Trace instance for each object
chains = 0
for name in tables:
db._traces[name] = Trace(name=name, db=db)
db._traces[name]._shape = get_shape(db.cur, name)
setattr(db, name, db._traces[name])
db.cur.execute('SELECT MAX(trace) FROM [%s]' % name)
chains = max(chains, db.cur.fetchall()[0][0] + 1)
db.chains = chains
db.trace_names = chains * [tables, ]
db._state_ = {}
return db | [
"def",
"load",
"(",
"dbname",
")",
":",
"db",
"=",
"Database",
"(",
"dbname",
")",
"# Get the name of the objects",
"tables",
"=",
"get_table_list",
"(",
"db",
".",
"cur",
")",
"# Create a Trace instance for each object",
"chains",
"=",
"0",
"for",
"name",
"in",... | Load an existing SQLite database.
Return a Database instance. | [
"Load",
"an",
"existing",
"SQLite",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/sqlite.py#L233-L255 | train | 220,215 |
pymc-devs/pymc | pymc/database/sqlite.py | get_shape | def get_shape(cursor, name):
"""Return the shape of the table ``name``."""
cursor.execute('select * from [%s]' % name)
inds = cursor.description[-1][0][1:].split('_')
return tuple([int(i) for i in inds]) | python | def get_shape(cursor, name):
"""Return the shape of the table ``name``."""
cursor.execute('select * from [%s]' % name)
inds = cursor.description[-1][0][1:].split('_')
return tuple([int(i) for i in inds]) | [
"def",
"get_shape",
"(",
"cursor",
",",
"name",
")",
":",
"cursor",
".",
"execute",
"(",
"'select * from [%s]'",
"%",
"name",
")",
"inds",
"=",
"cursor",
".",
"description",
"[",
"-",
"1",
"]",
"[",
"0",
"]",
"[",
"1",
":",
"]",
".",
"split",
"(",
... | Return the shape of the table ``name``. | [
"Return",
"the",
"shape",
"of",
"the",
"table",
"name",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/sqlite.py#L270-L274 | train | 220,216 |
pymc-devs/pymc | pymc/database/sqlite.py | Database.close | def close(self, *args, **kwds):
"""Close database."""
self.cur.close()
self.commit()
self.DB.close() | python | def close(self, *args, **kwds):
"""Close database."""
self.cur.close()
self.commit()
self.DB.close() | [
"def",
"close",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwds",
")",
":",
"self",
".",
"cur",
".",
"close",
"(",
")",
"self",
".",
"commit",
"(",
")",
"self",
".",
"DB",
".",
"close",
"(",
")"
] | Close database. | [
"Close",
"database",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/database/sqlite.py#L204-L208 | train | 220,217 |
pymc-devs/pymc | pymc/CommonDeterministics.py | create_nonimplemented_method | def create_nonimplemented_method(op_name, klass):
"""
Creates a new method that raises NotImplementedError.
"""
def new_method(self, *args):
raise NotImplementedError(
'Special method %s has not been implemented for PyMC variables.' %
op_name)
new_method.__name__ = '__' + op_name + '__'
setattr(
klass,
new_method.__name__,
UnboundMethodType(
new_method,
None,
klass)) | python | def create_nonimplemented_method(op_name, klass):
"""
Creates a new method that raises NotImplementedError.
"""
def new_method(self, *args):
raise NotImplementedError(
'Special method %s has not been implemented for PyMC variables.' %
op_name)
new_method.__name__ = '__' + op_name + '__'
setattr(
klass,
new_method.__name__,
UnboundMethodType(
new_method,
None,
klass)) | [
"def",
"create_nonimplemented_method",
"(",
"op_name",
",",
"klass",
")",
":",
"def",
"new_method",
"(",
"self",
",",
"*",
"args",
")",
":",
"raise",
"NotImplementedError",
"(",
"'Special method %s has not been implemented for PyMC variables.'",
"%",
"op_name",
")",
"... | Creates a new method that raises NotImplementedError. | [
"Creates",
"a",
"new",
"method",
"that",
"raises",
"NotImplementedError",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/CommonDeterministics.py#L802-L818 | train | 220,218 |
pymc-devs/pymc | pymc/MCMC.py | MCMC.remove_step_method | def remove_step_method(self, step_method):
"""
Removes a step method.
"""
try:
for s in step_method.stochastics:
self.step_method_dict[s].remove(step_method)
if hasattr(self, "step_methods"):
self.step_methods.discard(step_method)
self._sm_assigned = False
except AttributeError:
for sm in step_method:
self.remove_step_method(sm) | python | def remove_step_method(self, step_method):
"""
Removes a step method.
"""
try:
for s in step_method.stochastics:
self.step_method_dict[s].remove(step_method)
if hasattr(self, "step_methods"):
self.step_methods.discard(step_method)
self._sm_assigned = False
except AttributeError:
for sm in step_method:
self.remove_step_method(sm) | [
"def",
"remove_step_method",
"(",
"self",
",",
"step_method",
")",
":",
"try",
":",
"for",
"s",
"in",
"step_method",
".",
"stochastics",
":",
"self",
".",
"step_method_dict",
"[",
"s",
"]",
".",
"remove",
"(",
"step_method",
")",
"if",
"hasattr",
"(",
"s... | Removes a step method. | [
"Removes",
"a",
"step",
"method",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/MCMC.py#L129-L141 | train | 220,219 |
pymc-devs/pymc | pymc/MCMC.py | MCMC.assign_step_methods | def assign_step_methods(self, verbose=-1, draw_from_prior_when_possible=True):
"""
Make sure every stochastic variable has a step method. If not,
assign a step method from the registry.
"""
if not self._sm_assigned:
if draw_from_prior_when_possible:
# Assign dataless stepper first
last_gen = set([])
for s in self.stochastics - self.observed_stochastics:
if s._random is not None:
if len(s.extended_children) == 0:
last_gen.add(s)
dataless, dataless_gens = crawl_dataless(
set(last_gen), [last_gen])
if len(dataless):
new_method = DrawFromPrior(
dataless,
dataless_gens[::-1],
verbose=verbose)
setattr(new_method, '_model', self)
for d in dataless:
if not d.observed:
self.step_method_dict[d].append(new_method)
if self.verbose > 1:
print_(
'Assigning step method %s to stochastic %s' %
(new_method.__class__.__name__, d.__name__))
for s in self.stochastics:
# If not handled by any step method, make it a new step method
# using the registry
if len(self.step_method_dict[s]) == 0:
new_method = assign_method(s, verbose=verbose)
setattr(new_method, '_model', self)
self.step_method_dict[s].append(new_method)
if self.verbose > 1:
print_(
'Assigning step method %s to stochastic %s' %
(new_method.__class__.__name__, s.__name__))
self.step_methods = set()
for s in self.stochastics:
self.step_methods |= set(self.step_method_dict[s])
for sm in self.step_methods:
if sm.tally:
for name in sm._tuning_info:
self._funs_to_tally[
sm._id + '_' + name] = lambda name=name, sm=sm: getattr(sm, name)
else:
# Change verbosity for step methods
for sm_key in self.step_method_dict:
for sm in self.step_method_dict[sm_key]:
sm.verbose = verbose
self.restore_sm_state()
self._sm_assigned = True | python | def assign_step_methods(self, verbose=-1, draw_from_prior_when_possible=True):
"""
Make sure every stochastic variable has a step method. If not,
assign a step method from the registry.
"""
if not self._sm_assigned:
if draw_from_prior_when_possible:
# Assign dataless stepper first
last_gen = set([])
for s in self.stochastics - self.observed_stochastics:
if s._random is not None:
if len(s.extended_children) == 0:
last_gen.add(s)
dataless, dataless_gens = crawl_dataless(
set(last_gen), [last_gen])
if len(dataless):
new_method = DrawFromPrior(
dataless,
dataless_gens[::-1],
verbose=verbose)
setattr(new_method, '_model', self)
for d in dataless:
if not d.observed:
self.step_method_dict[d].append(new_method)
if self.verbose > 1:
print_(
'Assigning step method %s to stochastic %s' %
(new_method.__class__.__name__, d.__name__))
for s in self.stochastics:
# If not handled by any step method, make it a new step method
# using the registry
if len(self.step_method_dict[s]) == 0:
new_method = assign_method(s, verbose=verbose)
setattr(new_method, '_model', self)
self.step_method_dict[s].append(new_method)
if self.verbose > 1:
print_(
'Assigning step method %s to stochastic %s' %
(new_method.__class__.__name__, s.__name__))
self.step_methods = set()
for s in self.stochastics:
self.step_methods |= set(self.step_method_dict[s])
for sm in self.step_methods:
if sm.tally:
for name in sm._tuning_info:
self._funs_to_tally[
sm._id + '_' + name] = lambda name=name, sm=sm: getattr(sm, name)
else:
# Change verbosity for step methods
for sm_key in self.step_method_dict:
for sm in self.step_method_dict[sm_key]:
sm.verbose = verbose
self.restore_sm_state()
self._sm_assigned = True | [
"def",
"assign_step_methods",
"(",
"self",
",",
"verbose",
"=",
"-",
"1",
",",
"draw_from_prior_when_possible",
"=",
"True",
")",
":",
"if",
"not",
"self",
".",
"_sm_assigned",
":",
"if",
"draw_from_prior_when_possible",
":",
"# Assign dataless stepper first",
"last... | Make sure every stochastic variable has a step method. If not,
assign a step method from the registry. | [
"Make",
"sure",
"every",
"stochastic",
"variable",
"has",
"a",
"step",
"method",
".",
"If",
"not",
"assign",
"a",
"step",
"method",
"from",
"the",
"registry",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/MCMC.py#L143-L204 | train | 220,220 |
pymc-devs/pymc | pymc/MCMC.py | MCMC.tune | def tune(self):
"""
Tell all step methods to tune themselves.
"""
if self.verbose > 0:
print_('\tTuning at iteration', self._current_iter)
# Initialize counter for number of tuning stochastics
tuning_count = 0
for step_method in self.step_methods:
verbose = self.verbose
if step_method.verbose > -1:
verbose = step_method.verbose
# Tune step methods
tuning_count += step_method.tune(verbose=self.verbose)
if verbose > 1:
print_(
'\t\tTuning step method %s, returned %i\n' % (
step_method._id, tuning_count
)
)
sys.stdout.flush()
if self._burn_till_tuned:
if not tuning_count:
# If no step methods needed tuning, increment count
self._tuned_count += 1
else:
# Otherwise re-initialize count
self._tuned_count = 0
# n consecutive clean intervals removed tuning
# n is equal to self._stop_tuning_after
if self._tuned_count == self._stop_tuning_after:
if self.verbose > 0:
print_('\nFinished tuning')
self._tuning = False | python | def tune(self):
"""
Tell all step methods to tune themselves.
"""
if self.verbose > 0:
print_('\tTuning at iteration', self._current_iter)
# Initialize counter for number of tuning stochastics
tuning_count = 0
for step_method in self.step_methods:
verbose = self.verbose
if step_method.verbose > -1:
verbose = step_method.verbose
# Tune step methods
tuning_count += step_method.tune(verbose=self.verbose)
if verbose > 1:
print_(
'\t\tTuning step method %s, returned %i\n' % (
step_method._id, tuning_count
)
)
sys.stdout.flush()
if self._burn_till_tuned:
if not tuning_count:
# If no step methods needed tuning, increment count
self._tuned_count += 1
else:
# Otherwise re-initialize count
self._tuned_count = 0
# n consecutive clean intervals removed tuning
# n is equal to self._stop_tuning_after
if self._tuned_count == self._stop_tuning_after:
if self.verbose > 0:
print_('\nFinished tuning')
self._tuning = False | [
"def",
"tune",
"(",
"self",
")",
":",
"if",
"self",
".",
"verbose",
">",
"0",
":",
"print_",
"(",
"'\\tTuning at iteration'",
",",
"self",
".",
"_current_iter",
")",
"# Initialize counter for number of tuning stochastics",
"tuning_count",
"=",
"0",
"for",
"step_me... | Tell all step methods to tune themselves. | [
"Tell",
"all",
"step",
"methods",
"to",
"tune",
"themselves",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/MCMC.py#L349-L387 | train | 220,221 |
pymc-devs/pymc | pymc/MCMC.py | MCMC.get_state | def get_state(self):
"""
Return the sampler and step methods current state in order to
restart sampling at a later time.
"""
self.step_methods = set()
for s in self.stochastics:
self.step_methods |= set(self.step_method_dict[s])
state = Sampler.get_state(self)
state['step_methods'] = {}
# The state of each StepMethod.
for sm in self.step_methods:
state['step_methods'][sm._id] = sm.current_state().copy()
return state | python | def get_state(self):
"""
Return the sampler and step methods current state in order to
restart sampling at a later time.
"""
self.step_methods = set()
for s in self.stochastics:
self.step_methods |= set(self.step_method_dict[s])
state = Sampler.get_state(self)
state['step_methods'] = {}
# The state of each StepMethod.
for sm in self.step_methods:
state['step_methods'][sm._id] = sm.current_state().copy()
return state | [
"def",
"get_state",
"(",
"self",
")",
":",
"self",
".",
"step_methods",
"=",
"set",
"(",
")",
"for",
"s",
"in",
"self",
".",
"stochastics",
":",
"self",
".",
"step_methods",
"|=",
"set",
"(",
"self",
".",
"step_method_dict",
"[",
"s",
"]",
")",
"stat... | Return the sampler and step methods current state in order to
restart sampling at a later time. | [
"Return",
"the",
"sampler",
"and",
"step",
"methods",
"current",
"state",
"in",
"order",
"to",
"restart",
"sampling",
"at",
"a",
"later",
"time",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/MCMC.py#L389-L406 | train | 220,222 |
pymc-devs/pymc | pymc/MCMC.py | MCMC._calc_dic | def _calc_dic(self):
"""Calculates deviance information Criterion"""
# Find mean deviance
mean_deviance = np.mean(self.db.trace('deviance')(), axis=0)
# Set values of all parameters to their mean
for stochastic in self.stochastics:
# Calculate mean of paramter
try:
mean_value = np.mean(
self.db.trace(
stochastic.__name__)(
),
axis=0)
# Set current value to mean
stochastic.value = mean_value
except KeyError:
print_(
"No trace available for %s. DIC value may not be valid." %
stochastic.__name__)
except TypeError:
print_(
"Not able to calculate DIC: invalid stochastic %s" %
stochastic.__name__)
return None
# Return twice deviance minus deviance at means
return 2 * mean_deviance - self.deviance | python | def _calc_dic(self):
"""Calculates deviance information Criterion"""
# Find mean deviance
mean_deviance = np.mean(self.db.trace('deviance')(), axis=0)
# Set values of all parameters to their mean
for stochastic in self.stochastics:
# Calculate mean of paramter
try:
mean_value = np.mean(
self.db.trace(
stochastic.__name__)(
),
axis=0)
# Set current value to mean
stochastic.value = mean_value
except KeyError:
print_(
"No trace available for %s. DIC value may not be valid." %
stochastic.__name__)
except TypeError:
print_(
"Not able to calculate DIC: invalid stochastic %s" %
stochastic.__name__)
return None
# Return twice deviance minus deviance at means
return 2 * mean_deviance - self.deviance | [
"def",
"_calc_dic",
"(",
"self",
")",
":",
"# Find mean deviance",
"mean_deviance",
"=",
"np",
".",
"mean",
"(",
"self",
".",
"db",
".",
"trace",
"(",
"'deviance'",
")",
"(",
")",
",",
"axis",
"=",
"0",
")",
"# Set values of all parameters to their mean",
"f... | Calculates deviance information Criterion | [
"Calculates",
"deviance",
"information",
"Criterion"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/MCMC.py#L419-L450 | train | 220,223 |
pymc-devs/pymc | pymc/distributions.py | stochastic_from_data | def stochastic_from_data(name, data, lower=-np.inf, upper=np.inf,
value=None, observed=False, trace=True, verbose=-1, debug=False):
"""
Return a Stochastic subclass made from arbitrary data.
The histogram for the data is fitted with Kernel Density Estimation.
:Parameters:
- `data` : An array with samples (e.g. trace[:])
- `lower` : Lower bound on possible outcomes
- `upper` : Upper bound on possible outcomes
:Example:
>>> from pymc import stochastic_from_data
>>> pos = stochastic_from_data('posterior', posterior_samples)
>>> prior = pos # update the prior with arbitrary distributions
:Alias:
Histogram
"""
pdf = gaussian_kde(data) # automatic bandwidth selection
# account for tail contribution
lower_tail = upper_tail = 0.
if lower > -np.inf:
lower_tail = pdf.integrate_box(-np.inf, lower)
if upper < np.inf:
upper_tail = pdf.integrate_box(upper, np.inf)
factor = 1. / (1. - (lower_tail + upper_tail))
def logp(value):
prob = factor * pdf(value)
if value < lower or value > upper:
return -np.inf
elif prob <= 0.:
return -np.inf
else:
return np.log(prob)
def random():
res = pdf.resample(1)[0][0]
while res < lower or res > upper:
res = pdf.resample(1)[0][0]
return res
if value is None:
value = random()
return Stochastic(logp=logp,
doc='Non-parametric density with Gaussian Kernels.',
name=name,
parents={},
random=random,
trace=trace,
value=value,
dtype=float,
observed=observed,
verbose=verbose) | python | def stochastic_from_data(name, data, lower=-np.inf, upper=np.inf,
value=None, observed=False, trace=True, verbose=-1, debug=False):
"""
Return a Stochastic subclass made from arbitrary data.
The histogram for the data is fitted with Kernel Density Estimation.
:Parameters:
- `data` : An array with samples (e.g. trace[:])
- `lower` : Lower bound on possible outcomes
- `upper` : Upper bound on possible outcomes
:Example:
>>> from pymc import stochastic_from_data
>>> pos = stochastic_from_data('posterior', posterior_samples)
>>> prior = pos # update the prior with arbitrary distributions
:Alias:
Histogram
"""
pdf = gaussian_kde(data) # automatic bandwidth selection
# account for tail contribution
lower_tail = upper_tail = 0.
if lower > -np.inf:
lower_tail = pdf.integrate_box(-np.inf, lower)
if upper < np.inf:
upper_tail = pdf.integrate_box(upper, np.inf)
factor = 1. / (1. - (lower_tail + upper_tail))
def logp(value):
prob = factor * pdf(value)
if value < lower or value > upper:
return -np.inf
elif prob <= 0.:
return -np.inf
else:
return np.log(prob)
def random():
res = pdf.resample(1)[0][0]
while res < lower or res > upper:
res = pdf.resample(1)[0][0]
return res
if value is None:
value = random()
return Stochastic(logp=logp,
doc='Non-parametric density with Gaussian Kernels.',
name=name,
parents={},
random=random,
trace=trace,
value=value,
dtype=float,
observed=observed,
verbose=verbose) | [
"def",
"stochastic_from_data",
"(",
"name",
",",
"data",
",",
"lower",
"=",
"-",
"np",
".",
"inf",
",",
"upper",
"=",
"np",
".",
"inf",
",",
"value",
"=",
"None",
",",
"observed",
"=",
"False",
",",
"trace",
"=",
"True",
",",
"verbose",
"=",
"-",
... | Return a Stochastic subclass made from arbitrary data.
The histogram for the data is fitted with Kernel Density Estimation.
:Parameters:
- `data` : An array with samples (e.g. trace[:])
- `lower` : Lower bound on possible outcomes
- `upper` : Upper bound on possible outcomes
:Example:
>>> from pymc import stochastic_from_data
>>> pos = stochastic_from_data('posterior', posterior_samples)
>>> prior = pos # update the prior with arbitrary distributions
:Alias:
Histogram | [
"Return",
"a",
"Stochastic",
"subclass",
"made",
"from",
"arbitrary",
"data",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L401-L458 | train | 220,224 |
pymc-devs/pymc | pymc/distributions.py | randomwrap | def randomwrap(func):
"""
Decorator for random value generators
Allows passing of sequence of parameters, as well as a size argument.
Convention:
- If size=1 and the parameters are all scalars, return a scalar.
- If size=1, the random variates are 1D.
- If the parameters are scalars and size > 1, the random variates are 1D.
- If size > 1 and the parameters are sequences, the random variates are
aligned as (size, max(length)), where length is the parameters size.
:Example:
>>> rbernoulli(.1)
0
>>> rbernoulli([.1,.9])
np.asarray([0, 1])
>>> rbernoulli(.9, size=2)
np.asarray([1, 1])
>>> rbernoulli([.1,.9], 2)
np.asarray([[0, 1],
[0, 1]])
"""
# Find the order of the arguments.
refargs, defaults = utils.get_signature(func)
# vfunc = np.vectorize(self.func)
npos = len(refargs) - len(defaults) # Number of pos. arg.
nkwds = len(defaults) # Number of kwds args.
mv = func.__name__[
1:] in mv_continuous_distributions + mv_discrete_distributions
# Use the NumPy random function directly if this is not a multivariate
# distribution
if not mv:
return func
def wrapper(*args, **kwds):
# First transform keyword arguments into positional arguments.
n = len(args)
if nkwds > 0:
args = list(args)
for i, k in enumerate(refargs[n:]):
if k in kwds.keys():
args.append(kwds[k])
else:
args.append(defaults[n - npos + i])
r = []
s = []
largs = []
nr = args[-1]
length = [np.atleast_1d(a).shape[0] for a in args]
dimension = [np.atleast_1d(a).ndim for a in args]
N = max(length)
if len(set(dimension)) > 2:
raise('Dimensions do not agree.')
# Make sure all elements are iterable and have consistent lengths, ie
# 1 or n, but not m and n.
for arg, s in zip(args, length):
t = type(arg)
arr = np.empty(N, type)
if s == 1:
arr.fill(arg)
elif s == N:
arr = np.asarray(arg)
else:
raise RuntimeError('Arguments size not allowed: %s.' % s)
largs.append(arr)
if mv and N > 1 and max(dimension) > 1 and nr > 1:
raise ValueError(
'Multivariate distributions cannot take s>1 and multiple values.')
if mv:
for i, arg in enumerate(largs[:-1]):
largs[0] = np.atleast_2d(arg)
for arg in zip(*largs):
r.append(func(*arg))
size = arg[-1]
vec_stochastics = len(r) > 1
if mv:
if nr == 1:
return r[0]
else:
return np.vstack(r)
else:
if size > 1 and vec_stochastics:
return np.atleast_2d(r).T
elif vec_stochastics or size > 1:
return np.concatenate(r)
else: # Scalar case
return r[0][0]
wrapper.__doc__ = func.__doc__
wrapper.__name__ = func.__name__
return wrapper | python | def randomwrap(func):
"""
Decorator for random value generators
Allows passing of sequence of parameters, as well as a size argument.
Convention:
- If size=1 and the parameters are all scalars, return a scalar.
- If size=1, the random variates are 1D.
- If the parameters are scalars and size > 1, the random variates are 1D.
- If size > 1 and the parameters are sequences, the random variates are
aligned as (size, max(length)), where length is the parameters size.
:Example:
>>> rbernoulli(.1)
0
>>> rbernoulli([.1,.9])
np.asarray([0, 1])
>>> rbernoulli(.9, size=2)
np.asarray([1, 1])
>>> rbernoulli([.1,.9], 2)
np.asarray([[0, 1],
[0, 1]])
"""
# Find the order of the arguments.
refargs, defaults = utils.get_signature(func)
# vfunc = np.vectorize(self.func)
npos = len(refargs) - len(defaults) # Number of pos. arg.
nkwds = len(defaults) # Number of kwds args.
mv = func.__name__[
1:] in mv_continuous_distributions + mv_discrete_distributions
# Use the NumPy random function directly if this is not a multivariate
# distribution
if not mv:
return func
def wrapper(*args, **kwds):
# First transform keyword arguments into positional arguments.
n = len(args)
if nkwds > 0:
args = list(args)
for i, k in enumerate(refargs[n:]):
if k in kwds.keys():
args.append(kwds[k])
else:
args.append(defaults[n - npos + i])
r = []
s = []
largs = []
nr = args[-1]
length = [np.atleast_1d(a).shape[0] for a in args]
dimension = [np.atleast_1d(a).ndim for a in args]
N = max(length)
if len(set(dimension)) > 2:
raise('Dimensions do not agree.')
# Make sure all elements are iterable and have consistent lengths, ie
# 1 or n, but not m and n.
for arg, s in zip(args, length):
t = type(arg)
arr = np.empty(N, type)
if s == 1:
arr.fill(arg)
elif s == N:
arr = np.asarray(arg)
else:
raise RuntimeError('Arguments size not allowed: %s.' % s)
largs.append(arr)
if mv and N > 1 and max(dimension) > 1 and nr > 1:
raise ValueError(
'Multivariate distributions cannot take s>1 and multiple values.')
if mv:
for i, arg in enumerate(largs[:-1]):
largs[0] = np.atleast_2d(arg)
for arg in zip(*largs):
r.append(func(*arg))
size = arg[-1]
vec_stochastics = len(r) > 1
if mv:
if nr == 1:
return r[0]
else:
return np.vstack(r)
else:
if size > 1 and vec_stochastics:
return np.atleast_2d(r).T
elif vec_stochastics or size > 1:
return np.concatenate(r)
else: # Scalar case
return r[0][0]
wrapper.__doc__ = func.__doc__
wrapper.__name__ = func.__name__
return wrapper | [
"def",
"randomwrap",
"(",
"func",
")",
":",
"# Find the order of the arguments.",
"refargs",
",",
"defaults",
"=",
"utils",
".",
"get_signature",
"(",
"func",
")",
"# vfunc = np.vectorize(self.func)",
"npos",
"=",
"len",
"(",
"refargs",
")",
"-",
"len",
"(",
"de... | Decorator for random value generators
Allows passing of sequence of parameters, as well as a size argument.
Convention:
- If size=1 and the parameters are all scalars, return a scalar.
- If size=1, the random variates are 1D.
- If the parameters are scalars and size > 1, the random variates are 1D.
- If size > 1 and the parameters are sequences, the random variates are
aligned as (size, max(length)), where length is the parameters size.
:Example:
>>> rbernoulli(.1)
0
>>> rbernoulli([.1,.9])
np.asarray([0, 1])
>>> rbernoulli(.9, size=2)
np.asarray([1, 1])
>>> rbernoulli([.1,.9], 2)
np.asarray([[0, 1],
[0, 1]]) | [
"Decorator",
"for",
"random",
"value",
"generators"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L469-L572 | train | 220,225 |
pymc-devs/pymc | pymc/distributions.py | constrain | def constrain(value, lower=-np.Inf, upper=np.Inf, allow_equal=False):
"""
Apply interval constraint on stochastic value.
"""
ok = flib.constrain(value, lower, upper, allow_equal)
if ok == 0:
raise ZeroProbability | python | def constrain(value, lower=-np.Inf, upper=np.Inf, allow_equal=False):
"""
Apply interval constraint on stochastic value.
"""
ok = flib.constrain(value, lower, upper, allow_equal)
if ok == 0:
raise ZeroProbability | [
"def",
"constrain",
"(",
"value",
",",
"lower",
"=",
"-",
"np",
".",
"Inf",
",",
"upper",
"=",
"np",
".",
"Inf",
",",
"allow_equal",
"=",
"False",
")",
":",
"ok",
"=",
"flib",
".",
"constrain",
"(",
"value",
",",
"lower",
",",
"upper",
",",
"allo... | Apply interval constraint on stochastic value. | [
"Apply",
"interval",
"constraint",
"on",
"stochastic",
"value",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L599-L606 | train | 220,226 |
pymc-devs/pymc | pymc/distributions.py | expand_triangular | def expand_triangular(X, k):
"""
Expand flattened triangular matrix.
"""
X = X.tolist()
# Unflatten matrix
Y = np.asarray(
[[0] * i + X[i * k - (i * (i - 1)) / 2: i * k + (k - i)] for i in range(k)])
# Loop over rows
for i in range(k):
# Loop over columns
for j in range(k):
Y[j, i] = Y[i, j]
return Y | python | def expand_triangular(X, k):
"""
Expand flattened triangular matrix.
"""
X = X.tolist()
# Unflatten matrix
Y = np.asarray(
[[0] * i + X[i * k - (i * (i - 1)) / 2: i * k + (k - i)] for i in range(k)])
# Loop over rows
for i in range(k):
# Loop over columns
for j in range(k):
Y[j, i] = Y[i, j]
return Y | [
"def",
"expand_triangular",
"(",
"X",
",",
"k",
")",
":",
"X",
"=",
"X",
".",
"tolist",
"(",
")",
"# Unflatten matrix",
"Y",
"=",
"np",
".",
"asarray",
"(",
"[",
"[",
"0",
"]",
"*",
"i",
"+",
"X",
"[",
"i",
"*",
"k",
"-",
"(",
"i",
"*",
"("... | Expand flattened triangular matrix. | [
"Expand",
"flattened",
"triangular",
"matrix",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L632-L646 | train | 220,227 |
pymc-devs/pymc | pymc/distributions.py | rarlognormal | def rarlognormal(a, sigma, rho, size=1):
R"""
Autoregressive normal random variates.
If a is a scalar, generates one series of length size.
If a is a sequence, generates size series of the same length
as a.
"""
f = utils.ar1
if np.isscalar(a):
r = f(rho, 0, sigma, size)
else:
n = len(a)
r = [f(rho, 0, sigma, n) for i in range(size)]
if size == 1:
r = r[0]
return a * np.exp(r) | python | def rarlognormal(a, sigma, rho, size=1):
R"""
Autoregressive normal random variates.
If a is a scalar, generates one series of length size.
If a is a sequence, generates size series of the same length
as a.
"""
f = utils.ar1
if np.isscalar(a):
r = f(rho, 0, sigma, size)
else:
n = len(a)
r = [f(rho, 0, sigma, n) for i in range(size)]
if size == 1:
r = r[0]
return a * np.exp(r) | [
"def",
"rarlognormal",
"(",
"a",
",",
"sigma",
",",
"rho",
",",
"size",
"=",
"1",
")",
":",
"f",
"=",
"utils",
".",
"ar1",
"if",
"np",
".",
"isscalar",
"(",
"a",
")",
":",
"r",
"=",
"f",
"(",
"rho",
",",
"0",
",",
"sigma",
",",
"size",
")",... | R"""
Autoregressive normal random variates.
If a is a scalar, generates one series of length size.
If a is a sequence, generates size series of the same length
as a. | [
"R",
"Autoregressive",
"normal",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L722-L738 | train | 220,228 |
pymc-devs/pymc | pymc/distributions.py | arlognormal_like | def arlognormal_like(x, a, sigma, rho):
R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`.
"""
return flib.arlognormal(x, np.log(a), sigma, rho, beta=1) | python | def arlognormal_like(x, a, sigma, rho):
R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`.
"""
return flib.arlognormal(x, np.log(a), sigma, rho, beta=1) | [
"def",
"arlognormal_like",
"(",
"x",
",",
"a",
",",
"sigma",
",",
"rho",
")",
":",
"return",
"flib",
".",
"arlognormal",
"(",
"x",
",",
"np",
".",
"log",
"(",
"a",
")",
",",
"sigma",
",",
"rho",
",",
"beta",
"=",
"1",
")"
] | R"""
Autoregressive lognormal log-likelihood.
.. math::
x_i & = a_i \exp(e_i) \\
e_i & = \rho e_{i-1} + \epsilon_i
where :math:`\epsilon_i \sim N(0,\sigma)`. | [
"R",
"Autoregressive",
"lognormal",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L741-L751 | train | 220,229 |
pymc-devs/pymc | pymc/distributions.py | rbeta | def rbeta(alpha, beta, size=None):
"""
Random beta variates.
"""
from scipy.stats.distributions import beta as sbeta
return sbeta.ppf(np.random.random(size), alpha, beta) | python | def rbeta(alpha, beta, size=None):
"""
Random beta variates.
"""
from scipy.stats.distributions import beta as sbeta
return sbeta.ppf(np.random.random(size), alpha, beta) | [
"def",
"rbeta",
"(",
"alpha",
",",
"beta",
",",
"size",
"=",
"None",
")",
":",
"from",
"scipy",
".",
"stats",
".",
"distributions",
"import",
"beta",
"as",
"sbeta",
"return",
"sbeta",
".",
"ppf",
"(",
"np",
".",
"random",
".",
"random",
"(",
"size",
... | Random beta variates. | [
"Random",
"beta",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L803-L808 | train | 220,230 |
pymc-devs/pymc | pymc/distributions.py | rbinomial | def rbinomial(n, p, size=None):
"""
Random binomial variates.
"""
if not size:
size = None
return np.random.binomial(np.ravel(n), np.ravel(p), size) | python | def rbinomial(n, p, size=None):
"""
Random binomial variates.
"""
if not size:
size = None
return np.random.binomial(np.ravel(n), np.ravel(p), size) | [
"def",
"rbinomial",
"(",
"n",
",",
"p",
",",
"size",
"=",
"None",
")",
":",
"if",
"not",
"size",
":",
"size",
"=",
"None",
"return",
"np",
".",
"random",
".",
"binomial",
"(",
"np",
".",
"ravel",
"(",
"n",
")",
",",
"np",
".",
"ravel",
"(",
"... | Random binomial variates. | [
"Random",
"binomial",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L858-L864 | train | 220,231 |
pymc-devs/pymc | pymc/distributions.py | rbetabin | def rbetabin(alpha, beta, n, size=None):
"""
Random beta-binomial variates.
"""
phi = np.random.beta(alpha, beta, size)
return np.random.binomial(n, phi) | python | def rbetabin(alpha, beta, n, size=None):
"""
Random beta-binomial variates.
"""
phi = np.random.beta(alpha, beta, size)
return np.random.binomial(n, phi) | [
"def",
"rbetabin",
"(",
"alpha",
",",
"beta",
",",
"n",
",",
"size",
"=",
"None",
")",
":",
"phi",
"=",
"np",
".",
"random",
".",
"beta",
"(",
"alpha",
",",
"beta",
",",
"size",
")",
"return",
"np",
".",
"random",
".",
"binomial",
"(",
"n",
","... | Random beta-binomial variates. | [
"Random",
"beta",
"-",
"binomial",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L904-L910 | train | 220,232 |
pymc-devs/pymc | pymc/distributions.py | rcategorical | def rcategorical(p, size=None):
"""
Categorical random variates.
"""
out = flib.rcat(p, np.random.random(size=size))
if sum(out.shape) == 1:
return out.squeeze()
else:
return out | python | def rcategorical(p, size=None):
"""
Categorical random variates.
"""
out = flib.rcat(p, np.random.random(size=size))
if sum(out.shape) == 1:
return out.squeeze()
else:
return out | [
"def",
"rcategorical",
"(",
"p",
",",
"size",
"=",
"None",
")",
":",
"out",
"=",
"flib",
".",
"rcat",
"(",
"p",
",",
"np",
".",
"random",
".",
"random",
"(",
"size",
"=",
"size",
")",
")",
"if",
"sum",
"(",
"out",
".",
"shape",
")",
"==",
"1"... | Categorical random variates. | [
"Categorical",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L957-L965 | train | 220,233 |
pymc-devs/pymc | pymc/distributions.py | categorical_like | def categorical_like(x, p):
R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1`
"""
p = np.atleast_2d(p)
if np.any(abs(np.sum(p, 1) - 1) > 0.0001):
print_("Probabilities in categorical_like sum to", np.sum(p, 1))
return flib.categorical(np.array(x).astype(int), p) | python | def categorical_like(x, p):
R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1`
"""
p = np.atleast_2d(p)
if np.any(abs(np.sum(p, 1) - 1) > 0.0001):
print_("Probabilities in categorical_like sum to", np.sum(p, 1))
return flib.categorical(np.array(x).astype(int), p) | [
"def",
"categorical_like",
"(",
"x",
",",
"p",
")",
":",
"p",
"=",
"np",
".",
"atleast_2d",
"(",
"p",
")",
"if",
"np",
".",
"any",
"(",
"abs",
"(",
"np",
".",
"sum",
"(",
"p",
",",
"1",
")",
"-",
"1",
")",
">",
"0.0001",
")",
":",
"print_",... | R"""
Categorical log-likelihood. The most general discrete distribution.
.. math:: f(x=i \mid p) = p_i
for :math:`i \in 0 \ldots k-1`.
:Parameters:
- `x` : [int] :math:`x \in 0\ldots k-1`
- `p` : [float] :math:`p > 0`, :math:`\sum p = 1` | [
"R",
"Categorical",
"log",
"-",
"likelihood",
".",
"The",
"most",
"general",
"discrete",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L968-L985 | train | 220,234 |
pymc-devs/pymc | pymc/distributions.py | rcauchy | def rcauchy(alpha, beta, size=None):
"""
Returns Cauchy random variates.
"""
return alpha + beta * np.tan(pi * random_number(size) - pi / 2.0) | python | def rcauchy(alpha, beta, size=None):
"""
Returns Cauchy random variates.
"""
return alpha + beta * np.tan(pi * random_number(size) - pi / 2.0) | [
"def",
"rcauchy",
"(",
"alpha",
",",
"beta",
",",
"size",
"=",
"None",
")",
":",
"return",
"alpha",
"+",
"beta",
"*",
"np",
".",
"tan",
"(",
"pi",
"*",
"random_number",
"(",
"size",
")",
"-",
"pi",
"/",
"2.0",
")"
] | Returns Cauchy random variates. | [
"Returns",
"Cauchy",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L990-L995 | train | 220,235 |
pymc-devs/pymc | pymc/distributions.py | degenerate_like | def degenerate_like(x, k):
R"""
Degenerate log-likelihood.
.. math::
f(x \mid k) = \left\{ \begin{matrix} 1 \text{ if } x = k \\ 0 \text{ if } x \ne k\end{matrix} \right.
:Parameters:
- `x` : Input value.
- `k` : Degenerate value.
"""
x = np.atleast_1d(x)
return sum(np.log([i == k for i in x])) | python | def degenerate_like(x, k):
R"""
Degenerate log-likelihood.
.. math::
f(x \mid k) = \left\{ \begin{matrix} 1 \text{ if } x = k \\ 0 \text{ if } x \ne k\end{matrix} \right.
:Parameters:
- `x` : Input value.
- `k` : Degenerate value.
"""
x = np.atleast_1d(x)
return sum(np.log([i == k for i in x])) | [
"def",
"degenerate_like",
"(",
"x",
",",
"k",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"return",
"sum",
"(",
"np",
".",
"log",
"(",
"[",
"i",
"==",
"k",
"for",
"i",
"in",
"x",
"]",
")",
")"
] | R"""
Degenerate log-likelihood.
.. math::
f(x \mid k) = \left\{ \begin{matrix} 1 \text{ if } x = k \\ 0 \text{ if } x \ne k\end{matrix} \right.
:Parameters:
- `x` : Input value.
- `k` : Degenerate value. | [
"R",
"Degenerate",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1094-L1107 | train | 220,236 |
pymc-devs/pymc | pymc/distributions.py | rdirichlet | def rdirichlet(theta, size=1):
"""
Dirichlet random variates.
"""
gammas = np.vstack([rgamma(theta, 1) for i in xrange(size)])
if size > 1 and np.size(theta) > 1:
return (gammas.T / gammas.sum(1))[:-1].T
elif np.size(theta) > 1:
return (gammas[0] / gammas[0].sum())[:-1]
else:
return 1. | python | def rdirichlet(theta, size=1):
"""
Dirichlet random variates.
"""
gammas = np.vstack([rgamma(theta, 1) for i in xrange(size)])
if size > 1 and np.size(theta) > 1:
return (gammas.T / gammas.sum(1))[:-1].T
elif np.size(theta) > 1:
return (gammas[0] / gammas[0].sum())[:-1]
else:
return 1. | [
"def",
"rdirichlet",
"(",
"theta",
",",
"size",
"=",
"1",
")",
":",
"gammas",
"=",
"np",
".",
"vstack",
"(",
"[",
"rgamma",
"(",
"theta",
",",
"1",
")",
"for",
"i",
"in",
"xrange",
"(",
"size",
")",
"]",
")",
"if",
"size",
">",
"1",
"and",
"n... | Dirichlet random variates. | [
"Dirichlet",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1128-L1138 | train | 220,237 |
pymc-devs/pymc | pymc/distributions.py | dirichlet_like | def dirichlet_like(x, theta):
R"""
Dirichlet log-likelihood.
This is a multivariate continuous distribution.
.. math::
f(\mathbf{x}) = \frac{\Gamma(\sum_{i=1}^k \theta_i)}{\prod \Gamma(\theta_i)}\prod_{i=1}^{k-1} x_i^{\theta_i - 1}
\cdot\left(1-\sum_{i=1}^{k-1}x_i\right)^\theta_k
:Parameters:
x : (n, k-1) array
Array of shape (n, k-1) where `n` is the number of samples
and `k` the dimension.
:math:`0 < x_i < 1`, :math:`\sum_{i=1}^{k-1} x_i < 1`
theta : array
An (n,k) or (1,k) array > 0.
.. note::
Only the first `k-1` elements of `x` are expected. Can be used
as a parent of Multinomial and Categorical nevertheless.
"""
x = np.atleast_2d(x)
theta = np.atleast_2d(theta)
if (np.shape(x)[-1] + 1) != np.shape(theta)[-1]:
raise ValueError('The dimension of x in dirichlet_like must be k-1.')
return flib.dirichlet(x, theta) | python | def dirichlet_like(x, theta):
R"""
Dirichlet log-likelihood.
This is a multivariate continuous distribution.
.. math::
f(\mathbf{x}) = \frac{\Gamma(\sum_{i=1}^k \theta_i)}{\prod \Gamma(\theta_i)}\prod_{i=1}^{k-1} x_i^{\theta_i - 1}
\cdot\left(1-\sum_{i=1}^{k-1}x_i\right)^\theta_k
:Parameters:
x : (n, k-1) array
Array of shape (n, k-1) where `n` is the number of samples
and `k` the dimension.
:math:`0 < x_i < 1`, :math:`\sum_{i=1}^{k-1} x_i < 1`
theta : array
An (n,k) or (1,k) array > 0.
.. note::
Only the first `k-1` elements of `x` are expected. Can be used
as a parent of Multinomial and Categorical nevertheless.
"""
x = np.atleast_2d(x)
theta = np.atleast_2d(theta)
if (np.shape(x)[-1] + 1) != np.shape(theta)[-1]:
raise ValueError('The dimension of x in dirichlet_like must be k-1.')
return flib.dirichlet(x, theta) | [
"def",
"dirichlet_like",
"(",
"x",
",",
"theta",
")",
":",
"x",
"=",
"np",
".",
"atleast_2d",
"(",
"x",
")",
"theta",
"=",
"np",
".",
"atleast_2d",
"(",
"theta",
")",
"if",
"(",
"np",
".",
"shape",
"(",
"x",
")",
"[",
"-",
"1",
"]",
"+",
"1",... | R"""
Dirichlet log-likelihood.
This is a multivariate continuous distribution.
.. math::
f(\mathbf{x}) = \frac{\Gamma(\sum_{i=1}^k \theta_i)}{\prod \Gamma(\theta_i)}\prod_{i=1}^{k-1} x_i^{\theta_i - 1}
\cdot\left(1-\sum_{i=1}^{k-1}x_i\right)^\theta_k
:Parameters:
x : (n, k-1) array
Array of shape (n, k-1) where `n` is the number of samples
and `k` the dimension.
:math:`0 < x_i < 1`, :math:`\sum_{i=1}^{k-1} x_i < 1`
theta : array
An (n,k) or (1,k) array > 0.
.. note::
Only the first `k-1` elements of `x` are expected. Can be used
as a parent of Multinomial and Categorical nevertheless. | [
"R",
"Dirichlet",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1148-L1175 | train | 220,238 |
pymc-devs/pymc | pymc/distributions.py | rexponweib | def rexponweib(alpha, k, loc=0, scale=1, size=None):
"""
Random exponentiated Weibull variates.
"""
q = np.random.uniform(size=size)
r = flib.exponweib_ppf(q, alpha, k)
return loc + r * scale | python | def rexponweib(alpha, k, loc=0, scale=1, size=None):
"""
Random exponentiated Weibull variates.
"""
q = np.random.uniform(size=size)
r = flib.exponweib_ppf(q, alpha, k)
return loc + r * scale | [
"def",
"rexponweib",
"(",
"alpha",
",",
"k",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"1",
",",
"size",
"=",
"None",
")",
":",
"q",
"=",
"np",
".",
"random",
".",
"uniform",
"(",
"size",
"=",
"size",
")",
"r",
"=",
"flib",
".",
"exponweib_ppf",... | Random exponentiated Weibull variates. | [
"Random",
"exponentiated",
"Weibull",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1225-L1232 | train | 220,239 |
pymc-devs/pymc | pymc/distributions.py | exponweib_like | def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) | python | def exponweib_like(x, alpha, k, loc=0, scale=1):
R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0).
"""
return flib.exponweib(x, alpha, k, loc, scale) | [
"def",
"exponweib_like",
"(",
"x",
",",
"alpha",
",",
"k",
",",
"loc",
"=",
"0",
",",
"scale",
"=",
"1",
")",
":",
"return",
"flib",
".",
"exponweib",
"(",
"x",
",",
"alpha",
",",
"k",
",",
"loc",
",",
"scale",
")"
] | R"""
Exponentiated Weibull log-likelihood.
The exponentiated Weibull distribution is a generalization of the Weibull
family. Its value lies in being able to model monotone and non-monotone
failure rates.
.. math::
f(x \mid \alpha,k,loc,scale) & = \frac{\alpha k}{scale} (1-e^{-z^k})^{\alpha-1} e^{-z^k} z^{k-1} \\
z & = \frac{x-loc}{scale}
:Parameters:
- `x` : x > 0
- `alpha` : Shape parameter
- `k` : k > 0
- `loc` : Location parameter
- `scale` : Scale parameter (scale > 0). | [
"R",
"Exponentiated",
"Weibull",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1241-L1261 | train | 220,240 |
pymc-devs/pymc | pymc/distributions.py | rgamma | def rgamma(alpha, beta, size=None):
"""
Random gamma variates.
"""
return np.random.gamma(shape=alpha, scale=1. / beta, size=size) | python | def rgamma(alpha, beta, size=None):
"""
Random gamma variates.
"""
return np.random.gamma(shape=alpha, scale=1. / beta, size=size) | [
"def",
"rgamma",
"(",
"alpha",
",",
"beta",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"gamma",
"(",
"shape",
"=",
"alpha",
",",
"scale",
"=",
"1.",
"/",
"beta",
",",
"size",
"=",
"size",
")"
] | Random gamma variates. | [
"Random",
"gamma",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1275-L1280 | train | 220,241 |
pymc-devs/pymc | pymc/distributions.py | gev_expval | def gev_expval(xi, mu=0, sigma=1):
"""
Expected value of generalized extreme value distribution.
"""
return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi) | python | def gev_expval(xi, mu=0, sigma=1):
"""
Expected value of generalized extreme value distribution.
"""
return mu - (sigma / xi) + (sigma / xi) * flib.gamfun(1 - xi) | [
"def",
"gev_expval",
"(",
"xi",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"return",
"mu",
"-",
"(",
"sigma",
"/",
"xi",
")",
"+",
"(",
"sigma",
"/",
"xi",
")",
"*",
"flib",
".",
"gamfun",
"(",
"1",
"-",
"xi",
")"
] | Expected value of generalized extreme value distribution. | [
"Expected",
"value",
"of",
"generalized",
"extreme",
"value",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1333-L1337 | train | 220,242 |
pymc-devs/pymc | pymc/distributions.py | gev_like | def gev_like(x, xi, mu=0, sigma=1):
R"""
Generalized Extreme Value log-likelihood
.. math::
pdf(x \mid \xi,\mu,\sigma) = \frac{1}{\sigma}(1 + \xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi-1}\exp{-(1+\xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi}}
.. math::
\sigma & > 0,\\
x & > \mu-\sigma/\xi \text{ if } \xi > 0,\\
x & < \mu-\sigma/\xi \text{ if } \xi < 0\\
x & \in [-\infty,\infty] \text{ if } \xi = 0
"""
return flib.gev(x, xi, mu, sigma) | python | def gev_like(x, xi, mu=0, sigma=1):
R"""
Generalized Extreme Value log-likelihood
.. math::
pdf(x \mid \xi,\mu,\sigma) = \frac{1}{\sigma}(1 + \xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi-1}\exp{-(1+\xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi}}
.. math::
\sigma & > 0,\\
x & > \mu-\sigma/\xi \text{ if } \xi > 0,\\
x & < \mu-\sigma/\xi \text{ if } \xi < 0\\
x & \in [-\infty,\infty] \text{ if } \xi = 0
"""
return flib.gev(x, xi, mu, sigma) | [
"def",
"gev_like",
"(",
"x",
",",
"xi",
",",
"mu",
"=",
"0",
",",
"sigma",
"=",
"1",
")",
":",
"return",
"flib",
".",
"gev",
"(",
"x",
",",
"xi",
",",
"mu",
",",
"sigma",
")"
] | R"""
Generalized Extreme Value log-likelihood
.. math::
pdf(x \mid \xi,\mu,\sigma) = \frac{1}{\sigma}(1 + \xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi-1}\exp{-(1+\xi \left[\frac{x-\mu}{\sigma}\right])^{-1/\xi}}
.. math::
\sigma & > 0,\\
x & > \mu-\sigma/\xi \text{ if } \xi > 0,\\
x & < \mu-\sigma/\xi \text{ if } \xi < 0\\
x & \in [-\infty,\infty] \text{ if } \xi = 0 | [
"R",
"Generalized",
"Extreme",
"Value",
"log",
"-",
"likelihood"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1340-L1355 | train | 220,243 |
pymc-devs/pymc | pymc/distributions.py | rhalf_cauchy | def rhalf_cauchy(alpha, beta, size=None):
"""
Returns half-Cauchy random variates.
"""
return abs(alpha + beta * np.tan(pi * random_number(size) - pi / 2.0)) | python | def rhalf_cauchy(alpha, beta, size=None):
"""
Returns half-Cauchy random variates.
"""
return abs(alpha + beta * np.tan(pi * random_number(size) - pi / 2.0)) | [
"def",
"rhalf_cauchy",
"(",
"alpha",
",",
"beta",
",",
"size",
"=",
"None",
")",
":",
"return",
"abs",
"(",
"alpha",
"+",
"beta",
"*",
"np",
".",
"tan",
"(",
"pi",
"*",
"random_number",
"(",
"size",
")",
"-",
"pi",
"/",
"2.0",
")",
")"
] | Returns half-Cauchy random variates. | [
"Returns",
"half",
"-",
"Cauchy",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1403-L1408 | train | 220,244 |
pymc-devs/pymc | pymc/distributions.py | half_cauchy_like | def half_cauchy_like(x, alpha, beta):
R"""
Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.
.. math::
f(x \mid \alpha, \beta) = \frac{2}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
:Parameters:
- `alpha` : Location parameter.
- `beta` : Scale parameter (beta > 0).
.. note::
- x must be non-negative.
"""
x = np.atleast_1d(x)
if sum(x.ravel() < 0):
return -inf
return flib.cauchy(x, alpha, beta) + len(x) * np.log(2) | python | def half_cauchy_like(x, alpha, beta):
R"""
Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.
.. math::
f(x \mid \alpha, \beta) = \frac{2}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
:Parameters:
- `alpha` : Location parameter.
- `beta` : Scale parameter (beta > 0).
.. note::
- x must be non-negative.
"""
x = np.atleast_1d(x)
if sum(x.ravel() < 0):
return -inf
return flib.cauchy(x, alpha, beta) + len(x) * np.log(2) | [
"def",
"half_cauchy_like",
"(",
"x",
",",
"alpha",
",",
"beta",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"if",
"sum",
"(",
"x",
".",
"ravel",
"(",
")",
"<",
"0",
")",
":",
"return",
"-",
"inf",
"return",
"flib",
".",
"cauchy"... | R"""
Half-Cauchy log-likelihood. Simply the absolute value of Cauchy.
.. math::
f(x \mid \alpha, \beta) = \frac{2}{\pi \beta [1 + (\frac{x-\alpha}{\beta})^2]}
:Parameters:
- `alpha` : Location parameter.
- `beta` : Scale parameter (beta > 0).
.. note::
- x must be non-negative. | [
"R",
"Half",
"-",
"Cauchy",
"log",
"-",
"likelihood",
".",
"Simply",
"the",
"absolute",
"value",
"of",
"Cauchy",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1421-L1439 | train | 220,245 |
pymc-devs/pymc | pymc/distributions.py | rhalf_normal | def rhalf_normal(tau, size=None):
"""
Random half-normal variates.
"""
return abs(np.random.normal(0, np.sqrt(1 / tau), size)) | python | def rhalf_normal(tau, size=None):
"""
Random half-normal variates.
"""
return abs(np.random.normal(0, np.sqrt(1 / tau), size)) | [
"def",
"rhalf_normal",
"(",
"tau",
",",
"size",
"=",
"None",
")",
":",
"return",
"abs",
"(",
"np",
".",
"random",
".",
"normal",
"(",
"0",
",",
"np",
".",
"sqrt",
"(",
"1",
"/",
"tau",
")",
",",
"size",
")",
")"
] | Random half-normal variates. | [
"Random",
"half",
"-",
"normal",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1445-L1450 | train | 220,246 |
pymc-devs/pymc | pymc/distributions.py | rhypergeometric | def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size) | python | def rhypergeometric(n, m, N, size=None):
"""
Returns hypergeometric random variates.
"""
if n == 0:
return np.zeros(size, dtype=int)
elif n == N:
out = np.empty(size, dtype=int)
out.fill(m)
return out
return np.random.hypergeometric(n, N - n, m, size) | [
"def",
"rhypergeometric",
"(",
"n",
",",
"m",
",",
"N",
",",
"size",
"=",
"None",
")",
":",
"if",
"n",
"==",
"0",
":",
"return",
"np",
".",
"zeros",
"(",
"size",
",",
"dtype",
"=",
"int",
")",
"elif",
"n",
"==",
"N",
":",
"out",
"=",
"np",
... | Returns hypergeometric random variates. | [
"Returns",
"hypergeometric",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1483-L1493 | train | 220,247 |
pymc-devs/pymc | pymc/distributions.py | hypergeometric_like | def hypergeometric_like(x, n, m, N):
R"""
Hypergeometric log-likelihood.
Discrete probability distribution that describes the number of successes in
a sequence of draws from a finite population without replacement.
.. math::
f(x \mid n, m, N) = \frac{\left({ \begin{array}{c} {m} \\ {x} \\
\end{array} }\right)\left({ \begin{array}{c} {N-m} \\ {n-x} \\
\end{array}}\right)}{\left({ \begin{array}{c} {N} \\ {n} \\
\end{array}}\right)}
:Parameters:
- `x` : [int] Number of successes in a sample drawn from a population.
- `n` : [int] Size of sample drawn from the population.
- `m` : [int] Number of successes in the population.
- `N` : [int] Total number of units in the population.
.. note::
:math:`E(X) = \frac{n n}{N}`
"""
return flib.hyperg(x, n, m, N) | python | def hypergeometric_like(x, n, m, N):
R"""
Hypergeometric log-likelihood.
Discrete probability distribution that describes the number of successes in
a sequence of draws from a finite population without replacement.
.. math::
f(x \mid n, m, N) = \frac{\left({ \begin{array}{c} {m} \\ {x} \\
\end{array} }\right)\left({ \begin{array}{c} {N-m} \\ {n-x} \\
\end{array}}\right)}{\left({ \begin{array}{c} {N} \\ {n} \\
\end{array}}\right)}
:Parameters:
- `x` : [int] Number of successes in a sample drawn from a population.
- `n` : [int] Size of sample drawn from the population.
- `m` : [int] Number of successes in the population.
- `N` : [int] Total number of units in the population.
.. note::
:math:`E(X) = \frac{n n}{N}`
"""
return flib.hyperg(x, n, m, N) | [
"def",
"hypergeometric_like",
"(",
"x",
",",
"n",
",",
"m",
",",
"N",
")",
":",
"return",
"flib",
".",
"hyperg",
"(",
"x",
",",
"n",
",",
"m",
",",
"N",
")"
] | R"""
Hypergeometric log-likelihood.
Discrete probability distribution that describes the number of successes in
a sequence of draws from a finite population without replacement.
.. math::
f(x \mid n, m, N) = \frac{\left({ \begin{array}{c} {m} \\ {x} \\
\end{array} }\right)\left({ \begin{array}{c} {N-m} \\ {n-x} \\
\end{array}}\right)}{\left({ \begin{array}{c} {N} \\ {n} \\
\end{array}}\right)}
:Parameters:
- `x` : [int] Number of successes in a sample drawn from a population.
- `n` : [int] Size of sample drawn from the population.
- `m` : [int] Number of successes in the population.
- `N` : [int] Total number of units in the population.
.. note::
:math:`E(X) = \frac{n n}{N}` | [
"R",
"Hypergeometric",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1503-L1529 | train | 220,248 |
pymc-devs/pymc | pymc/distributions.py | rlogistic | def rlogistic(mu, tau, size=None):
"""
Logistic random variates.
"""
u = np.random.random(size)
return mu + np.log(u / (1 - u)) / tau | python | def rlogistic(mu, tau, size=None):
"""
Logistic random variates.
"""
u = np.random.random(size)
return mu + np.log(u / (1 - u)) / tau | [
"def",
"rlogistic",
"(",
"mu",
",",
"tau",
",",
"size",
"=",
"None",
")",
":",
"u",
"=",
"np",
".",
"random",
".",
"random",
"(",
"size",
")",
"return",
"mu",
"+",
"np",
".",
"log",
"(",
"u",
"/",
"(",
"1",
"-",
"u",
")",
")",
"/",
"tau"
] | Logistic random variates. | [
"Logistic",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1685-L1691 | train | 220,249 |
pymc-devs/pymc | pymc/distributions.py | rlognormal | def rlognormal(mu, tau, size=None):
"""
Return random lognormal variates.
"""
return np.random.lognormal(mu, np.sqrt(1. / tau), size) | python | def rlognormal(mu, tau, size=None):
"""
Return random lognormal variates.
"""
return np.random.lognormal(mu, np.sqrt(1. / tau), size) | [
"def",
"rlognormal",
"(",
"mu",
",",
"tau",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"lognormal",
"(",
"mu",
",",
"np",
".",
"sqrt",
"(",
"1.",
"/",
"tau",
")",
",",
"size",
")"
] | Return random lognormal variates. | [
"Return",
"random",
"lognormal",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1726-L1731 | train | 220,250 |
pymc-devs/pymc | pymc/distributions.py | rmultinomial | def rmultinomial(n, p, size=None):
"""
Random multinomial variates.
"""
# Leaving size=None as the default means return value is 1d array
# if not specified-- nicer.
# Single value for p:
if len(np.shape(p)) == 1:
return np.random.multinomial(n, p, size)
# Multiple values for p:
if np.isscalar(n):
n = n * np.ones(np.shape(p)[0], dtype=np.int)
out = np.empty(np.shape(p))
for i in xrange(np.shape(p)[0]):
out[i, :] = np.random.multinomial(n[i], p[i,:], size)
return out | python | def rmultinomial(n, p, size=None):
"""
Random multinomial variates.
"""
# Leaving size=None as the default means return value is 1d array
# if not specified-- nicer.
# Single value for p:
if len(np.shape(p)) == 1:
return np.random.multinomial(n, p, size)
# Multiple values for p:
if np.isscalar(n):
n = n * np.ones(np.shape(p)[0], dtype=np.int)
out = np.empty(np.shape(p))
for i in xrange(np.shape(p)[0]):
out[i, :] = np.random.multinomial(n[i], p[i,:], size)
return out | [
"def",
"rmultinomial",
"(",
"n",
",",
"p",
",",
"size",
"=",
"None",
")",
":",
"# Leaving size=None as the default means return value is 1d array",
"# if not specified-- nicer.",
"# Single value for p:",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"p",
")",
")",
"=="... | Random multinomial variates. | [
"Random",
"multinomial",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1773-L1790 | train | 220,251 |
pymc-devs/pymc | pymc/distributions.py | multinomial_like | def multinomial_like(x, n, p):
R"""
Multinomial log-likelihood.
Generalization of the binomial
distribution, but instead of each trial resulting in "success" or
"failure", each one results in exactly one of some fixed finite number k
of possible outcomes over n independent trials. 'x[i]' indicates the number
of times outcome number i was observed over the n trials.
.. math::
f(x \mid n, p) = \frac{n!}{\prod_{i=1}^k x_i!} \prod_{i=1}^k p_i^{x_i}
:Parameters:
x : (ns, k) int
Random variable indicating the number of time outcome i is
observed. :math:`\sum_{i=1}^k x_i=n`, :math:`x_i \ge 0`.
n : int
Number of trials.
p : (k,)
Probability of each one of the different outcomes.
:math:`\sum_{i=1}^k p_i = 1)`, :math:`p_i \ge 0`.
.. note::
- :math:`E(X_i)=n p_i`
- :math:`Var(X_i)=n p_i(1-p_i)`
- :math:`Cov(X_i,X_j) = -n p_i p_j`
- If :math:`\sum_i p_i < 0.999999` a log-likelihood value of -inf
will be returned.
"""
# flib expects 2d arguments. Do we still want to support multiple p
# values along realizations ?
x = np.atleast_2d(x)
p = np.atleast_2d(p)
return flib.multinomial(x, n, p) | python | def multinomial_like(x, n, p):
R"""
Multinomial log-likelihood.
Generalization of the binomial
distribution, but instead of each trial resulting in "success" or
"failure", each one results in exactly one of some fixed finite number k
of possible outcomes over n independent trials. 'x[i]' indicates the number
of times outcome number i was observed over the n trials.
.. math::
f(x \mid n, p) = \frac{n!}{\prod_{i=1}^k x_i!} \prod_{i=1}^k p_i^{x_i}
:Parameters:
x : (ns, k) int
Random variable indicating the number of time outcome i is
observed. :math:`\sum_{i=1}^k x_i=n`, :math:`x_i \ge 0`.
n : int
Number of trials.
p : (k,)
Probability of each one of the different outcomes.
:math:`\sum_{i=1}^k p_i = 1)`, :math:`p_i \ge 0`.
.. note::
- :math:`E(X_i)=n p_i`
- :math:`Var(X_i)=n p_i(1-p_i)`
- :math:`Cov(X_i,X_j) = -n p_i p_j`
- If :math:`\sum_i p_i < 0.999999` a log-likelihood value of -inf
will be returned.
"""
# flib expects 2d arguments. Do we still want to support multiple p
# values along realizations ?
x = np.atleast_2d(x)
p = np.atleast_2d(p)
return flib.multinomial(x, n, p) | [
"def",
"multinomial_like",
"(",
"x",
",",
"n",
",",
"p",
")",
":",
"# flib expects 2d arguments. Do we still want to support multiple p",
"# values along realizations ?",
"x",
"=",
"np",
".",
"atleast_2d",
"(",
"x",
")",
"p",
"=",
"np",
".",
"atleast_2d",
"(",
"p"... | R"""
Multinomial log-likelihood.
Generalization of the binomial
distribution, but instead of each trial resulting in "success" or
"failure", each one results in exactly one of some fixed finite number k
of possible outcomes over n independent trials. 'x[i]' indicates the number
of times outcome number i was observed over the n trials.
.. math::
f(x \mid n, p) = \frac{n!}{\prod_{i=1}^k x_i!} \prod_{i=1}^k p_i^{x_i}
:Parameters:
x : (ns, k) int
Random variable indicating the number of time outcome i is
observed. :math:`\sum_{i=1}^k x_i=n`, :math:`x_i \ge 0`.
n : int
Number of trials.
p : (k,)
Probability of each one of the different outcomes.
:math:`\sum_{i=1}^k p_i = 1)`, :math:`p_i \ge 0`.
.. note::
- :math:`E(X_i)=n p_i`
- :math:`Var(X_i)=n p_i(1-p_i)`
- :math:`Cov(X_i,X_j) = -n p_i p_j`
- If :math:`\sum_i p_i < 0.999999` a log-likelihood value of -inf
will be returned. | [
"R",
"Multinomial",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1800-L1836 | train | 220,252 |
pymc-devs/pymc | pymc/distributions.py | rmultivariate_hypergeometric | def rmultivariate_hypergeometric(n, m, size=None):
"""
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
N = len(m)
urn = np.repeat(np.arange(N), m)
if size:
draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]]
for j in range(size)])
r = [[np.sum(draw[j] == i) for i in range(len(m))]
for j in range(size)]
else:
draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]])
r = [np.sum(draw == i) for i in range(len(m))]
return np.asarray(r) | python | def rmultivariate_hypergeometric(n, m, size=None):
"""
Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
N = len(m)
urn = np.repeat(np.arange(N), m)
if size:
draw = np.array([[urn[i] for i in np.random.permutation(len(urn))[:n]]
for j in range(size)])
r = [[np.sum(draw[j] == i) for i in range(len(m))]
for j in range(size)]
else:
draw = np.array([urn[i] for i in np.random.permutation(len(urn))[:n]])
r = [np.sum(draw == i) for i in range(len(m))]
return np.asarray(r) | [
"def",
"rmultivariate_hypergeometric",
"(",
"n",
",",
"m",
",",
"size",
"=",
"None",
")",
":",
"N",
"=",
"len",
"(",
"m",
")",
"urn",
"=",
"np",
".",
"repeat",
"(",
"np",
".",
"arange",
"(",
"N",
")",
",",
"m",
")",
"if",
"size",
":",
"draw",
... | Random multivariate hypergeometric variates.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy. | [
"Random",
"multivariate",
"hypergeometric",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1841-L1863 | train | 220,253 |
pymc-devs/pymc | pymc/distributions.py | multivariate_hypergeometric_expval | def multivariate_hypergeometric_expval(n, m):
"""
Expected value of multivariate hypergeometric distribution.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
m = np.asarray(m, float)
return n * (m / m.sum()) | python | def multivariate_hypergeometric_expval(n, m):
"""
Expected value of multivariate hypergeometric distribution.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy.
"""
m = np.asarray(m, float)
return n * (m / m.sum()) | [
"def",
"multivariate_hypergeometric_expval",
"(",
"n",
",",
"m",
")",
":",
"m",
"=",
"np",
".",
"asarray",
"(",
"m",
",",
"float",
")",
"return",
"n",
"*",
"(",
"m",
"/",
"m",
".",
"sum",
"(",
")",
")"
] | Expected value of multivariate hypergeometric distribution.
Parameters:
- `n` : Number of draws.
- `m` : Number of items in each categoy. | [
"Expected",
"value",
"of",
"multivariate",
"hypergeometric",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1866-L1875 | train | 220,254 |
pymc-devs/pymc | pymc/distributions.py | mv_normal_like | def mv_normal_like(x, mu, tau):
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])
else:
return flib.prec_mvnorm(x, mu, tau) | python | def mv_normal_like(x, mu, tau):
R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.prec_mvnorm(r, mu, tau) for r in x])
else:
return flib.prec_mvnorm(x, mu, tau) | [
"def",
"mv_normal_like",
"(",
"x",
",",
"mu",
",",
"tau",
")",
":",
"# TODO: Vectorize in Fortran",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"x",
")",
")",
">",
"1",
":",
"return",
"np",
".",
"sum",
"(",
"[",
"flib",
".",
"prec_mvnorm",
"(",
"r",... | R"""
Multivariate normal log-likelihood
.. math::
f(x \mid \pi, T) = \frac{|T|^{1/2}}{(2\pi)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}T(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter sequence.
- `Tau` : (k,k) Positive definite precision matrix.
.. seealso:: :func:`mv_normal_chol_like`, :func:`mv_normal_cov_like` | [
"R",
"Multivariate",
"normal",
"log",
"-",
"likelihood"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1939-L1958 | train | 220,255 |
pymc-devs/pymc | pymc/distributions.py | mv_normal_cov_like | def mv_normal_cov_like(x, mu, C):
R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.cov_mvnorm(r, mu, C) for r in x])
else:
return flib.cov_mvnorm(x, mu, C) | python | def mv_normal_cov_like(x, mu, C):
R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.cov_mvnorm(r, mu, C) for r in x])
else:
return flib.cov_mvnorm(x, mu, C) | [
"def",
"mv_normal_cov_like",
"(",
"x",
",",
"mu",
",",
"C",
")",
":",
"# TODO: Vectorize in Fortran",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"x",
")",
")",
">",
"1",
":",
"return",
"np",
".",
"sum",
"(",
"[",
"flib",
".",
"cov_mvnorm",
"(",
"r"... | R"""
Multivariate normal log-likelihood parameterized by a covariance
matrix.
.. math::
f(x \mid \pi, C) = \frac{1}{(2\pi|C|)^{1/2}} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}C^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `C` : (k,k) Positive definite covariance matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_chol_like` | [
"R",
"Multivariate",
"normal",
"log",
"-",
"likelihood",
"parameterized",
"by",
"a",
"covariance",
"matrix",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L1982-L2002 | train | 220,256 |
pymc-devs/pymc | pymc/distributions.py | mv_normal_chol_like | def mv_normal_chol_like(x, mu, sig):
R"""
Multivariate normal log-likelihood.
.. math::
f(x \mid \pi, \sigma) = \frac{1}{(2\pi)^{1/2}|\sigma|)} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}(\sigma \sigma^{\prime})^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `sigma` : (k,k) Lower triangular matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.chol_mvnorm(r, mu, sig) for r in x])
else:
return flib.chol_mvnorm(x, mu, sig) | python | def mv_normal_chol_like(x, mu, sig):
R"""
Multivariate normal log-likelihood.
.. math::
f(x \mid \pi, \sigma) = \frac{1}{(2\pi)^{1/2}|\sigma|)} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}(\sigma \sigma^{\prime})^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `sigma` : (k,k) Lower triangular matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_cov_like`
"""
# TODO: Vectorize in Fortran
if len(np.shape(x)) > 1:
return np.sum([flib.chol_mvnorm(r, mu, sig) for r in x])
else:
return flib.chol_mvnorm(x, mu, sig) | [
"def",
"mv_normal_chol_like",
"(",
"x",
",",
"mu",
",",
"sig",
")",
":",
"# TODO: Vectorize in Fortran",
"if",
"len",
"(",
"np",
".",
"shape",
"(",
"x",
")",
")",
">",
"1",
":",
"return",
"np",
".",
"sum",
"(",
"[",
"flib",
".",
"chol_mvnorm",
"(",
... | R"""
Multivariate normal log-likelihood.
.. math::
f(x \mid \pi, \sigma) = \frac{1}{(2\pi)^{1/2}|\sigma|)} \exp\left\{ -\frac{1}{2} (x-\mu)^{\prime}(\sigma \sigma^{\prime})^{-1}(x-\mu) \right\}
:Parameters:
- `x` : (n,k)
- `mu` : (k) Location parameter.
- `sigma` : (k,k) Lower triangular matrix.
.. seealso:: :func:`mv_normal_like`, :func:`mv_normal_cov_like` | [
"R",
"Multivariate",
"normal",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2041-L2060 | train | 220,257 |
pymc-devs/pymc | pymc/distributions.py | rnegative_binomial | def rnegative_binomial(mu, alpha, size=None):
"""
Random negative binomial variates.
"""
# Using gamma-poisson mixture rather than numpy directly
# because numpy apparently rounds
mu = np.asarray(mu, dtype=float)
pois_mu = np.random.gamma(alpha, mu / alpha, size)
return np.random.poisson(pois_mu, size) | python | def rnegative_binomial(mu, alpha, size=None):
"""
Random negative binomial variates.
"""
# Using gamma-poisson mixture rather than numpy directly
# because numpy apparently rounds
mu = np.asarray(mu, dtype=float)
pois_mu = np.random.gamma(alpha, mu / alpha, size)
return np.random.poisson(pois_mu, size) | [
"def",
"rnegative_binomial",
"(",
"mu",
",",
"alpha",
",",
"size",
"=",
"None",
")",
":",
"# Using gamma-poisson mixture rather than numpy directly",
"# because numpy apparently rounds",
"mu",
"=",
"np",
".",
"asarray",
"(",
"mu",
",",
"dtype",
"=",
"float",
")",
... | Random negative binomial variates. | [
"Random",
"negative",
"binomial",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2065-L2073 | train | 220,258 |
pymc-devs/pymc | pymc/distributions.py | negative_binomial_like | def negative_binomial_like(x, mu, alpha):
R"""
Negative binomial log-likelihood.
The negative binomial
distribution describes a Poisson random variable whose rate
parameter is gamma distributed. PyMC's chosen parameterization is
based on this mixture interpretation.
.. math::
f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
:Parameters:
- `x` : x = 0,1,2,...
- `mu` : mu > 0
- `alpha` : alpha > 0
.. note::
- :math:`E[x]=\mu`
- In Wikipedia's parameterization,
:math:`r=\alpha`,
:math:`p=\mu/(\mu+\alpha)`,
:math:`\mu=rp/(1-p)`
"""
alpha = np.array(alpha)
if (alpha > 1e10).any():
if (alpha > 1e10).all():
# Return Poisson when alpha gets very large
return flib.poisson(x, mu)
# Split big and small dispersion values
big = alpha > 1e10
return flib.poisson(x[big], mu[big]) + flib.negbin2(x[big - True],
mu[big - True], alpha[big - True])
return flib.negbin2(x, mu, alpha) | python | def negative_binomial_like(x, mu, alpha):
R"""
Negative binomial log-likelihood.
The negative binomial
distribution describes a Poisson random variable whose rate
parameter is gamma distributed. PyMC's chosen parameterization is
based on this mixture interpretation.
.. math::
f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
:Parameters:
- `x` : x = 0,1,2,...
- `mu` : mu > 0
- `alpha` : alpha > 0
.. note::
- :math:`E[x]=\mu`
- In Wikipedia's parameterization,
:math:`r=\alpha`,
:math:`p=\mu/(\mu+\alpha)`,
:math:`\mu=rp/(1-p)`
"""
alpha = np.array(alpha)
if (alpha > 1e10).any():
if (alpha > 1e10).all():
# Return Poisson when alpha gets very large
return flib.poisson(x, mu)
# Split big and small dispersion values
big = alpha > 1e10
return flib.poisson(x[big], mu[big]) + flib.negbin2(x[big - True],
mu[big - True], alpha[big - True])
return flib.negbin2(x, mu, alpha) | [
"def",
"negative_binomial_like",
"(",
"x",
",",
"mu",
",",
"alpha",
")",
":",
"alpha",
"=",
"np",
".",
"array",
"(",
"alpha",
")",
"if",
"(",
"alpha",
">",
"1e10",
")",
".",
"any",
"(",
")",
":",
"if",
"(",
"alpha",
">",
"1e10",
")",
".",
"all"... | R"""
Negative binomial log-likelihood.
The negative binomial
distribution describes a Poisson random variable whose rate
parameter is gamma distributed. PyMC's chosen parameterization is
based on this mixture interpretation.
.. math::
f(x \mid \mu, \alpha) = \frac{\Gamma(x+\alpha)}{x! \Gamma(\alpha)} (\alpha/(\mu+\alpha))^\alpha (\mu/(\mu+\alpha))^x
:Parameters:
- `x` : x = 0,1,2,...
- `mu` : mu > 0
- `alpha` : alpha > 0
.. note::
- :math:`E[x]=\mu`
- In Wikipedia's parameterization,
:math:`r=\alpha`,
:math:`p=\mu/(\mu+\alpha)`,
:math:`\mu=rp/(1-p)` | [
"R",
"Negative",
"binomial",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2084-L2119 | train | 220,259 |
pymc-devs/pymc | pymc/distributions.py | rnormal | def rnormal(mu, tau, size=None):
"""
Random normal variates.
"""
return np.random.normal(mu, 1. / np.sqrt(tau), size) | python | def rnormal(mu, tau, size=None):
"""
Random normal variates.
"""
return np.random.normal(mu, 1. / np.sqrt(tau), size) | [
"def",
"rnormal",
"(",
"mu",
",",
"tau",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"normal",
"(",
"mu",
",",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"tau",
")",
",",
"size",
")"
] | Random normal variates. | [
"Random",
"normal",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2128-L2132 | train | 220,260 |
pymc-devs/pymc | pymc/distributions.py | rvon_mises | def rvon_mises(mu, kappa, size=None):
"""
Random von Mises variates.
"""
# TODO: Just return straight from numpy after release 1.3
return (np.random.mtrand.vonmises(
mu, kappa, size) + np.pi) % (2. * np.pi) - np.pi | python | def rvon_mises(mu, kappa, size=None):
"""
Random von Mises variates.
"""
# TODO: Just return straight from numpy after release 1.3
return (np.random.mtrand.vonmises(
mu, kappa, size) + np.pi) % (2. * np.pi) - np.pi | [
"def",
"rvon_mises",
"(",
"mu",
",",
"kappa",
",",
"size",
"=",
"None",
")",
":",
"# TODO: Just return straight from numpy after release 1.3",
"return",
"(",
"np",
".",
"random",
".",
"mtrand",
".",
"vonmises",
"(",
"mu",
",",
"kappa",
",",
"size",
")",
"+",... | Random von Mises variates. | [
"Random",
"von",
"Mises",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2191-L2197 | train | 220,261 |
pymc-devs/pymc | pymc/distributions.py | rtruncated_pareto | def rtruncated_pareto(alpha, m, b, size=None):
"""
Random bounded Pareto variates.
"""
u = random_number(size)
return (-(u * b ** alpha - u * m ** alpha - b ** alpha) /
(b ** alpha * m ** alpha)) ** (-1. / alpha) | python | def rtruncated_pareto(alpha, m, b, size=None):
"""
Random bounded Pareto variates.
"""
u = random_number(size)
return (-(u * b ** alpha - u * m ** alpha - b ** alpha) /
(b ** alpha * m ** alpha)) ** (-1. / alpha) | [
"def",
"rtruncated_pareto",
"(",
"alpha",
",",
"m",
",",
"b",
",",
"size",
"=",
"None",
")",
":",
"u",
"=",
"random_number",
"(",
"size",
")",
"return",
"(",
"-",
"(",
"u",
"*",
"b",
"**",
"alpha",
"-",
"u",
"*",
"m",
"**",
"alpha",
"-",
"b",
... | Random bounded Pareto variates. | [
"Random",
"bounded",
"Pareto",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2274-L2280 | train | 220,262 |
pymc-devs/pymc | pymc/distributions.py | truncated_pareto_expval | def truncated_pareto_expval(alpha, m, b):
"""
Expected value of truncated Pareto distribution.
"""
if alpha <= 1:
return inf
part1 = (m ** alpha) / (1. - (m / b) ** alpha)
part2 = 1. * alpha / (alpha - 1)
part3 = (1. / (m ** (alpha - 1)) - 1. / (b ** (alpha - 1.)))
return part1 * part2 * part3 | python | def truncated_pareto_expval(alpha, m, b):
"""
Expected value of truncated Pareto distribution.
"""
if alpha <= 1:
return inf
part1 = (m ** alpha) / (1. - (m / b) ** alpha)
part2 = 1. * alpha / (alpha - 1)
part3 = (1. / (m ** (alpha - 1)) - 1. / (b ** (alpha - 1.)))
return part1 * part2 * part3 | [
"def",
"truncated_pareto_expval",
"(",
"alpha",
",",
"m",
",",
"b",
")",
":",
"if",
"alpha",
"<=",
"1",
":",
"return",
"inf",
"part1",
"=",
"(",
"m",
"**",
"alpha",
")",
"/",
"(",
"1.",
"-",
"(",
"m",
"/",
"b",
")",
"**",
"alpha",
")",
"part2",... | Expected value of truncated Pareto distribution. | [
"Expected",
"value",
"of",
"truncated",
"Pareto",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2283-L2293 | train | 220,263 |
pymc-devs/pymc | pymc/distributions.py | rtruncated_poisson | def rtruncated_poisson(mu, k, size=None):
"""
Random truncated Poisson variates with minimum value k, generated
using rejection sampling.
"""
# Calculate m
try:
m = max(0, np.floor(k - mu))
except (TypeError, ValueError):
# More than one mu
return np.array([rtruncated_poisson(x, i)
for x, i in zip(mu, np.resize(k, np.size(mu)))]).squeeze()
k -= 1
# Calculate constant for acceptance probability
C = np.exp(flib.factln(k + 1) - flib.factln(k + 1 - m))
# Empty array to hold random variates
rvs = np.empty(0, int)
total_size = np.prod(size or 1)
while(len(rvs) < total_size):
# Propose values by sampling from untruncated Poisson with mean mu + m
proposals = np.random.poisson(
mu + m, (total_size * 4, np.size(m))).squeeze()
# Acceptance probability
a = C * np.array([np.exp(flib.factln(y - m) - flib.factln(y))
for y in proposals])
a *= proposals > k
# Uniform random variates
u = np.random.random(total_size * 4)
rvs = np.append(rvs, proposals[u < a])
return np.reshape(rvs[:total_size], size) | python | def rtruncated_poisson(mu, k, size=None):
"""
Random truncated Poisson variates with minimum value k, generated
using rejection sampling.
"""
# Calculate m
try:
m = max(0, np.floor(k - mu))
except (TypeError, ValueError):
# More than one mu
return np.array([rtruncated_poisson(x, i)
for x, i in zip(mu, np.resize(k, np.size(mu)))]).squeeze()
k -= 1
# Calculate constant for acceptance probability
C = np.exp(flib.factln(k + 1) - flib.factln(k + 1 - m))
# Empty array to hold random variates
rvs = np.empty(0, int)
total_size = np.prod(size or 1)
while(len(rvs) < total_size):
# Propose values by sampling from untruncated Poisson with mean mu + m
proposals = np.random.poisson(
mu + m, (total_size * 4, np.size(m))).squeeze()
# Acceptance probability
a = C * np.array([np.exp(flib.factln(y - m) - flib.factln(y))
for y in proposals])
a *= proposals > k
# Uniform random variates
u = np.random.random(total_size * 4)
rvs = np.append(rvs, proposals[u < a])
return np.reshape(rvs[:total_size], size) | [
"def",
"rtruncated_poisson",
"(",
"mu",
",",
"k",
",",
"size",
"=",
"None",
")",
":",
"# Calculate m",
"try",
":",
"m",
"=",
"max",
"(",
"0",
",",
"np",
".",
"floor",
"(",
"k",
"-",
"mu",
")",
")",
"except",
"(",
"TypeError",
",",
"ValueError",
"... | Random truncated Poisson variates with minimum value k, generated
using rejection sampling. | [
"Random",
"truncated",
"Poisson",
"variates",
"with",
"minimum",
"value",
"k",
"generated",
"using",
"rejection",
"sampling",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2365-L2404 | train | 220,264 |
pymc-devs/pymc | pymc/distributions.py | rtruncated_normal | def rtruncated_normal(mu, tau, a=-np.inf, b=np.inf, size=None):
"""
Random truncated normal variates.
"""
sigma = 1. / np.sqrt(tau)
na = utils.normcdf((a - mu) / sigma)
nb = utils.normcdf((b - mu) / sigma)
# Use the inverse CDF generation method.
U = np.random.mtrand.uniform(size=size)
q = U * nb + (1 - U) * na
R = utils.invcdf(q)
# Unnormalize
return R * sigma + mu | python | def rtruncated_normal(mu, tau, a=-np.inf, b=np.inf, size=None):
"""
Random truncated normal variates.
"""
sigma = 1. / np.sqrt(tau)
na = utils.normcdf((a - mu) / sigma)
nb = utils.normcdf((b - mu) / sigma)
# Use the inverse CDF generation method.
U = np.random.mtrand.uniform(size=size)
q = U * nb + (1 - U) * na
R = utils.invcdf(q)
# Unnormalize
return R * sigma + mu | [
"def",
"rtruncated_normal",
"(",
"mu",
",",
"tau",
",",
"a",
"=",
"-",
"np",
".",
"inf",
",",
"b",
"=",
"np",
".",
"inf",
",",
"size",
"=",
"None",
")",
":",
"sigma",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"tau",
")",
"na",
"=",
"utils",
"... | Random truncated normal variates. | [
"Random",
"truncated",
"normal",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2447-L2462 | train | 220,265 |
pymc-devs/pymc | pymc/distributions.py | truncated_normal_expval | def truncated_normal_expval(mu, tau, a, b):
"""Expected value of the truncated normal distribution.
.. math::
E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}
where
.. math::
T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
\left(\frac{A-\mu}{\sigma}\right)\text \\
\varphi_1 &=
\varphi\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_2 &=
\varphi\left(\frac{B-\mu}{\sigma}\right) \\
and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
phia = np.exp(normal_like(a, mu, tau))
phib = np.exp(normal_like(b, mu, tau))
sigma = 1. / np.sqrt(tau)
Phia = utils.normcdf((a - mu) / sigma)
if b == np.inf:
Phib = 1.0
else:
Phib = utils.normcdf((b - mu) / sigma)
return (mu + (phia - phib) / (Phib - Phia))[0] | python | def truncated_normal_expval(mu, tau, a, b):
"""Expected value of the truncated normal distribution.
.. math::
E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}
where
.. math::
T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
\left(\frac{A-\mu}{\sigma}\right)\text \\
\varphi_1 &=
\varphi\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_2 &=
\varphi\left(\frac{B-\mu}{\sigma}\right) \\
and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
phia = np.exp(normal_like(a, mu, tau))
phib = np.exp(normal_like(b, mu, tau))
sigma = 1. / np.sqrt(tau)
Phia = utils.normcdf((a - mu) / sigma)
if b == np.inf:
Phib = 1.0
else:
Phib = utils.normcdf((b - mu) / sigma)
return (mu + (phia - phib) / (Phib - Phia))[0] | [
"def",
"truncated_normal_expval",
"(",
"mu",
",",
"tau",
",",
"a",
",",
"b",
")",
":",
"phia",
"=",
"np",
".",
"exp",
"(",
"normal_like",
"(",
"a",
",",
"mu",
",",
"tau",
")",
")",
"phib",
"=",
"np",
".",
"exp",
"(",
"normal_like",
"(",
"b",
",... | Expected value of the truncated normal distribution.
.. math::
E(X) =\mu + \frac{\sigma(\varphi_1-\varphi_2)}{T}
where
.. math::
T & =\Phi\left(\frac{B-\mu}{\sigma}\right)-\Phi
\left(\frac{A-\mu}{\sigma}\right)\text \\
\varphi_1 &=
\varphi\left(\frac{A-\mu}{\sigma}\right) \\
\varphi_2 &=
\varphi\left(\frac{B-\mu}{\sigma}\right) \\
and :math:`\varphi = N(0,1)` and :math:`tau & 1/sigma**2`.
:Parameters:
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution. | [
"Expected",
"value",
"of",
"the",
"truncated",
"normal",
"distribution",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2467-L2501 | train | 220,266 |
pymc-devs/pymc | pymc/distributions.py | truncated_normal_like | def truncated_normal_like(x, mu, tau, a=None, b=None):
R"""
Truncated normal log-likelihood.
.. math::
f(x \mid \mu, \tau, a, b) = \frac{\phi(\frac{x-\mu}{\sigma})} {\Phi(\frac{b-\mu}{\sigma}) - \Phi(\frac{a-\mu}{\sigma})},
where :math:`\sigma^2=1/\tau`, `\phi` is the standard normal PDF and `\Phi` is the standard normal CDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
x = np.atleast_1d(x)
if a is None:
a = -np.inf
a = np.atleast_1d(a)
if b is None:
b = np.inf
b = np.atleast_1d(b)
mu = np.atleast_1d(mu)
sigma = (1. / np.atleast_1d(np.sqrt(tau)))
if (x < a).any() or (x > b).any():
return -np.inf
else:
n = len(x)
phi = normal_like(x, mu, tau)
lPhia = utils.normcdf((a - mu) / sigma, log=True)
lPhib = utils.normcdf((b - mu) / sigma, log=True)
try:
d = utils.log_difference(lPhib, lPhia)
except ValueError:
return -np.inf
# d = np.log(Phib-Phia)
if len(d) == n:
Phi = d.sum()
else:
Phi = n * d
if np.isnan(Phi) or np.isinf(Phi):
return -np.inf
return phi - Phi | python | def truncated_normal_like(x, mu, tau, a=None, b=None):
R"""
Truncated normal log-likelihood.
.. math::
f(x \mid \mu, \tau, a, b) = \frac{\phi(\frac{x-\mu}{\sigma})} {\Phi(\frac{b-\mu}{\sigma}) - \Phi(\frac{a-\mu}{\sigma})},
where :math:`\sigma^2=1/\tau`, `\phi` is the standard normal PDF and `\Phi` is the standard normal CDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution.
"""
x = np.atleast_1d(x)
if a is None:
a = -np.inf
a = np.atleast_1d(a)
if b is None:
b = np.inf
b = np.atleast_1d(b)
mu = np.atleast_1d(mu)
sigma = (1. / np.atleast_1d(np.sqrt(tau)))
if (x < a).any() or (x > b).any():
return -np.inf
else:
n = len(x)
phi = normal_like(x, mu, tau)
lPhia = utils.normcdf((a - mu) / sigma, log=True)
lPhib = utils.normcdf((b - mu) / sigma, log=True)
try:
d = utils.log_difference(lPhib, lPhia)
except ValueError:
return -np.inf
# d = np.log(Phib-Phia)
if len(d) == n:
Phi = d.sum()
else:
Phi = n * d
if np.isnan(Phi) or np.isinf(Phi):
return -np.inf
return phi - Phi | [
"def",
"truncated_normal_like",
"(",
"x",
",",
"mu",
",",
"tau",
",",
"a",
"=",
"None",
",",
"b",
"=",
"None",
")",
":",
"x",
"=",
"np",
".",
"atleast_1d",
"(",
"x",
")",
"if",
"a",
"is",
"None",
":",
"a",
"=",
"-",
"np",
".",
"inf",
"a",
"... | R"""
Truncated normal log-likelihood.
.. math::
f(x \mid \mu, \tau, a, b) = \frac{\phi(\frac{x-\mu}{\sigma})} {\Phi(\frac{b-\mu}{\sigma}) - \Phi(\frac{a-\mu}{\sigma})},
where :math:`\sigma^2=1/\tau`, `\phi` is the standard normal PDF and `\Phi` is the standard normal CDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution, which corresponds to 1/sigma**2 (tau > 0).
- `a` : Left bound of the distribution.
- `b` : Right bound of the distribution. | [
"R",
"Truncated",
"normal",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2506-L2549 | train | 220,267 |
pymc-devs/pymc | pymc/distributions.py | rskew_normal | def rskew_normal(mu, tau, alpha, size=()):
"""
Skew-normal random variates.
"""
size_ = size or (1,)
len_ = np.prod(size_)
return flib.rskewnorm(
len_, mu, tau, alpha, np.random.normal(size=2 * len_)).reshape(size) | python | def rskew_normal(mu, tau, alpha, size=()):
"""
Skew-normal random variates.
"""
size_ = size or (1,)
len_ = np.prod(size_)
return flib.rskewnorm(
len_, mu, tau, alpha, np.random.normal(size=2 * len_)).reshape(size) | [
"def",
"rskew_normal",
"(",
"mu",
",",
"tau",
",",
"alpha",
",",
"size",
"=",
"(",
")",
")",
":",
"size_",
"=",
"size",
"or",
"(",
"1",
",",
")",
"len_",
"=",
"np",
".",
"prod",
"(",
"size_",
")",
"return",
"flib",
".",
"rskewnorm",
"(",
"len_"... | Skew-normal random variates. | [
"Skew",
"-",
"normal",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2557-L2564 | train | 220,268 |
pymc-devs/pymc | pymc/distributions.py | skew_normal_expval | def skew_normal_expval(mu, tau, alpha):
"""
Expectation of skew-normal random variables.
"""
delta = alpha / np.sqrt(1. + alpha ** 2)
return mu + np.sqrt(2 / pi / tau) * delta | python | def skew_normal_expval(mu, tau, alpha):
"""
Expectation of skew-normal random variables.
"""
delta = alpha / np.sqrt(1. + alpha ** 2)
return mu + np.sqrt(2 / pi / tau) * delta | [
"def",
"skew_normal_expval",
"(",
"mu",
",",
"tau",
",",
"alpha",
")",
":",
"delta",
"=",
"alpha",
"/",
"np",
".",
"sqrt",
"(",
"1.",
"+",
"alpha",
"**",
"2",
")",
"return",
"mu",
"+",
"np",
".",
"sqrt",
"(",
"2",
"/",
"pi",
"/",
"tau",
")",
... | Expectation of skew-normal random variables. | [
"Expectation",
"of",
"skew",
"-",
"normal",
"random",
"variables",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2567-L2572 | train | 220,269 |
pymc-devs/pymc | pymc/distributions.py | skew_normal_like | def skew_normal_like(x, mu, tau, alpha):
R"""
Azzalini's skew-normal log-likelihood
.. math::
f(x \mid \mu, \tau, \alpha) = 2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau)
where :math:\Phi is the normal CDF and :math: \phi is the normal PDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution (> 0).
- `alpha` : Shape parameter of the distribution.
.. note::
See http://azzalini.stat.unipd.it/SN/
"""
return flib.sn_like(x, mu, tau, alpha) | python | def skew_normal_like(x, mu, tau, alpha):
R"""
Azzalini's skew-normal log-likelihood
.. math::
f(x \mid \mu, \tau, \alpha) = 2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau)
where :math:\Phi is the normal CDF and :math: \phi is the normal PDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution (> 0).
- `alpha` : Shape parameter of the distribution.
.. note::
See http://azzalini.stat.unipd.it/SN/
"""
return flib.sn_like(x, mu, tau, alpha) | [
"def",
"skew_normal_like",
"(",
"x",
",",
"mu",
",",
"tau",
",",
"alpha",
")",
":",
"return",
"flib",
".",
"sn_like",
"(",
"x",
",",
"mu",
",",
"tau",
",",
"alpha",
")"
] | R"""
Azzalini's skew-normal log-likelihood
.. math::
f(x \mid \mu, \tau, \alpha) = 2 \Phi((x-\mu)\sqrt{\tau}\alpha) \phi(x,\mu,\tau)
where :math:\Phi is the normal CDF and :math: \phi is the normal PDF.
:Parameters:
- `x` : Input data.
- `mu` : Mean of the distribution.
- `tau` : Precision of the distribution (> 0).
- `alpha` : Shape parameter of the distribution.
.. note::
See http://azzalini.stat.unipd.it/SN/ | [
"R",
"Azzalini",
"s",
"skew",
"-",
"normal",
"log",
"-",
"likelihood"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2575-L2593 | train | 220,270 |
pymc-devs/pymc | pymc/distributions.py | rt | def rt(nu, size=None):
"""
Student's t random variates.
"""
return rnormal(0, 1, size) / np.sqrt(rchi2(nu, size) / nu) | python | def rt(nu, size=None):
"""
Student's t random variates.
"""
return rnormal(0, 1, size) / np.sqrt(rchi2(nu, size) / nu) | [
"def",
"rt",
"(",
"nu",
",",
"size",
"=",
"None",
")",
":",
"return",
"rnormal",
"(",
"0",
",",
"1",
",",
"size",
")",
"/",
"np",
".",
"sqrt",
"(",
"rchi2",
"(",
"nu",
",",
"size",
")",
"/",
"nu",
")"
] | Student's t random variates. | [
"Student",
"s",
"t",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2598-L2602 | train | 220,271 |
pymc-devs/pymc | pymc/distributions.py | t_like | def t_like(x, nu):
R"""
Student's T log-likelihood.
Describes a zero-mean normal variable
whose precision is gamma distributed. Alternatively, describes the
mean of several zero-mean normal random variables divided by their
sample standard deviation.
.. math::
f(x \mid \nu) = \frac{\Gamma(\frac{\nu+1}{2})}{\Gamma(\frac{\nu}{2}) \sqrt{\nu\pi}} \left( 1 + \frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `nu` : Degrees of freedom.
"""
nu = np.asarray(nu)
return flib.t(x, nu) | python | def t_like(x, nu):
R"""
Student's T log-likelihood.
Describes a zero-mean normal variable
whose precision is gamma distributed. Alternatively, describes the
mean of several zero-mean normal random variables divided by their
sample standard deviation.
.. math::
f(x \mid \nu) = \frac{\Gamma(\frac{\nu+1}{2})}{\Gamma(\frac{\nu}{2}) \sqrt{\nu\pi}} \left( 1 + \frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `nu` : Degrees of freedom.
"""
nu = np.asarray(nu)
return flib.t(x, nu) | [
"def",
"t_like",
"(",
"x",
",",
"nu",
")",
":",
"nu",
"=",
"np",
".",
"asarray",
"(",
"nu",
")",
"return",
"flib",
".",
"t",
"(",
"x",
",",
"nu",
")"
] | R"""
Student's T log-likelihood.
Describes a zero-mean normal variable
whose precision is gamma distributed. Alternatively, describes the
mean of several zero-mean normal random variables divided by their
sample standard deviation.
.. math::
f(x \mid \nu) = \frac{\Gamma(\frac{\nu+1}{2})}{\Gamma(\frac{\nu}{2}) \sqrt{\nu\pi}} \left( 1 + \frac{x^2}{\nu} \right)^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `nu` : Degrees of freedom. | [
"R",
"Student",
"s",
"T",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2612-L2630 | train | 220,272 |
pymc-devs/pymc | pymc/distributions.py | rnoncentral_t | def rnoncentral_t(mu, lam, nu, size=None):
"""
Non-central Student's t random variates.
"""
tau = rgamma(nu / 2., nu / (2. * lam), size)
return rnormal(mu, tau) | python | def rnoncentral_t(mu, lam, nu, size=None):
"""
Non-central Student's t random variates.
"""
tau = rgamma(nu / 2., nu / (2. * lam), size)
return rnormal(mu, tau) | [
"def",
"rnoncentral_t",
"(",
"mu",
",",
"lam",
",",
"nu",
",",
"size",
"=",
"None",
")",
":",
"tau",
"=",
"rgamma",
"(",
"nu",
"/",
"2.",
",",
"nu",
"/",
"(",
"2.",
"*",
"lam",
")",
",",
"size",
")",
"return",
"rnormal",
"(",
"mu",
",",
"tau"... | Non-central Student's t random variates. | [
"Non",
"-",
"central",
"Student",
"s",
"t",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2636-L2641 | train | 220,273 |
pymc-devs/pymc | pymc/distributions.py | noncentral_t_like | def noncentral_t_like(x, mu, lam, nu):
R"""
Non-central Student's T log-likelihood.
Describes a normal variable whose precision is gamma distributed.
.. math::
f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu +
1}{2})}{\Gamma(\frac{\nu}{2})}
\left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
\left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `mu` : Location parameter.
- `lambda` : Scale parameter.
- `nu` : Degrees of freedom.
"""
mu = np.asarray(mu)
lam = np.asarray(lam)
nu = np.asarray(nu)
return flib.nct(x, mu, lam, nu) | python | def noncentral_t_like(x, mu, lam, nu):
R"""
Non-central Student's T log-likelihood.
Describes a normal variable whose precision is gamma distributed.
.. math::
f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu +
1}{2})}{\Gamma(\frac{\nu}{2})}
\left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
\left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `mu` : Location parameter.
- `lambda` : Scale parameter.
- `nu` : Degrees of freedom.
"""
mu = np.asarray(mu)
lam = np.asarray(lam)
nu = np.asarray(nu)
return flib.nct(x, mu, lam, nu) | [
"def",
"noncentral_t_like",
"(",
"x",
",",
"mu",
",",
"lam",
",",
"nu",
")",
":",
"mu",
"=",
"np",
".",
"asarray",
"(",
"mu",
")",
"lam",
"=",
"np",
".",
"asarray",
"(",
"lam",
")",
"nu",
"=",
"np",
".",
"asarray",
"(",
"nu",
")",
"return",
"... | R"""
Non-central Student's T log-likelihood.
Describes a normal variable whose precision is gamma distributed.
.. math::
f(x|\mu,\lambda,\nu) = \frac{\Gamma(\frac{\nu +
1}{2})}{\Gamma(\frac{\nu}{2})}
\left(\frac{\lambda}{\pi\nu}\right)^{\frac{1}{2}}
\left[1+\frac{\lambda(x-\mu)^2}{\nu}\right]^{-\frac{\nu+1}{2}}
:Parameters:
- `x` : Input data.
- `mu` : Location parameter.
- `lambda` : Scale parameter.
- `nu` : Degrees of freedom. | [
"R",
"Non",
"-",
"central",
"Student",
"s",
"T",
"log",
"-",
"likelihood",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2655-L2677 | train | 220,274 |
pymc-devs/pymc | pymc/distributions.py | rdiscrete_uniform | def rdiscrete_uniform(lower, upper, size=None):
"""
Random discrete_uniform variates.
"""
return np.random.randint(lower, upper + 1, size) | python | def rdiscrete_uniform(lower, upper, size=None):
"""
Random discrete_uniform variates.
"""
return np.random.randint(lower, upper + 1, size) | [
"def",
"rdiscrete_uniform",
"(",
"lower",
",",
"upper",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"randint",
"(",
"lower",
",",
"upper",
"+",
"1",
",",
"size",
")"
] | Random discrete_uniform variates. | [
"Random",
"discrete_uniform",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2691-L2695 | train | 220,275 |
pymc-devs/pymc | pymc/distributions.py | runiform | def runiform(lower, upper, size=None):
"""
Random uniform variates.
"""
return np.random.uniform(lower, upper, size) | python | def runiform(lower, upper, size=None):
"""
Random uniform variates.
"""
return np.random.uniform(lower, upper, size) | [
"def",
"runiform",
"(",
"lower",
",",
"upper",
",",
"size",
"=",
"None",
")",
":",
"return",
"np",
".",
"random",
".",
"uniform",
"(",
"lower",
",",
"upper",
",",
"size",
")"
] | Random uniform variates. | [
"Random",
"uniform",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2723-L2727 | train | 220,276 |
pymc-devs/pymc | pymc/distributions.py | rweibull | def rweibull(alpha, beta, size=None):
"""
Weibull random variates.
"""
tmp = -np.log(runiform(0, 1, size))
return beta * (tmp ** (1. / alpha)) | python | def rweibull(alpha, beta, size=None):
"""
Weibull random variates.
"""
tmp = -np.log(runiform(0, 1, size))
return beta * (tmp ** (1. / alpha)) | [
"def",
"rweibull",
"(",
"alpha",
",",
"beta",
",",
"size",
"=",
"None",
")",
":",
"tmp",
"=",
"-",
"np",
".",
"log",
"(",
"runiform",
"(",
"0",
",",
"1",
",",
"size",
")",
")",
"return",
"beta",
"*",
"(",
"tmp",
"**",
"(",
"1.",
"/",
"alpha",... | Weibull random variates. | [
"Weibull",
"random",
"variates",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2761-L2766 | train | 220,277 |
pymc-devs/pymc | pymc/distributions.py | rwishart_cov | def rwishart_cov(n, C):
"""
Return a Wishart random matrix.
:Parameters:
n : int
Degrees of freedom, > 0.
C : matrix
Symmetric and positive definite
"""
# return rwishart(n, np.linalg.inv(C))
p = np.shape(C)[0]
# Need cholesky decomposition of precision matrix C^-1?
sig = np.linalg.cholesky(C)
if n <= (p-1):
raise ValueError('Wishart parameter n must be greater '
'than size of matrix.')
norms = np.random.normal(size=(p * (p - 1)) // 2)
chi_sqs = np.sqrt(np.random.chisquare(df=np.arange(n, n - p, -1)))
A = flib.expand_triangular(chi_sqs, norms)
flib.dtrmm_wrap(sig, A, side='L', uplo='L', transa='N', alpha=1.)
w = np.asmatrix(np.dot(A, A.T))
flib.symmetrize(w)
return w | python | def rwishart_cov(n, C):
"""
Return a Wishart random matrix.
:Parameters:
n : int
Degrees of freedom, > 0.
C : matrix
Symmetric and positive definite
"""
# return rwishart(n, np.linalg.inv(C))
p = np.shape(C)[0]
# Need cholesky decomposition of precision matrix C^-1?
sig = np.linalg.cholesky(C)
if n <= (p-1):
raise ValueError('Wishart parameter n must be greater '
'than size of matrix.')
norms = np.random.normal(size=(p * (p - 1)) // 2)
chi_sqs = np.sqrt(np.random.chisquare(df=np.arange(n, n - p, -1)))
A = flib.expand_triangular(chi_sqs, norms)
flib.dtrmm_wrap(sig, A, side='L', uplo='L', transa='N', alpha=1.)
w = np.asmatrix(np.dot(A, A.T))
flib.symmetrize(w)
return w | [
"def",
"rwishart_cov",
"(",
"n",
",",
"C",
")",
":",
"# return rwishart(n, np.linalg.inv(C))",
"p",
"=",
"np",
".",
"shape",
"(",
"C",
")",
"[",
"0",
"]",
"# Need cholesky decomposition of precision matrix C^-1?",
"sig",
"=",
"np",
".",
"linalg",
".",
"cholesky"... | Return a Wishart random matrix.
:Parameters:
n : int
Degrees of freedom, > 0.
C : matrix
Symmetric and positive definite | [
"Return",
"a",
"Wishart",
"random",
"matrix",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2866-L2893 | train | 220,278 |
pymc-devs/pymc | pymc/distributions.py | valuewrapper | def valuewrapper(f, arguments=None):
"""Return a likelihood accepting value instead of x as a keyword argument.
This is specifically intended for the instantiator above.
"""
def wrapper(**kwds):
value = kwds.pop('value')
return f(value, **kwds)
if arguments is None:
wrapper.__dict__.update(f.__dict__)
else:
wrapper.__dict__.update(arguments)
return wrapper | python | def valuewrapper(f, arguments=None):
"""Return a likelihood accepting value instead of x as a keyword argument.
This is specifically intended for the instantiator above.
"""
def wrapper(**kwds):
value = kwds.pop('value')
return f(value, **kwds)
if arguments is None:
wrapper.__dict__.update(f.__dict__)
else:
wrapper.__dict__.update(arguments)
return wrapper | [
"def",
"valuewrapper",
"(",
"f",
",",
"arguments",
"=",
"None",
")",
":",
"def",
"wrapper",
"(",
"*",
"*",
"kwds",
")",
":",
"value",
"=",
"kwds",
".",
"pop",
"(",
"'value'",
")",
"return",
"f",
"(",
"value",
",",
"*",
"*",
"kwds",
")",
"if",
"... | Return a likelihood accepting value instead of x as a keyword argument.
This is specifically intended for the instantiator above. | [
"Return",
"a",
"likelihood",
"accepting",
"value",
"instead",
"of",
"x",
"as",
"a",
"keyword",
"argument",
".",
"This",
"is",
"specifically",
"intended",
"for",
"the",
"instantiator",
"above",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2968-L2981 | train | 220,279 |
pymc-devs/pymc | pymc/distributions.py | local_decorated_likelihoods | def local_decorated_likelihoods(obj):
"""
New interface likelihoods
"""
for name, like in six.iteritems(likelihoods):
obj[name + '_like'] = gofwrapper(like, snapshot) | python | def local_decorated_likelihoods(obj):
"""
New interface likelihoods
"""
for name, like in six.iteritems(likelihoods):
obj[name + '_like'] = gofwrapper(like, snapshot) | [
"def",
"local_decorated_likelihoods",
"(",
"obj",
")",
":",
"for",
"name",
",",
"like",
"in",
"six",
".",
"iteritems",
"(",
"likelihoods",
")",
":",
"obj",
"[",
"name",
"+",
"'_like'",
"]",
"=",
"gofwrapper",
"(",
"like",
",",
"snapshot",
")"
] | New interface likelihoods | [
"New",
"interface",
"likelihoods"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L2994-L3000 | train | 220,280 |
pymc-devs/pymc | pymc/distributions.py | _inject_dist | def _inject_dist(distname, kwargs={}, ns=locals()):
"""
Reusable function to inject Stochastic subclasses into module
namespace
"""
dist_logp, dist_random, grad_logp = name_to_funcs(distname, ns)
classname = capitalize(distname)
ns[classname] = stochastic_from_dist(distname, dist_logp,
dist_random,
grad_logp, **kwargs) | python | def _inject_dist(distname, kwargs={}, ns=locals()):
"""
Reusable function to inject Stochastic subclasses into module
namespace
"""
dist_logp, dist_random, grad_logp = name_to_funcs(distname, ns)
classname = capitalize(distname)
ns[classname] = stochastic_from_dist(distname, dist_logp,
dist_random,
grad_logp, **kwargs) | [
"def",
"_inject_dist",
"(",
"distname",
",",
"kwargs",
"=",
"{",
"}",
",",
"ns",
"=",
"locals",
"(",
")",
")",
":",
"dist_logp",
",",
"dist_random",
",",
"grad_logp",
"=",
"name_to_funcs",
"(",
"distname",
",",
"ns",
")",
"classname",
"=",
"capitalize",
... | Reusable function to inject Stochastic subclasses into module
namespace | [
"Reusable",
"function",
"to",
"inject",
"Stochastic",
"subclasses",
"into",
"module",
"namespace"
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L3009-L3019 | train | 220,281 |
def mod_categorical_expval(p):
    """
    Expected value of categorical distribution with parent p of length k-1.
    An implicit k'th category is assumed to exist with associated
    probability 1-sum(p).
    """
    # Append the implicit k'th probability, then take the
    # probability-weighted sum of the category indices.
    full_p = extend_dirichlet(p)
    weighted_indices = [index * prob for index, prob in enumerate(full_p)]
    return np.sum(weighted_indices)
"""
Expected value of categorical distribution with parent p of length k-1.
An implicit k'th category is assumed to exist with associated
probability 1-sum(p).
"""
p = extend_dirichlet(p)
return np.sum([p * i for i, p in enumerate(p)]) | [
"def",
"mod_categorical_expval",
"(",
"p",
")",
":",
"p",
"=",
"extend_dirichlet",
"(",
"p",
")",
"return",
"np",
".",
"sum",
"(",
"[",
"p",
"*",
"i",
"for",
"i",
",",
"p",
"in",
"enumerate",
"(",
"p",
")",
"]",
")"
] | Expected value of categorical distribution with parent p of length k-1.
An implicit k'th category is assumed to exist with associated
probability 1-sum(p). | [
"Expected",
"value",
"of",
"categorical",
"distribution",
"with",
"parent",
"p",
"of",
"length",
"k",
"-",
"1",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L3104-L3112 | train | 220,282 |
def Impute(name, dist_class, imputable, **parents):
    """
    This function accomodates missing elements for the data of simple
    Stochastic distribution subclasses. The masked_values argument is an
    object of type numpy.ma.MaskedArray, which contains the raw data and
    a boolean mask indicating missing values. The resulting list contains
    a list of stochastics of type dist_class, with the extant values as data
    stochastics and the missing values as variable stochastics.

    :Arguments:
      - name : string
        Name of the data stochastic
      - dist_class : Stochastic
        Stochastic subclass such as Poisson, Normal, etc.
      - imputable : numpy.ma.core.MaskedArray or iterable
        A masked array with missing elements (where mask=True, value
        is assumed missing), or any iterable that contains None
        elements that will be imputed.
      - parents (optional): dict
        Arbitrary keyword arguments.

    :Returns:
      ndarray (dtype=object) of per-element stochastics, reshaped to the
      shape of ``imputable``.
    """
    # Original shape, used both for broadcasting parents and for the
    # final reshape of the flat list of stochastics.
    dims = np.shape(imputable)
    masked_values = np.ravel(imputable)
    if not isinstance(masked_values, np.ma.core.MaskedArray):
        # Generate mask: an entry is "missing" if it is None or NaN.
        # NOTE(review): np.isnan assumes numeric entries; the `v is None`
        # test short-circuits first, but non-numeric non-None values would
        # raise here — confirm callers only pass numeric iterables.
        mask = [v is None or np.isnan(v) for v in masked_values]
        # Generate masked array
        masked_values = np.ma.masked_array(masked_values, mask)
    # Initialise list of one stochastic per element.
    vars = []
    # xrange is presumably a py2/py3 compatibility shim imported at module
    # level.
    for i in xrange(len(masked_values)):
        # Name of element, e.g. 'x[3]'.
        this_name = name + '[%i]' % i
        # Dictionary to hold parents
        these_parents = {}
        # Parse parents: scalar parents are shared; array-shaped parents are
        # sliced per element via Lambda deterministics.
        for key, parent in six.iteritems(parents):
            try:
                # If parent is a PyMCObject, compare the shape of its value.
                shape = np.shape(parent.value)
            except AttributeError:
                shape = np.shape(parent)
            if shape == dims:
                # Parent matches the original (possibly multi-dim) shape:
                # flatten and index. Defaults (p=..., i=i) bind the current
                # parent and index at lambda creation time — the standard
                # fix for late-binding closures in a loop.
                these_parents[key] = Lambda(key + '[%i]' % i,
                                            lambda p=np.ravel(parent),
                                            i=i: p[i])
            elif shape == np.shape(masked_values):
                # Parent is already flat with one entry per element.
                these_parents[key] = Lambda(key + '[%i]' % i, lambda p=parent,
                                            i=i: p[i])
            else:
                # Scalar or non-matching shape: pass through unchanged.
                these_parents[key] = parent
        if masked_values.mask[i]:
            # Missing values become free (unobserved) stochastics to be
            # imputed by the sampler.
            vars.append(dist_class(this_name, **these_parents))
        else:
            # Observed values become data stochastics fixed at their value.
            vars.append(dist_class(this_name, value=masked_values[i],
                                   observed=True, **these_parents))
    return np.reshape(vars, dims)
"""
This function accomodates missing elements for the data of simple
Stochastic distribution subclasses. The masked_values argument is an
object of type numpy.ma.MaskedArray, which contains the raw data and
a boolean mask indicating missing values. The resulting list contains
a list of stochastics of type dist_class, with the extant values as data
stochastics and the missing values as variable stochastics.
:Arguments:
- name : string
Name of the data stochastic
- dist_class : Stochastic
Stochastic subclass such as Poisson, Normal, etc.
- imputable : numpy.ma.core.MaskedArray or iterable
A masked array with missing elements (where mask=True, value
is assumed missing), or any iterable that contains None
elements that will be imputed.
- parents (optional): dict
Arbitrary keyword arguments.
"""
dims = np.shape(imputable)
masked_values = np.ravel(imputable)
if not isinstance(masked_values, np.ma.core.MaskedArray):
# Generate mask
mask = [v is None or np.isnan(v) for v in masked_values]
# Generate masked array
masked_values = np.ma.masked_array(masked_values, mask)
# Initialise list
vars = []
for i in xrange(len(masked_values)):
# Name of element
this_name = name + '[%i]' % i
# Dictionary to hold parents
these_parents = {}
# Parse parents
for key, parent in six.iteritems(parents):
try:
# If parent is a PyMCObject
shape = np.shape(parent.value)
except AttributeError:
shape = np.shape(parent)
if shape == dims:
these_parents[key] = Lambda(key + '[%i]' % i,
lambda p=np.ravel(parent),
i=i: p[i])
elif shape == np.shape(masked_values):
these_parents[key] = Lambda(key + '[%i]' % i, lambda p=parent,
i=i: p[i])
else:
these_parents[key] = parent
if masked_values.mask[i]:
# Missing values
vars.append(dist_class(this_name, **these_parents))
else:
# Observed values
vars.append(dist_class(this_name, value=masked_values[i],
observed=True, **these_parents))
return np.reshape(vars, dims) | [
"def",
"Impute",
"(",
"name",
",",
"dist_class",
",",
"imputable",
",",
"*",
"*",
"parents",
")",
":",
"dims",
"=",
"np",
".",
"shape",
"(",
"imputable",
")",
"masked_values",
"=",
"np",
".",
"ravel",
"(",
"imputable",
")",
"if",
"not",
"isinstance",
... | This function accomodates missing elements for the data of simple
Stochastic distribution subclasses. The masked_values argument is an
object of type numpy.ma.MaskedArray, which contains the raw data and
a boolean mask indicating missing values. The resulting list contains
a list of stochastics of type dist_class, with the extant values as data
stochastics and the missing values as variable stochastics.
:Arguments:
- name : string
Name of the data stochastic
- dist_class : Stochastic
Stochastic subclass such as Poisson, Normal, etc.
- imputable : numpy.ma.core.MaskedArray or iterable
A masked array with missing elements (where mask=True, value
is assumed missing), or any iterable that contains None
elements that will be imputed.
- parents (optional): dict
Arbitrary keyword arguments. | [
"This",
"function",
"accomodates",
"missing",
"elements",
"for",
"the",
"data",
"of",
"simple",
"Stochastic",
"distribution",
"subclasses",
".",
"The",
"masked_values",
"argument",
"is",
"an",
"object",
"of",
"type",
"numpy",
".",
"ma",
".",
"MaskedArray",
"whic... | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/distributions.py#L3269-L3335 | train | 220,283 |
def logp_gradient_of_set(variable_set, calculation_set=None):
    """
    Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
    Calculation of the log posterior is restricted to the variables in calculation_set.

    Returns a dictionary of the gradients.
    """
    return dict(
        (var, logp_gradient(var, calculation_set))
        for var in variable_set)
return logp_gradients | python | def logp_gradient_of_set(variable_set, calculation_set=None):
"""
Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients.
"""
logp_gradients = {}
for variable in variable_set:
logp_gradients[variable] = logp_gradient(variable, calculation_set)
return logp_gradients | [
"def",
"logp_gradient_of_set",
"(",
"variable_set",
",",
"calculation_set",
"=",
"None",
")",
":",
"logp_gradients",
"=",
"{",
"}",
"for",
"variable",
"in",
"variable_set",
":",
"logp_gradients",
"[",
"variable",
"]",
"=",
"logp_gradient",
"(",
"variable",
",",
... | Calculates the gradient of the joint log posterior with respect to all the variables in variable_set.
Calculation of the log posterior is restricted to the variables in calculation_set.
Returns a dictionary of the gradients. | [
"Calculates",
"the",
"gradient",
"of",
"the",
"joint",
"log",
"posterior",
"with",
"respect",
"to",
"all",
"the",
"variables",
"in",
"variable_set",
".",
"Calculation",
"of",
"the",
"log",
"posterior",
"is",
"restricted",
"to",
"the",
"variables",
"in",
"calcu... | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Node.py#L42-L54 | train | 220,284 |
def logp_gradient(variable, calculation_set=None):
    """
    Calculates the gradient of the joint log posterior with respect to variable.
    Calculation of the log posterior is restricted to the variables in calculation_set.
    """
    # Gradient of the variable's own log-probability term...
    own_term = variable.logp_partial_gradient(variable, calculation_set)
    # ...plus the contribution of every child's log-probability term.
    child_terms = sum(
        child.logp_partial_gradient(variable, calculation_set)
        for child in variable.children)
    return own_term + child_terms
"""
Calculates the gradient of the joint log posterior with respect to variable.
Calculation of the log posterior is restricted to the variables in calculation_set.
"""
return variable.logp_partial_gradient(variable, calculation_set) + sum(
[child.logp_partial_gradient(variable, calculation_set) for child in variable.children]) | [
"def",
"logp_gradient",
"(",
"variable",
",",
"calculation_set",
"=",
"None",
")",
":",
"return",
"variable",
".",
"logp_partial_gradient",
"(",
"variable",
",",
"calculation_set",
")",
"+",
"sum",
"(",
"[",
"child",
".",
"logp_partial_gradient",
"(",
"variable"... | Calculates the gradient of the joint log posterior with respect to variable.
Calculation of the log posterior is restricted to the variables in calculation_set. | [
"Calculates",
"the",
"gradient",
"of",
"the",
"joint",
"log",
"posterior",
"with",
"respect",
"to",
"variable",
".",
"Calculation",
"of",
"the",
"log",
"posterior",
"is",
"restricted",
"to",
"the",
"variables",
"in",
"calculation_set",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Node.py#L57-L63 | train | 220,285 |
def summary(self, alpha=0.05, start=0, batches=100, chain=None, roundto=3):
    """
    Generate a pretty-printed summary of the node.

    Prints the node's name followed by a fixed-width table of posterior
    statistics (mean, SD, MC error, credible interval) and a quantile
    table, one row per element of the node's value.

    :Parameters:
    alpha : float
      The alpha level for generating posterior intervals. Defaults to
      0.05.
    start : int
      The starting index from which to summarize (each) chain. Defaults
      to zero.
    batches : int
      Batch size for calculating standard deviation for non-independent
      samples. Defaults to 100.
    chain : int
      The index for which chain to summarize. Defaults to None (all
      chains).
    roundto : int
      The number of digits to round posterior statistics.
    """
    # Calculate statistics for Node
    statdict = self.stats(
        alpha=alpha,
        start=start,
        batches=batches,
        chain=chain)
    size = np.size(statdict['mean'])
    print_('\n%s:' % self.__name__)
    print_(' ')
    # Initialize buffer of output lines; printed joined at the end.
    buffer = []
    # Index to interval label: the stats dict key ending in 'interval'
    # (its full name encodes the alpha level, e.g. '95% HPD interval').
    iindex = [key.split()[-1] for key in statdict.keys()].index('interval')
    interval = list(statdict.keys())[iindex]
    # Print basic stats
    buffer += [
        'Mean SD MC Error %s' %
        interval]
    buffer += ['-' * len(buffer[-1])]
    indices = range(size)
    if len(indices) == 1:
        # Scalar node: a single row, indexed with None so that ravel()[None]
        # below keeps the full (1-element) array rather than a 0-d scalar.
        indices = [None]
    # Round element i of x and render it as a string (column cell).
    _format_str = lambda x, i=None, roundto=2: str(np.round(x.ravel()[i].squeeze(), roundto))
    for index in indices:
        # Extract statistics and convert to string
        m = _format_str(statdict['mean'], index, roundto)
        sd = _format_str(statdict['standard deviation'], index, roundto)
        mce = _format_str(statdict['mc error'], index, roundto)
        hpd = str(statdict[interval].reshape(
            (2, size))[:,index].squeeze().round(roundto))
        # Build up string buffer of values, padding each column to a
        # 17-character width and right-aligning the interval column.
        valstr = m
        valstr += ' ' * (17 - len(m)) + sd
        valstr += ' ' * (17 - len(sd)) + mce
        valstr += ' ' * (len(buffer[-1]) - len(valstr) - len(hpd)) + hpd
        buffer += [valstr]
    buffer += [''] * 2
    # Print quantiles
    buffer += ['Posterior quantiles:', '']
    buffer += [
        '2.5 25 50 75 97.5']
    buffer += [
        ' |---------------|===============|===============|---------------|']
    for index in indices:
        quantile_str = ''
        for i, q in enumerate((2.5, 25, 50, 75, 97.5)):
            qstr = _format_str(statdict['quantiles'][q], index, roundto)
            quantile_str += qstr + ' ' * (17 - i - len(qstr))
        buffer += [quantile_str.strip()]
    buffer += ['']
    print_('\t' + '\n\t'.join(buffer))
"""
Generate a pretty-printed summary of the node.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics.
"""
# Calculate statistics for Node
statdict = self.stats(
alpha=alpha,
start=start,
batches=batches,
chain=chain)
size = np.size(statdict['mean'])
print_('\n%s:' % self.__name__)
print_(' ')
# Initialize buffer
buffer = []
# Index to interval label
iindex = [key.split()[-1] for key in statdict.keys()].index('interval')
interval = list(statdict.keys())[iindex]
# Print basic stats
buffer += [
'Mean SD MC Error %s' %
interval]
buffer += ['-' * len(buffer[-1])]
indices = range(size)
if len(indices) == 1:
indices = [None]
_format_str = lambda x, i=None, roundto=2: str(np.round(x.ravel()[i].squeeze(), roundto))
for index in indices:
# Extract statistics and convert to string
m = _format_str(statdict['mean'], index, roundto)
sd = _format_str(statdict['standard deviation'], index, roundto)
mce = _format_str(statdict['mc error'], index, roundto)
hpd = str(statdict[interval].reshape(
(2, size))[:,index].squeeze().round(roundto))
# Build up string buffer of values
valstr = m
valstr += ' ' * (17 - len(m)) + sd
valstr += ' ' * (17 - len(sd)) + mce
valstr += ' ' * (len(buffer[-1]) - len(valstr) - len(hpd)) + hpd
buffer += [valstr]
buffer += [''] * 2
# Print quantiles
buffer += ['Posterior quantiles:', '']
buffer += [
'2.5 25 50 75 97.5']
buffer += [
' |---------------|===============|===============|---------------|']
for index in indices:
quantile_str = ''
for i, q in enumerate((2.5, 25, 50, 75, 97.5)):
qstr = _format_str(statdict['quantiles'][q], index, roundto)
quantile_str += qstr + ' ' * (17 - i - len(qstr))
buffer += [quantile_str.strip()]
buffer += ['']
print_('\t' + '\n\t'.join(buffer)) | [
"def",
"summary",
"(",
"self",
",",
"alpha",
"=",
"0.05",
",",
"start",
"=",
"0",
",",
"batches",
"=",
"100",
",",
"chain",
"=",
"None",
",",
"roundto",
"=",
"3",
")",
":",
"# Calculate statistics for Node",
"statdict",
"=",
"self",
".",
"stats",
"(",
... | Generate a pretty-printed summary of the node.
:Parameters:
alpha : float
The alpha level for generating posterior intervals. Defaults to
0.05.
start : int
The starting index from which to summarize (each) chain. Defaults
to zero.
batches : int
Batch size for calculating standard deviation for non-independent
samples. Defaults to 100.
chain : int
The index for which chain to summarize. Defaults to None (all
chains).
roundto : int
The number of digits to round posterior statistics. | [
"Generate",
"a",
"pretty",
"-",
"printed",
"summary",
"of",
"the",
"node",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Node.py#L267-L358 | train | 220,286 |
def file_items(container, iterable):
    """
    Files away objects into the appropriate attributes of the container.

    Walks ``iterable`` (the container's underlying collection), sorting each
    element into the container's bookkeeping sets (variables, stochastics,
    deterministics, potentials, observed_stochastics) and recursively
    wrapping nested iterables in Containers. Mutates ``container`` in place;
    for tuple containers it instead returns a rebuilt tuple (tuples are
    immutable, so replacement cannot happen in place).
    """
    # container._value = copy(iterable)
    container.nodes = set()
    container.variables = set()
    container.deterministics = set()
    container.stochastics = set()
    container.potentials = set()
    container.observed_stochastics = set()
    # containers needs to be a list to hold unhashable items.
    container.containers = []
    # Positional counter; only advanced for non-dict containers.
    i = -1
    for item in iterable:
        # If this is a dictionary, switch from key to item.
        if isinstance(iterable, (dict, dict_proxy_type)):
            key = item
            item = iterable[key]
        # Item counter
        else:
            i += 1
        # If the item isn't iterable, file it away.
        if isinstance(item, Variable):
            container.variables.add(item)
            if isinstance(item, StochasticBase):
                # A stochastic with a mask is partially observed; it is
                # recorded as observed, and (if not fully observed) also as
                # a free stochastic.
                if item.observed or not getattr(item, 'mask', None) is None:
                    container.observed_stochastics.add(item)
                if not item.observed:
                    container.stochastics.add(item)
            elif isinstance(item, DeterministicBase):
                container.deterministics.add(item)
            elif isinstance(item, PotentialBase):
                container.potentials.add(item)
        elif isinstance(item, ContainerBase):
            # Already a Container: absorb its bookkeeping sets.
            container.assimilate(item)
            container.containers.append(item)
        # Wrap internal containers
        elif hasattr(item, '__iter__'):
            # If this is a non-object-valued ndarray, don't container-ize it.
            if isinstance(item, ndarray):
                if item.dtype != dtype('object'):
                    continue
            # If the item is iterable, wrap it in a container. Replace the item
            # with the wrapped version.
            # NOTE(review): the bare except silently skips anything Container
            # cannot wrap — this looks like deliberate best-effort handling,
            # but it will also hide genuine errors raised during wrapping.
            try:
                new_container = Container(item)
            except:
                continue
            # Update all of container's variables, potentials, etc. with the new wrapped
            # iterable's. This process recursively unpacks nested iterables.
            container.assimilate(new_container)
            if isinstance(container, dict):
                container.replace(key, new_container)
            elif isinstance(container, tuple):
                # Tuples are immutable: return a rebuilt tuple immediately.
                # Any items after position i are left unprocessed here;
                # presumably the caller re-invokes file_items on the result.
                return container[:i] + (new_container,) + container[i + 1:]
            else:
                container.replace(item, new_container, i)
    container.nodes = container.potentials | container.variables
    # 'Freeze' markov blanket, moral neighbors, coparents of all constituent stochastics
    # for future use
    for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
        setattr(container, attr, {})
    for s in container.stochastics:
        for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
            getattr(container, attr)[s] = getattr(s, attr)
getattr(container, attr)[s] = getattr(s, attr) | python | def file_items(container, iterable):
"""
Files away objects into the appropriate attributes of the container.
"""
# container._value = copy(iterable)
container.nodes = set()
container.variables = set()
container.deterministics = set()
container.stochastics = set()
container.potentials = set()
container.observed_stochastics = set()
# containers needs to be a list to hold unhashable items.
container.containers = []
i = -1
for item in iterable:
# If this is a dictionary, switch from key to item.
if isinstance(iterable, (dict, dict_proxy_type)):
key = item
item = iterable[key]
# Item counter
else:
i += 1
# If the item isn't iterable, file it away.
if isinstance(item, Variable):
container.variables.add(item)
if isinstance(item, StochasticBase):
if item.observed or not getattr(item, 'mask', None) is None:
container.observed_stochastics.add(item)
if not item.observed:
container.stochastics.add(item)
elif isinstance(item, DeterministicBase):
container.deterministics.add(item)
elif isinstance(item, PotentialBase):
container.potentials.add(item)
elif isinstance(item, ContainerBase):
container.assimilate(item)
container.containers.append(item)
# Wrap internal containers
elif hasattr(item, '__iter__'):
# If this is a non-object-valued ndarray, don't container-ize it.
if isinstance(item, ndarray):
if item.dtype != dtype('object'):
continue
# If the item is iterable, wrap it in a container. Replace the item
# with the wrapped version.
try:
new_container = Container(item)
except:
continue
# Update all of container's variables, potentials, etc. with the new wrapped
# iterable's. This process recursively unpacks nested iterables.
container.assimilate(new_container)
if isinstance(container, dict):
container.replace(key, new_container)
elif isinstance(container, tuple):
return container[:i] + (new_container,) + container[i + 1:]
else:
container.replace(item, new_container, i)
container.nodes = container.potentials | container.variables
# 'Freeze' markov blanket, moral neighbors, coparents of all constituent stochastics
# for future use
for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
setattr(container, attr, {})
for s in container.stochastics:
for attr in ['moral_neighbors', 'markov_blanket', 'coparents']:
getattr(container, attr)[s] = getattr(s, attr) | [
"def",
"file_items",
"(",
"container",
",",
"iterable",
")",
":",
"# container._value = copy(iterable)",
"container",
".",
"nodes",
"=",
"set",
"(",
")",
"container",
".",
"variables",
"=",
"set",
"(",
")",
"container",
".",
"deterministics",
"=",
"set",
"(",
... | Files away objects into the appropriate attributes of the container. | [
"Files",
"away",
"objects",
"into",
"the",
"appropriate",
"attributes",
"of",
"the",
"container",
"."
] | c6e530210bff4c0d7189b35b2c971bc53f93f7cd | https://github.com/pymc-devs/pymc/blob/c6e530210bff4c0d7189b35b2c971bc53f93f7cd/pymc/Container.py#L168-L248 | train | 220,287 |
def gas_price_strategy_middleware(make_request, web3):
    """
    Includes a gas price using the gas price strategy
    """
    def middleware(method, params):
        # Only eth_sendTransaction requests are of interest; everything
        # else passes straight through.
        if method != 'eth_sendTransaction':
            return make_request(method, params)
        transaction = params[0]
        # Respect an explicitly supplied gasPrice; otherwise consult the
        # configured gas price strategy.
        if 'gasPrice' not in transaction:
            generated_gas_price = web3.eth.generateGasPrice(transaction)
            if generated_gas_price is not None:
                transaction = assoc(transaction, 'gasPrice', generated_gas_price)
        return make_request(method, [transaction])
    return middleware
"""
Includes a gas price using the gas price strategy
"""
def middleware(method, params):
if method == 'eth_sendTransaction':
transaction = params[0]
if 'gasPrice' not in transaction:
generated_gas_price = web3.eth.generateGasPrice(transaction)
if generated_gas_price is not None:
transaction = assoc(transaction, 'gasPrice', generated_gas_price)
return make_request(method, [transaction])
return make_request(method, params)
return middleware | [
"def",
"gas_price_strategy_middleware",
"(",
"make_request",
",",
"web3",
")",
":",
"def",
"middleware",
"(",
"method",
",",
"params",
")",
":",
"if",
"method",
"==",
"'eth_sendTransaction'",
":",
"transaction",
"=",
"params",
"[",
"0",
"]",
"if",
"'gasPrice'"... | Includes a gas price using the gas price strategy | [
"Includes",
"a",
"gas",
"price",
"using",
"the",
"gas",
"price",
"strategy"
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/middleware/gas_price_strategy.py#L6-L19 | train | 220,288 |
def fill_transaction_defaults(web3, transaction):
    """
    if web3 is None, fill as much as possible while offline
    """
    defaults = {}
    for key, default_getter in TRANSACTION_DEFAULTS.items():
        # Never override a field the caller supplied explicitly.
        if key in transaction:
            continue
        # Static defaults can always be applied, even offline.
        if not callable(default_getter):
            defaults[key] = default_getter
            continue
        # Dynamic defaults need a live web3 connection to compute.
        if web3 is None:
            raise ValueError("You must specify %s in the transaction" % key)
        defaults[key] = default_getter(web3, transaction)
    return merge(defaults, transaction)
return merge(defaults, transaction) | python | def fill_transaction_defaults(web3, transaction):
"""
if web3 is None, fill as much as possible while offline
"""
defaults = {}
for key, default_getter in TRANSACTION_DEFAULTS.items():
if key not in transaction:
if callable(default_getter):
if web3 is not None:
default_val = default_getter(web3, transaction)
else:
raise ValueError("You must specify %s in the transaction" % key)
else:
default_val = default_getter
defaults[key] = default_val
return merge(defaults, transaction) | [
"def",
"fill_transaction_defaults",
"(",
"web3",
",",
"transaction",
")",
":",
"defaults",
"=",
"{",
"}",
"for",
"key",
",",
"default_getter",
"in",
"TRANSACTION_DEFAULTS",
".",
"items",
"(",
")",
":",
"if",
"key",
"not",
"in",
"transaction",
":",
"if",
"c... | if web3 is None, fill as much as possible while offline | [
"if",
"web3",
"is",
"None",
"fill",
"as",
"much",
"as",
"possible",
"while",
"offline"
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/transactions.py#L49-L64 | train | 220,289 |
def _compute_probabilities(miner_data, wait_blocks, sample_size):
    """
    Computes the probabilities that a txn will be accepted at each of the gas
    prices accepted by the miners.

    Yields one ``Probability(gas_price, probability_accepted)`` per miner,
    in descending order of gas price.
    """
    miner_data_by_price = tuple(sorted(
        miner_data,
        key=operator.attrgetter('low_percentile_gas_price'),
        reverse=True,
    ))
    # The original computed sum(m.num_blocks for m in ...[idx:]) inside the
    # loop, which is O(n^2). A running total over integer block counts gives
    # identical values in O(n).
    total_blocks = sum(m.num_blocks for m in miner_data_by_price)
    blocks_at_higher_prices = 0
    for miner in miner_data_by_price:
        # Blocks mined at this miner's price or higher == suffix sum.
        num_blocks_accepting_price = total_blocks - blocks_at_higher_prices
        # Probability a single block is mined by someone rejecting this price.
        inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size
        # Probability at least one of `wait_blocks` blocks accepts it.
        probability_accepted = 1 - inv_prob_per_block ** wait_blocks
        yield Probability(miner.low_percentile_gas_price, probability_accepted)
        blocks_at_higher_prices += miner.num_blocks
"""
Computes the probabilities that a txn will be accepted at each of the gas
prices accepted by the miners.
"""
miner_data_by_price = tuple(sorted(
miner_data,
key=operator.attrgetter('low_percentile_gas_price'),
reverse=True,
))
for idx in range(len(miner_data_by_price)):
low_percentile_gas_price = miner_data_by_price[idx].low_percentile_gas_price
num_blocks_accepting_price = sum(m.num_blocks for m in miner_data_by_price[idx:])
inv_prob_per_block = (sample_size - num_blocks_accepting_price) / sample_size
probability_accepted = 1 - inv_prob_per_block ** wait_blocks
yield Probability(low_percentile_gas_price, probability_accepted) | [
"def",
"_compute_probabilities",
"(",
"miner_data",
",",
"wait_blocks",
",",
"sample_size",
")",
":",
"miner_data_by_price",
"=",
"tuple",
"(",
"sorted",
"(",
"miner_data",
",",
"key",
"=",
"operator",
".",
"attrgetter",
"(",
"'low_percentile_gas_price'",
")",
","... | Computes the probabilities that a txn will be accepted at each of the gas
prices accepted by the miners. | [
"Computes",
"the",
"probabilities",
"that",
"a",
"txn",
"will",
"be",
"accepted",
"at",
"each",
"of",
"the",
"gas",
"prices",
"accepted",
"by",
"the",
"miners",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/gas_strategies/time_based.py#L76-L91 | train | 220,290 |
ethereum/web3.py | web3/gas_strategies/time_based.py | _compute_gas_price | def _compute_gas_price(probabilities, desired_probability):
"""
Given a sorted range of ``Probability`` named-tuples returns a gas price
computed based on where the ``desired_probability`` would fall within the
range.
:param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.
:param desired_probability: An floating point representation of the desired
probability. (e.g. ``85% -> 0.85``)
"""
first = probabilities[0]
last = probabilities[-1]
if desired_probability >= first.prob:
return int(first.gas_price)
elif desired_probability <= last.prob:
return int(last.gas_price)
for left, right in sliding_window(2, probabilities):
if desired_probability < right.prob:
continue
elif desired_probability > left.prob:
# This code block should never be reachable as it would indicate
# that we already passed by the probability window in which our
# `desired_probability` is located.
raise Exception('Invariant')
adj_prob = desired_probability - right.prob
window_size = left.prob - right.prob
position = adj_prob / window_size
gas_window_size = left.gas_price - right.gas_price
gas_price = int(math.ceil(right.gas_price + gas_window_size * position))
return gas_price
else:
# The initial `if/else` clause in this function handles the case where
# the `desired_probability` is either above or below the min/max
# probability found in the `probabilities`.
#
# With these two cases handled, the only way this code block should be
# reachable would be if the `probabilities` were not sorted correctly.
# Otherwise, the `desired_probability` **must** fall between two of the
# values in the `probabilities``.
raise Exception('Invariant') | python | def _compute_gas_price(probabilities, desired_probability):
"""
Given a sorted range of ``Probability`` named-tuples returns a gas price
computed based on where the ``desired_probability`` would fall within the
range.
:param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.
:param desired_probability: An floating point representation of the desired
probability. (e.g. ``85% -> 0.85``)
"""
first = probabilities[0]
last = probabilities[-1]
if desired_probability >= first.prob:
return int(first.gas_price)
elif desired_probability <= last.prob:
return int(last.gas_price)
for left, right in sliding_window(2, probabilities):
if desired_probability < right.prob:
continue
elif desired_probability > left.prob:
# This code block should never be reachable as it would indicate
# that we already passed by the probability window in which our
# `desired_probability` is located.
raise Exception('Invariant')
adj_prob = desired_probability - right.prob
window_size = left.prob - right.prob
position = adj_prob / window_size
gas_window_size = left.gas_price - right.gas_price
gas_price = int(math.ceil(right.gas_price + gas_window_size * position))
return gas_price
else:
# The initial `if/else` clause in this function handles the case where
# the `desired_probability` is either above or below the min/max
# probability found in the `probabilities`.
#
# With these two cases handled, the only way this code block should be
# reachable would be if the `probabilities` were not sorted correctly.
# Otherwise, the `desired_probability` **must** fall between two of the
# values in the `probabilities``.
raise Exception('Invariant') | [
"def",
"_compute_gas_price",
"(",
"probabilities",
",",
"desired_probability",
")",
":",
"first",
"=",
"probabilities",
"[",
"0",
"]",
"last",
"=",
"probabilities",
"[",
"-",
"1",
"]",
"if",
"desired_probability",
">=",
"first",
".",
"prob",
":",
"return",
"... | Given a sorted range of ``Probability`` named-tuples returns a gas price
computed based on where the ``desired_probability`` would fall within the
range.
:param probabilities: An iterable of `Probability` named-tuples sorted in reverse order.
:param desired_probability: An floating point representation of the desired
probability. (e.g. ``85% -> 0.85``) | [
"Given",
"a",
"sorted",
"range",
"of",
"Probability",
"named",
"-",
"tuples",
"returns",
"a",
"gas",
"price",
"computed",
"based",
"on",
"where",
"the",
"desired_probability",
"would",
"fall",
"within",
"the",
"range",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/gas_strategies/time_based.py#L94-L136 | train | 220,291 |
def construct_time_based_gas_price_strategy(max_wait_seconds,
                                            sample_size=120,
                                            probability=98):
    """
    A gas pricing strategy that uses recently mined block data to derive a gas
    price for which a transaction is likely to be mined within X seconds with
    probability P.

    :param max_wait_seconds: The desired maxiumum number of seconds the
        transaction should take to mine.
    :param sample_size: The number of recent blocks to sample
    :param probability: An integer representation of the desired probability
        that the transaction will be mined within ``max_wait_seconds``. 0 means 0%
        and 100 means 100%.
    """
    def time_based_gas_price_strategy(web3, transaction_params):
        avg_block_time = _get_avg_block_time(web3, sample_size=sample_size)
        # Translate the wall-clock budget into a number of blocks.
        wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))
        raw_miner_data = _get_raw_miner_data(web3, sample_size=sample_size)
        miner_data = _aggregate_miner_data(raw_miner_data)
        # Materialize the generator: _compute_probabilities is a generator
        # function, but _compute_gas_price indexes its argument
        # (probabilities[0] / probabilities[-1]), which a generator does
        # not support.
        probabilities = tuple(_compute_probabilities(
            miner_data,
            wait_blocks=wait_blocks,
            sample_size=sample_size,
        ))
        gas_price = _compute_gas_price(probabilities, probability / 100)
        return gas_price
    return time_based_gas_price_strategy
sample_size=120,
probability=98):
"""
A gas pricing strategy that uses recently mined block data to derive a gas
price for which a transaction is likely to be mined within X seconds with
probability P.
:param max_wait_seconds: The desired maxiumum number of seconds the
transaction should take to mine.
:param sample_size: The number of recent blocks to sample
:param probability: An integer representation of the desired probability
that the transaction will be mined within ``max_wait_seconds``. 0 means 0%
and 100 means 100%.
"""
def time_based_gas_price_strategy(web3, transaction_params):
avg_block_time = _get_avg_block_time(web3, sample_size=sample_size)
wait_blocks = int(math.ceil(max_wait_seconds / avg_block_time))
raw_miner_data = _get_raw_miner_data(web3, sample_size=sample_size)
miner_data = _aggregate_miner_data(raw_miner_data)
probabilities = _compute_probabilities(
miner_data,
wait_blocks=wait_blocks,
sample_size=sample_size,
)
gas_price = _compute_gas_price(probabilities, probability / 100)
return gas_price
return time_based_gas_price_strategy | [
"def",
"construct_time_based_gas_price_strategy",
"(",
"max_wait_seconds",
",",
"sample_size",
"=",
"120",
",",
"probability",
"=",
"98",
")",
":",
"def",
"time_based_gas_price_strategy",
"(",
"web3",
",",
"transaction_params",
")",
":",
"avg_block_time",
"=",
"_get_a... | A gas pricing strategy that uses recently mined block data to derive a gas
price for which a transaction is likely to be mined within X seconds with
probability P.
:param max_wait_seconds: The desired maxiumum number of seconds the
transaction should take to mine.
:param sample_size: The number of recent blocks to sample
:param probability: An integer representation of the desired probability
that the transaction will be mined within ``max_wait_seconds``. 0 means 0%
and 100 means 100%. | [
"A",
"gas",
"pricing",
"strategy",
"that",
"uses",
"recently",
"mined",
"block",
"data",
"to",
"derive",
"a",
"gas",
"price",
"for",
"which",
"a",
"transaction",
"is",
"likely",
"to",
"be",
"mined",
"within",
"X",
"seconds",
"with",
"probability",
"P",
"."... | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/gas_strategies/time_based.py#L140-L170 | train | 220,292 |
ethereum/web3.py | web3/manager.py | RequestManager.default_middlewares | def default_middlewares(web3):
"""
List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names.
"""
return [
(request_parameter_normalizer, 'request_param_normalizer'),
(gas_price_strategy_middleware, 'gas_price_strategy'),
(name_to_address_middleware(web3), 'name_to_address'),
(attrdict_middleware, 'attrdict'),
(pythonic_middleware, 'pythonic'),
(normalize_errors_middleware, 'normalize_errors'),
(validation_middleware, 'validation'),
(abi_middleware, 'abi'),
] | python | def default_middlewares(web3):
"""
List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names.
"""
return [
(request_parameter_normalizer, 'request_param_normalizer'),
(gas_price_strategy_middleware, 'gas_price_strategy'),
(name_to_address_middleware(web3), 'name_to_address'),
(attrdict_middleware, 'attrdict'),
(pythonic_middleware, 'pythonic'),
(normalize_errors_middleware, 'normalize_errors'),
(validation_middleware, 'validation'),
(abi_middleware, 'abi'),
] | [
"def",
"default_middlewares",
"(",
"web3",
")",
":",
"return",
"[",
"(",
"request_parameter_normalizer",
",",
"'request_param_normalizer'",
")",
",",
"(",
"gas_price_strategy_middleware",
",",
"'gas_price_strategy'",
")",
",",
"(",
"name_to_address_middleware",
"(",
"we... | List the default middlewares for the request manager.
Leaving ens unspecified will prevent the middleware from resolving names. | [
"List",
"the",
"default",
"middlewares",
"for",
"the",
"request",
"manager",
".",
"Leaving",
"ens",
"unspecified",
"will",
"prevent",
"the",
"middleware",
"from",
"resolving",
"names",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/manager.py#L57-L71 | train | 220,293 |
ethereum/web3.py | web3/manager.py | RequestManager.request_blocking | def request_blocking(self, method, params):
"""
Make a synchronous request using the provider
"""
response = self._make_request(method, params)
if "error" in response:
raise ValueError(response["error"])
return response['result'] | python | def request_blocking(self, method, params):
"""
Make a synchronous request using the provider
"""
response = self._make_request(method, params)
if "error" in response:
raise ValueError(response["error"])
return response['result'] | [
"def",
"request_blocking",
"(",
"self",
",",
"method",
",",
"params",
")",
":",
"response",
"=",
"self",
".",
"_make_request",
"(",
"method",
",",
"params",
")",
"if",
"\"error\"",
"in",
"response",
":",
"raise",
"ValueError",
"(",
"response",
"[",
"\"erro... | Make a synchronous request using the provider | [
"Make",
"a",
"synchronous",
"request",
"using",
"the",
"provider"
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/manager.py#L90-L99 | train | 220,294 |
ethereum/web3.py | web3/manager.py | RequestManager.coro_request | async def coro_request(self, method, params):
"""
Couroutine for making a request using the provider
"""
response = await self._coro_make_request(method, params)
if "error" in response:
raise ValueError(response["error"])
if response['result'] is None:
raise ValueError(f"The call to {method} did not return a value.")
return response['result'] | python | async def coro_request(self, method, params):
"""
Couroutine for making a request using the provider
"""
response = await self._coro_make_request(method, params)
if "error" in response:
raise ValueError(response["error"])
if response['result'] is None:
raise ValueError(f"The call to {method} did not return a value.")
return response['result'] | [
"async",
"def",
"coro_request",
"(",
"self",
",",
"method",
",",
"params",
")",
":",
"response",
"=",
"await",
"self",
".",
"_coro_make_request",
"(",
"method",
",",
"params",
")",
"if",
"\"error\"",
"in",
"response",
":",
"raise",
"ValueError",
"(",
"resp... | Couroutine for making a request using the provider | [
"Couroutine",
"for",
"making",
"a",
"request",
"using",
"the",
"provider"
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/manager.py#L101-L113 | train | 220,295 |
ethereum/web3.py | web3/middleware/fixture.py | construct_fixture_middleware | def construct_fixture_middleware(fixtures):
"""
Constructs a middleware which returns a static response for any method
which is found in the provided fixtures.
"""
def fixture_middleware(make_request, web3):
def middleware(method, params):
if method in fixtures:
result = fixtures[method]
return {'result': result}
else:
return make_request(method, params)
return middleware
return fixture_middleware | python | def construct_fixture_middleware(fixtures):
"""
Constructs a middleware which returns a static response for any method
which is found in the provided fixtures.
"""
def fixture_middleware(make_request, web3):
def middleware(method, params):
if method in fixtures:
result = fixtures[method]
return {'result': result}
else:
return make_request(method, params)
return middleware
return fixture_middleware | [
"def",
"construct_fixture_middleware",
"(",
"fixtures",
")",
":",
"def",
"fixture_middleware",
"(",
"make_request",
",",
"web3",
")",
":",
"def",
"middleware",
"(",
"method",
",",
"params",
")",
":",
"if",
"method",
"in",
"fixtures",
":",
"result",
"=",
"fix... | Constructs a middleware which returns a static response for any method
which is found in the provided fixtures. | [
"Constructs",
"a",
"middleware",
"which",
"returns",
"a",
"static",
"response",
"for",
"any",
"method",
"which",
"is",
"found",
"in",
"the",
"provided",
"fixtures",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/middleware/fixture.py#L1-L14 | train | 220,296 |
ethereum/web3.py | web3/middleware/__init__.py | combine_middlewares | def combine_middlewares(middlewares, web3, provider_request_fn):
"""
Returns a callable function which will call the provider.provider_request
function wrapped with all of the middlewares.
"""
return functools.reduce(
lambda request_fn, middleware: middleware(request_fn, web3),
reversed(middlewares),
provider_request_fn,
) | python | def combine_middlewares(middlewares, web3, provider_request_fn):
"""
Returns a callable function which will call the provider.provider_request
function wrapped with all of the middlewares.
"""
return functools.reduce(
lambda request_fn, middleware: middleware(request_fn, web3),
reversed(middlewares),
provider_request_fn,
) | [
"def",
"combine_middlewares",
"(",
"middlewares",
",",
"web3",
",",
"provider_request_fn",
")",
":",
"return",
"functools",
".",
"reduce",
"(",
"lambda",
"request_fn",
",",
"middleware",
":",
"middleware",
"(",
"request_fn",
",",
"web3",
")",
",",
"reversed",
... | Returns a callable function which will call the provider.provider_request
function wrapped with all of the middlewares. | [
"Returns",
"a",
"callable",
"function",
"which",
"will",
"call",
"the",
"provider",
".",
"provider_request",
"function",
"wrapped",
"with",
"all",
"of",
"the",
"middlewares",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/middleware/__init__.py#L67-L76 | train | 220,297 |
ethereum/web3.py | web3/_utils/events.py | get_event_data | def get_event_data(event_abi, log_entry):
"""
Given an event ABI and a log entry for that event, return the decoded
event data
"""
if event_abi['anonymous']:
log_topics = log_entry['topics']
elif not log_entry['topics']:
raise MismatchedABI("Expected non-anonymous event to have 1 or more topics")
elif event_abi_to_log_topic(event_abi) != log_entry['topics'][0]:
raise MismatchedABI("The event signature did not match the provided ABI")
else:
log_topics = log_entry['topics'][1:]
log_topics_abi = get_indexed_event_inputs(event_abi)
log_topic_normalized_inputs = normalize_event_input_types(log_topics_abi)
log_topic_types = get_event_abi_types_for_decoding(log_topic_normalized_inputs)
log_topic_names = get_abi_input_names({'inputs': log_topics_abi})
if len(log_topics) != len(log_topic_types):
raise ValueError("Expected {0} log topics. Got {1}".format(
len(log_topic_types),
len(log_topics),
))
log_data = hexstr_if_str(to_bytes, log_entry['data'])
log_data_abi = exclude_indexed_event_inputs(event_abi)
log_data_normalized_inputs = normalize_event_input_types(log_data_abi)
log_data_types = get_event_abi_types_for_decoding(log_data_normalized_inputs)
log_data_names = get_abi_input_names({'inputs': log_data_abi})
# sanity check that there are not name intersections between the topic
# names and the data argument names.
duplicate_names = set(log_topic_names).intersection(log_data_names)
if duplicate_names:
raise ValueError(
"Invalid Event ABI: The following argument names are duplicated "
"between event inputs: '{0}'".format(', '.join(duplicate_names))
)
decoded_log_data = decode_abi(log_data_types, log_data)
normalized_log_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_data_types,
decoded_log_data
)
decoded_topic_data = [
decode_single(topic_type, topic_data)
for topic_type, topic_data
in zip(log_topic_types, log_topics)
]
normalized_topic_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_topic_types,
decoded_topic_data
)
event_args = dict(itertools.chain(
zip(log_topic_names, normalized_topic_data),
zip(log_data_names, normalized_log_data),
))
event_data = {
'args': event_args,
'event': event_abi['name'],
'logIndex': log_entry['logIndex'],
'transactionIndex': log_entry['transactionIndex'],
'transactionHash': log_entry['transactionHash'],
'address': log_entry['address'],
'blockHash': log_entry['blockHash'],
'blockNumber': log_entry['blockNumber'],
}
return AttributeDict.recursive(event_data) | python | def get_event_data(event_abi, log_entry):
"""
Given an event ABI and a log entry for that event, return the decoded
event data
"""
if event_abi['anonymous']:
log_topics = log_entry['topics']
elif not log_entry['topics']:
raise MismatchedABI("Expected non-anonymous event to have 1 or more topics")
elif event_abi_to_log_topic(event_abi) != log_entry['topics'][0]:
raise MismatchedABI("The event signature did not match the provided ABI")
else:
log_topics = log_entry['topics'][1:]
log_topics_abi = get_indexed_event_inputs(event_abi)
log_topic_normalized_inputs = normalize_event_input_types(log_topics_abi)
log_topic_types = get_event_abi_types_for_decoding(log_topic_normalized_inputs)
log_topic_names = get_abi_input_names({'inputs': log_topics_abi})
if len(log_topics) != len(log_topic_types):
raise ValueError("Expected {0} log topics. Got {1}".format(
len(log_topic_types),
len(log_topics),
))
log_data = hexstr_if_str(to_bytes, log_entry['data'])
log_data_abi = exclude_indexed_event_inputs(event_abi)
log_data_normalized_inputs = normalize_event_input_types(log_data_abi)
log_data_types = get_event_abi_types_for_decoding(log_data_normalized_inputs)
log_data_names = get_abi_input_names({'inputs': log_data_abi})
# sanity check that there are not name intersections between the topic
# names and the data argument names.
duplicate_names = set(log_topic_names).intersection(log_data_names)
if duplicate_names:
raise ValueError(
"Invalid Event ABI: The following argument names are duplicated "
"between event inputs: '{0}'".format(', '.join(duplicate_names))
)
decoded_log_data = decode_abi(log_data_types, log_data)
normalized_log_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_data_types,
decoded_log_data
)
decoded_topic_data = [
decode_single(topic_type, topic_data)
for topic_type, topic_data
in zip(log_topic_types, log_topics)
]
normalized_topic_data = map_abi_data(
BASE_RETURN_NORMALIZERS,
log_topic_types,
decoded_topic_data
)
event_args = dict(itertools.chain(
zip(log_topic_names, normalized_topic_data),
zip(log_data_names, normalized_log_data),
))
event_data = {
'args': event_args,
'event': event_abi['name'],
'logIndex': log_entry['logIndex'],
'transactionIndex': log_entry['transactionIndex'],
'transactionHash': log_entry['transactionHash'],
'address': log_entry['address'],
'blockHash': log_entry['blockHash'],
'blockNumber': log_entry['blockNumber'],
}
return AttributeDict.recursive(event_data) | [
"def",
"get_event_data",
"(",
"event_abi",
",",
"log_entry",
")",
":",
"if",
"event_abi",
"[",
"'anonymous'",
"]",
":",
"log_topics",
"=",
"log_entry",
"[",
"'topics'",
"]",
"elif",
"not",
"log_entry",
"[",
"'topics'",
"]",
":",
"raise",
"MismatchedABI",
"("... | Given an event ABI and a log entry for that event, return the decoded
event data | [
"Given",
"an",
"event",
"ABI",
"and",
"a",
"log",
"entry",
"for",
"that",
"event",
"return",
"the",
"decoded",
"event",
"data"
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/_utils/events.py#L159-L233 | train | 220,298 |
ethereum/web3.py | web3/middleware/exception_retry_request.py | exception_retry_middleware | def exception_retry_middleware(make_request, web3, errors, retries=5):
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method, params):
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
except errors:
if i < retries - 1:
continue
else:
raise
else:
return make_request(method, params)
return middleware | python | def exception_retry_middleware(make_request, web3, errors, retries=5):
"""
Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider.
"""
def middleware(method, params):
if check_if_retry_on_failure(method):
for i in range(retries):
try:
return make_request(method, params)
except errors:
if i < retries - 1:
continue
else:
raise
else:
return make_request(method, params)
return middleware | [
"def",
"exception_retry_middleware",
"(",
"make_request",
",",
"web3",
",",
"errors",
",",
"retries",
"=",
"5",
")",
":",
"def",
"middleware",
"(",
"method",
",",
"params",
")",
":",
"if",
"check_if_retry_on_failure",
"(",
"method",
")",
":",
"for",
"i",
"... | Creates middleware that retries failed HTTP requests. Is a default
middleware for HTTPProvider. | [
"Creates",
"middleware",
"that",
"retries",
"failed",
"HTTP",
"requests",
".",
"Is",
"a",
"default",
"middleware",
"for",
"HTTPProvider",
"."
] | 71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab | https://github.com/ethereum/web3.py/blob/71b8bf03dc6d332dd97d8902a38ffab6f8b5a5ab/web3/middleware/exception_retry_request.py#L71-L88 | train | 220,299 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.