repo
stringlengths 2
99
| file
stringlengths 13
225
| code
stringlengths 0
18.3M
| file_length
int64 0
18.3M
| avg_line_length
float64 0
1.36M
| max_line_length
int64 0
4.26M
| extension_type
stringclasses 1
value |
|---|---|---|---|---|---|---|
chainer
|
chainer-master/chainer/utils/cache.py
|
import functools
import chainer
class cached_property(object):
    """Cache a result of computation of Chainer functions.

    Caches are stored per value of ``chainer.config.enable_backprop``, so a
    value computed with the graph enabled is not reused when backprop is
    disabled (and vice versa).

    The following example calls ``F.sigmoid`` only once.

    >>> class C(object):
    ...     def __init__(self, x):
    ...         self.x = x
    ...     @chainer.utils.cache.cached_property
    ...     def y(self):
    ...         return F.sigmoid(self.x)
    ...     def loss(self, t):
    ...         return F.mean_squared_error(self.y, t)
    >>> x = chainer.Variable(np.array([2, 3], np.float32))
    >>> obj = C(x)
    >>> loss1 = obj.loss(np.array([0.1, 0.2], np.float32))
    >>> loss2 = obj.loss(np.array([0.3, 0.4], np.float32))

    However, the following example recomputes `obj.y` because the second call
    requires the computational graph.

    >>> with chainer.no_backprop_mode():
    ...     loss1 = obj.loss(np.array([0.1, 0.2], np.float32))
    >>> loss2 = obj.loss(np.array([0.3, 0.4], np.float32))
    """

    def __init__(self, func):
        # Copy __name__/__doc__ etc. from the wrapped function so the
        # descriptor is indistinguishable from the original method.
        functools.update_wrapper(self, func)
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            # Accessed on the class itself: return the descriptor object.
            return self
        # One cache slot per enable_backprop mode, stored on the instance.
        caches = obj.__dict__.setdefault(self.__name__, {})
        backprop_enabled = chainer.config.enable_backprop
        try:
            return caches[backprop_enabled]
        except KeyError:
            value = self.func(obj)
            caches[backprop_enabled] = value
            return value

    def __set__(self, obj, value):
        # Defining __set__ makes cached_property a data descriptor, so
        # instance attribute assignment cannot shadow the cache.
        # Bug fix: the second argument of __set__ is the assigned *value*,
        # not the owner class; report the instance's type, not the value.
        raise AttributeError(
            'attribute \'{}\' of {} is readonly'.format(
                self.__name__, type(obj)))
| 1,735
| 29.45614
| 77
|
py
|
chainer
|
chainer-master/chainer/utils/conv_nd_kernel.py
|
import functools
import six
from chainer.backends import cuda
def mulexp(xs, init=None):
    """Join the terms in ``xs`` into a C multiplication expression string.

    ``init``, when given, becomes the left-most factor.
    """
    join = '{} * {}'.format
    if init is None:
        return functools.reduce(join, xs)
    return functools.reduce(join, xs, init)
def andexp(xs, init=None):
    """Join the terms in ``xs`` into a C logical-AND expression string.

    ``init``, when given, becomes the left-most operand.
    """
    join = '{} && {}'.format
    if init is None:
        return functools.reduce(join, xs)
    return functools.reduce(join, xs, init)
def muladdexp(xs, ys, init=None):
    """Build a nested Horner-style ``(y + x * acc)`` expression string.

    Pairs of factors/addends are taken from ``xs``/``ys``; ``init``, when
    given, seeds the innermost accumulator.
    """
    def step(acc, pair):
        factor, addend = pair
        return '({} + {} * {})'.format(addend, factor, acc)
    pairs = six.moves.zip(xs, ys)
    if init is None:
        return functools.reduce(step, pairs)
    return functools.reduce(step, pairs, init)
def map_(fn, *lst):
    """``map`` that always materializes a list (Python 2/3 compatibility)."""
    return [item for item in map(fn, *lst)]
def succ_sublists(xs):
    """Return the successive suffixes ``[xs, xs[1:], ..., xs[-1:]]``."""
    return [xs[start:] for start in six.moves.range(len(xs))]
def vars(prefix, n):
    """Return ``n`` C identifiers: ``prefix_0`` ... ``prefix_{n-1}``."""
    template = '{}_{{}}'.format(prefix)
    return [template.format(i) for i in six.moves.range(n)]
class Writer(object):
    """Accumulates lines of generated code with two-space indentation."""

    def __init__(self):
        self._indent = 0
        self._lines = []

    def write(self, line, indent=None):
        # 'dec' / 'decinc' dedent before emitting the line;
        # 'inc' / 'decinc' indent after it.
        if indent in ('dec', 'decinc'):
            self._indent -= 1
        self._lines.append('  ' * self._indent + line)
        if indent in ('inc', 'decinc'):
            self._indent += 1

    def get(self):
        """Return everything written so far, joined with newlines."""
        return '\n'.join(self._lines)
#
# im2col
class Im2colNDKernel(object):
    """Generates CUDA elementwise-kernel sources for N-dimensional im2col.

    Each ``_compile_*`` helper emits a fragment of the kernel body; the
    inline comments show the concrete code generated for the 2D case.
    Use the memoized :meth:`generate` entry point.
    """

    def _in_params(self, ds, outs, ks, ss, ps, dilate):
        # 2D: raw T img, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
        # int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0, int32 p_1,
        # int32 di_0, int32 di_1
        def aux(x):
            return 'int32 {}'.format(x)
        return ', '.join(
            ['raw T img'] + map_(aux, ds + outs + ks + ss + ps + dilate))

    def _out_params(self):
        return 'T col'

    def _compile_c0(self, outs, ks):
        # Channel index of this output element.
        # 2D: int c0 = i / (k_0 * k_1 * out_0 * out_1)
        return ['int c0 = i / ({});'.format(mulexp(ks + outs))]

    def _compile_kx(self, ndim, outs, ks):
        # Kernel-position indices.
        # 2D: int kx_0 = i / (k_1 * out_0 * out_1) % k_0;
        #     int kx_1 = i / (out_0 * out_1) % k_1;
        def aux(kx, xs):
            head = xs[0]
            tail = xs[1:] + outs
            if tail:
                return 'int {} = i / ({}) % {};'.format(kx, mulexp(tail), head)
            else:
                return 'int {} = i % {};'.format(kx, head)
        kxs = vars('kx', ndim)
        kx_decls = map_(aux, kxs, succ_sublists(ks))
        return kx_decls, kxs

    def _compile_out_x(self, ndim, outs):
        # Output-position indices.
        # 2D: int out_x0 = i / (out_1) % out_0;
        #     int out_x1 = i % out_1;
        def aux(out_x, xs):
            head = xs[0]
            tail = xs[1:]
            if tail:
                return 'int {} = i / ({}) % {};'.format(
                    out_x, mulexp(tail), head)
            else:
                return 'int {} = i % {};'.format(out_x, head)
        out_xs = vars('out_x', ndim)
        out_x_decls = map_(aux, out_xs, succ_sublists(outs))
        return out_x_decls, out_xs

    def _compile_main(self, ndim, ds, ks, ss, ps, dilate, kxs, out_xs):
        # Gather the input element, or zero when out of bounds.
        # 2D: int in_0 = kx_0 * di_0 + out_x_0 * s_0 - p_0;
        #     int in_1 = kx_1 * di_1 + out_x_1 * s_1 - p_1;
        #     if (0 <= in_0 && in_0 < d_0 && 0 <= in_1 && in_1 < d_1) {
        #       int idx_0 = in_0 + d_0 * c0;
        #       int idx_1 = in_1 + d_1 * idx_0;
        #       col = img[idx_1];
        #     } else {
        #       col = (T)0;
        #     }
        w = Writer()
        ins = vars('in', ndim)
        for _in, kx, out_x, s, p, di in six.moves.zip(ins, kxs, out_xs,
                                                      ss, ps, dilate):
            target = 'int {} = {} * {} + {} * {} - {};'
            w.write(target.format(_in, kx, di, out_x, s, p))

        def rel_aux(_in, d):
            return '0 <= {} && {} < {}'.format(_in, _in, d)
        w.write(
            'if ({}) {{'.format(andexp(map_(rel_aux, ins, ds))), indent='inc')
        # Flatten the N-dimensional input coordinate into a linear index.
        idxs = vars('idx', ndim)
        idx0s = ['c0'] + idxs[:-1]
        for idx, _in, d, idx0 in six.moves.zip(idxs, ins, ds, idx0s):
            w.write('int {} = {} + {} * {};'.format(idx, _in, d, idx0))
        w.write('col = img[{}];'.format(idxs[-1]))
        w.write('} else {', indent='decinc')
        w.write('col = (T)0;')
        w.write('}', indent='dec')
        return [w.get()]

    def _operation(self, ndim, ds, outs, ks, ss, ps, dilate):
        c0 = self._compile_c0(outs, ks)
        kx, kxs = self._compile_kx(ndim, outs, ks)
        out_x, out_xs = self._compile_out_x(ndim, outs)
        main = self._compile_main(ndim, ds, ks, ss, ps, dilate, kxs, out_xs)
        return '\n'.join(c0 + kx + out_x + main)

    def _generate(self, ndim):
        ds = vars('d', ndim)
        outs = vars('out', ndim)
        ks = vars('k', ndim)
        ss = vars('s', ndim)
        ps = vars('p', ndim)
        dilate = vars('di', ndim)
        in_params = self._in_params(ds, outs, ks, ss, ps, dilate)
        out_params = self._out_params()
        operation = self._operation(ndim, ds, outs, ks, ss, ps, dilate)
        # Bug fix: the original had a duplicated assignment
        # (``name = name = ...``).
        name = 'im2col_{}d'.format(ndim)
        return in_params, out_params, operation, name

    @staticmethod
    @cuda.memoize()
    def generate(ndim):
        """Return memoized (in_params, out_params, operation, name)."""
        return _im2col_nd_kernel._generate(ndim)


_im2col_nd_kernel = Im2colNDKernel()
#
# col2im
class Col2imNDKernel(object):
    """Generates CUDA elementwise-kernel sources for N-dimensional col2im.

    Each ``_compile_*`` helper returns fragments of the kernel body; the
    inline comments show the concrete code generated for the 2D case.
    """
    def _in_params(self, ds, outs, ks, ss, ps, dilate):
        # 2D: raw T col, int32 d_0, int32 d_1, int32 out_0, int32 out_1,
        # int32 k_0, int32 k_1, int32 s_0, int32 s_1, int32 p_0, int32 p_1,
        # int32 di_0, int32 di_1
        def aux(x):
            return 'int32 {}'.format(x)
        return ', '.join(
            ['raw T col'] + map_(aux, ds + outs + ks + ss + ps + dilate))
    def _out_params(self):
        return 'T img'
    def _compile_c0(self, ds):
        # Channel index of this output element.
        # 2D: int c0 = i / (d_0 * d_1);
        return ['int c0 = i / ({});'.format(mulexp(ds))]
    def _compile_x(self, ndim, ds):
        # Spatial position of this output element.
        # 2D: int x_0 = i / (d_1) % d_0;
        #     int x_1 = i % d_1;
        def aux(x, ds):
            head = ds[0]
            tail = ds[1:]
            if tail:
                return 'int {} = i / ({}) % {};'.format(
                    x, mulexp(tail), head)
            else:
                return 'int {} = i % {};'.format(x, head)
        xs = vars('x', ndim)
        x_decls = map_(aux, xs, succ_sublists(ds))
        return x_decls, xs
    def _compile_loop(self, ndim, outs, ks, ss, ps, xs, dilate):
        # Emits one nested loop per dimension over the kernel positions.
        # 2D: for (int kx_0 = 0; kx_0 < k_0; ++kx_0) {
        #       int out_x_0 = x_0 + p_0 - kx_0 * di_0;
        #       if (0 > out_x_0 || out_x_0 >= out_0 * s_0) continue;
        #       if (out_x_0 % s_0 != 0) continue;
        #       out_x_0 /= s_0;
        #       for (int kx_1 = 0; kx_1 < k_1; ++kx_1) {
        #         int out_x_1 = x_1 + p_1 - kx_1 * di_1;
        #         if (0 > out_x_1 || out_x_1 >= out_1 * s_1) continue;
        #         if (out_x_1 % s_1 != 0) continue;
        #         out_x_1 /= s_1;
        #         ... Main-part here ...
        #       }
        #     }
        #     ... After-part here ...
        def _loop_main(main, ndim, ks, ss):
            w = Writer()
            # Loop openings.
            out_xs = vars('out_x', ndim)
            kxs = vars('kx', ndim)
            for out, out_x, kx, s, p, x, k, di in six.moves.zip(
                    outs, out_xs, kxs, ss, ps, xs, ks, dilate):
                w.write('for (int {} = 0; {} < {}; ++{}) {{'.format(
                    kx, kx, k, kx), indent='inc')
                w.write('int {} = {} + {} - {} * {};'.format(
                    out_x, x, p, kx, di))
                # Skip kernel positions that fall outside the output or do
                # not land exactly on a stride multiple.
                w.write('if (0 > {} || {} >= {} * {}) continue;'.format(
                    out_x, out_x, out, s))
                w.write('if ({} % {} != 0) continue;'.format(out_x, s))
                w.write('{} /= {};'.format(out_x, s))
            # Main-part.
            for l in main(ks, kxs, out_xs).split('\n'):
                w.write(l)
            # Loop closings.
            for _ in out_xs:
                w.write('}', indent='dec')
            return [w.get()]
        return _loop_main
    def _compile_procedure(self, outs, xs):
        # Accumulation statement executed at the innermost loop level.
        # 2D: val = val + col[
        #         (out_x_1 + out_1 * (out_x_0 + out_0 *
        #          (kx_1 + k_1 * (kx_0 + k_0 * c0))))];
        def _main(ks, kxs, out_xs):
            index = muladdexp(outs, out_xs, muladdexp(ks, kxs, 'c0'))
            return 'val = val + col[{}];'.format(index)
        before = ['T val = 0;']
        after = ['img = val;']
        return before, _main, after
    def _operation(self, ndim, ds, outs, ks, ss, ps, dilate):
        c0 = self._compile_c0(ds)
        x, xs = self._compile_x(ndim, ds)
        loop_main = self._compile_loop(ndim, outs, ks, ss, ps, xs, dilate)
        before, main, after = self._compile_procedure(outs, xs)
        return '\n'.join(
            c0 + x + before + loop_main(main, ndim, ks, ss) + after)
    def _generate(self, ndim):
        ds = vars('d', ndim)
        outs = vars('out', ndim)
        ks = vars('k', ndim)
        ss = vars('s', ndim)
        ps = vars('p', ndim)
        dilate = vars('di', ndim)
        in_params = self._in_params(ds, outs, ks, ss, ps, dilate)
        out_params = self._out_params()
        operation = self._operation(ndim, ds, outs, ks, ss, ps, dilate)
        name = 'col2im_{}d'.format(ndim)
        return in_params, out_params, operation, name
    @staticmethod
    @cuda.memoize()
    def generate(ndim):
        # Memoized entry point returning ElementwiseKernel arguments.
        return _col2im_nd_kernel._generate(ndim)
_col2im_nd_kernel = Col2imNDKernel()
| 9,790
| 31.855705
| 79
|
py
|
chainer
|
chainer-master/chainer/utils/imgproc.py
|
import numpy
def oversample(images, crop_dims):
    """Crop each image into four corners plus center, then add mirrors.

    Returns an array of ``10 * len(images)`` crops: for every input image,
    five crops (four corners and the center) followed by their horizontal
    mirrors, each of shape ``(channels, crop_dims[0], crop_dims[1])``.
    """
    n_ch, src_h, src_w = images[0].shape
    crop_h, crop_w = crop_dims
    center_y = int(src_h / 2.0 - crop_h / 2.0)
    center_x = int(src_w / 2.0 - crop_w / 2.0)
    # Top-left corner of each of the five crops.
    starts = numpy.array([
        [0, 0],
        [0, src_w - crop_w],
        [src_h - crop_h, 0],
        [src_h - crop_h, src_w - crop_w],
        [center_y, center_x],
    ], dtype=int)
    # (y0, x0, y1, x1) boxes.
    boxes = numpy.hstack((starts, starts + [crop_h, crop_w]))
    out = numpy.empty(
        (10 * len(images), n_ch, crop_h, crop_w), dtype=images[0].dtype)
    pos = 0
    for image in images:
        for y0, x0, y1, x1 in boxes:
            out[pos] = image[:, y0:y1, x0:x1]
            pos += 1
        # Horizontal mirrors of the five crops just written.
        out[pos:pos + 5] = out[pos - 5:pos, :, :, ::-1]
        pos += 5
    return out
| 992
| 30.03125
| 74
|
py
|
chainer
|
chainer-master/chainer/utils/experimental.py
|
import warnings
import chainer
def experimental(api_name):
    """Declares that user is using an experimental feature.

    The developer of an API can mark it as *experimental* by calling
    this function. When users call experimental APIs, :class:`FutureWarning`
    is issued.
    The presentation of :class:`FutureWarning` is disabled by setting
    ``chainer.disable_experimental_feature_warning`` to ``True``,
    which is ``False`` by default.

    The basic usage is to call it in the function or method we want to
    mark as experimental along with the API name::

        from chainer import utils

        def f(x):
            utils.experimental('chainer.foo.bar.f')
            # concrete implementation of f follows

    which emits ``FutureWarning: chainer.foo.bar.f is experimental. The
    interface can change in the future.`` on the first call.

    We can also make a whole class experimental by calling this function
    in its ``__init__`` method with the class's API name. If we want to
    mark ``__init__`` only, rather than the class itself, it is
    recommended that we explicitly feed its API name, e.g.
    ``utils.experimental('D.__init__')``.

    Currently, we do not have any sophisticated way to mark some usage of
    non-experimental function as experimental, but we can support such
    usage by explicitly branching on the experimental argument::

        def g(x, experimental_arg=None):
            if experimental_arg is not None:
                utils.experimental('experimental_arg of chainer.foo.g')

    Args:
        api_name(str): The name of an API marked as experimental.
    """
    if chainer.disable_experimental_feature_warning:
        return
    warnings.warn(
        '{} is experimental. '
        'The interface can change in the future.'.format(api_name),
        FutureWarning)
| 2,976
| 27.084906
| 79
|
py
|
chainer
|
chainer-master/chainer/utils/__init__.py
|
import collections
import contextlib
import shutil
import sys
import tempfile
import numpy
import six
import chainer
# import classes and functions
from chainer.utils.array import size_of_shape # NOQA
from chainer.utils.array import sum_to # NOQA
from chainer.utils.conv import get_conv_outsize # NOQA
from chainer.utils.conv import get_deconv_outsize # NOQA
from chainer.utils.error import _format_array_props # NOQA
from chainer.utils.experimental import experimental # NOQA
from chainer.utils.meta import enable_final # NOQA
from chainer.utils.meta import final # NOQA
from chainer.utils.nondeterministic import nondeterministic # NOQA
from chainer.utils.sparse import CooMatrix # NOQA
from chainer.utils.sparse import get_order # NOQA
from chainer.utils.sparse import to_coo # NOQA
# The following alias has been moved to chainer/__init__.py in order to break
# circular imports in Python 2.
# from chainer.utils.walker_alias import WalkerAlias
# TODO(kmaehashi) remove this when `six.moves.collections_abc` is implemented.
# See: https://github.com/chainer/chainer/issues/5097
try:
    # Python >= 3.3 exposes the abstract base classes under collections.abc.
    collections_abc = collections.abc  # type: ignore
except AttributeError:  # python <3.3
    collections_abc = collections  # type: ignore
def force_array(x, dtype=None):
    """Wrap a scalar into a 0-dimensional :class:`numpy.ndarray`.

    numpy operators may return a plain scalar for 0-dimension inputs, but
    ``Function`` objects need a real ``numpy.ndarray``; scalars are
    re-wrapped here.  Arrays pass through unchanged (cast without copying
    when ``dtype`` differs).
    """
    if not numpy.isscalar(x):
        return x if dtype is None else x.astype(dtype, copy=False)
    return numpy.array(x) if dtype is None else numpy.array(x, dtype)
def force_type(dtype, value):
    """Cast ``value`` (scalar or ndarray) to ``dtype``; no-op on a match."""
    if numpy.isscalar(value):
        return dtype.type(value)
    if value.dtype == dtype:
        return value
    return value.astype(dtype, copy=False)
@contextlib.contextmanager
def tempdir(**kwargs):
    """Context manager bounding the lifetime of a temporary directory.

    Keyword arguments are forwarded to :func:`tempfile.mkdtemp`, except
    ``ignore_errors`` which is consumed here and forwarded to
    :func:`shutil.rmtree` during cleanup.
    """
    ignore_errors = kwargs.pop('ignore_errors', False)
    path = tempfile.mkdtemp(**kwargs)
    try:
        yield path
    finally:
        shutil.rmtree(path, ignore_errors=ignore_errors)
def _repr_with_named_data(inst, **kwargs):
    """Build a ``<module.Class k=v ...>`` repr string from named data."""
    if six.PY2:
        cls_name = inst.__class__.__name__
    else:
        # __qualname__ includes enclosing class names on Python 3.
        cls_name = inst.__class__.__qualname__
    data = ' '.join(
        '{}={}'.format(name, value)
        for name, value in six.iteritems(kwargs))
    return '<{}.{} {}>'.format(inst.__module__, cls_name, data)
def _check_arrays_forward_compatible(arrays, label=None):
    """Raise TypeError when incompatible array types are mixed in inputs."""
    if chainer.is_arrays_compatible(arrays):
        return
    where = '' if label is None else ' ({})'.format(label)
    types = ', '.join(str(type(a)) for a in arrays)
    raise TypeError(
        'incompatible array types are mixed in the forward input{}.\n'
        'Actual: {}'.format(where, types))
def _raise_from(exc_type, message, orig_exc):
    """Raise ``exc_type`` wrapping ``orig_exc``, preserving its traceback."""
    full_message = (
        '{}\n\n'
        '(caused by)\n'
        '{}: {}\n'.format(message, type(orig_exc).__name__, orig_exc))
    new_exc = exc_type(full_message)
    if sys.version_info >= (3,):
        # Python 3: attach the original traceback and suppress the implicit
        # "during handling of the above exception" context.
        six.raise_from(new_exc.with_traceback(orig_exc.__traceback__), None)
    else:
        six.reraise(exc_type, new_exc, sys.exc_info()[2])
| 3,506
| 31.775701
| 79
|
py
|
chainer
|
chainer-master/chainer/utils/conv.py
|
import numpy
import six
from chainer.backends import cuda
def get_conv_outsize(size, k, s, p, cover_all=False, d=1):
    """Calculates output size of convolution.

    This function takes the size of input feature map, kernel, stride, and
    pooling of one particular dimension, then calculates the output feature
    map size of that dimension.

    .. seealso:: :func:`~chainer.utils.get_deconv_outsize`

    Args:
        size (int): The size of input feature map. It usually is the length of
            a side of feature map.
        k (int): The size of convolution kernel.
        s (int): The size of stride.
        p (int): The size of padding.
        cover_all (bool): Use ``cover_all`` option or not.
        d (int): The size of dilation.

    Returns:
        int: The expected output size of the convolution operation.
    """
    dk = k + (k - 1) * (d - 1)  # extent of the dilated kernel
    numer = size + 2 * p - dk
    if cover_all:
        # cover_all rounds up instead of down.
        numer += s - 1
    return numer // s + 1
def get_deconv_outsize(size, k, s, p, cover_all=False, d=1):
    """Calculates output size of deconvolution.

    This function takes the size of input feature map, kernel, stride, and
    pooling of one particular dimension, then calculates the output feature
    map size of that dimension.

    .. seealso:: :func:`~chainer.utils.get_conv_outsize`

    Args:
        size (int): The size of input feature map. It usually is the length of
            a side of feature map.
        k (int): The size of deconvolution kernel.
        s (int): The size of stride.
        p (int): The size of padding.
        cover_all (bool): Use ``cover_all`` option or not.
        d (int): The size of dilation.

    Returns:
        int: The expected output size of the deconvolution operation.
    """
    dk = (k - 1) * d + 1  # extent of the dilated kernel
    base = s * (size - 1) + dk - 2 * p
    # cover_all trims the extra stride slack the forward pass added.
    return base - s + 1 if cover_all else base
def im2col_cpu(
        img, kh, kw, sy, sx, ph, pw, pval=0, cover_all=False, dy=1, dx=1,
        out_h=None, out_w=None):
    """Rearranges (n, c, h, w) image patches into a (n, c, kh, kw, out_h,
    out_w) column array on the CPU.

    ``pval`` is the padding value; ``dy``/``dx`` are dilations.
    """
    n, c, h, w = img.shape
    if out_h is None:
        out_h = get_conv_outsize(h, kh, sy, ph, cover_all, dy)
    assert out_h > 0, 'Height in the output should be positive.'
    if out_w is None:
        out_w = get_conv_outsize(w, kw, sx, pw, cover_all, dx)
    assert out_w > 0, 'Width in the output should be positive.'
    # Pad with sy-1 / sx-1 extra rows/cols at the bottom/right so the
    # strided slices below never run out of bounds.
    img = numpy.pad(img,
                    ((0, 0), (0, 0), (ph, ph + sy - 1), (pw, pw + sx - 1)),
                    mode='constant', constant_values=(pval,))
    col = numpy.ndarray((n, c, kh, kw, out_h, out_w), dtype=img.dtype)
    for j in six.moves.range(kh):
        jdy = j * dy
        j_lim = jdy + sy * out_h
        for i in six.moves.range(kw):
            idx = i * dx
            i_lim = idx + sx * out_w
            # One strided slice gathers this kernel offset for all outputs.
            col[:, :, j, i, :, :] = img[:, :, jdy:j_lim:sy, idx:i_lim:sx]
    return col
def im2col_gpu(img, kh, kw, sy, sx, ph, pw, cover_all=False, dy=1, dx=1,
               out_h=None, out_w=None):
    """GPU im2col: one kernel thread per element of the output ``col``.

    Produces the same (n, c, kh, kw, out_h, out_w) layout as im2col_cpu;
    out-of-bounds taps are filled with zero instead of padding the input.
    """
    n, c, h, w = img.shape
    if out_h is None:
        out_h = get_conv_outsize(h, kh, sy, ph, cover_all, dy)
    assert out_h > 0, 'Height in the output should be positive.'
    if out_w is None:
        out_w = get_conv_outsize(w, kw, sx, pw, cover_all, dx)
    assert out_w > 0, 'Width in the output should be positive.'
    col = cuda.cupy.empty((n, c, kh, kw, out_h, out_w), dtype=img.dtype)
    # The kernel decodes the linear index i into (c0, ky, kx, out_y, out_x)
    # and gathers the corresponding input pixel, or zero when out of range.
    cuda.elementwise(
        'raw T img, int32 h, int32 w, int32 out_h, int32 out_w,'
        'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw,'
        'int32 dy, int32 dx',
        'T col',
        '''
           int c0 = i / (kh * kw * out_h * out_w);
           int ky = i / (kw * out_h * out_w) % kh;
           int kx = i / (out_h * out_w) % kw;
           int out_y = i / out_w % out_h;
           int out_x = i % out_w;
           int in_y = ky * dy + out_y * sy - ph;
           int in_x = kx * dx + out_x * sx - pw;
           if (in_y >= 0 && in_y < h && in_x >= 0 && in_x < w) {
             col = img[in_x + w * (in_y + h * c0)];
           } else {
             col = 0;
           }
        ''',
        'im2col')(img.reduced_view(),
                  h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, dy, dx, col)
    return col
def im2col(img, kh, kw, sy, sx, ph, pw, cover_all=False, dy=1, dx=1,
           out_h=None, out_w=None):
    """Dispatch to the GPU or CPU im2col implementation by array type."""
    if isinstance(img, cuda.ndarray):
        fn = im2col_gpu
    else:
        fn = im2col_cpu
    return fn(img, kh, kw, sy, sx, ph, pw, cover_all=cover_all, dy=dy, dx=dx,
              out_h=out_h, out_w=out_w)
def col2im_cpu(col, sy, sx, ph, pw, h, w, dy=1, dx=1):
    """Inverse of im2col_cpu: accumulates a (n, c, kh, kw, out_h, out_w)
    column array back into an (n, c, h, w) image on the CPU.
    """
    n, c, kh, kw, out_h, out_w = col.shape
    # Allocate with the same padding layout im2col_cpu used, crop at the end.
    img = numpy.zeros((n, c, h + 2 * ph + sy - 1, w + 2 * pw + sx - 1),
                      dtype=col.dtype)
    for j in six.moves.range(kh):
        jdy = j * dy
        j_lim = jdy + sy * out_h
        for i in six.moves.range(kw):
            idx = i * dx
            i_lim = idx + sx * out_w
            # Overlapping contributions are summed (+=), not overwritten.
            img[:, :, jdy:j_lim:sy, idx:i_lim:sx] += col[:, :, j, i]
    return img[:, :, ph:h + ph, pw:w + pw]
def col2im_gpu(col, sy, sx, ph, pw, h, w, dy=1, dx=1):
    """GPU col2im: one kernel thread per pixel of the output image.

    Each thread sums every column entry that maps onto its pixel, so no
    atomic additions are needed.
    """
    n, c, kh, kw, out_h, out_w = col.shape
    img = cuda.cupy.empty((n, c, h, w), dtype=col.dtype)
    # NOTE: 'dx' is declared before 'dy' in the parameter list; the call
    # below passes them in the same (dx, dy) order, so this is consistent.
    cuda.elementwise(
        'raw T col, int32 h, int32 w, int32 out_h, int32 out_w,'
        'int32 kh, int32 kw, int32 sy, int32 sx, int32 ph, int32 pw,'
        'int32 dx, int32 dy',
        'T img',
        '''
           int c0 = i / (h * w);
           int y = i / w % h;
           int x = i % w;
           T val = 0;
           for (int ky = 0; ky < kh; ++ky) {
             int out_y = (y + ph - ky * dy);
             if (0 > out_y || out_y >= out_h * sy) continue;
             if (out_y % sy != 0) continue;
             out_y /= sy;
             for (int kx = 0; kx < kw; ++kx) {
               int out_x = (x + pw - kx * dx);
               if (0 > out_x || out_x >= out_w * sx) continue;
               if (out_x % sx != 0) continue;
               out_x /= sx;
               int k = out_y + out_h * (kx + kw * (ky + kh * c0));
               val = val + col[out_x + out_w * k];
             }
           }
           img = val;
        ''',
        'col2im')(col.reduced_view(),
                  h, w, out_h, out_w, kh, kw, sy, sx, ph, pw, dx, dy, img)
    return img
def col2im(col, sy, sx, ph, pw, h, w, dy=1, dx=1):
    """Dispatch to the GPU or CPU col2im implementation by array type."""
    if isinstance(col, cuda.ndarray):
        return col2im_gpu(col, sy, sx, ph, pw, h, w, dy, dx)
    return col2im_cpu(col, sy, sx, ph, pw, h, w, dy, dx)
| 6,499
| 34.135135
| 78
|
py
|
chainer
|
chainer-master/chainer/utils/meta.py
|
import warnings
import six
def final(*args, **kwargs):
    """Decorator to declare a method final.

    By default, :class:`TypeError` is raised when the decorated method is
    being overridden. The class in which the decorated method is defined
    must inherit from a base class returned by
    :meth:`~chainer.utils.enable_final`.

    Args:
        action(type): Specifies what happens when the decorated method is
            being overridden. It can be either an :class:`Exception` class
            or a :class:`Warning` class. :class:`TypeError` by default.
    """
    def mark(func, action=TypeError):
        assert callable(func)
        # The metaclass created by enable_final looks for this attribute
        # on base-class methods when a subclass overrides them.
        func.__override_action = (action,)
        return func

    if len(args) == 1 and not kwargs:
        # Used directly as ``@final``.
        return mark(args[0])
    # Used with arguments, e.g. ``@final(action=DeprecationWarning)``.
    assert not args
    return lambda func: mark(func, **kwargs)
class _EnableFinal(type):
def __new__(cls, name, bases, d):
for k in d:
for base in bases:
f = getattr(base, k, None) # base method
if hasattr(f, '__override_action'):
action, = getattr(f, '__override_action')
if issubclass(action, Warning):
# Raise a warning.
warnings.warn(
'Overriding method {!r}.'.format(k),
action)
elif issubclass(action, Exception):
# Raise error.
raise action('method {!r} is final.'.format(k))
else:
assert False, 'Invalid action: {}'.format(action)
return super(_EnableFinal, cls).__new__(cls, name, bases, d)
def enable_final(base=(), meta_base=()):
    """Returns a base class in which ``final`` decorator is made available.

    Inheriting from the returned value of this function enables
    :meth:``~chainer.utils.final`` decorator to be applied to the methods
    of the class.

    Args:
        base (type or tuple of types): Base classes of the returned class.
        meta_base (type or tuples of type): Base metaclasses. If any
            descendant classes can directly or indirectly have any
            metaclasses, these metaclasses should be specified here to
            avoid the metaclass conflict.
    """
    if not isinstance(base, (list, tuple)):
        base = (base,)
    if not isinstance(meta_base, (list, tuple)):
        meta_base = (meta_base,)
    # Combine the enforcing metaclass with any user-supplied metaclasses.
    combined_meta = type('base_metaclass', (_EnableFinal,) + meta_base, {})
    return six.with_metaclass(combined_meta, *base)
| 2,668
| 32.3625
| 79
|
py
|
chainer
|
chainer-master/chainer/serializers/npz.py
|
import numpy
import six
from chainer.backends import _chainerx
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import serializer
import chainerx
# For historical reasons, NPZ serializers in Chainer allow pickle despite their
# potential security issues. This behavior may be changed in future.
# `numpy.save` and `numpy.load` have `allow_pickle` option. `numpy.savez` and
# `numpy.savez_compressed` do not have an option to disable pickle.
# Before NumPy 1.10, pickle was always allowed. Since NumPy 1.16.3, pickle is
# not allowed by default.
# Passed as **kwargs to numpy.load below; keeps the pre-NumPy-1.10
# pickle-allowing behavior on newer NumPy versions.
_allow_pickle_kwargs = {}
if numpy.lib.NumpyVersion(numpy.__version__) >= '1.10.0':
    _allow_pickle_kwargs['allow_pickle'] = True
class DictionarySerializer(serializer.Serializer):
    """Serializer for dictionary.

    This is the standard serializer in Chainer. The hierarchy of objects
    are simply mapped to a flat dictionary with keys representing the paths
    to objects in the hierarchy.

    .. note::
        Despite of its name, this serializer DOES NOT serialize the
        object into external files. It just build a flat dictionary of
        arrays that can be fed into :func:`numpy.savez` and
        :func:`numpy.savez_compressed`. If you want to use this serializer
        directly, you have to manually send a resulting dictionary to one
        of these functions.

    Args:
        target (dict): The dictionary that this serializer saves the
            objects to. If target is None, then a new dictionary is created.
        path (str): The base path in the hierarchy that this serializer
            indicates.

    Attributes:
        ~DictionarySerializer.target (dict): The target dictionary.
            Once the serialization completes, this dictionary can be fed
            into :func:`numpy.savez` or :func:`numpy.savez_compressed` to
            serialize it in the NPZ format.
    """

    def __init__(self, target=None, path=''):
        if target is None:
            target = {}
        self.target = target
        self.path = path

    def __getitem__(self, key):
        # Descend into a child scope: same target dict, extended path.
        sub_path = self.path + key.strip('/') + '/'
        return DictionarySerializer(self.target, sub_path)

    def __call__(self, key, value):
        full_key = self.path + key.lstrip('/')
        if value is None:
            # None is stored as a 0-dim object array.
            self.target[full_key] = numpy.asarray(None)
        else:
            self.target[full_key] = _cpu._to_cpu(value)
        return value
def serialize(obj):
    """Serializes an object to a dictionary object.

    Args:
        obj: Object to be serialized. It must support serialization
            protocol.

    Returns:
        dict: Serialized object.
    """
    dict_serializer = DictionarySerializer()
    dict_serializer.save(obj)
    return dict_serializer.target
def save_npz(file, obj, compression=True):
    """Saves an object to the file in NPZ format.

    This is a short-cut function to save only one object into an NPZ file.

    Args:
        file (str or file-like): Target file to write to.
        obj: Object to be serialized. It must support serialization
            protocol. If it is a dictionary object, the serialization will
            be skipped.
        compression (bool): If ``True``, compression in the resulting zip
            file is enabled.

    .. seealso::
        :func:`chainer.serializers.load_npz`
    """
    if isinstance(file, six.string_types):
        # Given a path: open it ourselves and recurse with the file object.
        with open(file, 'wb') as f:
            save_npz(f, obj, compression)
        return
    if isinstance(obj, dict):
        target = obj
    else:
        dict_serializer = DictionarySerializer()
        dict_serializer.save(obj)
        target = dict_serializer.target
    writer = numpy.savez_compressed if compression else numpy.savez
    writer(file, **target)
class NpzDeserializer(serializer.Deserializer):
    """Deserializer for NPZ format.
    This is the standard deserializer in Chainer. This deserializer can be used
    to read an object serialized by :func:`save_npz`.
    Args:
        npz: `npz` file object.
        path: The base path that the deserialization starts from.
        strict (bool): If ``True``, the deserializer raises an error when an
            expected value is not found in the given NPZ file. Otherwise,
            it ignores the value and skip deserialization.
        ignore_names (string, callable or list of them):
            If callable, it is a function that takes a name of a parameter
            and a persistent and returns ``True`` when it needs to be skipped.
            If string, this is a name of a parameter or persistent that are
            going to be skipped.
            This can also be a list of callables and strings that behave as
            described above.
    """
    def __init__(self, npz, path='', strict=True, ignore_names=None):
        self.npz = npz
        self.path = path
        self.strict = strict
        if ignore_names is None:
            ignore_names = []
        self.ignore_names = ignore_names
    def __getitem__(self, key):
        # Descend into a sub-tree: same npz file, extended key path.
        key = key.strip('/')
        return NpzDeserializer(
            self.npz, self.path + key + '/', strict=self.strict,
            ignore_names=self.ignore_names)
    def __call__(self, key, value):
        key = self.path + key.lstrip('/')
        if not self.strict and key not in self.npz:
            # Non-strict mode: silently skip values missing from the file.
            return value
        # Normalize ignore_names into an iterable of strings/callables.
        if isinstance(self.ignore_names, (tuple, list)):
            ignore_names = self.ignore_names
        else:
            ignore_names = (self.ignore_names,)
        for ignore_name in ignore_names:
            if isinstance(ignore_name, str):
                if key == ignore_name:
                    return value
            elif callable(ignore_name):
                if ignore_name(key):
                    return value
            else:
                raise ValueError(
                    'ignore_names needs to be a callable, string or '
                    'list of them.')
        dataset = self.npz[key]
        if dataset[()] is None:
            # ``None`` was serialized as a 0-dim object array.
            return None
        if value is None:
            return dataset
        # Copy into the existing container, dispatching on its backend type.
        if isinstance(value, chainerx.ndarray):
            value[...] = _chainerx._array_to_chainerx(
                numpy.asarray(dataset), value.device)
        elif isinstance(value, numpy.ndarray):
            numpy.copyto(value, dataset)
        elif isinstance(value, cuda.ndarray):
            value.set(numpy.asarray(dataset, dtype=value.dtype))
        elif isinstance(value, intel64.mdarray):
            intel64.ideep.basic_copyto(value, numpy.asarray(dataset))
        else:
            # Plain Python scalar: cast through its type with a safety check.
            value_type = type(value)
            dataset_arr = numpy.asarray(dataset)
            if (issubclass(dataset_arr.dtype.type, numpy.number)
                    and not (issubclass(dataset_arr.dtype.type, numpy.integer)
                             and value_type in six.integer_types)
                    # Casting a `numpy.integer` scalar by `int()` case above is
                    # safe as `int()` gives unlimited precision integer (it's
                    # also true for `long()`/`int()` on Python 2). For such a
                    # case, the check below may be too strict. For example,
                    # `numpy.can_cast(numpy.int64, int)`, which checks cast-
                    # ability to `dtype(int)`, gives `False` on a platform
                    # whose `dtype(int)` is `numpy.int32` like Windows/x64.
                    and not numpy.can_cast(
                        dataset_arr.dtype, value_type, casting='safe')):
                raise TypeError(
                    'Cannot safely deserialize from numpy array with dtype={} '
                    'into a variable of type {}.'.format(
                        dataset.dtype, type(value)))
            value = value_type(dataset_arr)
        return value
def load_npz(file, obj, path='', strict=True, ignore_names=None):
    """Loads an object from the file in NPZ format.

    This is a short-cut function to load from an `.npz` file that contains
    only one object.

    Args:
        file (str or file-like): File to be loaded.
        obj: Object to be deserialized. It must support serialization
            protocol.
        path (str): The path in the hierarchy of the serialized data under
            which the data is to be loaded. The default behavior (blank)
            will load all data under the root path.
        strict (bool): If ``True``, the deserializer raises an error when
            an expected value is not found in the given NPZ file.
            Otherwise, it ignores the value and skip deserialization.
        ignore_names (string, callable or list of them):
            If callable, it is a function that takes a name of a parameter
            and a persistent and returns ``True`` when it needs to be
            skipped. If string, this is a name of a parameter or persistent
            that are going to be skipped. This can also be a list of
            callables and strings that behave as described above.

    .. seealso::
        :func:`chainer.serializers.save_npz`
    """
    with numpy.load(file, **_allow_pickle_kwargs) as npz_file:
        deserializer = NpzDeserializer(
            npz_file, path=path, strict=strict, ignore_names=ignore_names)
        deserializer.load(obj)
| 9,163
| 36.557377
| 79
|
py
|
chainer
|
chainer-master/chainer/serializers/hdf5.py
|
import sys
import numpy
import six
from chainer.backends import _chainerx
from chainer.backends import _cpu
from chainer.backends import cuda
from chainer.backends import intel64
from chainer import serializer
import chainerx
try:
import h5py
_available = True
except ImportError:
_available = False
def _check_available():
    # Fail fast with an installation hint when h5py could not be imported.
    if not _available:
        msg = '''h5py is not installed on your environment.
Please install h5py to activate hdf5 serializers.
$ pip install h5py'''
        raise RuntimeError(msg)
class HDF5Serializer(serializer.Serializer):
    """Serializer for HDF5 format.
    This is the standard serializer in Chainer. The chain hierarchy is simply
    mapped to HDF5 hierarchical groups.
    Args:
        group (h5py.Group): The group that this serializer represents.
        compression (int): Gzip compression level.
    """
    def __init__(self, group, compression=4):
        _check_available()
        self.group = group
        self.compression = compression
    def __getitem__(self, key):
        # Descend into (creating if necessary) the child group for ``key``.
        name = self.group.name + '/' + key
        return HDF5Serializer(self.group.require_group(name), self.compression)
    def __call__(self, key, value):
        if value is None:
            # use Empty to represent None
            if h5py.version.version_tuple < (2, 7, 0):
                raise RuntimeError(
                    'h5py>=2.7.0 is required to serialize None.')
            arr = h5py.Empty('f')
            compression = None
        else:
            arr = _cpu._to_cpu(value)
            # Datasets with at most one element are not worth compressing.
            compression = None if arr.size <= 1 else self.compression
        self.group.create_dataset(key, data=arr, compression=compression)
        return value
def save_hdf5(filename, obj, compression=4):
    """Saves an object to the file in HDF5 format.

    This is a short-cut function to save only one object into an HDF5 file. If
    you want to save multiple objects to one HDF5 file, use
    :class:`HDF5Serializer` directly by passing appropriate :class:`h5py.Group`
    objects.

    Args:
        filename (str): Target file name.
        obj: Object to be serialized. It must support serialization protocol.
            If it is a dictionary object, the serialization will be skipped.
        compression (int): Gzip compression level.

    .. note::
        Currently :func:`save_hdf5` only supports writing to an actual file on
        file system due to a limitation of HD5F library.
        See `h5py/h5py#687 <https://github.com/h5py/h5py/issues/687>`_ for
        details.

    .. seealso::
        :func:`chainer.serializers.load_hdf5`
    """
    _check_available()
    with h5py.File(filename, 'w') as f:
        if isinstance(obj, dict):
            for key, value in obj.items():
                key = '/' + key.lstrip('/')
                arr = numpy.asarray(value)
                # Decide compression per item. The original reassigned
                # ``compression`` itself, so one scalar entry permanently
                # disabled compression for every subsequent (large) entry.
                item_compression = None if arr.size <= 1 else compression
                try:
                    f.create_dataset(
                        key, data=arr, compression=item_compression)
                except TypeError:
                    # Fixed: the message previously printed a literal "{}"
                    # (``.format(key)`` was never applied), and
                    # ``sys.exec_info`` was a typo for ``sys.exc_info``
                    # that raised AttributeError instead of re-raising.
                    sys.stderr.write(
                        'A key named "{}" is unable to save in HDF5 '
                        'format.\n'.format(key))
                    # In Chainer, LogReport extension and PlotReport
                    # extension are unable to save in HDF5 format. These
                    # extensions have a data type `numpy.dtype('O')` which
                    # is not supported by h5py.
                    six.reraise(*sys.exc_info())
        else:
            s = HDF5Serializer(f, compression=compression)
            s.save(obj)
class HDF5Deserializer(serializer.Deserializer):
    """Deserializer for HDF5 format.

    This is the standard deserializer in Chainer. This deserializer can be used
    to read an object serialized by :class:`HDF5Serializer`.

    Args:
        group (h5py.Group): The group that the deserialization starts from.
        strict (bool): If ``True``, the deserializer raises an error when an
            expected value is not found in the given HDF5 file. Otherwise,
            it ignores the value and skip deserialization.
    """
    def __init__(self, group, strict=True):
        _check_available()
        self.group = group
        self.strict = strict
    def __getitem__(self, key):
        # Descend into the sub-group for ``key``. A missing group becomes a
        # child deserializer with ``group=None`` so non-strict loads can skip.
        name = self.group.name + '/' + key
        try:
            group = self.group.require_group(name)
        except ValueError:
            # require_group raises ValueError if there does not exist
            # the given group and the file is read mode.
            group = None
        return HDF5Deserializer(group, strict=self.strict)
    def __call__(self, key, value):
        # Load dataset ``key`` into ``value`` in place where the array type
        # supports it; otherwise rebuild and return a new object.
        if self.group is None:
            if not self.strict:
                return value
            else:
                raise ValueError('Inexistent group is specified')
        if not self.strict and key not in self.group:
            return value
        dataset = self.group[key]
        if dataset.shape is None: # Empty
            # h5py.Empty datasets are how HDF5Serializer encodes ``None``.
            return None
        if value is None:
            return numpy.asarray(dataset)
        # Dispatch on destination array type; chainerx is checked first
        # because several backends may be importable simultaneously.
        if isinstance(value, chainerx.ndarray):
            value[...] = _chainerx._array_to_chainerx(
                numpy.asarray(dataset), value.device)
        elif isinstance(value, numpy.ndarray):
            dataset.read_direct(value)
        elif isinstance(value, cuda.ndarray):
            value.set(numpy.asarray(dataset, dtype=value.dtype))
        elif isinstance(value, intel64.mdarray):
            intel64.ideep.basic_copyto(value, numpy.asarray(dataset))
        else:
            # Scalars / other types: rebuild via the value's own constructor.
            value = type(value)(numpy.asarray(dataset))
        return value
def load_hdf5(filename, obj):
    """Loads an object from the file in HDF5 format.

    Short-cut for reading a single object out of an HDF5 file. To load
    several objects from one file, use :class:`HDF5Deserializer` directly
    with the appropriate :class:`h5py.Group` objects.

    Args:
        filename (str): Name of the file to be loaded.
        obj: Object to be deserialized. It must support serialization protocol.

    .. note::
        Only an actual file on the file system can be read, due to a
        limitation of the HDF5 library
        (see `h5py/h5py#687 <https://github.com/h5py/h5py/issues/687>`_).

    .. seealso::
        :func:`chainer.serializers.save_hdf5`
    """
    _check_available()
    with h5py.File(filename, 'r') as f:
        HDF5Deserializer(f).load(obj)
| 6,563
| 32.151515
| 79
|
py
|
chainer
|
chainer-master/chainer/serializers/__init__.py
|
from chainer.serializers.hdf5 import HDF5Deserializer # NOQA
from chainer.serializers.hdf5 import HDF5Serializer # NOQA
from chainer.serializers.hdf5 import load_hdf5 # NOQA
from chainer.serializers.hdf5 import save_hdf5 # NOQA
from chainer.serializers.npz import DictionarySerializer # NOQA
from chainer.serializers.npz import load_npz # NOQA
from chainer.serializers.npz import NpzDeserializer # NOQA
from chainer.serializers.npz import save_npz # NOQA
| 463
| 50.555556
| 64
|
py
|
vosk-api
|
vosk-api-master/python/setup.py
|
import os
import setuptools
import shutil
import glob
import platform
# Figure out environment for cross-compile.
# The VOSK_* environment variables let CI override the detected host values
# so wheels for other OS/arch combinations can be built on one machine.
vosk_source = os.getenv("VOSK_SOURCE", os.path.abspath(os.path.join(os.path.dirname(__file__),
"..")))
system = os.environ.get('VOSK_SYSTEM', platform.system())
architecture = os.environ.get('VOSK_ARCHITECTURE', platform.architecture()[0])
machine = os.environ.get('VOSK_MACHINE', platform.machine())
# Copy precompiled libraries into the package dir so they ship in the wheel.
for lib in glob.glob(os.path.join(vosk_source, "src/lib*.*")):
    print ("Adding library", lib)
    shutil.copy(lib, "vosk")
# Create OS-dependent, but Python-independent wheels.
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:
    # ``wheel`` not installed: keep default sdist behaviour.
    cmdclass = {}
else:
    class bdist_wheel_tag_name(bdist_wheel):
        # Tag wheels "py3-none-<os>": the native library is loaded through
        # cffi at runtime, so the wheel is independent of the CPython ABI.
        def get_tag(self):
            abi = 'none'
            if system == 'Darwin':
                oses = 'macosx_10_6_universal2'
            elif system == 'Windows' and architecture == '32bit':
                oses = 'win32'
            elif system == 'Windows' and architecture == '64bit':
                oses = 'win_amd64'
            elif system == 'Linux' and machine == 'aarch64' and architecture == '64bit':
                oses = 'manylinux2014_aarch64'
            elif system == 'Linux':
                oses = 'linux_' + machine
            else:
                raise TypeError("Unknown build environment")
            return 'py3', abi, oses
    cmdclass = {'bdist_wheel': bdist_wheel_tag_name}
with open("README.md", "rb") as fh:
    long_description = fh.read().decode("utf-8")
setuptools.setup(
    name="vosk",
    version="0.3.45",
    author="Alpha Cephei Inc",
    author_email="contact@alphacephei.com",
    description="Offline open source speech recognition API based on Kaldi and Vosk",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/alphacep/vosk-api",
    packages=setuptools.find_packages(),
    package_data = {'vosk': ['*.so', '*.dll', '*.dyld']},
    entry_points = {
        'console_scripts': ['vosk-transcriber=vosk.transcriber.cli:main'],
    },
    include_package_data=True,
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: Apache Software License',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS :: MacOS X',
        'Topic :: Software Development :: Libraries :: Python Modules'
    ],
    cmdclass=cmdclass,
    python_requires='>=3',
    zip_safe=False, # Since we load so file from the filesystem, we can not run from zip file
    setup_requires=['cffi>=1.0', 'requests', 'tqdm', 'srt', 'websockets'],
    install_requires=['cffi>=1.0', 'requests', 'tqdm', 'srt', 'websockets'],
    cffi_modules=['vosk_builder.py:ffibuilder'],
)
| 2,861
| 36.657895
| 94
|
py
|
vosk-api
|
vosk-api-master/python/vosk_builder.py
|
#!/usr/bin/env python3
# cffi binding generator: preprocess vosk_api.h with the C preprocessor and
# feed the declarations to cffi so that vosk.vosk_cffi exposes the C API.
import os
from cffi import FFI
vosk_root=os.environ.get("VOSK_SOURCE", "..")
# NOTE(review): relies on a ``cpp`` binary being on PATH — confirm this
# holds on all build hosts (notably Windows).
cpp_command = "cpp " + vosk_root + "/src/vosk_api.h"
ffibuilder = FFI()
# In-line (ABI) mode: no C source is compiled; the shared library is
# dlopen()ed at runtime by vosk/__init__.py instead.
ffibuilder.set_source("vosk.vosk_cffi", None)
ffibuilder.cdef(os.popen(cpp_command).read())
if __name__ == '__main__':
    ffibuilder.compile(verbose=True)
| 332
| 21.2
| 52
|
py
|
vosk-api
|
vosk-api-master/python/test/transcribe_scp.py
|
#!/usr/bin/env python3
import wave
import json
import sys
from multiprocessing.dummy import Pool
from vosk import Model, KaldiRecognizer
model = Model("en-us")
def recognize(line):
    """Transcribe one scp entry ("<uid> <wav-path>"), return "<uid> <text>"."""
    uid, fn = line.split()
    wf = wave.open(fn, "rb")
    rec = KaldiRecognizer(model, wf.getframerate())
    text = ""
    while True:
        data = wf.readframes(1000)
        if len(data) == 0:
            break
        if rec.AcceptWaveform(data):
            # Utterance boundary detected: collect the finalized segment.
            jres = json.loads(rec.Result())
            text = text + " " + jres["text"]
    # Flush whatever remains after end of file.
    jres = json.loads(rec.FinalResult())
    text = text + " " + jres["text"]
    return uid + text
def main():
    """Transcribe every file listed in the scp file given as argv[1].

    Uses an 8-thread pool (the recognizer's heavy lifting happens in the
    native library, which releases the GIL) and prints one
    "<uid> <text>" line per input.
    """
    pool = Pool(8)
    # Fixed: ``encoding="uft-8"`` was a typo for "utf-8" and raised
    # LookupError at runtime; the file handle is now also closed properly.
    with open(sys.argv[1], encoding="utf-8") as scp:
        lines = scp.readlines()
    texts = pool.map(recognize, lines)
    print ("\n".join(texts))
main()
| 774
| 21.142857
| 77
|
py
|
vosk-api
|
vosk-api-master/python/example/test_webvtt.py
|
#!/usr/bin/env python3
import sys
import subprocess
import json
import textwrap
from webvtt import WebVTT, Caption
from vosk import Model, KaldiRecognizer, SetLogLevel
SAMPLE_RATE = 16000
WORDS_PER_LINE = 7
SetLogLevel(-1)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, SAMPLE_RATE)
rec.SetWords(True)
def timestring(seconds):
    """Format a non-negative offset in seconds as ``H:MM:SS.mmm`` for WebVTT."""
    total_minutes, secs = divmod(seconds, 60)
    hours, mins = divmod(int(total_minutes), 60)
    return "%i:%02i:%06.3f" % (hours, mins, secs)
def transcribe():
    """Decode argv[1] via ffmpeg, run recognition, and emit WebVTT captions."""
    # Resample to 16 kHz mono signed 16-bit PCM on stdout.
    command = ["ffmpeg", "-nostdin", "-loglevel", "quiet", "-i", sys.argv[1],
    "-ar", str(SAMPLE_RATE), "-ac", "1", "-f", "s16le", "-"]
    with subprocess.Popen(command, stdout=subprocess.PIPE) as process:
        results = []
        while True:
            data = process.stdout.read(4000)
            if len(data) == 0:
                break
            if rec.AcceptWaveform(data):
                results.append(rec.Result())
        results.append(rec.FinalResult())
        vtt = WebVTT()
        for _, res in enumerate(results):
            # Word-level timing is present because rec.SetWords(True) was set.
            words = json.loads(res).get("result")
            if not words:
                continue
            start = timestring(words[0]["start"])
            end = timestring(words[-1]["end"])
            content = " ".join([w["word"] for w in words])
            caption = Caption(start, end, textwrap.fill(content))
            vtt.captions.append(caption)
        # save or return webvtt
        if len(sys.argv) > 2:
            vtt.save(sys.argv[2])
        else:
            print(vtt.content)
if __name__ == "__main__":
    # Require exactly one input file and an optional output path.
    if not 1 < len(sys.argv) < 4:
        print("Usage: {} audiofile [output file]".format(sys.argv[0]))
        sys.exit(1)
    transcribe()
| 1,770
| 25.044118
| 77
|
py
|
vosk-api
|
vosk-api-master/python/example/test_microphone.py
|
#!/usr/bin/env python3
# prerequisites: as described in https://alphacephei.com/vosk/install and also python module `sounddevice` (simply run command `pip install sounddevice`)
# Example usage using Dutch (nl) recognition model: `python test_microphone.py -m nl`
# For more help run: `python test_microphone.py -h`
import argparse
import queue
import sys
import sounddevice as sd
from vosk import Model, KaldiRecognizer
q = queue.Queue()
def int_or_str(text):
    """Argument-parsing helper: int(text) when numeric, else the raw string."""
    try:
        value = int(text)
    except ValueError:
        value = text
    return value
def callback(indata, frames, time, status):
    """This is called (from a separate thread) for each audio block."""
    if status:
        # Over/underflow etc. — report but keep streaming.
        print(status, file=sys.stderr)
    # Copy the buffer; sounddevice reuses ``indata`` after the callback returns.
    q.put(bytes(indata))
# Two-stage argument parsing: handle --list-devices first (and exit) so the
# full parser with required defaults is only built when actually recognizing.
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument(
    "-l", "--list-devices", action="store_true",
    help="show list of audio devices and exit")
args, remaining = parser.parse_known_args()
if args.list_devices:
    print(sd.query_devices())
    parser.exit(0)
parser = argparse.ArgumentParser(
    description=__doc__,
    formatter_class=argparse.RawDescriptionHelpFormatter,
    parents=[parser])
parser.add_argument(
    "-f", "--filename", type=str, metavar="FILENAME",
    help="audio file to store recording to")
parser.add_argument(
    "-d", "--device", type=int_or_str,
    help="input device (numeric ID or substring)")
parser.add_argument(
    "-r", "--samplerate", type=int, help="sampling rate")
parser.add_argument(
    "-m", "--model", type=str, help="language model; e.g. en-us, fr, nl; default is en-us")
args = parser.parse_args(remaining)
try:
    if args.samplerate is None:
        device_info = sd.query_devices(args.device, "input")
        # soundfile expects an int, sounddevice provides a float:
        args.samplerate = int(device_info["default_samplerate"])
    if args.model is None:
        model = Model(lang="en-us")
    else:
        model = Model(lang=args.model)
    if args.filename:
        dump_fn = open(args.filename, "wb")
    else:
        dump_fn = None
    # Raw stream: 16-bit mono blocks are pushed onto ``q`` by callback().
    with sd.RawInputStream(samplerate=args.samplerate, blocksize = 8000, device=args.device,
        dtype="int16", channels=1, callback=callback):
        print("#" * 80)
        print("Press Ctrl+C to stop the recording")
        print("#" * 80)
        rec = KaldiRecognizer(model, args.samplerate)
        while True:
            data = q.get()
            if rec.AcceptWaveform(data):
                print(rec.Result())
            else:
                print(rec.PartialResult())
            if dump_fn is not None:
                # Optionally tee the raw audio to a file for later replay.
                dump_fn.write(data)
except KeyboardInterrupt:
    print("\nDone")
    parser.exit(0)
except Exception as e:
    parser.exit(type(e).__name__ + ": " + str(e))
| 2,822
| 30.366667
| 153
|
py
|
vosk-api
|
vosk-api-master/python/example/test_gpu_batch.py
|
#!/usr/bin/env python3
# Batch GPU decoding benchmark: opens every file listed in argv[1], feeds
# chunks to one BatchRecognizer per file, and reports the realtime factor.
import sys
import json
from vosk import BatchModel, BatchRecognizer, GpuInit
from timeit import default_timer as timer
TOT_SAMPLES = 0
GpuInit()
model = BatchModel("model")
with open(sys.argv[1]) as fn:
    fnames = fn.readlines()
    fds = [open(x.strip(), "rb") for x in fnames]
    # Utterance id = file basename without its 4-char extension (".wav").
    uids = [fname.strip().split("/")[-1][:-4] for fname in fnames]
recs = [BatchRecognizer(model, 16000) for x in fnames]
results = [""] * len(fnames)
ended = set()
start_time = timer()
while True:
    # Feed in the data
    for i, fd in enumerate(fds):
        if i in ended:
            continue
        data = fd.read(8000)
        if len(data) == 0:
            # EOF for this stream: flush the decoder once, then stop feeding.
            recs[i].FinishStream()
            ended.add(i)
            continue
        recs[i].AcceptWaveform(data)
        TOT_SAMPLES += len(data)
    # Wait for results from CUDA
    model.Wait()
    # Retrieve and add results
    for i, fd in enumerate(fds):
        res = recs[i].Result()
        if len(res) != 0:
            results[i] = results[i] + " " + json.loads(res)["text"]
    if len(ended) == len(fds):
        break
end_time = timer()
for i, res in enumerate(results):
    print(uids[i], res.strip())
# Bytes/2 = 16-bit samples; divide by rate for seconds of audio processed.
print("Processed %.3f seconds of audio in %.3f seconds (%.3f xRT)"
% (TOT_SAMPLES / 16000.0 / 2,
end_time - start_time,
(TOT_SAMPLES / 16000.0 / 2 / (end_time - start_time))),
file=sys.stderr)
| 1,411
| 21.774194
| 67
|
py
|
vosk-api
|
vosk-api-master/python/example/test_simple.py
|
#!/usr/bin/env python3
# Minimal recognition example: read a mono 16-bit PCM WAV given as argv[1]
# and print interim/partial and final JSON results.
import wave
import sys
from vosk import Model, KaldiRecognizer, SetLogLevel
# You can set log level to -1 to disable debug messages
SetLogLevel(0)
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
model = Model(lang="en-us")
# You can also init model by name or with a folder path
# model = Model(model_name="vosk-model-en-us-0.21")
# model = Model("models/en")
rec = KaldiRecognizer(model, wf.getframerate())
rec.SetWords(True)
rec.SetPartialWords(True)
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        # A full utterance was finalized.
        print(rec.Result())
    else:
        print(rec.PartialResult())
print(rec.FinalResult())
| 834
| 22.194444
| 82
|
py
|
vosk-api
|
vosk-api-master/python/example/test_nlsml.py
|
#!/usr/bin/env python3
# Example producing NLSML output with up to 10 alternative hypotheses.
import wave
import sys
from vosk import Model, KaldiRecognizer, SetLogLevel
SetLogLevel(0)
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, wf.getframerate())
rec.SetMaxAlternatives(10)
rec.SetNLSML(True)
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        print(rec.Result())
print(rec.FinalResult())
| 595
| 20.285714
| 82
|
py
|
vosk-api
|
vosk-api-master/python/example/test_alternatives.py
|
#!/usr/bin/env python3
# Example printing decoded JSON (up to 10 alternatives, with word timings).
import wave
import sys
import json
from vosk import Model, KaldiRecognizer, SetLogLevel
SetLogLevel(0)
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, wf.getframerate())
rec.SetMaxAlternatives(10)
rec.SetWords(True)
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        print(json.loads(rec.Result()))
    else:
        print(json.loads(rec.PartialResult()))
print(json.loads(rec.FinalResult()))
| 688
| 21.225806
| 82
|
py
|
vosk-api
|
vosk-api-master/python/example/test_empty.py
|
#!/usr/bin/env python3
# Sanity check: FinalResult() on a recognizer that received no audio should
# yield an empty transcription rather than crash.
import json
from vosk import Model, KaldiRecognizer
model = Model(lang="en-us")
rec = KaldiRecognizer(model, 8000)
res = json.loads(rec.FinalResult())
print(res)
| 189
| 14.833333
| 39
|
py
|
vosk-api
|
vosk-api-master/python/example/test_gradio.py
|
#!/usr/bin/env python3
# Live-microphone demo in a Gradio web UI: streams audio chunks into vosk
# and shows accumulated finalized text plus the current partial hypothesis.
import json
import gradio as gr
from vosk import KaldiRecognizer, Model
model = Model(lang="en-us")
def transcribe(data, state):
    """Gradio streaming callback: (sample_rate, samples), state -> text, state."""
    sample_rate, audio_data = data
    # NOTE(review): assumes gradio delivers 32-bit samples and keeps their
    # high 16 bits by shifting — confirm against the gradio Audio component.
    audio_data = (audio_data >> 16).astype("int16").tobytes()
    if state is None:
        # First chunk of the session: create a recognizer at this rate.
        rec = KaldiRecognizer(model, sample_rate)
        result = []
    else:
        rec, result = state
    if rec.AcceptWaveform(audio_data):
        text_result = json.loads(rec.Result())["text"]
        if text_result != "":
            result.append(text_result)
        partial_result = ""
    else:
        partial_result = json.loads(rec.PartialResult())["partial"] + " "
    return "\n".join(result) + "\n" + partial_result, (rec, result)
gr.Interface(
    fn=transcribe,
    inputs=[
        gr.Audio(source="microphone", type="numpy", streaming=True),
        "state"
    ],
    outputs=[
        "textbox",
        "state"
    ],
    live=True).launch(share=True)
| 958
| 22.390244
| 73
|
py
|
vosk-api
|
vosk-api-master/python/example/test_reset.py
|
#!/usr/bin/env python3
# Demonstrates Reset(): once a specific partial hypothesis appears, the
# recognizer state is cleared and decoding starts over mid-stream.
import wave
import sys
import json
from vosk import Model, KaldiRecognizer, SetLogLevel
SetLogLevel(0)
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, wf.getframerate())
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        # A finalized result ends the demo early.
        print(rec.Result())
        sys.exit(1)
    else:
        jres = json.loads(rec.PartialResult())
        print(jres)
        if jres["partial"] == "one zero zero zero":
            print("We can reset recognizer here and start over")
            rec.Reset()
| 775
| 21.823529
| 82
|
py
|
vosk-api
|
vosk-api-master/python/example/test_words.py
|
#!/usr/bin/env python3
# Grammar-constrained recognition: restrict decoding to a fixed phrase list
# and swap the grammar mid-stream with SetGrammar().
import wave
import sys
from vosk import Model, KaldiRecognizer
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
model = Model(lang="en-us")
# You can also specify the possible word or phrase list as JSON list,
# the order doesn't have to be strict
rec = KaldiRecognizer(model,
wf.getframerate(),
'["oh one two three", "four five six", "seven eight nine zero", "[unk]"]')
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        print(rec.Result())
        # Replace the active grammar after each finalized utterance.
        rec.SetGrammar('["one zero one two three oh", "four five six", "seven eight nine zero", "[unk]"]')
    else:
        print(rec.PartialResult())
print(rec.FinalResult())
| 864
| 26.03125
| 106
|
py
|
vosk-api
|
vosk-api-master/python/example/test_srt.py
|
#!/usr/bin/env python3
# Generate SRT subtitles: ffmpeg decodes argv[1] to raw PCM on stdout and
# KaldiRecognizer.SrtResult consumes the stream directly.
import subprocess
import sys
from vosk import Model, KaldiRecognizer, SetLogLevel
SAMPLE_RATE = 16000
SetLogLevel(-1)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, SAMPLE_RATE)
# Word timings are required for subtitle timing.
rec.SetWords(True)
with subprocess.Popen(["ffmpeg", "-loglevel", "quiet", "-i",
sys.argv[1],
"-ar", str(SAMPLE_RATE) , "-ac", "1", "-f", "s16le", "-"],
stdout=subprocess.PIPE).stdout as stream:
    print(rec.SrtResult(stream))
| 529
| 23.090909
| 86
|
py
|
vosk-api
|
vosk-api-master/python/example/test_ffmpeg.py
|
#!/usr/bin/env python3
# Recognize any media file: ffmpeg converts argv[1] to 16 kHz mono 16-bit PCM
# on stdout; chunks are streamed into the recognizer.
import subprocess
import sys
from vosk import Model, KaldiRecognizer, SetLogLevel
SAMPLE_RATE = 16000
SetLogLevel(0)
model = Model(lang="en-us")
rec = KaldiRecognizer(model, SAMPLE_RATE)
with subprocess.Popen(["ffmpeg", "-loglevel", "quiet", "-i",
sys.argv[1],
"-ar", str(SAMPLE_RATE) , "-ac", "1", "-f", "s16le", "-"],
stdout=subprocess.PIPE) as process:
    while True:
        data = process.stdout.read(4000)
        if len(data) == 0:
            break
        if rec.AcceptWaveform(data):
            print(rec.Result())
        else:
            print(rec.PartialResult())
    print(rec.FinalResult())
| 724
| 23.166667
| 86
|
py
|
vosk-api
|
vosk-api-master/python/example/test_text.py
|
#!/usr/bin/env python3
# Plain-text output example: reads a WAV file directly as bytes (skipping
# the 44-byte RIFF header) and prints only the "text" field of each result.
import sys
import json
from vosk import Model, KaldiRecognizer
model = Model(lang="en-us")
# Large vocabulary free form recognition
rec = KaldiRecognizer(model, 16000)
# You can also specify the possible word list
#rec = KaldiRecognizer(model, 16000, "zero oh one two three four five six seven eight nine")
with open(sys.argv[1], "rb") as wf:
    wf.read(44) # skip header
    while True:
        data = wf.read(4000)
        if len(data) == 0:
            break
        if rec.AcceptWaveform(data):
            res = json.loads(rec.Result())
            print(res["text"])
res = json.loads(rec.FinalResult())
print(res["text"])
| 668
| 22.068966
| 92
|
py
|
vosk-api
|
vosk-api-master/python/example/test_speaker.py
|
#!/usr/bin/env python3
import os
import sys
import wave
import json
import numpy as np
from vosk import Model, KaldiRecognizer, SpkModel
SPK_MODEL_PATH = "model-spk"
if not os.path.exists(SPK_MODEL_PATH):
    # Fixed: the string used {SPK_MODEL_PATH} but lacked the f-prefix, so the
    # placeholder was printed literally instead of the actual path.
    print(f"Please download the speaker model from "
          f"https://alphacephei.com/vosk/models and unpack as {SPK_MODEL_PATH} "
          "in the current folder.")
    sys.exit(1)
wf = wave.open(sys.argv[1], "rb")
if wf.getnchannels() != 1 or wf.getsampwidth() != 2 or wf.getcomptype() != "NONE":
    print("Audio file must be WAV format mono PCM.")
    sys.exit(1)
# Large vocabulary free form recognition
model = Model(lang="en-us")
spk_model = SpkModel(SPK_MODEL_PATH)
# The speaker model can be passed either at construction time (commented
# variant below) or attached afterwards via SetSpkModel().
#rec = KaldiRecognizer(model, wf.getframerate(), spk_model)
rec = KaldiRecognizer(model, wf.getframerate())
rec.SetSpkModel(spk_model)
# We compare speakers with cosine distance.
# We can keep one or several fingerprints for the speaker in a database
# to distingusih among users.
spk_sig = [-1.110417,0.09703002,1.35658,0.7798632,-0.305457,-0.339204,0.6186931,
-0.4521213,0.3982236,-0.004530723,0.7651616,0.6500852,-0.6664245,0.1361499,
0.1358056,-0.2887807,-0.1280468,-0.8208137,-1.620276,-0.4628615,0.7870904,
-0.105754,0.9739769,-0.3258137,-0.7322628,-0.6212429,-0.5531687,-0.7796484,
0.7035915,1.056094,-0.4941756,-0.6521456,-0.2238328,-0.003737517,0.2165709,
1.200186,-0.7737719,0.492015,1.16058,0.6135428,-0.7183084,0.3153541,0.3458071,
-1.418189,-0.9624157,0.4168292,-1.627305,0.2742135,-0.6166027,0.1962581,
-0.6406527,0.4372789,-0.4296024,0.4898657,-0.9531326,-0.2945702,0.7879696,
-1.517101,-0.9344181,-0.5049928,-0.005040941,-0.4637912,0.8223695,-1.079849,
0.8871287,-0.9732434,-0.5548235,1.879138,-1.452064,-0.1975368,1.55047,
0.5941782,-0.52897,1.368219,0.6782904,1.202505,-0.9256122,-0.9718158,
-0.9570228,-0.5563112,-1.19049,-1.167985,2.606804,-2.261825,0.01340385,
0.2526799,-1.125458,-1.575991,-0.363153,0.3270262,1.485984,-1.769565,
1.541829,0.7293826,0.1743717,-0.4759418,1.523451,-2.487134,-1.824067,
-0.626367,0.7448186,-1.425648,0.3524166,-0.9903384,3.339342,0.4563958,
-0.2876643,1.521635,0.9508078,-0.1398541,0.3867955,-0.7550205,0.6568405,
0.09419366,-1.583935,1.306094,-0.3501927,0.1794427,-0.3768163,0.9683866,
-0.2442541,-1.696921,-1.8056,-0.6803037,-1.842043,0.3069353,0.9070363,-0.486526]
def cosine_dist(x, y):
    """Cosine distance (1 - cosine similarity) between two vectors."""
    vx = np.array(x)
    vy = np.array(y)
    similarity = np.dot(vx, vy) / np.linalg.norm(vx) / np.linalg.norm(vy)
    return 1 - similarity
while True:
    data = wf.readframes(4000)
    if len(data) == 0:
        break
    if rec.AcceptWaveform(data):
        res = json.loads(rec.Result())
        print("Text:", res["text"])
        # "spk" holds the utterance x-vector when a speaker model is attached.
        if "spk" in res:
            print("X-vector:", res["spk"])
            print("Speaker distance:", cosine_dist(spk_sig, res["spk"]),
            "based on", res["spk_frames"], "frames")
print("Note that second distance is not very reliable because utterance is too short. "
"Utterances longer than 4 seconds give better xvector")
res = json.loads(rec.FinalResult())
print("Text:", res["text"])
if "spk" in res:
    print("X-vector:", res["spk"])
    print("Speaker distance:", cosine_dist(spk_sig, res["spk"]),
    "based on", res["spk_frames"], "frames")
| 3,345
| 41.35443
| 88
|
py
|
vosk-api
|
vosk-api-master/python/vosk/__init__.py
|
import os
import sys
import srt
import datetime
import json
import requests
from urllib.request import urlretrieve
from zipfile import ZipFile
from re import match
from pathlib import Path
from .vosk_cffi import ffi as _ffi
from tqdm import tqdm
# Remote location of the models and local folders
MODEL_PRE_URL = "https://alphacephei.com/vosk/models/"
MODEL_LIST_URL = MODEL_PRE_URL + "model-list.json"
MODEL_DIRS = [os.getenv("VOSK_MODEL_PATH"), Path("/usr/share/vosk"),
Path.home() / "AppData/Local/vosk", Path.home() / ".cache/vosk"]
def open_dll():
    """Locate and dlopen the libvosk shared library bundled with this package."""
    here = os.path.abspath(os.path.dirname(__file__))
    if sys.platform == "win32":
        # Windows also needs the package directory on the DLL search path so
        # that libvosk's dependent DLLs can be resolved.
        os.environ["PATH"] = here + os.pathsep + os.environ["PATH"]
        if hasattr(os, "add_dll_directory"):
            os.add_dll_directory(here)
        libname = "libvosk.dll"
    elif sys.platform == "linux":
        libname = "libvosk.so"
    elif sys.platform == "darwin":
        libname = "libvosk.dyld"
    else:
        raise TypeError("Unsupported platform")
    return _ffi.dlopen(os.path.join(here, libname))
_c = open_dll()
def list_models():
    """Print the name of every model published in the remote model index."""
    index = requests.get(MODEL_LIST_URL, timeout=10).json()
    for entry in index:
        print(entry["name"])
def list_languages():
    """Print each distinct language code available in the remote model index."""
    index = requests.get(MODEL_LIST_URL, timeout=10).json()
    for code in {entry["lang"] for entry in index}:
        print(code)
class Model:
    """A loaded vosk acoustic/language model.

    The model can be given as an explicit path, looked up by its published
    name, or chosen by language code; in the latter two cases it is searched
    for in MODEL_DIRS and downloaded from MODEL_PRE_URL when missing.
    """
    def __init__(self, model_path=None, model_name=None, lang=None):
        if model_path is not None:
            self._handle = _c.vosk_model_new(model_path.encode("utf-8"))
        else:
            # Resolve (and possibly download) by name or language.
            model_path = self.get_model_path(model_name, lang)
            self._handle = _c.vosk_model_new(model_path.encode("utf-8"))
        if self._handle == _ffi.NULL:
            raise Exception("Failed to create a model")
    def __del__(self):
        _c.vosk_model_free(self._handle)
    def vosk_model_find_word(self, word):
        # Returns the word's symbol id, or a negative value if unknown
        # (per the vosk C API).
        return _c.vosk_model_find_word(self._handle, word.encode("utf-8"))
    def get_model_path(self, model_name, lang):
        """Return a filesystem path for the requested model as ``str``."""
        if model_name is None:
            model_path = self.get_model_by_lang(lang)
        else:
            model_path = self.get_model_by_name(model_name)
        return str(model_path)
    def get_model_by_name(self, model_name):
        """Find ``model_name`` in the local cache dirs, downloading if absent."""
        for directory in MODEL_DIRS:
            if directory is None or not Path(directory).exists():
                continue
            model_file_list = os.listdir(directory)
            model_file = [model for model in model_file_list if model == model_name]
            if model_file != []:
                return Path(directory, model_file[0])
        response = requests.get(MODEL_LIST_URL, timeout=10)
        result_model = [model["name"] for model in response.json() if model["name"] == model_name]
        if result_model == []:
            print("model name %s does not exist" % (model_name))
            sys.exit(1)
        else:
            # NOTE(review): ``directory`` is whatever the loop left it as (the
            # last entry of MODEL_DIRS that was iterated) — confirm this is
            # the intended download destination.
            self.download_model(Path(directory, result_model[0]))
            return Path(directory, result_model[0])
    def get_model_by_lang(self, lang):
        """Find a small, non-obsolete model for ``lang``, downloading if absent."""
        for directory in MODEL_DIRS:
            if directory is None or not Path(directory).exists():
                continue
            model_file_list = os.listdir(directory)
            model_file = [model for model in model_file_list if
                match(r"vosk-model(-small)?-{}".format(lang), model)]
            if model_file != []:
                return Path(directory, model_file[0])
        response = requests.get(MODEL_LIST_URL, timeout=10)
        result_model = [model["name"] for model in response.json() if
            model["lang"] == lang and model["type"] == "small" and model["obsolete"] == "false"]
        if result_model == []:
            print("lang %s does not exist" % (lang))
            sys.exit(1)
        else:
            # NOTE(review): same post-loop ``directory`` reliance as in
            # get_model_by_name — confirm intended.
            self.download_model(Path(directory, result_model[0]))
            return Path(directory, result_model[0])
    def download_model(self, model_name):
        """Download ``<model_name>.zip`` with a progress bar and unpack it."""
        if not (model_name.parent).exists():
            (model_name.parent).mkdir(parents=True)
        with tqdm(unit="B", unit_scale=True, unit_divisor=1024, miniters=1,
            desc=(MODEL_PRE_URL + str(model_name.name) + ".zip").rsplit("/",
            maxsplit=1)[-1]) as t:
            reporthook = self.download_progress_hook(t)
            urlretrieve(MODEL_PRE_URL + str(model_name.name) + ".zip",
                str(model_name) + ".zip", reporthook=reporthook, data=None)
            t.total = t.n
        with ZipFile(str(model_name) + ".zip", "r") as model_ref:
            model_ref.extractall(model_name.parent)
        # Remove the archive once extracted.
        Path(str(model_name) + ".zip").unlink()
    def download_progress_hook(self, t):
        """Build a urlretrieve reporthook that advances the tqdm bar ``t``."""
        last_b = [0]
        def update_to(b=1, bsize=1, tsize=None):
            # b: blocks transferred so far; bsize: block size; tsize: total.
            if tsize not in (None, -1):
                t.total = tsize
            displayed = t.update((b - last_b[0]) * bsize)
            last_b[0] = b
            return displayed
        return update_to
class SpkModel:
    """A loaded speaker-identification (x-vector) model."""
    def __init__(self, model_path):
        self._handle = _c.vosk_spk_model_new(model_path.encode("utf-8"))
        if self._handle == _ffi.NULL:
            raise Exception("Failed to create a speaker model")
    def __del__(self):
        _c.vosk_spk_model_free(self._handle)
class KaldiRecognizer:
    """Streaming speech recognizer.

    Constructed as ``KaldiRecognizer(model, sample_rate)``, optionally with a
    third argument: a :class:`SpkModel` for speaker id, or a JSON phrase-list
    string to constrain the grammar.
    """
    def __init__(self, *args):
        if len(args) == 2:
            self._handle = _c.vosk_recognizer_new(args[0]._handle, args[1])
        elif len(args) == 3 and isinstance(args[2], SpkModel):
            self._handle = _c.vosk_recognizer_new_spk(args[0]._handle,
            args[1], args[2]._handle)
        elif len(args) == 3 and isinstance(args[2], str):
            self._handle = _c.vosk_recognizer_new_grm(args[0]._handle,
            args[1], args[2].encode("utf-8"))
        else:
            raise TypeError("Unknown arguments")
        if self._handle == _ffi.NULL:
            raise Exception("Failed to create a recognizer")
    def __del__(self):
        _c.vosk_recognizer_free(self._handle)
    def SetMaxAlternatives(self, max_alternatives):
        # Request n-best output instead of a single hypothesis.
        _c.vosk_recognizer_set_max_alternatives(self._handle, max_alternatives)
    def SetWords(self, enable_words):
        # Include per-word timing in final results.
        _c.vosk_recognizer_set_words(self._handle, 1 if enable_words else 0)
    def SetPartialWords(self, enable_partial_words):
        # Include per-word timing in partial results too.
        _c.vosk_recognizer_set_partial_words(self._handle, 1 if enable_partial_words else 0)
    def SetNLSML(self, enable_nlsml):
        # Emit NLSML XML instead of JSON.
        _c.vosk_recognizer_set_nlsml(self._handle, 1 if enable_nlsml else 0)
    def SetSpkModel(self, spk_model):
        # Attach a speaker model so results carry x-vectors.
        _c.vosk_recognizer_set_spk_model(self._handle, spk_model._handle)
    def SetGrammar(self, grammar):
        # Replace the active phrase-list grammar (JSON list string).
        _c.vosk_recognizer_set_grm(self._handle, grammar.encode("utf-8"))
    def AcceptWaveform(self, data):
        """Feed a chunk of PCM bytes; truthy return means an utterance ended."""
        res = _c.vosk_recognizer_accept_waveform(self._handle, data, len(data))
        if res < 0:
            raise Exception("Failed to process waveform")
        return res
    def Result(self):
        # Finalized result of the just-ended utterance, as a UTF-8 JSON string.
        return _ffi.string(_c.vosk_recognizer_result(self._handle)).decode("utf-8")
    def PartialResult(self):
        # Current in-progress hypothesis.
        return _ffi.string(_c.vosk_recognizer_partial_result(self._handle)).decode("utf-8")
    def FinalResult(self):
        # Flush and return whatever remains at end of stream.
        return _ffi.string(_c.vosk_recognizer_final_result(self._handle)).decode("utf-8")
    def Reset(self):
        return _c.vosk_recognizer_reset(self._handle)
    def SrtResult(self, stream, words_per_line = 7):
        """Recognize a raw-PCM byte stream and return SRT subtitle text."""
        results = []
        while True:
            data = stream.read(4000)
            if len(data) == 0:
                break
            if self.AcceptWaveform(data):
                results.append(self.Result())
        results.append(self.FinalResult())
        subs = []
        for res in results:
            jres = json.loads(res)
            # Word timings are only present when SetWords(True) was called.
            if not "result" in jres:
                continue
            words = jres["result"]
            for j in range(0, len(words), words_per_line):
                # One subtitle per ``words_per_line`` consecutive words,
                # spanning from the first word's start to the last word's end.
                line = words[j : j + words_per_line]
                s = srt.Subtitle(index=len(subs),
                content=" ".join([l["word"] for l in line]),
                start=datetime.timedelta(seconds=line[0]["start"]),
                end=datetime.timedelta(seconds=line[-1]["end"]))
                subs.append(s)
        return srt.compose(subs)
def SetLogLevel(level):
    """Set Kaldi/vosk log verbosity (negative values silence debug output)."""
    return _c.vosk_set_log_level(level)
def GpuInit():
    """Initialize the CUDA device; call once before creating batch models."""
    _c.vosk_gpu_init()
def GpuThreadInit():
    """Per-thread CUDA initialization for multithreaded GPU decoding."""
    _c.vosk_gpu_thread_init()
class BatchModel:
    """Model for GPU batch decoding; shared by many BatchRecognizer streams."""
    def __init__(self, model_path, *args):
        self._handle = _c.vosk_batch_model_new(model_path.encode('utf-8'))
        if self._handle == _ffi.NULL:
            raise Exception("Failed to create a model")
    def __del__(self):
        _c.vosk_batch_model_free(self._handle)
    def Wait(self):
        # Block until the GPU has processed the currently queued chunks.
        _c.vosk_batch_model_wait(self._handle)
class BatchRecognizer:
    """One audio stream within a GPU batch-decoding session.

    Feed PCM chunks with :meth:`AcceptWaveform`, call ``BatchModel.Wait()``
    to let the GPU process the queue, then drain finalized results with
    :meth:`Result`.
    """
    def __init__(self, *args):
        self._handle = _c.vosk_batch_recognizer_new(args[0]._handle, args[1])
        if self._handle == _ffi.NULL:
            raise Exception("Failed to create a recognizer")
    def __del__(self):
        _c.vosk_batch_recognizer_free(self._handle)
    def AcceptWaveform(self, data):
        """Queue a chunk of PCM bytes for batch decoding (returns None)."""
        # Fixed: the return value was bound to an unused local ``res``.
        _c.vosk_batch_recognizer_accept_waveform(self._handle, data, len(data))
    def Result(self):
        """Pop and return the front result from the queue as a JSON string."""
        ptr = _c.vosk_batch_recognizer_front_result(self._handle)
        res = _ffi.string(ptr).decode("utf-8")
        _c.vosk_batch_recognizer_pop(self._handle)
        return res
    def FinishStream(self):
        """Signal end of audio so the decoder flushes its final output."""
        _c.vosk_batch_recognizer_finish_stream(self._handle)
    def GetPendingChunks(self):
        """Number of chunks queued for this stream but not yet decoded."""
        return _c.vosk_batch_recognizer_get_pending_chunks(self._handle)
| 9,892
| 34.844203
| 100
|
py
|
vosk-api
|
vosk-api-master/python/vosk/transcriber/transcriber.py
|
import json
import logging
import asyncio
import websockets
import srt
import datetime
import shlex
import subprocess
from vosk import KaldiRecognizer, Model
from queue import Queue
from timeit import default_timer as timer
from multiprocessing.dummy import Pool
CHUNK_SIZE = 4000
SAMPLE_RATE = 16000.0
class Transcriber:
    """Drives speech recognition over a list of (input, output) file tasks.

    Depending on ``args.server``, work runs either locally through a Vosk
    ``Model`` (thread pool) or against a websocket server (asyncio).
    """
    def __init__(self, args):
        # args: parsed CLI namespace (model, model_name, lang, server,
        # output_type, tasks, ...).
        self.model = Model(model_path=args.model, model_name=args.model_name, lang=args.lang)
        self.args = args
        self.queue = Queue()
    def recognize_stream(self, rec, stream):
        """Feed audio from ``stream.stdout`` into ``rec``.

        Returns (list of result dicts, total bytes read).
        """
        tot_samples = 0
        result = []
        while True:
            data = stream.stdout.read(CHUNK_SIZE)
            if len(data) == 0:
                break
            tot_samples += len(data)
            if rec.AcceptWaveform(data):
                # Utterance finished: keep the full result.
                jres = json.loads(rec.Result())
                logging.info(jres)
                result.append(jres)
            else:
                # Partial hypotheses are only logged, never stored.
                jres = json.loads(rec.PartialResult())
                if jres["partial"] != "":
                    logging.info(jres)
        jres = json.loads(rec.FinalResult())
        result.append(jres)
        return result, tot_samples
    async def recognize_stream_server(self, proc):
        """Stream audio from ``proc.stdout`` to the websocket server.

        Returns (list of result dicts, total bytes read).
        """
        async with websockets.connect(self.args.server) as websocket:
            tot_samples = 0
            result = []
            await websocket.send('{ "config" : { "sample_rate" : %f } }' % (SAMPLE_RATE))
            while True:
                data = await proc.stdout.read(CHUNK_SIZE)
                tot_samples += len(data)
                if len(data) == 0:
                    break
                await websocket.send(data)
                jres = json.loads(await websocket.recv())
                logging.info(jres)
                if not "partial" in jres:
                    result.append(jres)
            await websocket.send('{"eof" : 1}')
            jres = json.loads(await websocket.recv())
            logging.info(jres)
            result.append(jres)
        return result, tot_samples
    def format_result(self, result, words_per_line=7):
        """Render recognition results as srt, txt or json text."""
        processed_result = ""
        if self.args.output_type == "srt":
            subs = []
            for _, res in enumerate(result):
                # Results without word timings cannot become subtitles.
                if not "result" in res:
                    continue
                words = res["result"]
                for j in range(0, len(words), words_per_line):
                    line = words[j : j + words_per_line]
                    s = srt.Subtitle(index=len(subs),
                            content = " ".join([l["word"] for l in line]),
                            start=datetime.timedelta(seconds=line[0]["start"]),
                            end=datetime.timedelta(seconds=line[-1]["end"]))
                    subs.append(s)
            processed_result = srt.compose(subs)
        elif self.args.output_type == "txt":
            for part in result:
                if part["text"] != "":
                    processed_result += part["text"] + "\n"
        elif self.args.output_type == "json":
            monologues = {"schemaVersion":"2.0", "monologues":[], "text":[]}
            for part in result:
                if part["text"] != "":
                    monologues["text"] += part["text"]
            for _, res in enumerate(result):
                if not "result" in res:
                    continue
                monologue = { "speaker": {"id": "unknown", "name": None}, "start": 0, "end": 0, "terms": []}
                monologue["start"] = res["result"][0]["start"]
                monologue["end"] = res["result"][-1]["end"]
                monologue["terms"] = [{"confidence": t["conf"], "start": t["start"], "end": t["end"], "text": t["word"], "type": "WORD" } for t in res["result"]]
                monologues["monologues"].append(monologue)
            processed_result = json.dumps(monologues)
        return processed_result
    def resample_ffmpeg(self, infile):
        """Decode *infile* to 16-bit mono PCM on stdout via ffmpeg (blocking)."""
        cmd = shlex.split("ffmpeg -nostdin -loglevel quiet "
                "-i \'{}\' -ar {} -ac 1 -f s16le -".format(str(infile), SAMPLE_RATE))
        stream = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        return stream
    async def resample_ffmpeg_async(self, infile):
        """Decode *infile* to 16-bit mono PCM on stdout via ffmpeg (asyncio)."""
        cmd = "ffmpeg -nostdin -loglevel quiet "\
                "-i \'{}\' -ar {} -ac 1 -f s16le -".format(str(infile), SAMPLE_RATE)
        return await asyncio.create_subprocess_shell(cmd, stdout=subprocess.PIPE)
    async def server_worker(self):
        """Async worker: drain ``self.queue`` through the websocket server."""
        while True:
            try:
                input_file, output_file = self.queue.get_nowait()
            except Exception:
                # Queue empty: this worker is finished.
                break
            logging.info("Recognizing {}".format(input_file))
            start_time = timer()
            proc = await self.resample_ffmpeg_async(input_file)
            result, tot_samples = await self.recognize_stream_server(proc)
            await proc.wait()
            # Bad input, continue
            if tot_samples == 0:
                self.queue.task_done()
                continue
            processed_result = self.format_result(result)
            if output_file != "":
                logging.info("File {} processing complete".format(output_file))
                with open(output_file, "w", encoding="utf-8") as fh:
                    fh.write(processed_result)
            else:
                print(processed_result)
            elapsed = timer() - start_time
            # xRT factor of 2: two bytes per 16-bit sample.
            logging.info("Execution time: {:.3f} sec; "\
                    "xRT {:.3f}".format(elapsed, float(elapsed) * (2 * SAMPLE_RATE) / tot_samples))
            self.queue.task_done()
    def pool_worker(self, inputdata):
        """Thread-pool worker: transcribe one (input, output) task locally."""
        logging.info("Recognizing {}".format(inputdata[0]))
        start_time = timer()
        try:
            stream = self.resample_ffmpeg(inputdata[0])
        except FileNotFoundError as e:
            print(e, "Missing FFMPEG, please install and try again")
            return
        except Exception as e:
            logging.info(e)
            return
        rec = KaldiRecognizer(self.model, SAMPLE_RATE)
        rec.SetWords(True)
        result, tot_samples = self.recognize_stream(rec, stream)
        if tot_samples == 0:
            return
        processed_result = self.format_result(result)
        if inputdata[1] != "":
            logging.info("File {} processing complete".format(inputdata[1]))
            with open(inputdata[1], "w", encoding="utf-8") as fh:
                fh.write(processed_result)
        else:
            print(processed_result)
        elapsed = timer() - start_time
        # xRT factor of 2: two bytes per 16-bit sample.
        logging.info("Execution time: {:.3f} sec; "\
                "xRT {:.3f}".format(elapsed, float(elapsed) * (2 * SAMPLE_RATE) / tot_samples))
    async def process_task_list_server(self, task_list):
        """Queue all tasks and run ``args.tasks`` concurrent server workers."""
        for x in task_list:
            self.queue.put(x)
        workers = [asyncio.create_task(self.server_worker()) for i in range(self.args.tasks)]
        await asyncio.gather(*workers)
    def process_task_list_pool(self, task_list):
        """Process tasks locally using a thread pool."""
        with Pool() as pool:
            pool.map(self.pool_worker, task_list)
    def process_task_list(self, task_list):
        """Dispatch to local pool or server processing based on ``args.server``."""
        if self.args.server is None:
            self.process_task_list_pool(task_list)
        else:
            asyncio.run(self.process_task_list_server(task_list))
| 7,336
| 35.321782
| 161
|
py
|
vosk-api
|
vosk-api-master/python/vosk/transcriber/cli.py
|
#!/usr/bin/env python3
import argparse
import logging
import sys
import os
from pathlib import Path
from vosk import list_models, list_languages
from vosk.transcriber.transcriber import Transcriber
# Command line interface of the transcriber tool.
parser = argparse.ArgumentParser(
    description = "Transcribe audio file and save result in selected format")
parser.add_argument(
    "--model", "-m", type=str,
    help="model path")
parser.add_argument(
    "--server", "-s", const="ws://localhost:2700", action="store_const",
    help="use server for recognition")
parser.add_argument(
    "--list-models", default=False, action="store_true",
    help="list available models")
parser.add_argument(
    "--list-languages", default=False, action="store_true",
    help="list available languages")
parser.add_argument(
    "--model-name", "-n", type=str,
    help="select model by name")
parser.add_argument(
    "--lang", "-l", default="en-us", type=str,
    help="select model by language")
parser.add_argument(
    "--input", "-i", type=str,
    help="audiofile")
parser.add_argument(
    "--output", "-o", default="", type=str,
    help="optional output filename path")
parser.add_argument(
    "--output-type", "-t", default="txt", type=str,
    help="optional arg output data type")
parser.add_argument(
    "--tasks", "-ts", default=10, type=int,
    help="number of parallel recognition tasks")
parser.add_argument(
    "--log-level", default="INFO",
    help="logging level")
def main():
    """CLI entry point: parse arguments, build the task list, transcribe."""
    args = parser.parse_args()
    log_level = args.log_level.upper()
    logging.getLogger().setLevel(log_level)

    # Informational modes: print and exit.
    if args.list_models:
        list_models()
        return
    if args.list_languages:
        list_languages()
        return

    if not args.input:
        logging.info("Please specify input file or directory")
        sys.exit(1)
    if not Path(args.input).exists():
        # Bug fix: the message previously contained a literal
        # "{args.input}" because the f-string prefix was missing.
        logging.info(
            "File/folder %s does not exist, "
            "please specify an existing file/directory", args.input)
        sys.exit(1)

    transcriber = Transcriber(args)
    if Path(args.input).is_dir():
        # Directory input: one (input, output) pair per contained file,
        # the output named after the input stem with the chosen suffix.
        task_list = [(Path(args.input, fn),
                Path(args.output,
                Path(fn).stem).with_suffix("." + args.output_type)) for fn in os.listdir(args.input)]
    elif Path(args.input).is_file():
        if args.output == "":
            # Empty output means "print to stdout" downstream.
            task_list = [(Path(args.input), args.output)]
        else:
            task_list = [(Path(args.input), Path(args.output))]
    else:
        logging.info("Wrong arguments")
        sys.exit(1)
    transcriber.process_task_list(task_list)
if __name__ == "__main__":
main()
| 2,697
| 28.977778
| 97
|
py
|
vosk-api
|
vosk-api-master/python/vosk/transcriber/__init__.py
| 0
| 0
| 0
|
py
|
|
sysbench
|
sysbench-master/third_party/cram/setup.py
|
#!/usr/bin/env python
"""Installs cram"""
import os
import sys
from distutils.core import setup
# Extra distutils commands; populated only if optional deps are present.
COMMANDS = {}
CRAM_DIR = os.path.abspath(os.path.dirname(__file__))
# Register bdist_wheel only when the optional `wheel` package is installed.
try:
    from wheel.bdist_wheel import bdist_wheel
except ImportError:
    pass
else:
    COMMANDS['bdist_wheel'] = bdist_wheel
def long_description():
    """Get the long description from the README"""
    # sys.path[0] is the directory of setup.py when run as a script.
    return open(os.path.join(sys.path[0], 'README.rst')).read()
# Package metadata for distutils.
setup(
    author='Brodie Rao',
    author_email='brodie@bitheap.org',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        ('License :: OSI Approved :: GNU General Public License v2 '
         'or later (GPLv2+)'),
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
        'Programming Language :: Unix Shell',
        'Topic :: Software Development :: Testing',
    ],
    cmdclass=COMMANDS,
    description='Functional tests for command line applications',
    download_url='https://bitheap.org/cram/cram-0.7.tar.gz',
    keywords='automatic functional test framework',
    license='GNU GPLv2 or any later version',
    long_description=long_description(),
    name='cram',
    packages=['cram'],
    scripts=['scripts/cram'],
    url='https://bitheap.org/cram/',
    version='0.7',
)
| 1,529
| 29
| 70
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/__main__.py
|
"""Main module (invoked by "python -m cram")"""
import sys
import cram
try:
    sys.exit(cram.main(sys.argv[1:]))
except KeyboardInterrupt:
    # Swallow Ctrl-C so no traceback is printed.
    pass
| 152
| 12.909091
| 47
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_main.py
|
"""Main entry point"""
import optparse
import os
import shlex
import shutil
import sys
import tempfile
try:
import configparser
except ImportError: # pragma: nocover
import ConfigParser as configparser
from cram._cli import runcli
from cram._encoding import b, fsencode, stderrb, stdoutb
from cram._run import runtests
from cram._xunit import runxunit
def _which(cmd):
    """Locate *cmd* on the search PATH.

    Returns the absolute path (bytes) of the first executable file found
    in a PATH entry, or None when no entry contains it.
    """
    target = fsencode(cmd)
    for entry in os.environ['PATH'].split(os.pathsep):
        candidate = os.path.join(fsencode(entry), target)
        if os.path.isfile(candidate) and os.access(candidate, os.X_OK):
            return os.path.abspath(candidate)
    return None
def _expandpath(path):
"""Expands ~ and environment variables in path"""
return os.path.expanduser(os.path.expandvars(path))
class _OptionParser(optparse.OptionParser):
    """Like optparse.OptionParser, but supports setting values through
    CRAM= and .cramrc."""
    def __init__(self, *args, **kwargs):
        # Maps config-file key -> True when the option is a boolean flag.
        self._config_opts = {}
        optparse.OptionParser.__init__(self, *args, **kwargs)
    def add_option(self, *args, **kwargs):
        """Register the option and remember its config-file key."""
        option = optparse.OptionParser.add_option(self, *args, **kwargs)
        if option.dest and option.dest != 'version':
            # Config keys use dashes where optparse dests use underscores.
            key = option.dest.replace('_', '-')
            self._config_opts[key] = option.action == 'store_true'
        return option
    def parse_args(self, args=None, values=None):
        """Parse args, seeding defaults from .cramrc and $CRAM."""
        config = configparser.RawConfigParser()
        config.read(_expandpath(os.environ.get('CRAMRC', '.cramrc')))
        defaults = {}
        for key, isbool in self._config_opts.items():
            try:
                if isbool:
                    try:
                        value = config.getboolean('cram', key)
                    except ValueError:
                        # Re-read the raw value only for the error message.
                        value = config.get('cram', key)
                        self.error('--%s: invalid boolean value: %r'
                                   % (key, value))
                else:
                    value = config.get('cram', key)
            except (configparser.NoSectionError, configparser.NoOptionError):
                pass
            else:
                defaults[key] = value
        self.set_defaults(**defaults)
        # $CRAM contents are appended as extra command line arguments.
        eargs = os.environ.get('CRAM', '').strip()
        if eargs:
            args = args or []
            args += shlex.split(eargs)
        try:
            return optparse.OptionParser.parse_args(self, args, values)
        except optparse.OptionValueError:
            self.error(str(sys.exc_info()[1]))
def _parseopts(args):
    """Parse command line arguments.

    Returns (options, paths-as-bytes, usage-printer callback).
    """
    p = _OptionParser(usage='cram [OPTIONS] TESTS...', prog='cram')
    p.add_option('-V', '--version', action='store_true',
                 help='show version information and exit')
    p.add_option('-q', '--quiet', action='store_true',
                 help="don't print diffs")
    p.add_option('-v', '--verbose', action='store_true',
                 help='show filenames and test status')
    p.add_option('-i', '--interactive', action='store_true',
                 help='interactively merge changed test output')
    p.add_option('-d', '--debug', action='store_true',
                 help='write script output directly to the terminal')
    p.add_option('-y', '--yes', action='store_true',
                 help='answer yes to all questions')
    p.add_option('-n', '--no', action='store_true',
                 help='answer no to all questions')
    p.add_option('-E', '--preserve-env', action='store_true',
                 help="don't reset common environment variables")
    p.add_option('-e', '--no-err-files', action='store_true',
                 help="don't write .err files on test failures")
    p.add_option('--keep-tmpdir', action='store_true',
                 help='keep temporary directories')
    p.add_option('--shell', action='store', default='/bin/sh', metavar='PATH',
                 help='shell to use for running tests (default: %default)')
    p.add_option('--shell-opts', action='store', metavar='OPTS',
                 help='arguments to invoke shell with')
    p.add_option('--indent', action='store', default=2, metavar='NUM',
                 type='int', help=('number of spaces to use for indentation '
                                   '(default: %default)'))
    p.add_option('--xunit-file', action='store', metavar='PATH',
                 help='path to write xUnit XML output')
    opts, paths = p.parse_args(args)
    # cram handles all paths as bytes internally.
    paths = [fsencode(path) for path in paths]
    return opts, paths, p.get_usage
def main(args):
    """Main entry point.
    If you're thinking of using Cram in other Python code (e.g., unit tests),
    consider using the test() or testfile() functions instead.
    :param args: Script arguments (excluding script name)
    :type args: str
    :return: Exit code (non-zero on failure)
    :rtype: int
    """
    opts, paths, getusage = _parseopts(args)
    if opts.version:
        sys.stdout.write("""Cram CLI testing framework (version 0.7)
Copyright (C) 2010-2016 Brodie Rao <brodie@bitheap.org> and others
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
""")
        return
    # Reject mutually exclusive flag combinations up front.
    conflicts = [('--yes', opts.yes, '--no', opts.no),
                 ('--quiet', opts.quiet, '--interactive', opts.interactive),
                 ('--debug', opts.debug, '--quiet', opts.quiet),
                 ('--debug', opts.debug, '--interactive', opts.interactive),
                 ('--debug', opts.debug, '--verbose', opts.verbose),
                 ('--debug', opts.debug, '--xunit-file', opts.xunit_file)]
    for s1, o1, s2, o2 in conflicts:
        if o1 and o2:
            sys.stderr.write('options %s and %s are mutually exclusive\n'
                             % (s1, s2))
            return 2
    shellcmd = _which(opts.shell)
    if not shellcmd:
        stderrb.write(b('shell not found: ') + fsencode(opts.shell) + b('\n'))
        return 2
    shell = [shellcmd]
    if opts.shell_opts:
        shell += shlex.split(opts.shell_opts)
    patchcmd = None
    if opts.interactive:
        # Interactive merge requires the external patch(1) tool.
        patchcmd = _which('patch')
        if not patchcmd:
            sys.stderr.write('patch(1) required for -i\n')
            return 2
    if not paths:
        sys.stdout.write(getusage())
        return 2
    badpaths = [path for path in paths if not os.path.exists(path)]
    if badpaths:
        stderrb.write(b('no such file: ') + badpaths[0] + b('\n'))
        return 2
    if opts.yes:
        answer = 'y'
    elif opts.no:
        answer = 'n'
    else:
        answer = None
    # Every run gets a fresh temporary tree; TMPDIR/TEMP/TMP for child
    # processes point inside it so tests cannot leak files elsewhere.
    tmpdir = os.environ['CRAMTMP'] = tempfile.mkdtemp('', 'cramtests-')
    tmpdirb = fsencode(tmpdir)
    proctmp = os.path.join(tmpdir, 'tmp')
    for s in ('TMPDIR', 'TEMP', 'TMP'):
        os.environ[s] = proctmp
    os.mkdir(proctmp)
    try:
        tests = runtests(paths, tmpdirb, shell, indent=opts.indent,
                         cleanenv=not opts.preserve_env, debug=opts.debug,
                         noerrfiles=opts.no_err_files)
        if not opts.debug:
            tests = runcli(tests, quiet=opts.quiet, verbose=opts.verbose,
                           patchcmd=patchcmd, answer=answer,
                           noerrfiles=opts.no_err_files)
            if opts.xunit_file is not None:
                tests = runxunit(tests, opts.xunit_file)
        hastests = False
        failed = False
        for path, test in tests:
            hastests = True
            refout, postout, diff = test()
            if diff:
                failed = True
        if not hastests:
            sys.stderr.write('no tests found\n')
            return 2
        return int(failed)
    finally:
        if opts.keep_tmpdir:
            stdoutb.write(b('# Kept temporary directory: ') + tmpdirb +
                          b('\n'))
        else:
            shutil.rmtree(tmpdir)
| 7,967
| 35.888889
| 78
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_process.py
|
"""Utilities for running subprocesses"""
import os
import signal
import subprocess
import sys
from cram._encoding import fsdecode
__all__ = ['PIPE', 'STDOUT', 'execute']
PIPE = subprocess.PIPE
STDOUT = subprocess.STDOUT
def _makeresetsigpipe():
"""Make a function to reset SIGPIPE to SIG_DFL (for use in subprocesses).
Doing subprocess.Popen(..., preexec_fn=makeresetsigpipe()) will prevent
Python's SIGPIPE handler (SIG_IGN) from being inherited by the
child process.
"""
if (sys.platform == 'win32' or
getattr(signal, 'SIGPIPE', None) is None): # pragma: nocover
return None
return lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def execute(args, stdin=None, stdout=None, stderr=None, cwd=None, env=None):
    """Run a process and return its output and return code.
    stdin may either be None or a string to send to the process.
    stdout may either be None or PIPE. If set to PIPE, the process's output
    is returned as a string.
    stderr may either be None or STDOUT. If stdout is set to PIPE and stderr
    is set to STDOUT, the process's stderr output will be interleaved with
    stdout and returned as a string.
    cwd sets the process's current working directory.
    env can be set to a dictionary to override the process's environment
    variables.
    This function returns a 2-tuple of (output, returncode).
    """
    if sys.platform == 'win32': # pragma: nocover
        # Windows process creation wants str arguments, not bytes.
        args = [fsdecode(arg) for arg in args]
    p = subprocess.Popen(args, stdin=PIPE, stdout=stdout, stderr=stderr,
                         cwd=cwd, env=env, bufsize=-1,
                         preexec_fn=_makeresetsigpipe(),
                         close_fds=os.name == 'posix')
    out, err = p.communicate(stdin)
    return out, p.returncode
| 1,805
| 31.836364
| 77
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_diff.py
|
"""Utilities for diffing test files and their output"""
import codecs
import difflib
import re
from cram._encoding import b
__all__ = ['esc', 'glob', 'regex', 'unified_diff']
def _regex(pattern, s):
    """Match *pattern* (anchored at end-of-string) against *s*.

    Returns the match object (truthy) on success, None on mismatch, and
    False when *pattern* is not valid regular-expression syntax.
    """
    anchored = pattern + b(r'\Z')
    try:
        match = re.match(anchored, s)
    except re.error:
        return False
    return match
def _glob(el, l):
    r"""Match a glob-like pattern.
    The only supported special characters are * and ?. Escaping is
    supported.
    >>> from cram._encoding import b
    >>> bool(_glob(b(r'\* \\ \? fo?b*'), b('* \\ ? foobar')))
    True
    """
    # Translate the glob into a regex byte-by-byte, then delegate to _regex.
    i, n = 0, len(el)
    res = b('')
    while i < n:
        c = el[i:i + 1]
        i += 1
        if c == b('\\') and el[i] in b('*?\\'):
            # Escaped special character: emit backslash + char literally.
            res += el[i - 1:i + 1]
            i += 1
        elif c == b('*'):
            res += b('.*')
        elif c == b('?'):
            res += b('.')
        else:
            res += re.escape(c)
    return _regex(res, l)
def _matchannotation(keyword, matchfunc, el, l):
    """Apply match function based on annotation keyword"""
    # Only lines ending in " (keyword)\n" use the custom matcher; the
    # annotation is stripped from the expected line, the newline from both.
    ann = b(' (%s)\n' % keyword)
    return el.endswith(ann) and matchfunc(el[:-len(ann)], l[:-1])
def regex(el, l):
    """Apply a regular expression match to a line annotated with '(re)'"""
    return _matchannotation('re', _regex, el, l)
def glob(el, l):
    """Apply a glob match to a line annotated with '(glob)'"""
    return _matchannotation('glob', _glob, el, l)
def esc(el, l):
    """Apply an escape match to a line annotated with '(esc)'"""
    ann = b(' (esc)\n')
    if el.endswith(ann):
        # Decode backslash escapes in the expected line before comparing.
        el = codecs.escape_decode(el[:-len(ann)])[0] + b('\n')
    if el == l:
        return True
    if l.endswith(ann):
        # The actual output line may carry the annotation instead.
        l = codecs.escape_decode(l[:-len(ann)])[0] + b('\n')
    return el == l
class _SequenceMatcher(difflib.SequenceMatcher, object):
    """Like difflib.SequenceMatcher, but supports custom match functions"""
    def __init__(self, *args, **kwargs):
        # matchers: list of callables (expected_line, actual_line) -> bool.
        self._matchers = kwargs.pop('matchers', [])
        super(_SequenceMatcher, self).__init__(*args, **kwargs)
    def _match(self, el, l):
        """Tests for matching lines using custom matchers"""
        for matcher in self._matchers:
            if matcher(el, l):
                return True
        return False
    def find_longest_match(self, alo, ahi, blo, bhi):
        """Find longest matching block in a[alo:ahi] and b[blo:bhi]"""
        # SequenceMatcher uses find_longest_match() to slowly whittle down
        # the differences between a and b until it has each matching block.
        # Because of this, we can end up doing the same matches many times.
        matches = []
        for n, (el, line) in enumerate(zip(self.a[alo:ahi], self.b[blo:bhi])):
            if el != line and self._match(el, line):
                # This fools the superclass's method into thinking that the
                # regex/glob in a is identical to b by replacing a's line (the
                # expected output) with b's line (the actual output).
                self.a[alo + n] = line
                matches.append((n, el))
        ret = super(_SequenceMatcher, self).find_longest_match(alo, ahi,
                                                               blo, bhi)
        # Restore the lines replaced above. Otherwise, the diff output
        # would seem to imply that the tests never had any regexes/globs.
        for n, el in matches:
            self.a[alo + n] = el
        return ret
def unified_diff(l1, l2, fromfile=b(''), tofile=b(''), fromfiledate=b(''),
                 tofiledate=b(''), n=3, lineterm=b('\n'), matchers=None):
    r"""Compare two sequences of lines; generate the delta as a unified diff.
    This is like difflib.unified_diff(), but allows custom matchers.
    >>> from cram._encoding import b
    >>> l1 = [b('a\n'), b('? (glob)\n')]
    >>> l2 = [b('a\n'), b('b\n')]
    >>> (list(unified_diff(l1, l2, b('f1'), b('f2'), b('1970-01-01'),
    ...                    b('1970-01-02'))) ==
    ...  [b('--- f1\t1970-01-01\n'), b('+++ f2\t1970-01-02\n'),
    ...   b('@@ -1,2 +1,2 @@\n'), b(' a\n'), b('-? (glob)\n'), b('+b\n')])
    True
    >>> from cram._diff import glob
    >>> list(unified_diff(l1, l2, matchers=[glob]))
    []
    """
    if matchers is None:
        matchers = []
    started = False
    matcher = _SequenceMatcher(None, l1, l2, matchers=matchers)
    for group in matcher.get_grouped_opcodes(n):
        # The ---/+++ header is emitted lazily: only when a first hunk
        # exists, so identical inputs yield nothing at all.
        if not started:
            if fromfiledate:
                fromdate = b('\t') + fromfiledate
            else:
                fromdate = b('')
            if tofiledate:
                todate = b('\t') + tofiledate
            else:
                todate = b('')
            yield b('--- ') + fromfile + fromdate + lineterm
            yield b('+++ ') + tofile + todate + lineterm
            started = True
        i1, i2, j1, j2 = group[0][1], group[-1][2], group[0][3], group[-1][4]
        yield (b("@@ -%d,%d +%d,%d @@" % (i1 + 1, i2 - i1, j1 + 1, j2 - j1)) +
               lineterm)
        for tag, i1, i2, j1, j2 in group:
            if tag == 'equal':
                for line in l1[i1:i2]:
                    yield b(' ') + line
                continue
            if tag == 'replace' or tag == 'delete':
                for line in l1[i1:i2]:
                    yield b('-') + line
            if tag == 'replace' or tag == 'insert':
                for line in l2[j1:j2]:
                    yield b('+') + line
| 5,630
| 34.415094
| 78
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_run.py
|
"""The test runner"""
import os
import sys
from cram._encoding import b, fsdecode, fsencode
from cram._test import testfile
__all__ = ['runtests']
if sys.platform == 'win32':  # pragma: nocover
    def _walk(top):
        # os.walk needs str on Windows; decode the root and re-encode
        # every result so callers always deal in bytes.
        top = fsdecode(top)
        for root, dirs, files in os.walk(top):
            yield (fsencode(root),
                   [fsencode(p) for p in dirs],
                   [fsencode(p) for p in files])
else:
    _walk = os.walk
def _findtests(paths):
    """Yield tests in paths in sorted order"""
    for p in paths:
        if os.path.isdir(p):
            for root, dirs, files in _walk(p):
                # Skip dot-prefixed (hidden) directories.
                if os.path.basename(root).startswith(b('.')):
                    continue
                for f in sorted(files):
                    # Tests are non-hidden files with a .t extension.
                    if not f.startswith(b('.')) and f.endswith(b('.t')):
                        yield os.path.normpath(os.path.join(root, f))
        else:
            # Explicitly named files are yielded as-is.
            yield os.path.normpath(p)
def runtests(paths, tmpdir, shell, indent=2, cleanenv=True, debug=False,
             noerrfiles=False):
    """Run tests and yield results.
    This yields a sequence of 2-tuples containing the following:
    (test path, test function)
    The test function, when called, runs the test in a temporary directory
    and returns a 3-tuple:
    (list of lines in the test, same list with actual output, diff)
    """
    cwd = os.getcwd()
    seen = set()
    basenames = set()
    for i, path in enumerate(_findtests(paths)):
        abspath = os.path.abspath(path)
        # Skip duplicates (same test reachable through several paths).
        if abspath in seen:
            continue
        seen.add(abspath)
        # Empty test files are reported as skipped without running.
        if not os.stat(path).st_size:
            yield (path, lambda: (None, None, None))
            continue
        basename = os.path.basename(path)
        # Disambiguate colliding basenames so each test gets its own
        # working directory under tmpdir.
        if basename in basenames:
            basename = basename + b('-%s' % i)
        else:
            basenames.add(basename)
        # Fix: bind the loop variables as default arguments. The closure
        # previously captured them late-bound, so a caller that collected
        # all test functions before invoking any would run the last test
        # repeatedly.
        def test(abspath=abspath, basename=basename, path=path):
            """Run test file"""
            testdir = os.path.join(tmpdir, basename)
            os.mkdir(testdir)
            try:
                os.chdir(testdir)
                return testfile(abspath, shell, indent=indent,
                                cleanenv=cleanenv, debug=debug,
                                testname=path, noerrfile=noerrfiles)
            finally:
                # Always restore the original working directory.
                os.chdir(cwd)
        yield (path, test)
| 2,345
| 28.696203
| 74
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_xunit.py
|
"""xUnit XML output"""
import locale
import os
import re
import socket
import sys
import time
from cram._encoding import u, ul
__all__ = ['runxunit']
# Regexes matching characters that are invalid in XML CDATA / attribute
# values. "wide" variants include astral-plane ranges and are used on
# builds where sys.maxunicode covers them; "narrow" variants are for
# UCS-2 builds.
_widecdataregex = ul(r"'(?:[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd"
                     r"\U00010000-\U0010ffff]|]]>)'")
_narrowcdataregex = ul(r"'(?:[^\x09\x0a\x0d\x20-\ud7ff\ue000-\ufffd]"
                       r"|]]>)'")
_widequoteattrregex = ul(r"'[^\x20\x21\x23-\x25\x27-\x3b\x3d"
                         r"\x3f-\ud7ff\ue000-\ufffd"
                         r"\U00010000-\U0010ffff]'")
_narrowquoteattrregex = ul(r"'[^\x20\x21\x23-\x25\x27-\x3b\x3d"
                           r"\x3f-\ud7ff\ue000-\ufffd]'")
# Substitute for characters that cannot be represented in XML.
_replacementchar = ul(r"'\N{REPLACEMENT CHARACTER}'")
if sys.maxunicode >= 0x10ffff:  # pragma: nocover
    _cdatasub = re.compile(_widecdataregex).sub
    _quoteattrsub = re.compile(_widequoteattrregex).sub
else:  # pragma: nocover
    _cdatasub = re.compile(_narrowcdataregex).sub
    _quoteattrsub = re.compile(_narrowquoteattrregex).sub
def _cdatareplace(m):
    """Replace _cdatasub() regex match"""
    if m.group(0) == u(']]>'):
        # Split the CDATA section so a literal "]]>" survives escaping.
        return u(']]>]]><![CDATA[')
    else:
        # Any other matched character is invalid XML; emit U+FFFD.
        return _replacementchar
def _cdata(s):
    r"""Escape a string as an XML CDATA block.
    >>> from cram._encoding import ul
    >>> (_cdata('1<\'2\'>&"3\x00]]>\t\r\n') ==
    ...  ul(r"'<![CDATA[1<\'2\'>&\"3\ufffd]]>]]><![CDATA[\t\r\n]]>'"))
    True
    """
    return u('<![CDATA[%s]]>') % _cdatasub(_cdatareplace, s)
def _quoteattrreplace(m):
    """Replace _quoteattrsub() regex match"""
    # XML-special and whitespace characters become entities; anything
    # else the pattern matched is invalid in XML and becomes U+FFFD.
    return {u('\t'): u('&#9;'),
            u('\n'): u('&#10;'),
            u('\r'): u('&#13;'),
            u('"'): u('&quot;'),
            u('&'): u('&amp;'),
            u('<'): u('&lt;'),
            u('>'): u('&gt;')}.get(m.group(0), _replacementchar)
def _quoteattr(s):
    r"""Escape a string for use as an XML attribute value.
    >>> from cram._encoding import ul
    >>> (_quoteattr('1<\'2\'>&"3\x00]]>\t\r\n') ==
    ...  ul(r"'\"1&lt;\'2\'&gt;&amp;&quot;3\ufffd]]&gt;&#9;&#13;&#10;\"'"))
    True
    """
    return u('"%s"') % _quoteattrsub(_quoteattrreplace, s)
def _timestamp():
    """Return the current time in ISO 8601 format"""
    tm = time.localtime()
    if tm.tm_isdst == 1: # pragma: nocover
        tz = time.altzone
    else: # pragma: nocover
        tz = time.timezone
    timestamp = time.strftime('%Y-%m-%dT%H:%M:%S', tm)
    # time.timezone/altzone are seconds WEST of UTC, hence the sign flip
    # for the +HH:MM offset suffix.
    tzhours = int(-tz / 60 / 60)
    tzmins = int(abs(tz) / 60 % 60)
    timestamp += u('%+03d:%02d') % (tzhours, tzmins)
    return timestamp
def runxunit(tests, xmlpath):
    """Run tests with xUnit XML output.
    tests should be a sequence of 2-tuples containing the following:
    (test path, test function)
    This function yields a new sequence where each test function is wrapped
    with a function that writes test results to an xUnit XML file.
    """
    suitestart = time.time()
    timestamp = _timestamp()
    hostname = socket.gethostname()
    # Single-element lists so the nested closures can mutate the counters.
    total, skipped, failed = [0], [0], [0]
    testcases = []
    for path, test in tests:
        # NOTE(review): testwrapper closes over the loop variables `path`
        # and `test` (late binding); safe as long as consumers invoke each
        # wrapper before advancing this generator -- confirm callers.
        def testwrapper():
            """Run test and collect XML output"""
            total[0] += 1
            start = time.time()
            refout, postout, diff = test()
            testtime = time.time() - start
            classname = path.decode(locale.getpreferredencoding(), 'replace')
            name = os.path.basename(classname)
            if postout is None:
                # postout of None marks a skipped test.
                skipped[0] += 1
                testcase = (u('  <testcase classname=%(classname)s\n'
                              '            name=%(name)s\n'
                              '            time="%(time).6f">\n'
                              '    <skipped/>\n'
                              '  </testcase>\n') %
                            {'classname': _quoteattr(classname),
                             'name': _quoteattr(name),
                             'time': testtime})
            elif diff:
                # A non-empty diff marks a failure; the diff text becomes
                # the <failure> body (CDATA-escaped).
                failed[0] += 1
                diff = list(diff)
                diffu = u('').join(l.decode(locale.getpreferredencoding(),
                                            'replace')
                                   for l in diff)
                testcase = (u('  <testcase classname=%(classname)s\n'
                              '            name=%(name)s\n'
                              '            time="%(time).6f">\n'
                              '    <failure>%(diff)s</failure>\n'
                              '  </testcase>\n') %
                            {'classname': _quoteattr(classname),
                             'name': _quoteattr(name),
                             'time': testtime,
                             'diff': _cdata(diffu)})
            else:
                testcase = (u('  <testcase classname=%(classname)s\n'
                              '            name=%(name)s\n'
                              '            time="%(time).6f"/>\n') %
                            {'classname': _quoteattr(classname),
                             'name': _quoteattr(name),
                             'time': testtime})
            testcases.append(testcase)
            return refout, postout, diff
        yield path, testwrapper
    # All wrappers have run by now; write the complete XML file.
    suitetime = time.time() - suitestart
    header = (u('<?xml version="1.0" encoding="utf-8"?>\n'
                '<testsuite name="cram"\n'
                '           tests="%(total)d"\n'
                '           failures="%(failed)d"\n'
                '           skipped="%(skipped)d"\n'
                '           timestamp=%(timestamp)s\n'
                '           hostname=%(hostname)s\n'
                '           time="%(time).6f">\n') %
              {'total': total[0],
               'failed': failed[0],
               'skipped': skipped[0],
               'timestamp': _quoteattr(timestamp),
               'hostname': _quoteattr(hostname),
               'time': suitetime})
    footer = u('</testsuite>\n')
    xmlfile = open(xmlpath, 'wb')
    try:
        xmlfile.write(header.encode('utf-8'))
        for testcase in testcases:
            xmlfile.write(testcase.encode('utf-8'))
        xmlfile.write(footer.encode('utf-8'))
    finally:
        xmlfile.close()
| 6,247
| 34.908046
| 77
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/__init__.py
|
"""Functional testing framework for command line applications"""
from cram._main import main
from cram._test import test, testfile
__all__ = ['main', 'test', 'testfile']
| 172
| 23.714286
| 64
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_cli.py
|
"""The command line interface implementation"""
import os
import sys
from cram._encoding import b, bytestype, stdoutb
from cram._process import execute
__all__ = ['runcli']
def _prompt(question, answers, auto=None):
"""Write a prompt to stdout and ask for answer in stdin.
answers should be a string, with each character a single
answer. An uppercase letter is considered the default answer.
If an invalid answer is given, this asks again until it gets a
valid one.
If auto is set, the question is answered automatically with the
specified value.
"""
default = [c for c in answers if c.isupper()]
while True:
sys.stdout.write('%s [%s] ' % (question, answers))
sys.stdout.flush()
if auto is not None:
sys.stdout.write(auto + '\n')
sys.stdout.flush()
return auto
answer = sys.stdin.readline().strip().lower()
if not answer and default:
return default[0]
elif answer and answer in answers.lower():
return answer
def _log(msg=None, verbosemsg=None, verbose=False):
    """Write msg to standard out and flush.
    If verbose is True, write verbosemsg instead.
    """
    if verbose:
        msg = verbosemsg
    if msg:
        if isinstance(msg, bytestype):
            # Byte messages go through the raw byte stream wrapper.
            stdoutb.write(msg)
        else: # pragma: nocover
            sys.stdout.write(msg)
        sys.stdout.flush()
def _patch(cmd, diff):
    """Run echo [lines from diff] | cmd -p0"""
    # True when the external patch tool exits 0 (patch applied cleanly).
    out, retcode = execute([cmd, '-p0'], stdin=b('').join(diff))
    return retcode == 0
def runcli(tests, quiet=False, verbose=False, patchcmd=None, answer=None,
           noerrfiles=False):
    """Run tests with command line interface input/output.
    tests should be a sequence of 2-tuples containing the following:
    (test path, test function)
    This function yields a new sequence where each test function is wrapped
    with a function that handles CLI input/output.
    If quiet is True, diffs aren't printed. If verbose is True,
    filenames and status information are printed.
    If patchcmd is set, a prompt is written to stdout asking if
    changed output should be merged back into the original test. The
    answer is read from stdin. If 'y', the test is patched using patch
    based on the changed output.
    """
    total, skipped, failed = [0], [0], [0]
    for path, test in tests:
        # NOTE(review): testwrapper closes over loop variables `path` and
        # `test` (late binding); relies on each wrapper being invoked
        # before the next iteration -- confirm callers consume lazily.
        def testwrapper():
            """Test function that adds CLI output"""
            total[0] += 1
            _log(None, path + b(': '), verbose)
            refout, postout, diff = test()
            if refout is None:
                # Empty test file.
                skipped[0] += 1
                _log('s', 'empty\n', verbose)
                return refout, postout, diff
            abspath = os.path.abspath(path)
            errpath = abspath + b('.err')
            if postout is None:
                skipped[0] += 1
                _log('s', 'skipped\n', verbose)
            elif not diff:
                _log('.', 'passed\n', verbose)
                # Remove a stale .err file from a previous failing run.
                if os.path.exists(errpath):
                    os.remove(errpath)
            else:
                failed[0] += 1
                _log('!', 'failed\n', verbose)
                if not quiet:
                    _log('\n', None, verbose)
                if not noerrfiles:
                    # Record the actual output next to the test.
                    errfile = open(errpath, 'wb')
                    try:
                        for line in postout:
                            errfile.write(line)
                    finally:
                        errfile.close()
                if not quiet:
                    # Print the diff while re-materializing it (diff is a
                    # generator and can only be consumed once).
                    origdiff = diff
                    diff = []
                    for line in origdiff:
                        stdoutb.write(line)
                        diff.append(line)
                    if (patchcmd and
                        _prompt('Accept this change?', 'yN', answer) == 'y'):
                        if _patch(patchcmd, diff):
                            _log(None, path + b(': merged output\n'), verbose)
                            if not noerrfiles:
                                os.remove(errpath)
                        else:
                            _log(path + b(': merge failed\n'))
            return refout, postout, diff
        yield (path, testwrapper)
    if total[0] > 0:
        _log('\n', None, verbose)
        _log('# Ran %s tests, %s skipped, %s failed.\n'
             % (total[0], skipped[0], failed[0]))
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_encoding.py
|
"""Encoding utilities"""
import os
import sys
try:
import builtins
except ImportError:
import __builtin__ as builtins
__all__ = ['b', 'bchr', 'bytestype', 'envencode', 'fsdecode', 'fsencode',
'stdoutb', 'stderrb', 'u', 'ul', 'unicodetype']
# Resolve the bytes/unicode types portably: on Python 2 ``bytes is str`` and
# ``unicode`` exists; on Python 3 ``bytes`` exists and ``unicode`` falls back
# to ``str``.  All branching below keys off these two names.
bytestype = getattr(builtins, 'bytes', str)
unicodetype = getattr(builtins, 'unicode', str)
# Filename codecs: prefer the stdlib os.fsdecode/os.fsencode when available;
# otherwise hand-roll surrogateescape-based versions for older Python 3; on
# Python 2 (bytestype is str) filenames are plain byte strings and pass
# through unchanged.
if getattr(os, 'fsdecode', None) is not None:
    fsdecode = os.fsdecode
    fsencode = os.fsencode
elif bytestype is not str:
    if sys.platform == 'win32':
        # 'mbcs' can't take the surrogateescape handler, hence the split.
        def fsdecode(s):
            """Decode a filename from the filesystem encoding"""
            if isinstance(s, unicodetype):
                return s
            encoding = sys.getfilesystemencoding()
            if encoding == 'mbcs':
                return s.decode(encoding)
            else:
                return s.decode(encoding, 'surrogateescape')
        def fsencode(s):
            """Encode a filename to the filesystem encoding"""
            if isinstance(s, bytestype):
                return s
            encoding = sys.getfilesystemencoding()
            if encoding == 'mbcs':
                return s.encode(encoding)
            else:
                return s.encode(encoding, 'surrogateescape')
    else:
        def fsdecode(s):
            """Decode a filename from the filesystem encoding"""
            if isinstance(s, unicodetype):
                return s
            return s.decode(sys.getfilesystemencoding(), 'surrogateescape')
        def fsencode(s):
            """Encode a filename to the filesystem encoding"""
            if isinstance(s, bytestype):
                return s
            return s.encode(sys.getfilesystemencoding(), 'surrogateescape')
else:
    # Python 2: byte strings are already the native filename representation.
    def fsdecode(s):
        """Decode a filename from the filesystem encoding"""
        return s
    def fsencode(s):
        """Encode a filename to the filesystem encoding"""
        return s
# os.environ holds text on Python 3, bytes on Python 2.
if bytestype is str:
    def envencode(s):
        """Encode a byte string to the os.environ encoding"""
        return s
else:
    envencode = fsdecode
# Binary stdout/stderr: Python 3 text streams expose a .buffer; on Python 2
# the streams themselves already accept bytes.
if getattr(sys.stdout, 'buffer', None) is not None:
    stdoutb = sys.stdout.buffer
    stderrb = sys.stderr.buffer
else:
    stdoutb = sys.stdout
    stderrb = sys.stderr
# ASCII literal helpers so the same source runs on Python 2 and 3:
# b() yields bytes, u() yields unicode text, bchr() yields a one-byte bytes
# object from an integer.
if bytestype is str:
    # Python 2: str literals already are bytes.
    def b(s):
        """Convert an ASCII string literal into a bytes object"""
        return s
    bchr = chr
    def u(s):
        """Convert an ASCII string literal into a unicode object"""
        return s.decode('ascii')
else:
    # Python 3: str literals are text and must be encoded.
    def b(s):
        """Convert an ASCII string literal into a bytes object"""
        return s.encode('ascii')
    def bchr(i):
        """Return a bytes character for a given integer value"""
        return bytestype([i])
    def u(s):
        """Convert an ASCII string literal into a unicode object"""
        return s
# ul() evaluates a source-code string as a unicode literal.  On interpreters
# where the u'' prefix is a SyntaxError, plain eval already yields text.
try:
    eval(r'u""')
except SyntaxError:
    ul = eval
else:
    def ul(e):
        """Evaluate e as a unicode string literal"""
        return eval('u' + e)
| 2,990
| 26.953271
| 75
|
py
|
sysbench
|
sysbench-master/third_party/cram/cram/_test.py
|
"""Utilities for running individual tests"""
import itertools
import os
import re
import time
from cram._encoding import b, bchr, bytestype, envencode, unicodetype
from cram._diff import esc, glob, regex, unified_diff
from cram._process import PIPE, STDOUT, execute
__all__ = ['test', 'testfile']
# Detector for bytes that need escaping in test output: control characters
# (except \n) and non-ASCII bytes.
_needescape = re.compile(b(r'[\x00-\x09\x0b-\x1f\x7f-\xff]')).search
# Substitution pattern: same set plus backslash, which must round-trip.
_escapesub = re.compile(b(r'[\x00-\x09\x0b-\x1f\\\x7f-\xff]')).sub
# Byte -> escape-sequence table; \\, \r and \t get readable short forms.
_escapemap = dict((bchr(i), b(r'\x%02x' % i)) for i in range(256))
_escapemap.update({b('\\'): b('\\\\'), b('\r'): b(r'\r'), b('\t'): b(r'\t')})
def _escape(s):
    """Like the string-escape codec, but doesn't escape quotes"""
    # s[:-1] drops the trailing newline; the ' (esc)' marker tells the diff
    # matcher this line was escaped.
    return (_escapesub(lambda m: _escapemap[m.group(0)], s[:-1]) +
            b(' (esc)\n'))
def test(lines, shell='/bin/sh', indent=2, testname=None, env=None,
         cleanenv=True, debug=False, noerrfile=False):
    r"""Run test lines and return input, output, and diff.
    This returns a 3-tuple containing the following:
        (list of lines in test, same list with actual output, diff)
    diff is a generator that yields the diff between the two lists.
    If a test exits with return code 80, the actual output is set to
    None and diff is set to [].
    Note that the TESTSHELL environment variable is available in the
    test (set to the specified shell). However, the TESTDIR and
    TESTFILE environment variables are not available. To run actual
    test files, see testfile().
    Example usage:
    >>> from cram._encoding import b
    >>> refout, postout, diff = test([b('  $ echo hi\n'),
    ...                               b('  [a-z]{2} (re)\n')])
    >>> refout == [b('  $ echo hi\n'), b('  [a-z]{2} (re)\n')]
    True
    >>> postout == [b('  $ echo hi\n'), b('  hi\n')]
    True
    >>> bool(diff)
    False
    lines may also be a single bytes string:
    >>> refout, postout, diff = test(b('  $ echo hi\n  bye\n'))
    >>> refout == [b('  $ echo hi\n'), b('  bye\n')]
    True
    >>> postout == [b('  $ echo hi\n'), b('  hi\n')]
    True
    >>> bool(diff)
    True
    >>> (b('').join(diff) ==
    ...  b('--- \n+++ \n@@ -1,2 +1,2 @@\n   $ echo hi\n-  bye\n+  hi\n'))
    True
    Note that the b() function is internal to Cram. If you're using Python 2,
    use normal string literals instead. If you're using Python 3, use bytes
    literals.
    :param lines: Test input
    :type lines: bytes or collections.Iterable[bytes]
    :param shell: Shell to run test in
    :type shell: bytes or str or list[bytes] or list[str]
    :param indent: Amount of indentation to use for shell commands
    :type indent: int
    :param testname: Optional test file name (used in diff output)
    :type testname: bytes or None
    :param env: Optional environment variables for the test shell
    :type env: dict or None
    :param cleanenv: Whether or not to sanitize the environment
    :type cleanenv: bool
    :param debug: Whether or not to run in debug mode (don't capture stdout)
    :type debug: bool
    :param noerrfile: Accepted for API symmetry with the CLI layer; this
        function never touches .err files itself, so the flag is unused here.
    :return: Input, output, and diff iterables
    :rtype: (list[bytes], list[bytes], collections.Iterable[bytes])
    """
    # Prefixes that mark command lines ("  $ ") and continuations ("  > ").
    indent = b(' ') * indent
    cmdline = indent + b('$ ')
    conline = indent + b('> ')
    # A unique per-run marker echoed between commands so their boundaries
    # and exit codes can be recovered from the combined shell output.
    usalt = 'CRAM%s' % time.time()
    salt = b(usalt)
    if env is None:
        env = os.environ.copy()
    if cleanenv:
        # Force a deterministic locale/terminal so output is reproducible.
        for s in ('LANG', 'LC_ALL', 'LANGUAGE'):
            env[s] = 'C'
        env['TZ'] = 'GMT'
        env['CDPATH'] = ''
        env['COLUMNS'] = '80'
        env['GREP_OPTIONS'] = ''
    if isinstance(lines, bytestype):
        lines = lines.splitlines(True)
    if isinstance(shell, (bytestype, unicodetype)):
        shell = [shell]
    env['TESTSHELL'] = shell[0]
    if debug:
        # Debug mode: feed the commands straight to the shell without
        # capturing stdout, and return empty results.
        stdin = []
        for line in lines:
            if not line.endswith(b('\n')):
                line += b('\n')
            if line.startswith(cmdline):
                stdin.append(line[len(cmdline):])
            elif line.startswith(conline):
                stdin.append(line[len(conline):])
        execute(shell + ['-'], stdin=b('').join(stdin), env=env)
        return ([], [], [])
    # after[i] collects non-output lines (commands, comments) that must be
    # re-inserted into postout after the output of command at index i.
    after = {}
    refout, postout = [], []
    i = pos = prepos = -1
    stdin = []
    for i, line in enumerate(lines):
        if not line.endswith(b('\n')):
            line += b('\n')
        refout.append(line)
        if line.startswith(cmdline):
            after.setdefault(pos, []).append(line)
            prepos = pos
            pos = i
            # Salt line reports the previous command's index and exit code.
            stdin.append(b('echo %s %s $?\n' % (usalt, i)))
            stdin.append(line[len(cmdline):])
        elif line.startswith(conline):
            after.setdefault(prepos, []).append(line)
            stdin.append(line[len(conline):])
        elif not line.startswith(indent):
            after.setdefault(pos, []).append(line)
    stdin.append(b('echo %s %s $?\n' % (usalt, i + 1)))
    output, retcode = execute(shell + ['-'], stdin=b('').join(stdin),
                              stdout=PIPE, stderr=STDOUT, env=env)
    if retcode == 80:
        # Exit code 80 is the "skip this test" convention.
        return (refout, None, [])
    # Walk the captured output, splitting it back into per-command chunks at
    # the salt markers and annotating non-zero exit codes as "[N]" lines.
    pos = -1
    ret = 0
    for i, line in enumerate(output[:-1].splitlines(True)):
        out, cmd = line, None
        if salt in line:
            out, cmd = line.split(salt, 1)
        if out:
            if not out.endswith(b('\n')):
                out += b(' (no-eol)\n')
            if _needescape(out):
                out = _escape(out)
            postout.append(indent + out)
        if cmd:
            ret = int(cmd.split()[1])
            if ret != 0:
                postout.append(indent + b('[%s]\n' % (ret)))
            postout += after.pop(pos, [])
            pos = int(cmd.split()[0])
    postout += after.pop(pos, [])
    if testname:
        diffpath = testname
        errpath = diffpath + b('.err')
    else:
        diffpath = errpath = b('')
    diff = unified_diff(refout, postout, diffpath, errpath,
                        matchers=[esc, glob, regex])
    # Peek at the generator: if it yields anything the test failed and the
    # first line is chained back on; an empty diff means the test passed.
    for firstline in diff:
        return refout, postout, itertools.chain([firstline], diff)
    return refout, postout, []
def testfile(path, shell='/bin/sh', indent=2, env=None, cleanenv=True,
             debug=False, testname=None, noerrfile=False):
    """Run the test file at *path* and return (refout, postout, diff).

    Thin wrapper around :func:`test` that reads the test body from disk
    and publishes TESTDIR/TESTFILE in the test environment.  If the test
    exits with return code 80, postout is None and diff is empty.

    :param path: Path to test file
    :param shell: Shell to run test in
    :param indent: Amount of indentation to use for shell commands
    :param env: Optional environment variables for the test shell
    :param cleanenv: Whether or not to sanitize the environment
    :param debug: Whether or not to run in debug mode (don't capture stdout)
    :param testname: Optional test file name (used in diff output)
    :return: Input, output, and diff iterables
    """
    with open(path, 'rb') as f:
        abspath = os.path.abspath(path)
        env = env or os.environ.copy()
        env['TESTDIR'] = envencode(os.path.dirname(abspath))
        env['TESTFILE'] = envencode(os.path.basename(abspath))
        if testname is None:  # pragma: nocover
            testname = os.path.basename(abspath)
        return test(f, shell, indent=indent, testname=testname, env=env,
                    cleanenv=cleanenv, debug=debug, noerrfile=noerrfile)
| 7,959
| 33.458874
| 77
|
py
|
sysbench
|
sysbench-master/third_party/cram/tests/run-doctests.py
|
#!/usr/bin/env python
import doctest
import os
import sys
def _getmodules(pkgdir):
    """Import and yield every module found under *pkgdir*.

    Walks the package directory, skips ``__main__.py`` and bytecode
    caches, and imports each source file as a submodule of ``cram``.
    """
    for root, dirs, files in os.walk(pkgdir):
        # Don't descend into bytecode caches.
        if '__pycache__' in dirs:
            dirs.remove('__pycache__')
        sources = [fn for fn in files
                   if fn.endswith('.py') and fn != '__main__.py']
        for fn in sources:
            modname = fn.replace(os.sep, '.')[:-len('.py')]
            if modname.endswith('.__init__'):
                modname = modname[:-len('.__init__')]
            modname = '.'.join(['cram', modname])
            fromlist = [modname.rsplit('.', 1)[1]] if '.' in modname else []
            yield __import__(modname, {}, {}, fromlist)
def rundoctests(pkgdir):
    """Run doctests for every module under *pkgdir*.

    Returns a truthy value (suitable for sys.exit) when any doctest
    failed, falsy when all passed.
    """
    results = [doctest.testmod(module) for module in _getmodules(pkgdir)]
    return sum(failures for failures, _tests in results) != 0
if __name__ == '__main__':
    try:
        # Exit status reflects doctest failures; Ctrl-C exits quietly.
        sys.exit(rundoctests(sys.argv[1]))
    except KeyboardInterrupt:
        pass
| 1,182
| 27.853659
| 61
|
py
|
riscv-ocelot
|
riscv-ocelot-master/util/pipeview-helper.py
|
#!/usr/bin/env python
#******************************************************************************
# Copyright (c) 2016, The Regents of the University of California (Regents).
# All Rights Reserved. See LICENSE for license details.
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
# O3-Pipeview Visualization Helper
#------------------------------------------------------------------------------
#------------------------------------------------------------------------------
#
# Christopher Celio
# 2016 Mar 18
#
# INPUT: a *.out file generated by a Rocket-chip simulator (e.g., BOOM). Each
# output line is annotated with the fetch sequence number to help correlate
# which instruction corresponds to which timestamp printouts:
#
# (###; O3PipeView:stage: <timestamp-count>)
#
# OUTPUT: a trace compatible with the gem5 o3-pipeview.py visualization script.
#
# Helper script that processes *.out files from a processor (e.g., RISC-V BOOM),
# and re-constructs the log file to match the format expected by the Gem5
# o3-pipeview.py tool.
#
# The theory is that the processor assigns a fetch-sequence number to each
# instruction. As the instruction travels down the pipeline (potentially
# out-of-order), each stage prints to the *.out log the fetch-sequence number
# and the time-stamp. The resulting *.out log will contain an interleaving of
# committed and misspeculated instructions writing time stamps.
# The o3-pipeview.py tool expects to see each instruction's time stamps printed
# contigiously.
#
# NOTE: the o3-pipeview.py tool will print out different stages contigiously
# even if they occurred simultaneously. Example: if decode, rename, and dispatch
# occur on the same cycle, they will be printed as if they appeared on 3
# contigiously, separate cycles. This pipeview-helper tool will SQUASH the later
# stages if they occurred on the same cycle as previous "stages".
# TODO:
# implement lists as hash tables
# verify there's no key collision once using hash tables
import optparse
import sys
from collections import deque
def getFSeqNum(line, idx):
    """Parse the fetch-sequence number from the first *idx* chars of *line*."""
    return int(line[:idx])
# Strip the fetch-sequence prefix ("<digits>; ", idx+2 chars) and echo the
# remainder.  NOTE: Python 2 print statement; the trailing comma suppresses
# the extra newline (the payload already ends with one).
def writeOutput(line, idx):
    print line[idx+2:],
# Strip the fseq prefix and print the decode line, returning the cycle at
# which the decode event occurred so later stages on the same cycle can be
# squashed.  The offset idx+2+19 skips past the "O3PipeView:decode:" tag to
# the timestamp field — assumes the tag layout is fixed-width; TODO confirm.
def writeOutputDecode(line, idx):
    cycle = int(line[idx+2+19:])
#    debug_cycle = int(line[idx+2:].split(':')[2])
#    assert cycle==debug_cycle
    print line[idx+2:],
    return cycle
# Strip the fseq prefix and print the rename line, returning its cycle.
# If the rename happened on the same cycle as the previous stage
# (prev_cycle), print a zero-timestamp placeholder instead so the gem5
# viewer does not show it as a separate cycle.
def writeOutputRename(line, idx, prev_cycle):
    cycle = int(line[idx+2+19:])
#    debug_cycle = int(line[idx+2:].split(':')[2])
#    assert cycle==debug_cycle
    if cycle == prev_cycle:
        print "O3PipeView:rename: 0"
    else:
        print line[idx+2:],
    return cycle
# Strip the fseq prefix and print the dispatch line, squashing it to a
# zero-timestamp placeholder when it occurred on the same cycle as the
# previous stage.  Unlike the decode/rename writers this one does not
# return the cycle — no caller uses it.
def writeOutputDispatch(line, idx, prev_cycle):
    cycle = int(line[idx+2+21:])
#    debug_cycle = int(line[idx+2:].split(':')[2])
#    assert cycle==debug_cycle
    if cycle == prev_cycle:
        print "O3PipeView:dispatch: 0"
    else:
        print line[idx+2:],
# Re-create the retire line for a store by splicing the store-completion
# timestamp (from st_line, after its ':' separator at s_idx) onto the end
# of the retire message.  Both lines must carry the same fseq number; a
# mismatch means the store-completion list got out of sync and is fatal.
def writeRetireStoreOutput(ret_line, st_line, r_id, idx, s_idx):
    s_id = getFSeqNum(st_line, idx)
    if r_id != s_id:
        print "FAILURE:"
        print ret_line
        print st_line
    assert r_id == s_id, "wrong store entry!"
    s_tsc = st_line[s_idx+1:]
    end_idx = ret_line.rfind(':')
    print ret_line[idx+2:end_idx+1], s_tsc,
# Search the out-of-order event list for target_id; if found, print that
# event (removing it from the list) and return True.  Otherwise print a
# zero-timestamp placeholder for the stage and return False — the
# instruction never reached that stage (it was misspeculated).
def findAndPrintEvent(target_id, lst, stage_str, idx):
    for i in range(len(lst)):
        temp_id = getFSeqNum(lst[i], idx)
        if temp_id == target_id:
            writeOutput(lst.pop(i), idx)
            return True
    print "O3PipeView:%s"": 0" % (stage_str)
    return False
def isStore(line):
    """Return True when the fetched line looks like a store-type memory op.

    Matches plain stores, atomics, and store-/load-conditional mnemonics.
    # TODO remove lr from using store-completions (carried over from the
    # original implementation).
    """
    markers = ("sw ", "sd ", "sh ", "sb ", "amo", "sc.", "lr.")
    return any(marker in line for marker in markers)
def generate_pipeview_file(log):
    # Reorder the interleaved per-stage timestamp lines in *log* into the
    # contiguous per-instruction records that gem5's o3-pipeview.py expects,
    # writing the result to stdout.  In-order stages (fetch/decode/rename/
    # dispatch) are matched by queue position; out-of-order stages (issue/
    # writeback) are searched by fetch-sequence number.
    lines = log.readlines()
    # find fetch sequence number separator, and cache result
    idx = -1
    while True:
        if not lines:
            sys.exit("Error: file contains no pipetrace info.")
        idx = lines[0].find(';')
        if idx != -1:
            break
        else:
            lines.pop(0)
    # in-order stages get to use queues
    q_if = deque()
    q_dec = deque()
    q_ren = deque()
    q_dis = deque()
    # out-of-order stages must use lists
    l_iss = []
    l_wb = []
    # run over the entire list once to get the store completions,
    # as they occur after the store retires and thus don't fit neatly
    # into our for loop below
    l_stc = [line for line in lines if "store-comp" in line]
    # cache the s_idx value (if there are any stores in program)
    if l_stc: s_idx = l_stc[0].find(':')
    last_fseq = -1
    for line in lines:
        if "fetch" in line:
            q_if.append(line)
        elif "decode" in line:
            q_dec.append(line)
        elif "rename" in line:
            q_ren.append(line)
        elif "dispatch" in line:
            q_dis.append(line)
        elif "issue" in line:
            l_iss.append(line)
        elif "complete" in line:
            l_wb.append(line)
        elif "retire" in line:
            # A retire drains the in-order queues: everything fetched before
            # the retiring instruction must have been misspeculated.
            r_id = getFSeqNum(line, idx)
            while q_if:
                fetch_id = getFSeqNum(q_if[0], idx)
                if fetch_id > r_id:
                    break
                elif fetch_id == r_id:
                    # print out this instruction's stages and retire it
                    # (they'll be the head of all of the in-order queues)
                    fetch = q_if.popleft()
                    writeOutput(fetch, idx)
                    assert fetch_id != last_fseq, "Found duplicate fseq number."
                    last_fseq = fetch_id
                    # Squash later same-cycle stages (c threads the cycle).
                    c = writeOutputDecode(q_dec.popleft(), idx)
                    c = writeOutputRename(q_ren.popleft(), idx, c)
                    writeOutputDispatch(q_dis.popleft(), idx, c)
                    findAndPrintEvent(fetch_id, l_iss, "issue", idx)
                    findAndPrintEvent(fetch_id, l_wb, "complete", idx)
                    if isStore(fetch):
                        writeRetireStoreOutput(line, l_stc.pop(0), r_id, idx, s_idx)
                    else:
                        writeOutput(line, idx)
                    break
                else:
                    # print out misspeculated instruction
                    writeOutput(q_if.popleft(), idx)
                    c = 0
                    # A misspeculated instruction may have been killed at any
                    # stage, so each queue head is matched before popping.
                    if q_dec and fetch_id == getFSeqNum(q_dec[0], idx):
                        c = writeOutputDecode(q_dec.popleft(), idx)
                    else:
                        print "O3PipeView:decode: 0"
                    if q_ren and fetch_id == getFSeqNum(q_ren[0], idx):
                        c = writeOutputRename(q_ren.popleft(), idx, c)
                    else:
                        print "O3PipeView:rename: 0"
                    if q_dis and fetch_id == getFSeqNum(q_dis[0], idx):
                        writeOutputDispatch(q_dis.popleft(), idx, c)
                        findAndPrintEvent(fetch_id, l_iss, "issue", idx)
                        findAndPrintEvent(fetch_id, l_wb, "complete", idx)
                    else:
                        print "O3PipeView:dispatch: 0"
                        # Sanity: no issue/complete without a dispatch.
                        assert not findAndPrintEvent(fetch_id, l_iss, "issue", idx), \
                            "Found issue time stamp with no corresponding decode"
                        assert not findAndPrintEvent(fetch_id, l_wb, "complete", idx), \
                            "Found time stamp with no corresponding decode"
                    print "O3PipeView:retire: 0:store: 0"
def main():
    # Parse the single -f/--file option and stream the trace through the
    # converter; output goes to stdout (redirect it to capture the trace).
    parser = optparse.OptionParser()
    parser.add_option('-f','--file', dest='infile',
                      help='The input *.out file to parse.', default="")
    (options, args) = parser.parse_args()
    assert options.infile != "", "Empty input file!"
    with open(options.infile, 'r') as log:
        generate_pipeview_file(log)
if __name__ == '__main__':
    main()
| 8,876
| 35.9875
| 88
|
py
|
riscv-ocelot
|
riscv-ocelot-master/util/branch-processor.py
|
import argparse
import sys
# Command line: optional trace file (defaults to stdin) and an optional
# count ``n`` of branches to show in each ranking below.
parser = argparse.ArgumentParser(description='BOOM branch trace analyzer')
parser.add_argument('-v', '--verbose', action='store_true',
                    help='echo log contents')
parser.add_argument('file', nargs='?', type=argparse.FileType('r'),
                    default=sys.stdin,
                    help='workload.out file')
parser.add_argument('n', nargs='?', type=int, default=1,
                    help='number of branches to print')
args = parser.parse_args()
n = args.n
class BranchInfo:
    """Accumulated prediction statistics for a single branch address."""

    def __init__(self, addr, mispredicted, is_br, is_jalr):
        # A record is created on the branch's first occurrence.
        self.count = 1
        self.addr = addr
        self.mispredicted = 1 if mispredicted else 0
        # '1' flags come straight from the trace fields.
        self.branch_type = ("br" if is_br == '1'
                            else "jalr" if is_jalr == '1'
                            else "X")
        # Local taken/not-taken history, one trace character per occurrence.
        self.lhist = ""

    def mispredict_rate(self):
        """Fraction of this branch's executions that were mispredicted."""
        return self.mispredicted / self.count

    def __str__(self):
        rate = self.mispredicted / self.count
        return "(%s, %d/%d, %.4f, %s)" % (self.addr, self.mispredicted,
                                          self.count, rate, self.branch_type)
# Per-address statistics, keyed by the branch PC string from the trace.
branches = {}
# 128-bit global taken/not-taken history string (shifted below but never
# read afterwards in this script — kept presumably for experimentation).
ghist = '0'*128
for line in args.file:
    l = line.split(' ')
    # Branch records have exactly 6 space-separated fields:
    #   src taken is_br is_jal is_jalr addr
    if len(l) == 6:
        src, taken, is_br, is_jal, is_jalr, addr = map(str, l)
        addr = addr[:-1]
        label = 'br' if is_br == '1' else (
                'jal' if is_jal == '1' else (
                'jalr' if is_jalr == '1' else (
                'X')))
        print(src, taken, addr, label, flush=True)
        # src == '3' marks a misprediction in the trace encoding —
        # TODO confirm against the BOOM trace emitter.
        mispredicted = (src == '3')
        # Unconditional jal is always predicted correctly; skip it.
        if is_jal != '1':# and int(addr, 16) < 0x80000000:
            if addr not in branches:
                branches[addr] = BranchInfo(addr, mispredicted, is_br, is_jalr)
            else:
                branches[addr].count += 1
                branches[addr].mispredicted += (1 if mispredicted else 0)
            branches[addr].lhist += taken
            if is_jalr != '1':
                ghist = ghist[1:] + taken
    elif args.verbose:
        print(line, end='', flush=True)
# ---- Reporting: rank branches by how (badly) they were predicted ----
if not branches:
    sys.exit(0)
blist = list(branches.values())
# Sort keys are tuples so ties on rate break on frequency; [::-1] flips the
# ascending sort into "worst/most first".
bad_branches = list(sorted(blist, key=lambda b:(b.mispredict_rate(), b.count)))[::-1]
freq_bad_branches = list(sorted(blist, key=lambda b:b.mispredicted))[::-1]
good_branches = list(sorted(blist, key=lambda b:(-b.mispredict_rate(), b.count)))[::-1]
freq_good_branches = list(sorted(blist, key=lambda b:b.count - b.mispredicted))[::-1]
freq_branches = list(sorted(blist, key=lambda b:b.count))[::-1]
print("Top {} mispredicted branches".format(n))
for i in bad_branches[:n]:
    print(i)
print("Top {} correctly predicted branches".format(n))
for i in good_branches[:n]:
    print(i)
print("Top {} frequently mispredicted branches".format(n))
for i in freq_bad_branches[:n]:
    print(i)
print("Top {} frequently correctly predicted branches".format(n))
for i in freq_good_branches[:n]:
    print(i)
print("Top {} most frequent branches".format(n))
for i in freq_branches[:n]:
    print(i)
# Aggregate misprediction totals split by branch type.
jalrs = [b for b in blist if b.branch_type == "jalr"]
brs = [b for b in blist if b.branch_type == "br"]
mp_jalrs = sum([b.mispredicted for b in jalrs])
mp_brs = sum([b.mispredicted for b in brs])
total_jalrs = float(max(sum([b.count for b in jalrs]), 1))
# BUG FIX: clamp to at least 1 exactly like total_jalrs above; a trace that
# contained jalrs but no conditional branches previously crashed with
# ZeroDivisionError in the "brs" print below.
total_brs = max(sum([b.count for b in brs]), 1)
print("Mispredicted {}/{} jalrs, {}".format(mp_jalrs,
                                            total_jalrs,
                                            mp_jalrs / total_jalrs))
print("Mispredicted {}/{} brs, {}".format(mp_brs,
                                          total_brs,
                                          mp_brs / total_brs))
print("Mispredicted {}/{} all, {}".format(mp_jalrs + mp_brs,
                                          total_jalrs + total_brs,
                                          (mp_jalrs + mp_brs) / (total_jalrs + total_brs)))
| 3,997
| 34.696429
| 92
|
py
|
riscv-ocelot
|
riscv-ocelot-master/docs/conf.py
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
# import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- Project information -----------------------------------------------------
# Sphinx build configuration for the RISCV-BOOM documentation.
project = 'RISCV-BOOM'
copyright = '2019, The Regents of the University of California'
author = 'Chris Celio, Jerry Zhao, Abraham Gonzalez, Ben Korpan'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
    'sphinx.ext.autosectionlabel',
    'sphinxcontrib.bibtex'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add support for seeing figure numbers
numfig = True
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself.  Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'RISCV-BOOMdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #
    # 'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #
    # 'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #
    # 'preamble': '',
    # Latex figure (float) alignment
    #
    # 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    (master_doc, 'RISCV-BOOM.tex', 'RISCV-BOOM Documentation',
     'Chris Celio, Jerry Zhao, Abraham Gonzalez, Ben Korpan', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    (master_doc, 'riscv-boom', 'RISCV-BOOM Documentation',
     [author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    (master_doc, 'RISCV-BOOM', 'RISCV-BOOM Documentation',
     author, 'RISCV-BOOM', 'The Berkeley Out-of-Order Processor',
     'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
| 5,400
| 29.173184
| 79
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/setup.py
|
import os
from distutils.sysconfig import get_python_lib
# Register this source directory on Python's import path by dropping a
# cvf.pth file into site-packages, so ``import cvf`` works from anywhere.
libDir = get_python_lib()
curDir = os.path.dirname(__file__)
curDir = os.path.abspath(curDir)
print('libDir=' + libDir)
print('curDir=' + curDir)
with open(libDir + '/' + 'cvf.pth', 'w') as f:
    f.write(curDir + '\n')
# (Removed a dead commented-out block that wrote additional per-library
# .pth files; it was unreachable code kept inside a string literal.)
| 383
| 17.285714
| 46
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/__init__.py
| 0
| 0
| 0
|
py
|
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/test/test_dataset_render.py
|
import os
import sys
# Extend PATH with hard-coded developer locations so the native DLLs used by
# cv2 and the project can be found on this Windows dev machine.
envs=os.environ.get("PATH")
os.environ['PATH']=envs+';F:/dev/cvfx/assim410/bin-v140/x64/release/;F:/dev/cvfx/opencv3413/bin-v140/x64/Release/;F:/dev/cvfx/bin/x64/;D:/setup/Anaconda3/;'
import cv2
from cv2 import data
# BUG FIX: the code below calls measure.find_contours(); the original line
# imported the non-existent name "measurecd" (typo), which raised
# ImportError at module load.
from skimage import measure
import random
import numpy as np
import pycocotools as coco
import pycocotools.mask
def categories(label, label_id):
    """Build a single COCO category record for *label* with id *label_id*."""
    return {
        'supercategory': 'component',
        'id': label_id,
        'name': label,
    }
def get_category_list(label_list):
    """Map each label to a COCO category record; ids are 1-based."""
    return [categories(label, i + 1) for i, label in enumerate(label_list)]
def get_image_info(img, id, fileName):
    """Build a COCO image record from an image array and its file name."""
    rows, cols = img.shape[0], img.shape[1]
    return {
        'height': rows,
        'width': cols,
        'id': id,
        'file_name': fileName,
    }
def annotations_from_rect(bbox, category_id, image_id, object_id):
    """Build a COCO annotation for an axis-aligned box bbox = [x, y, w, h].

    The segmentation is the box's four corners as a flat polygon.
    """
    left = bbox[0]
    top = bbox[1]
    right = bbox[0] + bbox[2]
    bottom = bbox[1] + bbox[3]
    return {
        'segmentation': [left, top, left, bottom, right, bottom, right, top],
        'iscrowd': 0,
        'image_id': image_id,
        'bbox': bbox,
        'area': bbox[2] * bbox[3],
        'category_id': category_id,
        'id': object_id,
    }
def annotations_from_mask(obj_mask, category_id, image_id, object_id):
    """Build a COCO annotation for a binary instance mask.

    bbox and area come from the RLE encoding of the mask; the segmentation
    is the list of polygon outlines traced at the 0.5 level set, converted
    from (row, col) to (x, y) order and flattened.
    """
    rle = coco.mask.encode(np.asfortranarray(obj_mask))
    segs = [np.flip(contour, axis=1).ravel().tolist()
            for contour in measure.find_contours(obj_mask, 0.5)]
    return {
        'segmentation': segs,
        'iscrowd': 0,
        'image_id': image_id,
        'bbox': coco.mask.toBbox(rle),
        'area': coco.mask.area(rle),
        'category_id': category_id,
        'id': object_id,
    }
def rand_box(imsize, minSize, maxSize):
    """Choose a random square box fully inside an image.

    Repeatedly draws an anchor point (x, y) until both coordinates exceed
    minSize, then picks a side length in [minSize, min(x, y, maxSize)].
    Returns [x - size, y - size, size]: top-left corner plus side length,
    so the box's bottom-right corner is the anchor.

    NOTE(review): loops forever if imsize is too small to ever satisfy
    min(x, y) > minSize — callers are expected to pass compatible sizes.
    """
    while True:
        x = int(random.uniform(0, imsize[0]))
        y = int(random.uniform(0, imsize[1]))
        if min(x, y) > minSize:
            break
    size = random.randint(minSize, min(x, y, maxSize))
    return [x - size, y - size, size]
import cvf.cvrender as cvr
import json
class GenDet2dDataset:
    """Generate a COCO-format 2D detection dataset by rendering 3D models
    onto random background photos via cvf.cvrender.DatasetRender."""
    def __init__(self, imageFiles, modelFiles, labelList) -> None:
        # Background photo paths, 3D model paths, and one label per model.
        self.dr=cvr.DatasetRender()
        self.dr.loadModels(modelFiles)
        self.imageFiles=imageFiles
        self.modelFiles=modelFiles
        self.category_list=get_category_list(labelList)
        self.labelList=labelList
    def gen(self, outDir, setName, nImages, maxModelsPerImage):
        # Render up to nImages composites into outDir/setName/ and write the
        # matching COCO annotation file to outDir/annotations/setName.json.
        imageDir=outDir+setName+'/'
        annDir=outDir+'annotations/'
        os.makedirs(imageDir,exist_ok=True)
        os.makedirs(annDir,exist_ok=True)
        category_list=self.category_list
        images_list=[]
        annotations_list=[]
        #idx_of_all_models=list(range(len(label_list)))
        n_all_models=len(self.labelList)
        max_models_perim=min(n_all_models,maxModelsPerImage)
        nobjs=0
        nimgs=0
        idList=[i for i in range(0,n_all_models)]
        for n in range(0,nImages):
            # Pick a random background; skip unreadable files.
            img=cv2.imread(self.imageFiles[random.randint(0,len(self.imageFiles)-1)])
            if img is None:
                continue
            #cv2.imshow("img",img)
            # Scale so the longest image dimension becomes 800 pixels.
            dsize=800.0/max(img.shape)*np.asarray(img.shape)
            dsize=dsize.astype(np.int32)
            dsize=(dsize[1],dsize[0])
            img=cv2.resize(img,dsize)
            obj_list=[]
            size_list=[]
            center_list=[]
            # Shuffle model ids and take a random-sized unique subset.
            nobjs_cur=random.randint(1,max_models_perim)
            random.shuffle(idList)
            #print(nobjs_cur)
            for i in range(0,nobjs_cur):
                obj_list.append(idList[i])
                # Random square placement for each model instance.
                imaxSize=min(int(min(dsize)*4/5),400)
                size=int(random.uniform(150,imaxSize))
                rbb=rand_box(dsize,size,size+1)
                size_list.append(rbb[2])
                center_list.append([int(rbb[0]+rbb[2]/2),int(rbb[1]+rbb[2]/2)])
            rr=self.dr.renderToImage(img,obj_list,sizes=size_list,centers=center_list)
            # composite_mask appears to label pixels by rendered-object index;
            # TODO confirm against DatasetRender's documentation.
            objs_mask=rr['composite_mask']
            dimg=rr['img']
            nobjs0=nobjs
            for obj_idx in range(0,nobjs_cur):
                obj_mask=np.zeros(objs_mask.shape,dtype=np.uint8)
                obj_mask[objs_mask==obj_idx]=1
                # Category ids are 1-based (model index + 1).
                ann=annotations_from_mask(obj_mask,obj_list[obj_idx]+1,nimgs+1,nobjs+1)
                #cv2.rectangle(dimg,ann['bbox'],(255,0,0), thickness=3)
                # Drop degenerate (nearly invisible) instances.
                if(ann['area']>10):
                    annotations_list.append(ann)
                    nobjs+=1
            # No visible objects rendered: discard this composite entirely.
            if nobjs==nobjs0:
                continue
            nimgs+=1
            outFileName='%06d.jpg'%nimgs
            images_list.append(get_image_info(dimg,nimgs,outFileName))
            cv2.imwrite(imageDir+outFileName,dimg)
            print((n+1,outFileName,obj_list,size_list))
            #cv2.imshow("dimg",dimg)
            #cv2.waitKey()
        data_coco = {}
        data_coco['images'] = images_list
        data_coco['categories'] = category_list
        data_coco['annotations'] = annotations_list
        # Encoder that downgrades numpy scalars/arrays to JSON-native types.
        class MyEncoder(json.JSONEncoder):
            def default(self, obj):
                if isinstance(obj, np.integer):
                    return int(obj)
                elif isinstance(obj, np.floating):
                    return float(obj)
                elif isinstance(obj, np.ndarray):
                    return obj.tolist()
                else:
                    return super(MyEncoder, self).default(obj)
        print(os.path.abspath('./'))
        json.dump(data_coco, open(annDir+setName+'.json', 'w'),cls=MyEncoder)
import glob
import fileinput
def getImageList(dir):
flist=[]
for f in glob.glob(dir+"/*.jpg"):
flist.append(f)
return flist
def readModelList(modelListFile):
modelListDir=os.path.dirname(modelListFile)
label_list=[]
modelFiles=[]
for line in fileinput.input(modelListFile):
line=str(line)
strs=line.split()
label_list.append(strs[0])
modelFiles.append(modelListDir+'/'+strs[1])
return label_list,modelFiles
def main():
dataDir='f:/home/aa/data/'
imageFiles=getImageList(dataDir+'/VOCdevkit/VOC2012/JPEGImages/')
modelListFile=dataDir+'/3dmodels/re3d2.txt'
#modelListFile=dataDir+'/3dmodels/re3d25.txt'
labelList,modelFiles=readModelList(modelListFile)
outDir=dataDir+'3dgen/re3d2a/'
dr=GenDet2dDataset(imageFiles, modelFiles, labelList)
#gen eval set
nImagesToGen=20
maxModelsPerImage=2
dr.gen(outDir,'eval',nImagesToGen,maxModelsPerImage)
#gen train set
nImagesToGen=100
maxModelsPerImage=2
dr.gen(outDir,'train',nImagesToGen,maxModelsPerImage)
if __name__=='__main__':
main()
| 7,343
| 29.857143
| 156
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/test/test_cvr.py
|
import os
import sys
import numpy as np
import cv2
import time
#envs=os.environ.get("PATH")
#os.environ['PATH']=envs+';F:/dev/cvfx/assim410/bin-v140/x64/release/;F:/dev/cvfx/opencv3413/bin-v140/x64/Release/;F:/dev/cvfx/bin/x64/;D:/setup/Anaconda3/;'
import cvf.cvrender as cvr
#cvr.init("")
#dr=cvr.DatasetRender()
#img=np.zeros([320,240],np.uint8)
#rr=dr.render(img,1,0)
a=cvr.I()
viewSize=[640,480]
K=cvr.defaultK(viewSize,1.5)
P=cvr.fromK(K,viewSize,1,100)
print(K,P)
#mats=cvr.CVRMats([640,480])
#print(mats.mProjection)
model=cvr.CVRModel('/home/aa/data/3dmodels/test/cat.obj')
render=cvr.CVRender(model)
mats=cvr.CVRMats(model,viewSize)
rr=render.exec(mats, viewSize)
#cvr.mdshow("model",model)
#cvr.waitKey()
start=time.perf_counter()
rr=render.exec(mats,viewSize,cvr.CVRM_IMAGE|cvr.CVRM_DEPTH)
print('time={}ms'.format((time.perf_counter()-start)*1000))
cv2.imwrite('./out.jpg',rr.img)
#cv2.imshow("img",rr.img)
#cv2.waitKey()
| 952
| 18.44898
| 157
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/compress/compress.py
|
import os
mlx_path = "/home/aa/libs/cvf/Python/compress/compressmodel.mlx" # mlx脚本的路径
root_dir = "/home/aa/data/3dmodels" # 需要处理模型的根路径
for dr in os.listdir(root_dir):
model_dir = os.path.join(root_dir, dr)
if os.path.isdir(model_dir):
model_path = os.path.join(model_dir, dr+".3ds") # 模型的路径,模型后缀根据实际修改
print("Current process model: ",model_path)
output_path = os.path.join(model_dir, dr+".ply") # 导出模型的路径
cmd = "meshlabserver -i " + model_path + " -o " + output_path + " -s " + mlx_path + " -om fc fn wt" # 需要执行的命令
# -om 参数根据导出需求自行修改
os.system(cmd)
| 622
| 35.647059
| 118
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_bop_dataset.py
|
import os
import sys
#envs=os.environ.get("PATH")
#os.environ['PATH']=envs+';F:/dev/cvfx/assim410/bin-v140/x64/release/;F:/dev/cvfx/opencv3413/bin-v140/x64/Release/;F:/dev/cvfx/bin/x64/;D:/setup/Anaconda3/;'
import cv2
from cv2 import data
from skimage import measure
import random
import numpy as np
def categories(label, label_id):
category = {}
category['supercategory'] = 'component'
category['id'] = label_id
category['name'] = label
return category
def get_category_list(label_list):
category_list = []
for i,label in enumerate(label_list):
category_list.append(categories(label, i+1))
return category_list
def rand_box(imsize, minSize, maxSize):
x = 0
y = 0
while True:
#x=random.randint(0,imsize[0])
#y=random.randint(0,imsize[1])
x = int(random.uniform(0, imsize[0]))
y = int(random.uniform(0, imsize[1]))
if min(x, y) > minSize:
break
size = random.randint(minSize, min(x, y, maxSize))
return [x-size, y-size, size]
import cvf.cvrender as cvr
import json
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
class GenBOPDataset:
def __init__(self, outDir, imageFiles, modelFiles, labelList) -> None:
self.dr = cvr.DatasetRender()
self.dr.loadModels(modelFiles)
self.imageFiles = imageFiles
self.modelFiles = modelFiles
self.category_list = get_category_list(labelList)
self.labelList = labelList
self.outDir = outDir
def saveBOPModels(self):
modelDir = self.outDir + '/models/'
os.makedirs(modelDir, exist_ok=True)
models = self.dr.getModels()
nModels = len(models)
for i in range(0, nModels):
fname = 'obj_%06d.ply'%(i+1)
print(fname, ':', self.modelFiles[i])
models[i].saveAs(modelDir+fname)
modelsInfo=self.dr.getBOPModelInfo()
json.dump(modelsInfo, open(modelDir+'models_info.json', 'w'), cls=MyEncoder)
def genScene(self, setName, sceneId, nImages, imgSize, maxModelsPerImage, minModelsPerImage, maxObjectSizeRatio=0.5, minObjectSizeRatio=0.2):
sceneDir=self.outDir+'/'+setName+'/'+'%06d'%sceneId+'/'
imageDir=sceneDir+'rgb/'
maskDir=sceneDir+'mask/'
maskVisibDir=sceneDir+'mask_visib/'
os.makedirs(imageDir,exist_ok=True)
os.makedirs(maskDir,exist_ok=True)
os.makedirs(maskVisibDir,exist_ok=True)
category_list=self.category_list
scene_gt={}
scene_gt_info={}
scene_camera={}
#idx_of_all_models=list(range(len(label_list)))
n_all_models=len(self.labelList)
max_models_perim=min(n_all_models,maxModelsPerImage)
min_models_perim=min(minModelsPerImage,max_models_perim)
min_object_size=int(min(imgSize)*minObjectSizeRatio)
max_object_size=int(min(imgSize)*maxObjectSizeRatio)
nobjs=0
idList=[i for i in range(0,n_all_models)]
for n in range(0,nImages):
img=cv2.imread(self.imageFiles[random.randint(0,len(self.imageFiles)-1)])
if img is None:
continue
dsize=tuple(imgSize)
img=cv2.resize(img,dsize)
obj_list=[]
size_list=[]
center_list=[]
nobjs_cur=random.randint(min_models_perim,max_models_perim)
random.shuffle(idList)
for i in range(0,nobjs_cur):
obj_list.append(idList[i])
size=int(random.uniform(min_object_size,max_object_size))
# imaxSize=min(int(min(dsize)*4/5),400)
# size=int(random.uniform(150,imaxSize))
rbb=rand_box(dsize,size,size+1)
size_list.append(rbb[2])
center_list.append([int(rbb[0]+rbb[2]/2),int(rbb[1]+rbb[2]/2)])
rr=self.dr.renderToImage(img,obj_list,sizes=size_list,centers=center_list)
imKey='%d'%(n+1)
scene_gt_i=[]
vR=rr['vR']
vT=rr['vT']
#x=np.reshape(vR[0],9)
for i in range(0,nobjs_cur):
scene_gt_i.append(
{'cam_R_m2c':np.reshape(vR[i],9),
'cam_t_m2c':vT[i],
'obj_id':obj_list[i]+1
}
)
scene_gt[imKey]=scene_gt_i
scene_gt_info[imKey]=rr['bop_info']
scene_camera[imKey]={
'cam_K':np.reshape(rr['K'],9),
'depth_scale':0.1
}
dimg=rr['img']
outFileName='%06d.png'%(n+1)
cv2.imwrite(imageDir+outFileName,dimg)
masks=rr['objs_mask']
masks_visib=rr['objs_mask_visib']
for i in range(0,nobjs_cur):
outFileName='%06d'%(n+1)+'_%06d.png'%i
cv2.imwrite(maskDir+outFileName,masks[i])
cv2.imwrite(maskVisibDir+outFileName,masks_visib[i])
print((n+1,outFileName,obj_list,size_list))
#cv2.imshow("dimg",dimg)
#cv2.waitKey()
print(os.path.abspath('./'))
json.dump(scene_gt, open(sceneDir+'scene_gt.json', 'w'), cls=MyEncoder)
json.dump(scene_gt_info, open(sceneDir+'scene_gt_info.json', 'w'), cls=MyEncoder)
json.dump(scene_camera, open(sceneDir+'scene_camera.json', 'w'), cls=MyEncoder)
camK=scene_camera['1']['cam_K']
camInfo={
'cx':camK[2],
'cy':camK[5],
'depth_scale':1.0,
'fx':camK[0],
'fy':camK[4],
'height':imgSize[1],
'width':imgSize[0]
}
json.dump(camInfo, open(self.outDir+'/camera.json', 'w'), cls=MyEncoder)
import glob
import fileinput
def getImageList(dir):
"""
get the absolute paths of all the file which suffix is like .jpg in dir root
"""
flist = []
for f in glob.glob(dir+"/*.jpg"):
flist.append(f)
return flist
def readModelList(modelListFile):
"""
get modelListFile's content
and return the relative model's lable and model's absolute path
"""
modelListDir = os.path.dirname(modelListFile) # get the parent path
label_list = []
modelFiles = []
for line in fileinput.input(modelListFile):
line = str(line)
strs = line.split()
label_list.append(strs[0])
modelFiles.append(modelListDir+'/'+strs[1])
return label_list, modelFiles
def main():
dataDir = '/home/aa/data/'
imageFiles = getImageList(dataDir+'/VOCdevkit/VOC2012/JPEGImages/')
#modelListFile = dataDir+'/3dmodels/re3d3.txt'
#modelListFile=dataDir+'/3dmodels/re3d25.txt'
modelListFile=dataDir+'/3dmodels/ycbv.txt'
labelList, modelFiles = readModelList(modelListFile)
outDir = dataDir + '3dgen/ycbv_bop21/' # save path for generate BOP type data
dr = GenBOPDataset(outDir, imageFiles, modelFiles, labelList)
dr.saveBOPModels()
#gen eval set
setName = 'train'
nScenesToGen = 12
nImagesPerScene = 1000
maxModelsPerImage = 15
#dimgSize = (800,600)
dimgSize = (640,480)
for sceneId in range(0, nScenesToGen):
dr.genScene(setName, sceneId+1, nImagesPerScene, dimgSize, maxModelsPerImage=maxModelsPerImage, minModelsPerImage=8)
if __name__=='__main__':
main()
| 7,686
| 30.504098
| 157
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/compositer.py
|
from numpy.core.fromnumeric import shape
import cvf.bfc as bfc
import cvf.cvrender as cvr
import cv2
import random
'''composite a set of image objects with a background image
harmonizeF : transform fg color with respect to bg
degradeF : add random smooth and noise to fg, specified with maxSmoothSigma and maxNoiseStd
'''
def composite_images(bg, fgList, harmonizeF=True, degradeF=True, maxSmoothSigma=1.0, maxNoiseStd=5.0, alphaScale=1.0):
compositer=cvr.Compositer()
compositer.init(bg)
for fg in fgList:
compositer.addLayer(fg['img'], # fg image
fg['mask'], # mask of fg in [0,255]
fg['roi'], # ROI of fg in the background image, the fg image will be resized as necessary
-1,harmonizeF,degradeF,maxSmoothSigma,maxNoiseStd,
alphaScale #scale the alpha value of each pixel to gen Mixup effect
)
return compositer.getComposite()
#load an image object and composite with a new background
def load_with_new_bg(fgFile, maskFile, bgFile):
# bgBorderWidth: dilate the crop box region with bgBorderWidth
# cropOffset: add offset to the top-left corner
fg=cv2.imread(fgFile,cv2.IMREAD_COLOR)
mask=cv2.imread(maskFile, cv2.IMREAD_GRAYSCALE)
#crop redudant background region in fg
# max boarder
if fg.shape[0] != fg.shape[1]:
raise ValueError('image should be a square.')
bgBorderWidth = mask.shape[0]
bgBorderWidth = random.randint(0, bgBorderWidth) # random sample int from [0, bgBorderWidth]
left_bound = random.randint(-bgBorderWidth, bgBorderWidth) # random sample int from [-bgBorderWidth, bgBorderWidth]
right_bound = random.randint(-bgBorderWidth, bgBorderWidth)
cropOffset = [left_bound, right_bound]
fg,mask=cvr.cropImageRegion(fg, mask, bgBorderWidth, cropOffset)
bg=cv2.imread(bgFile,cv2.IMREAD_COLOR)
<<<<<<< HEAD
#dsize=fg.shape[0:2]
dsize = (fg.shape[1], fg.shape[0])
bg=cv2.resize(bg,dsize)
fgList=[{'img':fg, 'mask':mask, 'roi':[0,0,dsize[0],dsize[1]]}]
alphaScale = round(random.uniform(0.5,1), 2) # random sample float from [0.5, 1)
return composite_images(bg,fgList, alphaScale=alphaScale)
if __name__=='__main__':
imdir= r'/home/aa/data/3dgen/viewclassify_01/0001/'
bgImgFile=r'/home/aa/data/plane.png'
dimg=load_with_new_bg(imdir+'img/0001.png',imdir+'mask/0001.png',bgImgFile)
print(dimg.shape)
#cv2.imshow('dimg',dimg)
#cv2.waitKey()
cv2.imwrite('/home/aa/libs/cvf/Python/cvf/tools/out.jpg',dimg)
=======
dsize=(fg.shape[1],fg.shape[0])
bg=cv2.resize(bg,dsize)
fgList=[{'img':fg, 'mask':mask, 'roi':[0,0,dsize[0],dsize[1]]
}]
return composite_images(bg,fgList)
if __name__=='__main__':
imdir= r'f:/home/aa/data/3dgen/viewclassify_01/0001/'
bgImgFile=r'f:/home/aa/data/plane.png'
dimg=load_with_new_bg(imdir+'img/0001.png',imdir+'mask/0001.png',bgImgFile)
cv2.imshow('dimg',dimg)
cv2.waitKey()
#cv2.imwrite('/home/aa/data/out.jpg',dimg)
>>>>>>> 2260dd086489de480f4d439547c58b5b149f1271
| 3,155
| 35.275862
| 120
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_test_idea.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
import argparse
import numpy as np
import math
import random
import numpy as np
import cv2
import os
import shutil
import csv
"""
def gen_views(nViews, nViewSamples, marginRatio=0):
assert(nViewSamples>=nViews)
views=cvr.sampleSphere(nViews)
samples=cvr.sampleSphere(nViewSamples)
viewClusters=[]
for i in range(0,len(views)):
viewClusters.append([])
for s in samples:
mcos=-1
mi=0
for i,v in enumerate(views):
sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
if sv_cos>mcos:
mcos=sv_cos
mi=i
viewClusters[mi].append((s,mcos))
#for v in viewClusters:
for i,v in enumerate(viewClusters):
v.sort(key=lambda x:x[1],reverse=True)
viewClusters[i]={'center':views[i],
'nbrs':v[0:int(len(v)*(1-marginRatio))]
}
return viewClusters
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
model=cvr.CVRModel(modelFile)
modelCenter=np.array(model.getCenter())
sizeBB=model.getSizeBB()
maxBBSize=max(sizeBB)
unitScale=2.0/maxBBSize
eyeDist=4.0/unitScale
fscale=1.5
viewSize=[300,300]
viewClusters=gen_views(nViews,3000)
obj_name = modelFile.split('/')[-1].replace('.ply', '')
json_info = {}
for ci,viewCluster in enumerate(viewClusters):
print('view {}/{}'.format(ci,len(viewClusters)))
viewCenter=viewCluster['center']
viewNbrs=viewCluster['nbrs']
upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
imgDir=outDir+'/img/'
maskDir=outDir+'/mask/'
os.makedirs(imgDir,exist_ok=True)
os.makedirs(maskDir,exist_ok=True)
for ii in range(0, nImagesPerView**0.5):
viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
viewDir=np.array(viewDir)
eyePos=modelCenter+viewDir*eyeDist
mats=cvr.CVRMats()
mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
angle=2*math.pi*ii/nImagesPerView
mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
render=cvr.CVRender(model)
rr=render.exec(mats,viewSize)
mask=cvr.getRenderMask(rr.depth)
R, T = cvr.decomposeR33T(mats.mModel * mats.mView)
imname = obj_name + '_' + str(ci) + '_%04d.png'%ii
cv2.imwrite(imgDir+imname,rr.img)
cv2.imwrite(maskDir+imname,mask)
#json_info[imname.replace('.png', '')] = {'R': R.tolist(), 'T': T}
# cv2.imshow('img',rr.img)
# cv2.imshow("mask",mask)
# dimg=cvr.postProRender(rr.img,mask)
# cv2.imwrite("f:/dimg.png",dimg)
# cv2.waitKey()
with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
f.write(json.dumps(json_info, indent=4))
def main():
nViews = 15
nImagesPerView = 225
ds_name = 'idea_ycbv_roate'
outDir = '/home/aa/data/3dgen/{}/train'.format(ds_name)
view_category_json_path = '/home/aa/data/3dgen/{}'.format(ds_name)
os.makedirs(outDir, exist_ok=True)
shutil.rmtree(outDir)
for i in range(21):
obj_id = i+1
modelFile = '/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_%06d.ply'%obj_id
print('outdir: ' + outDir)
render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews, nImagesPerView)
if __name__=='__main__':
main()
"""
def render_angel_ds_singleV4(modelFile, outDir, nImagesPerView, csv_writer, category_info):
model_name = modelFile.split('/')[-1].replace('.ply', '')
model = cvr.CVRModel(modelFile) # 加载模型
modelCenter = np.array(model.getCenter()) # 获取模型中心
sizeBB = model.getSizeBB()
maxBBSize = max(sizeBB)
unitScale = 2.0 / maxBBSize
eyeDist = 4.0 / unitScale
fscale = 1.5
viewSize = [300, 300]
categories = []
views = []
for category, vector in category_info.items():
categories.append(category)
views.append(vector)
samples = cvr.sampleSphere(3000) # 采样3000个视角
marginRatio = 0;
viewClusters=[]
for i in range(0,len(views)):
viewClusters.append([])
for s in samples:
mcos=-1
mi=0
for i,v in enumerate(views):
sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
if sv_cos>mcos:
mcos=sv_cos
mi=i
viewClusters[mi].append((s,mcos))
for i,v in enumerate(viewClusters):
v.sort(key=lambda x:x[1],reverse=True)
viewClusters[i]={'center':views[i],
'nbrs':v[0:int(len(v)*(1-marginRatio))]
}
views_info = [viewClusters[i+7] for i in range(0,len(viewClusters),12)]
for ci, viewCluster in enumerate(views_info):
viewCenter = viewCluster['center']
viewNbrs = viewCluster['nbrs']
upDir = [0,0,1] if abs(viewCenter[2]) < 0.95 else [0,1,0]
for ii in range(0, nImagesPerView):
viewDir = viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
viewDir = np.array(viewDir)
eyePos = modelCenter+viewDir*eyeDist
# 旋转图1
mats = cvr.CVRMats()
mats.mModel = cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
mats.mProjection = cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
render_angle = 2*math.pi*ii/nImagesPerView
#render_angle = 2*math.pi*current/(samples-1)
#render_angle -= math.pi
mats.mView = cvr.rotateAngle(render_angle, [0.0, 0.0, 1.0])
# 渲染图片
render = cvr.CVRender(model)
rr = render.exec(mats, viewSize)
mask = cvr.getRenderMask(rr.depth)
render_image = '{}_{}_{}_{}.png'.format(int(model_name.replace('obj_', '')), ci, ii, 'origin_' + str(round(render_angle, 4)))
cv2.imwrite(os.path.join(outDir, 'img', render_image), rr.img)
cv2.imwrite(os.path.join(outDir, 'mask', render_image), mask)
print("Current: {} {} {} angle: {}".format(model_name, ci, ii, np.rad2deg(render_angle)))
def load_json(json_path: str):
with open(json_path,'r') as json_file:
json_dict = json.load(json_file)
return json_dict
def sample_all(args):
num_objs = args.num_objs
#class_samples = args.class_samples
nImagesPerView = 60 # 每个视角采取的样本数量
ds_name = args.ds_name
# 生成图片的保存路径
outDir = '/home/aa/data/3dgen/{}/{}'.format(ds_name, opt.sub_ds)
csv_save_path = '/home/aa/data/3dgen/{}/{}/{}'.format(ds_name, opt.sub_ds, 'label.csv')
os.makedirs(outDir, exist_ok=True)
shutil.rmtree(outDir)
#os.makedirs(outDir, exist_ok=True)
# 创建相应的文件夹
imgDir = outDir + '/img/'
maskDir = outDir + '/mask/'
os.makedirs(imgDir, exist_ok=True)
os.makedirs(maskDir, exist_ok=True)
# 创建文件对象
csv_file = open(csv_save_path, 'w', encoding='utf-8', newline="")
# 基于文件对象构建 csv写入对象
csv_writer = csv.writer(csv_file)
csv_writer.writerow(["image1", "image2", "theta_diff"])
# 遍历所有物体
for obj_id in range(num_objs):
obj_id += 1
# 模型路径
modelFile = '/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_' + '%06d'%obj_id + '.ply'
category_path = '/home/aa/data/3dgen/viewclassify_ycbv_margin0_240_250/train/{}/view_category.json'.format(str(obj_id))
assert os.path.exists(category_path), 'file: {} error'.format(category_path)
category_dict = load_json(category_path)
#render_angel_ds_singleV3(modelFile, outDir, nImagesPerView, csv_writer, category_info=category_dict)
render_angel_ds_singleV4(modelFile, outDir, nImagesPerView, csv_writer, category_info=category_dict)
if __name__=='__main__':
# 1 5 6
parser = argparse.ArgumentParser()
parser.add_argument('--ds_name', type=str, default='idea_ycbv_roate') # 数据集包含的类别数目
parser.add_argument('--num_objs', type=int, default=21) # 数据集包含的类别数目
parser.add_argument('--class_samples', type=int, default=1500) # 每个类别的数据量
parser.add_argument('--sub_ds', type=str, default='test') #
opt = parser.parse_args()
sample_all(opt)
| 8,753
| 32.159091
| 139
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_delg_databasev2.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
import argparse
import numpy as np
from sympy import im
def gen_views(nViews, nViewSamples, marginRatio=0):
assert(nViewSamples>=nViews)
views=cvr.sampleSphere(nViews)
samples=cvr.sampleSphere(nViewSamples)
viewClusters=[]
for i in range(0,len(views)):
viewClusters.append([])
for s in samples:
mcos=-1
mi=0
for i,v in enumerate(views):
sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
if sv_cos>mcos:
mcos=sv_cos
mi=i
viewClusters[mi].append((s,mcos))
#for v in viewClusters:
for i,v in enumerate(viewClusters):
v.sort(key=lambda x:x[1],reverse=True)
viewClusters[i]={'center':views[i],
'nbrs':v[0:int(len(v)*(1-marginRatio))]
}
return viewClusters
import math
import random
import numpy as np
import cv2
import os
import shutil
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
model=cvr.CVRModel(modelFile)
modelCenter=np.array(model.getCenter())
sizeBB=model.getSizeBB()
maxBBSize=max(sizeBB)
unitScale=2.0/maxBBSize
eyeDist=4.0/unitScale
fscale=1.5
viewSize=[500,500]
viewClusters=gen_views(nViews,3000)
obj_name = modelFile.split('/')[-1].replace('.ply', '')
json_info = {}
for ci,viewCluster in enumerate(viewClusters):
print('view {}/{}'.format(ci,len(viewClusters)))
viewCenter=viewCluster['center']
viewNbrs=viewCluster['nbrs']
upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
imgDir=outDir+'/img/'
maskDir=outDir+'/mask/'
os.makedirs(imgDir,exist_ok=True)
os.makedirs(maskDir,exist_ok=True)
for ii in range(0, nImagesPerView):
viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
viewDir=np.array(viewDir)
eyePos=modelCenter+viewDir*eyeDist
mats=cvr.CVRMats()
mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
angle=2*math.pi*ii/nImagesPerView
mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
render=cvr.CVRender(model)
rr=render.exec(mats,viewSize)
mask=cvr.getRenderMask(rr.depth)
R, T = cvr.decomposeR33T(mats.mModel * mats.mView)
imname = obj_name + '_' + str(ci) + '_%04d.png'%ii
cv2.imwrite(imgDir+imname,rr.img)
cv2.imwrite(maskDir+imname,mask)
json_info[imname.replace('.png', '')] = {'R': R.tolist(), 'T': T}
# cv2.imshow('img',rr.img)
# cv2.imshow("mask",mask)
# dimg=cvr.postProRender(rr.img,mask)
# cv2.imwrite("f:/dimg.png",dimg)
# cv2.waitKey()
with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
f.write(json.dumps(json_info, indent=4))
def main():
nViews = 4
nImagesPerView = 240
ds_name = 'delg_ycbv_all_obj_{}_{}'.format(nViews, nImagesPerView)
outDir = '/home/aa/data/3dgen/{}/train'.format(ds_name)
view_category_json_path = '/home/aa/data/3dgen/{}'.format(ds_name)
print(outDir)
os.makedirs(outDir, exist_ok=True)
shutil.rmtree(outDir)
for i in range(21):
obj_id = i+1
modelFile = '/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_%06d.ply'%obj_id
print('outdir: ' + outDir)
render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews, nImagesPerView)
if __name__=='__main__':
main()
| 3,872
| 29.496063
| 137
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_det2d_dataset.py
|
import os
import sys
import cv2
from cv2 import data
from skimage import measure
import random
import numpy as np
import pycocotools as coco
import pycocotools.mask
def categories(label, label_id):
category = {}
category['supercategory'] = 'component'
category['id'] = label_id
category['name'] = label
return category
def get_category_list(label_list):
category_list=[]
for i,label in enumerate(label_list):
category_list.append(categories(label,i+1))
return category_list
def get_image_info(img, id, fileName):
image = {}
image['height'] = img.shape[0]
image['width'] = img.shape[1]
image['id'] = id
image['file_name'] = fileName
return image
def annotations_from_rect(bbox, category_id, image_id, object_id):
annotation = {}
x0,x1,y0,y1=bbox[0],bbox[0]+bbox[2],bbox[1],bbox[1]+bbox[3]
annotation['segmentation'] = [x0,y0,x0,y1,x1,y1,x1,y0]
annotation['iscrowd'] = 0
annotation['image_id'] = image_id
annotation['bbox'] = bbox
annotation['area'] = bbox[2]*bbox[3]
annotation['category_id'] = category_id
annotation['id'] = object_id
return annotation
def annotations_from_mask(obj_mask, category_id, image_id, object_id):
encoded_mask = coco.mask.encode(np.asfortranarray(obj_mask))
contours = measure.find_contours(obj_mask, 0.5)
segs=[]
for contour in contours:
contour=np.flip(contour,axis=1)
seg=contour.ravel().tolist()
segs.append(seg)
annotation = {}
annotation['segmentation'] = segs
annotation['iscrowd'] = 0
annotation['image_id'] = image_id
annotation['bbox'] = coco.mask.toBbox(encoded_mask)
annotation['area'] = coco.mask.area(encoded_mask)
annotation['category_id'] = category_id
annotation['id'] = object_id
return annotation
def rand_box(imsize, size):
x=int(random.uniform(0,imsize[0]))
y=int(random.uniform(0,imsize[1]))
return [x-size//2, y-size//2, size]
import cvf.cvrender as cvr
import json
class GenDet2dDataset:
def __init__(self, imageFiles, modelFiles, labelList) -> None:
self.dr=cvr.DatasetRender()
self.dr.loadModels(modelFiles)
self.imageFiles=imageFiles
self.modelFiles=modelFiles
self.category_list=get_category_list(labelList)
self.labelList=labelList
def gen(self, outDir, setName, nImages, imgSize,
modelsPerImageRange,
objectSizeRatioRange=(0.2,0.75),
alphaScalesRange=(0.8,1.0),
keepBgRatio=True,
harmonizeF=True,
degradeF=True,maxSmoothSigma=1.0,maxNoiseStd=5.0
):
imageDir=outDir+setName+'/'
annDir=outDir+'annotations/'
os.makedirs(imageDir,exist_ok=True)
os.makedirs(annDir,exist_ok=True)
category_list=self.category_list
images_list=[]
annotations_list=[]
#idx_of_all_models=list(range(len(label_list)))
n_all_models=len(self.labelList)
max_models_perim=min(n_all_models,modelsPerImageRange[1])
min_models_perim=min(modelsPerImageRange[0],max_models_perim)
nobjs=0
nimgs=0
idList=[i for i in range(0,n_all_models)]
for n in range(0,nImages):
img=cv2.imread(self.imageFiles[random.randint(0,len(self.imageFiles)-1)])
if img is None:
continue
dsize=None
if keepBgRatio:
dsize=max(imgSize)/max(img.shape)*np.asarray(img.shape)
dsize=dsize.astype(np.int32)
dsize=(dsize[1],dsize[0])
else:
dsize=tuple(imgSize)
img=cv2.resize(img,dsize)
min_object_size=int(min(dsize)*objectSizeRatioRange[0])
max_object_size=int(min(dsize)*objectSizeRatioRange[1])
obj_list=[]
size_list=[]
center_list=[]
alphaScales=[]
nobjs_cur=random.randint(min_models_perim,max_models_perim)
random.shuffle(idList)
#print(nobjs_cur)
for i in range(0,nobjs_cur):
obj_list.append(idList[i])
size=int(random.uniform(min_object_size,max_object_size))
cx=random.randint(0,dsize[0])
cy=random.randint(0,dsize[1])
size_list.append(size)
center_list.append([cx,cy])
alphaScales.append(random.uniform(alphaScalesRange[0],alphaScalesRange[1]))
rr=self.dr.renderToImage(img,obj_list,sizes=size_list,centers=center_list, alphaScales=alphaScales, harmonizeF=harmonizeF,
degradeF=degradeF,maxSmoothSigma=maxSmoothSigma,maxNoiseStd=maxNoiseStd
)
objs_mask=rr['composite_mask']
dimg=rr['img']
nobjs0=nobjs
for obj_idx in range(0,nobjs_cur):
obj_mask=np.zeros(objs_mask.shape,dtype=np.uint8)
obj_mask[objs_mask==obj_idx]=1
ann=annotations_from_mask(obj_mask,obj_list[obj_idx]+1,nimgs+1,nobjs+1)
#cv2.rectangle(dimg,ann['bbox'],(255,0,0), thickness=3)
if(ann['area']>10):
annotations_list.append(ann)
nobjs+=1
if nobjs==nobjs0:
continue
nimgs+=1
outFileName='%06d.jpg'%nimgs
images_list.append(get_image_info(dimg,nimgs,outFileName))
cv2.imwrite(imageDir+outFileName,dimg)
print((n+1,outFileName,obj_list,size_list))
#cv2.imshow("dimg",dimg)
#cv2.waitKey()
data_coco = {}
data_coco['images'] = images_list
data_coco['categories'] = category_list
data_coco['annotations'] = annotations_list
class MyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
else:
return super(MyEncoder, self).default(obj)
print(os.path.abspath('./'))
json.dump(data_coco, open(annDir+setName+'.json', 'w'),cls=MyEncoder)
import glob
import fileinput
def getImageList(dir):
flist=[]
for f in glob.glob(dir+"/*.jpg"):
flist.append(f)
return flist
def readModelList(modelListFile):
modelListDir=os.path.dirname(modelListFile)
label_list=[]
modelFiles=[]
for line in fileinput.input(modelListFile):
line=str(line)
strs=line.split()
label_list.append(strs[0])
modelFiles.append(modelListDir+'/'+strs[1])
return label_list,modelFiles
def main():
# dataDir='/home/aa/data/'
# imageFiles=getImageList(dataDir+'/VOCdevkit/VOC2012/JPEGImages/')
# modelListFile=dataDir+'/3dmodels/re3d3.txt'
# #modelListFile=dataDir+'/3dmodels/re3d8.txt'
# #modelListFile=dataDir+'/3dmodels/re3d25.txt'
# labelList,modelFiles=readModelList(modelListFile)
# outDir=dataDir+'3dgen/re3d3/'
#outDir=dataDir+'3dgen/re3d8a/'
dataDir='/home/aa/data/'
imageFiles=getImageList(dataDir+'/VOCdevkit/VOC2012/JPEGImages/')
#modelListFile=dataDir+'/3dmodels/ycbv.txt'
#modelListFile=dataDir+'/3dmodels/re3d8.txt'
#modelListFile=dataDir+'/3dmodels/re3d25.txt'
modelListFile=dataDir+'/3dmodels/re3d6_1.txt'
labelList,modelFiles=readModelList(modelListFile)
outDir=dataDir+'3dgen/re3d6_1_train/'
dr=GenDet2dDataset(imageFiles, modelFiles, labelList)
#gen eval set
imgSize=[640,480]
keepBgRatio=False
objectSizeRatio=(0.2, 0.75)
nModels=len(modelFiles)
modelsPerImage=(nModels-2, nModels)
alphaScales=(1.0,1.0)
harmonizeF=True
degradeF=True
#gen train set
nImagesToGen=1000
dr.gen(outDir,'train',nImagesToGen,imgSize,modelsPerImage,objectSizeRatio,alphaScales,keepBgRatio,harmonizeF, degradeF)
nImagesToGen=200
dr.gen(outDir,'eval',nImagesToGen,imgSize,modelsPerImage,objectSizeRatio,alphaScales,keepBgRatio,harmonizeF, degradeF)
if __name__=='__main__':
main()
| 8,333
| 30.80916
| 134
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_delg_database.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
import argparse
import numpy as np
from sympy import im
def gen_views(nViews, nViewSamples, marginRatio=0):
assert(nViewSamples>=nViews)
views=cvr.sampleSphere(nViews)
samples=cvr.sampleSphere(nViewSamples)
viewClusters=[]
for i in range(0,len(views)):
viewClusters.append([])
for s in samples:
mcos=-1
mi=0
for i,v in enumerate(views):
sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
if sv_cos>mcos:
mcos=sv_cos
mi=i
viewClusters[mi].append((s,mcos))
#for v in viewClusters:
for i,v in enumerate(viewClusters):
v.sort(key=lambda x:x[1],reverse=True)
viewClusters[i]={'center':views[i],
'nbrs':v[0:int(len(v)*(1-marginRatio))]
}
return viewClusters
import math
import random
import numpy as np
import cv2
import os
import shutil
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
model=cvr.CVRModel(modelFile)
modelCenter=np.array(model.getCenter())
sizeBB=model.getSizeBB()
maxBBSize=max(sizeBB)
unitScale=2.0/maxBBSize
eyeDist=4.0/unitScale
fscale=1.5
viewSize=[500,500]
viewClusters=gen_views(nViews,3000)
obj_name = modelFile.split('/')[-1].replace('.ply', '')
json_info = {}
for ci,viewCluster in enumerate(viewClusters):
print('view {}/{}'.format(ci,len(viewClusters)))
viewCenter=viewCluster['center']
viewNbrs=viewCluster['nbrs']
upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
imgDir=outDir+'/img/'
maskDir=outDir+'/mask/'
os.makedirs(imgDir,exist_ok=True)
os.makedirs(maskDir,exist_ok=True)
for ii in range(0, nImagesPerView):
viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
viewDir=np.array(viewDir)
eyePos=modelCenter+viewDir*eyeDist
mats=cvr.CVRMats()
mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
angle=2*math.pi*ii/nImagesPerView
mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
render=cvr.CVRender(model)
rr=render.exec(mats,viewSize)
mask=cvr.getRenderMask(rr.depth)
R, T = cvr.decomposeR33T(mats.mModel * mats.mView)
imname = obj_name + '_' + str(ci) + '_%04d.png'%ii
cv2.imwrite(imgDir+imname,rr.img)
cv2.imwrite(maskDir+imname,mask)
json_info[imname.replace('.png', '')] = {'R': R.tolist(), 'T': T}
# cv2.imshow('img',rr.img)
# cv2.imshow("mask",mask)
# dimg=cvr.postProRender(rr.img,mask)
# cv2.imwrite("f:/dimg.png",dimg)
# cv2.waitKey()
with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
f.write(json.dumps(json_info, indent=4))
def main():
    """Render the all-object YCB-V DELG dataset (21 objects) with fixed paths."""
    nViews = 240
    nImagesPerView = 4
    ds_name = 'delg_ycbv_all_obj_{}_{}'.format(nViews, nImagesPerView)
    outDir = '/home/aa/data/3dgen/{}/train'.format(ds_name)
    view_category_json_path = '/home/aa/data/3dgen/{}'.format(ds_name)
    print(outDir)
    # Create-then-remove guarantees rmtree cannot fail on a missing directory;
    # the renderer re-creates the tree via makedirs(exist_ok=True).
    os.makedirs(outDir, exist_ok=True)
    shutil.rmtree(outDir)
    for i in range(21):
        obj_id = i+1
        modelFile = '/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_%06d.ply'%obj_id
        print('outdir: ' + outDir)
        # NOTE(review): render_viewclassify_ds rewrites view_category.json on
        # each call, so earlier objects' pose entries are lost — verify.
        render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews, nImagesPerView)
if __name__=='__main__':
    main()
| 3,872
| 29.496063
| 137
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_viewclassify_dataset.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
import argparse
def gen_views(nViews, nViewSamples, marginRatio=0):
    """Cluster unit-sphere directions into view classes.

    Draws ``nViews`` cluster centers and ``nViewSamples`` candidate
    directions on the sphere, assigns each candidate to the center with the
    highest cosine similarity, and returns one dict per cluster:
    ``{'center': direction, 'nbrs': [(direction, cos), ...]}`` with ``nbrs``
    sorted by decreasing cosine and truncated by ``marginRatio``
    (0 keeps every member).
    """
    assert nViewSamples >= nViews
    centers = cvr.sampleSphere(nViews)
    candidates = cvr.sampleSphere(nViewSamples)
    buckets = [[] for _ in centers]
    for cand in candidates:
        # Dot product of unit vectors == cosine similarity; pick the best center.
        best_idx = max(
            range(len(centers)),
            key=lambda idx: cand[0]*centers[idx][0] + cand[1]*centers[idx][1] + cand[2]*centers[idx][2],
        )
        best_cos = (cand[0]*centers[best_idx][0]
                    + cand[1]*centers[best_idx][1]
                    + cand[2]*centers[best_idx][2])
        buckets[best_idx].append((cand, best_cos))
    clusters = []
    for center, members in zip(centers, buckets):
        members.sort(key=lambda pair: pair[1], reverse=True)
        # Drop the marginRatio fraction of members farthest from the center.
        keep = int(len(members) * (1 - marginRatio))
        clusters.append({'center': center, 'nbrs': members[:keep]})
    return clusters
import math
import random
import numpy as np
import cv2
import os
import shutil
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
    """Render a view-classification dataset with one folder per view cluster.

    Cluster ``ci`` gets ``outDir/%04d/img`` and ``outDir/%04d/mask``;
    ``view_category.json`` maps the zero-padded cluster id to the cluster's
    center direction.

    :param modelFile: path to the .ply mesh.
    :param outDir: dataset directory receiving per-cluster folders.
    :param view_info_path: directory receiving ``view_category.json``.
    :param nViews: number of view clusters (= classes).
    :param nImagesPerView: images rendered per cluster.
    """
    model=cvr.CVRModel(modelFile)
    modelCenter=np.array(model.getCenter())
    sizeBB=model.getSizeBB()
    maxBBSize=max(sizeBB)
    # Camera distance normalised to model size: eyeDist = 2 * maxBBSize.
    unitScale=2.0/maxBBSize
    eyeDist=4.0/unitScale
    fscale=1.5
    viewSize=[500,500]
    viewClusters=gen_views(nViews,3000)
    json_info = {}
    for ci,viewCluster in enumerate(viewClusters):
        print('view {}/{}'.format(ci,len(viewClusters)))
        viewCenter=viewCluster['center']
        viewNbrs=viewCluster['nbrs']
        # Avoid a degenerate look-at when the view is nearly parallel to +Z.
        upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
        # NOTE(review): `viewDir` first holds this cluster's output folder and
        # is then reused for a direction vector inside the loop below.
        viewDir=outDir+'/%04d/'%ci
        imgDir=viewDir+'/img/'
        maskDir=viewDir+'/mask/'
        os.makedirs(imgDir,exist_ok=True)
        os.makedirs(maskDir,exist_ok=True)
        json_info['%04d'%ci] = viewCenter
        for ii in range(0,nImagesPerView):
            viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
            viewDir=np.array(viewDir)
            eyePos=modelCenter+viewDir*eyeDist
            mats=cvr.CVRMats()
            mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
            mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
            # Evenly spaced in-plane (roll) rotation per image.
            angle=2*math.pi*ii/nImagesPerView
            mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
            render=cvr.CVRender(model)
            rr=render.exec(mats,viewSize)
            mask=cvr.getRenderMask(rr.depth)
            #R, T = cvr.decomposeR33T(mats.mModel)
            imname='%04d.png'%ii
            cv2.imwrite(imgDir+imname,rr.img)
            cv2.imwrite(maskDir+imname,mask)
            # cv2.imshow('img',rr.img)
            # cv2.imshow("mask",mask)
            # dimg=cvr.postProRender(rr.img,mask)
            # cv2.imwrite("f:/dimg.png",dimg)
            # cv2.waitKey()
    with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
        f.write(json.dumps(json_info, indent=4))
def main():
    """Parse CLI options and render the view-classification dataset.

    BUGFIX: this function contained unresolved git merge-conflict markers
    (``<<<<<<< HEAD`` / ``=======`` / ``>>>>>>>``), which is a SyntaxError.
    The HEAD branch is kept: it is the only one that defines ``nViews``,
    ``nImagesPerView`` and ``view_category_json_path`` used by the call at
    the bottom; the other branch only set hard-coded Windows paths.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes', type=int, default=240)   # number of view classes in the dataset
    parser.add_argument('--obj_id', type=int, default=3)          # object id of the training data
    parser.add_argument('--class_samples', type=int, default=250) # samples rendered per class
    opt = parser.parse_args()
    obj_id = opt.obj_id
    nViews = opt.num_classes
    nImagesPerView = opt.class_samples
    ds_name = 'viewclassify_ycbv_obj{}_margin0_{}_{}'.format(obj_id, nViews, nImagesPerView)
    # NOTE(review): this pattern only yields a valid BOP model path for
    # single-digit obj ids; compare the '%06d' formatting used elsewhere.
    modelFile='/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_00000{}.ply'.format(obj_id)
    outDir='/home/aa/data/3dgen/{}/train'.format(ds_name)
    view_category_json_path = '/home/aa/data/3dgen/{}'.format(ds_name)
    # Create first so rmtree below cannot fail on a missing directory.
    os.makedirs(outDir, exist_ok=True)
    shutil.rmtree(outDir)
    print('outdir: ' + outDir)
    render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews,nImagesPerView)
if __name__=='__main__':
    main()
| 4,178
| 30.186567
| 137
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/cls_angle_render_viewclassify_dataset.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
def gen_views(nViews, nViewSamples, marginRatio=0):
    """Cluster unit-sphere directions into view classes.

    Samples ``nViews`` cluster centers and ``nViewSamples`` candidate
    directions on the sphere, assigns every candidate to its nearest center
    (maximum cosine), and returns a list of dicts
    ``{'center': direction, 'nbrs': [(direction, cos), ...]}`` where ``nbrs``
    is sorted by decreasing cosine and truncated by ``marginRatio``
    (0 keeps all members).
    """
    assert(nViewSamples>=nViews)
    views=cvr.sampleSphere(nViews)
    samples=cvr.sampleSphere(nViewSamples)
    viewClusters=[]
    for i in range(0,len(views)):
        viewClusters.append([])
    for s in samples:
        # Nearest cluster center by cosine similarity (dot of unit vectors).
        mcos=-1
        mi=0
        for i,v in enumerate(views):
            sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
            if sv_cos>mcos:
                mcos=sv_cos
                mi=i
        viewClusters[mi].append((s,mcos))
    #for v in viewClusters:
    for i,v in enumerate(viewClusters):
        v.sort(key=lambda x:x[1],reverse=True)
        # Drop the marginRatio fraction of members farthest from the center.
        viewClusters[i]={'center':views[i],
                         'nbrs':v[0:int(len(v)*(1-marginRatio))]
                         }
    return viewClusters
import math
import random
import numpy as np
import cv2
import os
import shutil
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
    """Render a view-classification dataset with an extra in-plane-angle class.

    Like the per-cluster variant, but each image name is prefixed with a
    quadrant index (0-3) derived from its in-plane rotation angle, so a
    classifier can be trained on view cluster and coarse roll jointly.

    :param modelFile: path to the .ply mesh.
    :param outDir: dataset directory receiving per-cluster folders.
    :param view_info_path: directory receiving ``view_category.json``.
    :param nViews: number of view clusters (= classes).
    :param nImagesPerView: images rendered per cluster.
    """
    model=cvr.CVRModel(modelFile)
    modelCenter=np.array(model.getCenter())
    sizeBB=model.getSizeBB()
    maxBBSize=max(sizeBB)
    # Camera distance normalised to model size: eyeDist = 2 * maxBBSize.
    unitScale=2.0/maxBBSize
    eyeDist=4.0/unitScale
    fscale=1.5
    viewSize=[500,500]
    viewClusters=gen_views(nViews,3000)
    json_info = {}
    for ci,viewCluster in enumerate(viewClusters):
        print('view {}/{}'.format(ci,len(viewClusters)))
        viewCenter=viewCluster['center']
        viewNbrs=viewCluster['nbrs']
        # Avoid a degenerate look-at when the view is nearly parallel to +Z.
        upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
        # NOTE(review): `viewDir` first holds this cluster's output folder and
        # is then reused for a direction vector inside the loop below.
        viewDir=outDir+'/%04d/'%ci
        imgDir=viewDir+'/img/'
        maskDir=viewDir+'/mask/'
        os.makedirs(imgDir,exist_ok=True)
        os.makedirs(maskDir,exist_ok=True)
        json_info['%04d'%ci] = viewCenter
        for ii in range(0,nImagesPerView):
            viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
            viewDir=np.array(viewDir)
            eyePos=modelCenter+viewDir*eyeDist
            mats=cvr.CVRMats()
            mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
            mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
            # Evenly spaced in-plane (roll) rotation per image.
            angle=2*math.pi*ii/nImagesPerView
            mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
            render=cvr.CVRender(model)
            rr=render.exec(mats,viewSize)
            mask=cvr.getRenderMask(rr.depth)
            # Quadrant (0-3) of the roll angle becomes a sub-class prefix.
            sub_class = math.floor((math.degrees(angle) / 90))
            imname= str(sub_class) + '_%04d.png'%ii
            cv2.imwrite(imgDir+imname,rr.img)
            cv2.imwrite(maskDir+imname,mask)
            # cv2.imshow('img',rr.img)
            # cv2.imshow("mask",mask)
            # dimg=cvr.postProRender(rr.img,mask)
            # cv2.imwrite("f:/dimg.png",dimg)
            # cv2.waitKey()
    with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
        f.write(json.dumps(json_info, indent=4))
def main():
    """Render the obj_000003 view-classification dataset with fixed paths."""
    modelFile='/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_000003.ply'
    # NOTE(review): the folder name says 16_4 but nImagesPerView below is
    # 625 — confirm which configuration is actually intended.
    outDir='/home/aa/data/3dgen/viewclassify_ycbv_obj3_margin0_16_4/train'
    view_category_json_path = '/home/aa/data/3dgen/viewclassify_ycbv_obj3_margin0_16_4'
    # Create first so rmtree cannot fail on a missing directory.
    os.makedirs(outDir, exist_ok=True)
    shutil.rmtree(outDir)
    nViews=16
    nImagesPerView=625
    render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews, nImagesPerView)
if __name__=='__main__':
    main()
| 3,543
| 29.551724
| 137
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/__init__.py
| 0
| 0
| 0
|
py
|
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/tools/render_delg_dataset.py
|
import cvf.bfc as bfc
import cvf.cvrender as cvr
import json
import argparse
import numpy as np
from sympy import im
def gen_views(nViews, nViewSamples, marginRatio=0):
    """Cluster unit-sphere directions into view classes.

    Samples ``nViews`` cluster centers and ``nViewSamples`` candidate
    directions on the sphere, assigns every candidate to its nearest center
    (maximum cosine), and returns a list of dicts
    ``{'center': direction, 'nbrs': [(direction, cos), ...]}`` where ``nbrs``
    is sorted by decreasing cosine and truncated by ``marginRatio``
    (0 keeps all members).
    """
    assert(nViewSamples>=nViews)
    views=cvr.sampleSphere(nViews)
    samples=cvr.sampleSphere(nViewSamples)
    viewClusters=[]
    for i in range(0,len(views)):
        viewClusters.append([])
    for s in samples:
        # Nearest cluster center by cosine similarity (dot of unit vectors).
        mcos=-1
        mi=0
        for i,v in enumerate(views):
            sv_cos=s[0]*v[0]+s[1]*v[1]+s[2]*v[2]
            if sv_cos>mcos:
                mcos=sv_cos
                mi=i
        viewClusters[mi].append((s,mcos))
    #for v in viewClusters:
    for i,v in enumerate(viewClusters):
        v.sort(key=lambda x:x[1],reverse=True)
        # Drop the marginRatio fraction of members farthest from the center.
        viewClusters[i]={'center':views[i],
                         'nbrs':v[0:int(len(v)*(1-marginRatio))]
                         }
    return viewClusters
import math
import random
import numpy as np
import cv2
import os
import shutil
def render_viewclassify_ds(modelFile, outDir, view_info_path, nViews, nImagesPerView):
    """Render a DELG-style dataset for one mesh with per-image R/T poses.

    For each of ``nViews`` view clusters renders ``nImagesPerView`` images
    from randomly picked neighbouring view directions, each with a different
    in-plane rotation, writing image/mask pairs into flat ``img``/``mask``
    folders and a JSON file mapping image name to its R/T pose.

    :param modelFile: path to the .ply mesh.
    :param outDir: dataset directory receiving ``img/`` and ``mask/``.
    :param view_info_path: directory receiving ``view_category.json``.
    :param nViews: number of view clusters.
    :param nImagesPerView: images rendered per cluster.
    """
    model=cvr.CVRModel(modelFile)
    modelCenter=np.array(model.getCenter())
    sizeBB=model.getSizeBB()
    maxBBSize=max(sizeBB)
    # Camera distance normalised to model size: eyeDist = 2 * maxBBSize.
    unitScale=2.0/maxBBSize
    eyeDist=4.0/unitScale
    fscale=1.5
    viewSize=[500,500]
    viewClusters=gen_views(nViews,3000)
    obj_name = modelFile.split('/')[-1].replace('.ply', '')
    json_info = {}
    for ci,viewCluster in enumerate(viewClusters):
        print('view {}/{}'.format(ci,len(viewClusters)))
        viewCenter=viewCluster['center']
        viewNbrs=viewCluster['nbrs']
        # Avoid a degenerate look-at when the view is nearly parallel to +Z.
        upDir=[0,0,1] if abs(viewCenter[2])<0.95 else [0,1,0]
        imgDir=outDir+'/img/'
        maskDir=outDir+'/mask/'
        os.makedirs(imgDir,exist_ok=True)
        os.makedirs(maskDir,exist_ok=True)
        for ii in range(0, nImagesPerView):
            # Random neighbour of this cluster; index 0 holds the direction vector.
            viewDir=viewNbrs[int(random.uniform(0,len(viewNbrs)))][0]
            viewDir=np.array(viewDir)
            eyePos=modelCenter+viewDir*eyeDist
            mats=cvr.CVRMats()
            mats.mModel=cvr.lookat(eyePos[0],eyePos[1],eyePos[2],modelCenter[0],modelCenter[1],modelCenter[2],upDir[0],upDir[1],upDir[2])
            mats.mProjection=cvr.perspectiveF(viewSize[1]*fscale, viewSize, max(1,eyeDist-maxBBSize), eyeDist+maxBBSize)
            # Evenly spaced in-plane (roll) rotation per image.
            angle=2*math.pi*ii/nImagesPerView
            mats.mView = cvr.rotateAngle(angle, [0.0, 0.0, 1.0])
            render=cvr.CVRender(model)
            rr=render.exec(mats,viewSize)
            mask=cvr.getRenderMask(rr.depth)
            R, T = cvr.decomposeR33T(mats.mModel * mats.mView)
            imname = obj_name + '_' + str(ci) + '_%04d.png'%ii
            cv2.imwrite(imgDir+imname,rr.img)
            cv2.imwrite(maskDir+imname,mask)
            json_info[imname.replace('.png', '')] = {'R': R.tolist(), 'T': T}
            # cv2.imshow('img',rr.img)
            # cv2.imshow("mask",mask)
            # dimg=cvr.postProRender(rr.img,mask)
            # cv2.imwrite("f:/dimg.png",dimg)
            # cv2.waitKey()
    with open(os.path.join(view_info_path, "view_category.json"), "w") as f:
        f.write(json.dumps(json_info, indent=4))
def main():
    """Parse CLI options and render the DELG dataset for one YCB-V object."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--num_classes', type=int, default=300)  # number of view classes in the dataset
    parser.add_argument('--obj_id', type=int, default=5)         # object id of the training data
    parser.add_argument('--class_samples', type=int, default=10) # samples rendered per class
    opt = parser.parse_args()
    obj_id = opt.obj_id
    nViews = opt.num_classes
    nImagesPerView = opt.class_samples
    ds_name = 'delg_ycbv_obj{}_margin0_{}_{}'.format(obj_id, nViews, nImagesPerView)
    # NOTE(review): this pattern only yields a valid BOP model path for
    # single-digit obj ids; compare the '%06d' formatting used elsewhere.
    modelFile='/home/aa/prjs/cp/cosypose/local_data/bop_datasets/ycbv/models/obj_00000{}.ply'.format(obj_id)
    outDir='/home/aa/data/3dgen/{}/train'.format(ds_name)
    view_category_json_path = '/home/aa/data/3dgen/{}'.format(ds_name)
    # Create first so rmtree cannot fail on a missing directory.
    os.makedirs(outDir, exist_ok=True)
    shutil.rmtree(outDir)
    print('outdir: ' + outDir)
    render_viewclassify_ds(modelFile, outDir, view_category_json_path, nViews,nImagesPerView)
if __name__=='__main__':
    main()
| 4,146
| 30.416667
| 137
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/bfc/__init__.py
|
import os
envs=os.environ.get("PATH")
os.environ['PATH']=envs+';F:/dev/cvfx/assim410/bin-v140/x64/release/;F:/dev/cvfx/opencv3413/bin-v140/x64/Release/;F:/dev/cvfx/bin/x64/;D:/setup/Anaconda3/;'
| 197
| 32
| 156
|
py
|
nonlocal-3dtracking
|
nonlocal-3dtracking-master/cvf/Python/cvf/bfc/netcall.py
|
#from socketserver import TCPServer, StreamRequestHandler
import socketserver
import time
import numpy as np
import cv2
import struct
from numpy.core.defchararray import decode
BUFSIZ = 1024
'''
def encodeBytesList(bytesList):
INT_SIZE=4
totalSize=INT_SIZE*(len(bytesList)+1)
head=struct.pack('!i',len(bytesList))
for x in bytesList:
head+=struct.pack('!i',len(x))
totalSize+=len(x)
#data=struct.pack('i',totalSize)
data=head
for x in bytesList:
data+=bytes(x)
return data
def decodeBytesList(data):
INT_SIZE=4
bytesList=[]
data=bytes(data)
count=struct.unpack('!i', data[0:INT_SIZE])[0]
sizes=[]
for i in range(1,count+1):
t=data[i*INT_SIZE:i*INT_SIZE+INT_SIZE]
isize=struct.unpack('!i', t)[0]
sizes.append(isize)
dpos=INT_SIZE*(count+1)
for i in range(0,count):
bytesList.append(data[dpos:dpos+sizes[i]])
dpos+=sizes[i]
return bytesList
def _decodeObj(data, typeLabel):
obj=None
if typeLabel=='image':
nparr = np.frombuffer(bytes(data), np.uint8)
obj = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
return obj
def encodeObjs(objs):
INT_SIZE=4
subList=[[],[],[]]
for k,v in objs.items():
name,typeLabel=getNameType(v, k)
subList[0].append(name.encode())
subList[1].append(typeLabel.encode())
subList[2].append(_encodeObj(v,typeLabel))
totalSize=0
for i in range(0,len(subList)):
subList[i]=encodeBytesList(subList[i])
totalSize+=len(subList[i])
totalSize+=INT_SIZE*len(subList)
data=struct.pack('!i',totalSize)
for x in subList:
data+=struct.pack('!i',len(x))
for x in subList:
data+=x
return data
def decodeObjs(data):
INT_SIZE=4
subList=[0,0,0]
for i in range(0,3):
subList[i]=struct.unpack('!i', data[INT_SIZE*i:INT_SIZE*(i+1)])[0]
p=INT_SIZE*len(subList)
for i in range(0,3):
isize=subList[i]
subList[i]=decodeBytesList(data[p:p+isize])
p+=isize
objs=dict()
nobjs=len(subList[0])
for i in range(0,nobjs):
name=subList[0][i].decode()
type_=subList[1][i].decode()
obj=_decodeObj(subList[2][i], type_)
objs[name]=obj
return objs
'''
def _encodeBytes(b):
head=struct.pack('<i',len(b))
return head+bytes(b)
def _decodeBytes(data, pos):
size=struct.unpack('<i', data[pos:pos+4])[0]
end=pos+4+size
return data[pos+4:end],end
def getNameType(v, name):
    """Split an entry key into (name, type label).

    A key of the form ``'name:label'`` carries an explicit label; otherwise
    the label is the class name of *v* extracted from ``str(type(v))``
    (e.g. ``"<class 'int'>"`` -> ``'int'``).
    """
    if ':' in name:
        base, _, label = name.partition(':')
        return (base, label)
    rendered = str(type(v))
    first_quote = rendered.find("'")
    second_quote = rendered.find("'", first_quote + 1)
    return (name, rendered[first_quote + 1:second_quote])
def _packShape(shape):
d=bytes()
for i in shape:
d+=struct.pack('<i',i)
return d
def _encodeObj(v,typeLabel):
rd=None
if type(v)==np.ndarray:
if typeLabel=='image':
rd=cv2.imencode(".jpg", v)[1]
else:
rd=_packShape(v.shape)+v.tobytes()
elif type(v)==str:
rd=v.encode()
elif type(v)==list:
rd=_packShape([len(v)])
if len(v)>0:
dtype=type(v[0])
withElemSize= not (dtype==int or dtype==float)
for x in v:
if type(x)!=dtype:
raise 'list elems must have the same type'
xb=_encodeObj(x,typeLabel)
if withElemSize:
rd+=_encodeBytes(xb)
else:
rd+=xb
elif type(v)==int:
rd=struct.pack('<i',v)
elif type(v)==float:
rd=struct.pack('<f',v)
else:
raise 'unknown type'
return rd
def encodeObjs(objs):
    """Encode a dict of named objects into one framed wire message.

    Each entry becomes a length-prefixed name followed by a length-prefixed
    payload; the whole body is then prefixed with its total size as a
    little-endian int32.
    """
    chunks = []
    for key, value in objs.items():
        name, typeLabel = getNameType(value, key)
        chunks.append(_encodeBytes(name.encode()))
        chunks.append(_encodeBytes(_encodeObj(value, typeLabel)))
    body = b''.join(chunks)
    return struct.pack('<i', len(body)) + body
'''
class BytesObject:
tConfig={
'char':('<c',1),
'int':('<i',4),
'float':('<f',4),
'double':('<d',8)
}
def __init__(self, data):
self.data=bytes(data)
def decode_as(self,typeLabel):
INT_SIZE=4
obj=None
data=self.data
if typeLabel=='image':
nparr = np.frombuffer(data, np.uint8)
obj = cv2.imdecode(nparr, cv2.IMREAD_COLOR)
elif typeLabel=='str':
n=struct.unpack_from('<i',data,0)[0]
assert len(data)==n+INT_SIZE
obj=data[INT_SIZE:].decode()
else:
if typeLabel not in BytesObject.tConfig:
raise 'unknown type'
cfg=BytesObject.tConfig[typeLabel]
SIZE=cfg[1]
fmt=cfg[0]
data=self.data
if len(data)==SIZE:
obj=struct.unpack_from(fmt,data,0)[0]
else:
n=struct.unpack_from('<i',data,0)[0]
if len(data)==SIZE*n+INT_SIZE:
obj=[]
for i in range(0,n):
v=struct.unpack_from(fmt,self.data,i*SIZE+INT_SIZE)[0]
obj.append(v)
return obj
'''
class BytesObject:
    """Lazily-decoded value received over the wire.

    Wraps the raw payload bytes of one encoded object; ``decode`` /
    ``decode_list`` interpret them on demand according to a type label.
    Numeric payloads are either a single scalar or up to four little-endian
    int32 dimensions followed by the raw array buffer.
    """
    # label -> (numpy dtype for frombuffer, element byte size, struct format).
    # BUGFIX: np.int32 replaces np.int, which was removed in NumPy 1.24 and
    # was 8 bytes on 64-bit platforms while the declared element size is 4;
    # np.uint8 replaces np.char, which is a module, not a dtype (the 'char'
    # array path therefore crashed; it now yields uint8 codes).
    tConfig={
        'char':(np.uint8,1,'<c'),
        'int':(np.int32,4,'<i'),
        'float':(np.float32,4,'<f'),
        'double':(np.float64,8,'<d')
    }
    def __init__(self, data):
        self.data=bytes(data)
    def decode_list(self, typeLabel):
        """Decode the payload as a list of *typeLabel* elements."""
        data=self.data
        INT_SIZE=4
        # Non-fixed-size elements (str, image, ...) carry their own size prefix.
        withElemSize=typeLabel not in BytesObject.tConfig
        size=struct.unpack_from('<i',data,0)[0]
        p=INT_SIZE
        dl=[]
        if withElemSize:
            for _ in range(size):
                isize=struct.unpack_from('<i',data,p)[0]
                p=p+INT_SIZE
                dl.append(self.decode_(data[p:p+isize], typeLabel))
                p=p+isize
        else:
            cfg=BytesObject.tConfig[typeLabel]
            elemSize=cfg[1]
            fmt=cfg[2]
            for _ in range(size):
                dl.append(struct.unpack_from(fmt,data,p)[0])
                p+=elemSize
        return dl
    def decode(self, typeLabel):
        """Decode the whole payload as a single *typeLabel* value."""
        return self.decode_(self.data,typeLabel)
    def decode_(self,data,typeLabel):
        """Decode *data* as *typeLabel*: image, str, scalar or nd-array.

        Raises TypeError for unknown labels and ValueError when the buffer
        length is inconsistent with the parsed dimensions.  (These were
        invalid ``raise '<str>'`` statements before.)
        """
        INT_SIZE=4
        if typeLabel=='image':
            nparr = np.frombuffer(data, np.uint8, offset=0)
            return cv2.imdecode(nparr, cv2.IMREAD_COLOR)
        if typeLabel=='str':
            return data.decode()
        if typeLabel not in BytesObject.tConfig:
            raise TypeError('unknown type: {}'.format(typeLabel))
        cfg=BytesObject.tConfig[typeLabel]
        dtype_=cfg[0]
        SIZE=cfg[1]
        shape=[]
        if len(data)>SIZE:
            # Longer than one scalar: parse up to 4 leading int32 dims until
            # the dims' product times SIZE plus the header covers the buffer.
            n=1
            totalSize=0
            for i in range(0,4):
                m=struct.unpack_from('<i',data,i*INT_SIZE)[0]
                shape.append(m)
                n=n*m
                totalSize=n*SIZE+(i+1)*INT_SIZE
                if totalSize>=len(data):
                    break
            if totalSize>len(data):
                raise ValueError('invalid size')
        else:
            assert len(data)==SIZE
        dim=len(shape)
        arr=np.frombuffer(data,offset=dim*INT_SIZE,dtype=dtype_)
        if dim==0:
            return dtype_(arr[0])
        return np.reshape(arr,tuple(shape))
def decodeObjs(data):
    """Parse a message body into ``{name: BytesObject}``.

    The body is a sequence of (length-prefixed name, length-prefixed
    payload) pairs; payload decoding is deferred to BytesObject.
    """
    result = dict()
    cursor = 0
    total = len(data)
    while cursor < total:
        name_bytes, cursor = _decodeBytes(data, cursor)
        payload, cursor = _decodeBytes(data, cursor)
        result[name_bytes.decode()] = BytesObject(payload)
    return result
def recvObjs(rq):
    """Receive one framed message from socket *rq* and parse it.

    Reads the 4-byte little-endian total size, then keeps receiving until
    at least that many bytes arrived.  Returns the decoded ``{name:
    BytesObject}`` dict, or None when the peer closed before sending a
    full header.
    """
    INT_SIZE=4
    buf=rq.recv(INT_SIZE)
    if len(buf)<INT_SIZE:
        # Peer closed (or short read on the header) -> treat as disconnect.
        return None
    #if len(size)!=INT_SIZE:
    assert len(buf)==INT_SIZE
    totalSize=struct.unpack('<i',bytes(buf))[0]
    data=bytes()
    # NOTE(review): if recv() returns b'' here (peer vanished mid-message)
    # this loop never terminates — consider breaking on an empty buffer.
    while len(data)<totalSize:
        buf=rq.recv(BUFSIZ)
        data+=bytes(buf)
        #print(len(data))
    return decodeObjs(data)
def runServer(address, handleFunc):
    """Run a blocking, single-threaded TCP server for the netcall protocol.

    For every connected client, framed messages are received in a loop and
    passed to ``handleFunc(objs)``, whose returned bytes are sent back.
    A ``cmd == 'exit'`` message ends the client session; Ctrl-C shuts the
    server down.

    :param address: (host, port) tuple for socketserver.TCPServer.
    :param handleFunc: callable mapping the decoded objs dict to reply bytes.
    """
    #address = ('127.0.0.1', 8000)
    class NetcallRequestHandler(socketserver.BaseRequestHandler):
        # Override handle(), which does nothing in the base class; it runs
        # automatically once a client has connected successfully.
        def handle(self):
            # client_address is the client's (host, port) tuple.
            print('... connected from {}'.format(self.client_address))
            while True:
                try:
                    objs=recvObjs(self.request)
                    if objs==None:
                        break
                    cmd=objs['cmd'].decode('str')
                    if cmd=='exit':
                        print('...disconnet from {}'.format(self.client_address))
                        break
                    rdata=handleFunc(objs)
                    self.request.send(rdata)
                except:
                    # NOTE(review): this bare except hides every handler error
                    # from the server log; the client only sees {'error': -1}.
                    dobjs={'error':-1}
                    rdata=encodeObjs(dobjs)
                    self.request.send(rdata)
    tcp_server = socketserver.TCPServer(address, NetcallRequestHandler)
    print('等待客户端连接...')
    try:
        tcp_server.serve_forever()  # block, serving clients forever
    except KeyboardInterrupt:
        tcp_server.server_close()  # close the server socket
        print('\nClose')
        exit()
# Request-handler subclass used by the self-test server in main() below:
# decodes a fixed set of fields from one message and echoes them back.
class MyRequestHandler(socketserver.BaseRequestHandler):
    # Override handle(), which does nothing in the base class; it runs
    # automatically once a client has connected successfully.
    def handle(self):
        # client_address is the client's (host, port) tuple.
        print('... connected from {}'.format(self.client_address))
        objs=recvObjs(self.request)
        # dimg=objs['img'].decode_as('image')
        # cv2.imwrite('/home/fan/dimg.jpg',dimg)
        x=objs['x'].decode('int')
        y=objs['y'].decode('int')
        z=objs['z'].decode('str')
        img=objs['img'].decode('image')
        vx=objs['vx'].decode_list('str')
        print(x)
        print(y)
        print(z)
        print(vx)
        # Echo everything back; the 'img:image' key forces image encoding.
        dobjs={
            'x':x,'y':y,'z':z,'img:image':img, 'vx':vx
        }
        data=encodeObjs(dobjs)
        self.request.send(data)
import sys
def test_encode():
    """Ad-hoc encode/decode round-trip check (manual, no assertions)."""
    objs={
        'x':1.0, 'y':[1,2,3],'z':['he','she','me']
    }
    data=encodeObjs(objs)
    # Skip the 4-byte total-size header before decoding the body.
    objs=decodeObjs(data[4:])
    x=objs['x'].decode('float')
    y=objs['y'].decode_list('int')
    z=objs['z'].decode_list('str')
    # NOTE(review): results were only inspected in a debugger; the line
    # below is a no-op kept from the original.
    data=data
def main():
    """Start the single-threaded echo test server on 127.0.0.1:8000."""
    #test_encode()
    #return
    # Create and start a TCP server.  It is single-threaded, so it cannot
    # serve two clients at the same time.  After each successful client
    # connection it runs handle() once and then drops the connection —
    # one request per connection; consecutive requests require the client
    # to recreate its socket.
    ADDR = ('127.0.0.1', 8000)
    tcp_server = socketserver.TCPServer(ADDR, MyRequestHandler)
    print('等待客户端连接...')
    try:
        tcp_server.serve_forever()  # block, serving clients forever
    except KeyboardInterrupt:
        tcp_server.server_close()  # close the server socket
        print('\nClose')
        exit()
if __name__ == '__main__':
    main()
| 11,842
| 25.854875
| 81
|
py
|
DeSpaWN
|
DeSpaWN-main/Script_DeSpaWN.py
|
# -*- coding: utf-8 -*-
"""
Title: Fully Learnable Deep Wavelet Transform for Unsupervised Monitoring of High-Frequency Time Series
------ (DeSpaWN)
Description:
--------------
Toy script to showcase the deep neural network DeSpaWN.
Please cite the corresponding paper:
Michau, G., Frusque, G., & Fink, O. (2022).
Fully learnable deep wavelet transform for unsupervised monitoring of high-frequency time series.
Proceedings of the National Academy of Sciences, 119(8).
Version: 1.0
--------
@author: Dr. Gabriel Michau,
-------- Chair of Intelligent Maintenance Systems
ETH Zürich
Created on 15.01.2022
Licence:
----------
MIT License
Copyright (c) 2022 Dr. Gabriel Michau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# Usual packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf # designed with 2.1.0 /!\ models output changes with tf>2.2.0
import tensorflow.keras as keras
from lib import despawn
# Load a toy time series data to run DeSPAWN
signal = pd.read_csv("monthly-sunspots.csv")
lTrain = 2000 # length of the training section
# z-score normalisation, then add axes to get shape (1, time, 1, 1) as
# consumed by the stride-(2,1) 2-D convolutions in the network.
signalT = ((signal['Sunspots']-signal['Sunspots'].mean())/signal['Sunspots'].std()).values[np.newaxis,:,np.newaxis,np.newaxis]
signal = signalT[:,:lTrain,:,:]
# Number of decomposition level is max log2 of input TS
level = np.floor(np.log2(signal.shape[1])).astype(int)
# Train hard thresholding (HT) coefficient?
trainHT = True
# Initialise HT value
initHT = 0.3
# Which loss to consider for wavelet coeffs ('l1' or None)
lossCoeff='l1'
# Weight for sparsity loss versus residual?
lossFactor = 1.0
# Train wavelets? (Trainable kernels)
kernTrainable = True
# Which training mode?
# cf (https://arxiv.org/pdf/2105.00899.pdf -- https://doi.org/10.1073/pnas.2106598119) [Section 4.4 Ablation Study]
# CQF => learn wavelet 0 infer all other kernels from the network
# PerLayer => learn one wavelet per level, infer others
# PerFilter => learn wavelet + scaling function per level + infer other
# Free => learn everything
mode = 'PerLayer' # QMF PerLayer PerFilter Free
# Initialise wavelet kernel (here db-4)
kernelInit = np.array([-0.010597401785069032, 0.0328830116668852, 0.030841381835560764, -0.18703481171909309,
                       -0.027983769416859854, 0.6308807679298589, 0.7148465705529157, 0.2303778133088965])
epochs = 1000
verbose = 2
# Set sparsity (dummy) loss:
def coeffLoss(yTrue,yPred):
    """Sparsity term: lossFactor times the mean of the model's coefficient-loss output (yTrue is unused)."""
    return lossFactor*tf.reduce_mean(yPred,keepdims=True)
# Set residual loss:
def recLoss(yTrue,yPred):
    """Reconstruction term: element-wise absolute error between signal and reconstruction."""
    return tf.math.abs(yTrue-yPred)
keras.backend.clear_session()
# generates two models:
# model1 outputs the reconstructed signals and the loss on the wavelet coefficients
# model2 outputs the reconstructed signals and wavelet coefficients
model1,model2 = despawn.createDeSpaWN(inputSize=None, kernelInit=kernelInit, kernTrainable=kernTrainable, level=level, lossCoeff=lossCoeff, kernelsConstraint=mode, initHT=initHT, trainHT=trainHT)
opt = keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, name="Nadam")
# For the training we only use model1
model1.compile(optimizer=opt, loss=[recLoss, coeffLoss])
# the sparsity term has no ground truth => just input an empty numpy array as ground truth (anything would do, in coeffLoss, yTrue is not called)
H = model1.fit(signal,[signal,np.empty((signal.shape[0]))], epochs=epochs, verbose=verbose)
# Examples for plotting the model outputs and learnings
indPlot = 0
out = model1.predict(signal)
outC = model2.predict(signal)
# Test part of the signal
outTe = model1.predict(signalT[:,lTrain:,:,:])
outCTe = model2.predict(signalT[:,lTrain:,:,:])
# Top panel: original vs reconstructed signal, train then test range.
fig = plt.figure(1)
fig.clf()
ax = fig.add_subplot(2,1,1)
ax.plot(np.arange(signal.shape[1]),signal[indPlot,:,0,0])
ax.plot(np.arange(signal.shape[1]),out[0][indPlot,:,0,0])
ax.plot(np.arange(signal.shape[1],signalT.shape[1]),signalT[indPlot,lTrain:,0,0])
ax.plot(np.arange(signal.shape[1],signalT.shape[1]),outTe[0][indPlot,:,0,0])
ax.legend(['Train Original','Train Reconstructed','Test Original', 'Test Reconstructed'])
# Bottom-left: per-level distribution of |wavelet coefficients| (train data).
ax = fig.add_subplot(2,2,3)
idpl = 0
for e,o in enumerate(outC[1:]):
    ax.boxplot(np.abs(np.squeeze(o[indPlot,:,:,:])), positions=[e], widths=0.8)
ax.set_xlabel('Decomposition Level')
ax.set_ylabel('Coefficient Distribution')
trainYLim = ax.get_ylim()
trainXLim = ax.get_xlim()
# Bottom-right: same for the test data, drawn on the training axes' limits.
ax = fig.add_subplot(2,2,4)
idpl = 0
for e,o in enumerate(outCTe[1:]):
    print(o.shape[1])
    if o.shape[1]>1:
        ax.boxplot(np.abs(np.squeeze(o[indPlot,:,:,:])), positions=[e], widths=0.8)
    else:
        # A single coefficient cannot be boxplotted; draw it as a point.
        ax.plot(e,np.abs(np.squeeze(o[indPlot,:,:,:])),'o',color='k')
ax.set_xlabel('Decomposition Level')
ax.set_ylabel('Coefficient Distribution')
ax.set_ylim(trainYLim)
ax.set_xlim(trainXLim)
| 5,817
| 38.849315
| 195
|
py
|
DeSpaWN
|
DeSpaWN-main/lib/despawnLayers.py
|
# -*- coding: utf-8 -*-
"""
Title: Fully Learnable Deep Wavelet Transform for Unsupervised Monitoring of High-Frequency Time Series
------ (DeSpaWN)
Description:
--------------
Function to generate the layers used in DeSpaWN TF model.
Please cite the corresponding paper:
Michau, G., Frusque, G., & Fink, O. (2022).
Fully learnable deep wavelet transform for unsupervised monitoring of high-frequency time series.
Proceedings of the National Academy of Sciences, 119(8).
Version: 1.0
--------
@author: Dr. Gabriel Michau,
-------- Chair of Intelligent Maintenance Systems
ETH Zürich
Created on 15.01.2022
Licence:
----------
MIT License
Copyright (c) 2022 Dr. Gabriel Michau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import tensorflow as tf
class Kernel(tf.keras.layers.Layer):
    """Layer holding one learnable wavelet kernel of shape (k, 1, 1, 1).

    kernelInit: an int (kernel length, random-normal initialisation) or an
    array-like whose values initialise the kernel directly.
    trainKern: whether the kernel weights are trainable.
    """
    def __init__(self, kernelInit=8, trainKern=True, **kwargs):
        self.trainKern = trainKern
        if isinstance(kernelInit,int):
            self.kernelSize = kernelInit
            self.kernelInit = 'random_normal'
        else:
            self.kernelSize = kernelInit.__len__()
            self.kernelInit = tf.constant_initializer(kernelInit)
        super(Kernel, self).__init__(**kwargs)
    def build(self, input_shape):
        self.kernel = self.add_weight(shape = (self.kernelSize,1,1,1),
                                      initializer = self.kernelInit,
                                      trainable = self.trainKern, name='kernel')
        super(Kernel, self).build(input_shape)
    def call(self, inputs):
        # Ignores its inputs on purpose: the layer only exposes the kernel
        # tensor so other layers can consume it as a second input.
        return self.kernel
class LowPassWave(tf.keras.layers.Layer):
    """
    Layer that performs a convolution between its two inputs with stride (2,1)

    inputs[0] is the signal, inputs[1] the kernel tensor; the stride-2
    convolution produces the downsampled low-pass (approximation) branch.
    """
    def __init__(self, **kwargs):
        super(LowPassWave, self).__init__(**kwargs)
    def build(self, input_shape):
        super(LowPassWave, self).build(input_shape)
    def call(self, inputs):
        return tf.nn.conv2d(inputs[0], inputs[1], padding="SAME", strides=(2, 1))
class HighPassWave(tf.keras.layers.Layer):
    """
    Layer that performs a convolution between its two inputs with stride (2,1).
    Performs first the reverse alternative flip on the second inputs

    The flip (kernel reversed, signs alternated via qmfFlip) derives the
    high-pass filter from the low-pass kernel, quadrature-mirror style.
    """
    def __init__(self, **kwargs):
        super(HighPassWave, self).__init__(**kwargs)
    def build(self, input_shape):
        # Fixed (-1)^i sign mask with the kernel's length (input_shape[1][0]).
        self.qmfFlip = tf.reshape(tf.Variable([(-1)**(i) for i in range(input_shape[1][0])],
                                              dtype='float32', name='mask', trainable=False),(-1,1,1,1))
        super(HighPassWave, self).build(input_shape)
    def call(self, inputs):
        # print(self.qmfFlip)
        return tf.nn.conv2d(inputs[0], tf.math.multiply(tf.reverse(inputs[1],[0]),self.qmfFlip),
                            padding="SAME", strides=(2, 1))
class LowPassTrans(tf.keras.layers.Layer):
    """
    Layer that performs a convolution transpose between its two inputs with stride (2,1).
    The third input specifies the size of the reconstructed signal (to make sure it matches the decomposed one)
    """
    def __init__(self, **kwargs):
        super(LowPassTrans, self).__init__(**kwargs)
    def build(self, input_shape):
        super(LowPassTrans, self).build(input_shape)
    def call(self, inputs):
        # inputs: [coefficients, kernel, target output shape].
        return tf.nn.conv2d_transpose(inputs[0], inputs[1], inputs[2], padding="SAME", strides=(2, 1))
class HighPassTrans(tf.keras.layers.Layer):
    """
    Layer that performs a convolution transpose between its two inputs with stride (2,1).
    Performs first the reverse alternative flip on the second inputs
    The third input specifies the size of the reconstructed signal (to make sure it matches the decomposed one)
    """
    def __init__(self, **kwargs):
        super(HighPassTrans, self).__init__(**kwargs)
    def build(self, input_shape):
        # Fixed (-1)^i sign mask with the kernel's length (input_shape[1][0]),
        # same quadrature-mirror flip as in HighPassWave.
        self.qmfFlip = tf.reshape(tf.Variable([(-1)**(i) for i in range(input_shape[1][0])],
                                              dtype='float32', name='mask', trainable=False),(-1,1,1,1))
        super(HighPassTrans, self).build(input_shape)
    def call(self, inputs):
        # inputs: [coefficients, kernel, target output shape].
        return tf.nn.conv2d_transpose(inputs[0], tf.math.multiply(tf.reverse(inputs[1],[0]),self.qmfFlip),
                                      inputs[2], padding="SAME", strides=(2, 1))
class HardThresholdAssym(tf.keras.layers.Layer):
    """
    Learnable Hard-thresholding layers

    Smooth (sigmoid-gated) approximation of an asymmetric hard threshold:
    the gate is close to 1 for inputs above thrP or below -thrN and close
    to 0 in between, so small coefficients are suppressed.  thrP/thrN are
    trainable when trainBias is True; a numeric `init` sets their initial
    value, otherwise they start at 1.
    """
    def __init__(self, init=None, trainBias=True, **kwargs):
        if isinstance(init,float) or isinstance(init,int):
            self.init = tf.constant_initializer(init)
        else:
            self.init = 'ones'
        self.trainBias = trainBias
        super(HardThresholdAssym, self).__init__(**kwargs)
    def build(self, input_shape):
        self.thrP = self.add_weight(shape = (1,1,1,1), initializer=self.init,
                                    trainable = self.trainBias, name='threshold+')
        self.thrN = self.add_weight(shape = (1,1,1,1), initializer=self.init,
                                    trainable = self.trainBias, name='threshold-')
        super(HardThresholdAssym, self).build(input_shape)
    def call(self, inputs):
        # Gate: sigmoid(10*(x - thrP)) + sigmoid(-10*(x + thrN)); the factor
        # 10 sharpens the sigmoids toward a step function.
        return tf.math.multiply(inputs,tf.math.sigmoid(10*(inputs-self.thrP))+\
                                tf.math.sigmoid(-10*(inputs+self.thrN)))
| 6,326
| 40.084416
| 111
|
py
|
DeSpaWN
|
DeSpaWN-main/lib/despawn.py
|
# -*- coding: utf-8 -*-
"""
Title: Fully Learnable Deep Wavelet Transform for Unsupervised Monitoring of High-Frequency Time Series
------ (DeSpaWN)
Description:
--------------
Function to generate a DeSpaWN TF model.
Please cite the corresponding paper:
Michau, G., Frusque, G., & Fink, O. (2022).
Fully learnable deep wavelet transform for unsupervised monitoring of high-frequency time series.
Proceedings of the National Academy of Sciences, 119(8).
Version: 1.0
--------
@author: Dr. Gabriel Michau,
-------- Chair of Intelligent Maintenance Systems
ETH Zürich
Created on 15.01.2022
Licence:
----------
MIT License
Copyright (c) 2022 Dr. Gabriel Michau
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# /!\ Designed for tensorflow 2.1.X
import tensorflow as tf
import tensorflow.keras as keras
from lib import despawnLayers as impLay
def createDeSpaWN(inputSize=None, kernelInit=8, kernTrainable=True, level=1, lossCoeff='l1', kernelsConstraint='QMF', initHT=1.0, trainHT=True):
    """
    Generate the DeSpaWN TF models.

    Parameters
    ----------
    inputSize : int, optional
        Length of the time series. The network is more efficient if set.
        Can be None to allow variable-length inputs. The default is None.
    kernelInit : numpy array, list or int, optional
        Initialisation of the kernel. If int, random normal initialisation of
        size kernelInit. If array or list, kernelInit is the kernel itself.
        The default is 8.
    kernTrainable : bool, optional
        Whether the kernels are trainable. Set to False to compare against a
        traditional (fixed) wavelet decomposition. The default is True.
    level : int, optional
        Number of layers in the network. Ideally log2 of the time series
        length; if bigger, additional layers will be of size 1.
        The default is 1.
    lossCoeff : str or None, optional
        Which loss on the wavelet coefficients to compute: None (no loss) or
        'l1' for the L1-norm of the coefficients. The default is 'l1'.
    kernelsConstraint : str, optional
        Which version of DeSpaWN to implement (see the paper,
        https://arxiv.org/pdf/2105.00899.pdf, Section 4.4 Ablation Study):
          'CQF' (alias 'QMF'): a single kernel shared by every layer/filter.
          'PerLayer': one kernel per layer, shared by all four filters.
          'PerFilter': separate low/high-pass analysis kernels, reused for synthesis.
          'Free': all kernels independent.
        The default is 'QMF', treated as 'CQF'.
    initHT : float, optional
        Value to initialise the hard-thresholding coefficient.
        The default is 1.0.
    trainHT : bool, optional
        Whether the hard-thresholding coefficient is trainable. Set to False
        to compare against a traditional wavelet decomposition.
        The default is True.

    Returns
    -------
    model1 : keras Model outputting the reconstructed signal and the loss on
        the wavelet coefficients.
    model2 : keras Model outputting the reconstructed signal, the coarsest
        approximation coefficients, and the detail coefficients.
    model1 and model2 share their architecture, weights and parameters;
    training one of the two changes both models.

    Raises
    ------
    ValueError
        If kernelsConstraint or lossCoeff is not a supported value.
    """
    input_shape = (inputSize, 1, 1)
    inputSig = keras.layers.Input(shape=input_shape, name='input_Raw')
    g = inputSig
    # Bug fix: the signature default 'QMF' previously matched no branch, so the
    # kernel lists were never bound and the default call raised NameError.
    # 'QMF' is now accepted as an alias of 'CQF' and unknown values are rejected.
    if kernelsConstraint in ('CQF', 'QMF'):
        kern = impLay.Kernel(kernelInit, trainKern=kernTrainable)(g)
        kernelsG = [kern for _ in range(level)]
        kernelsH = kernelsG
        kernelsGT = kernelsG
        kernelsHT = kernelsG
    elif kernelsConstraint == 'PerLayer':
        kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsH = kernelsG
        kernelsGT = kernelsG
        kernelsHT = kernelsG
    elif kernelsConstraint == 'PerFilter':
        kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsH = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsGT = kernelsG
        kernelsHT = kernelsH
    elif kernelsConstraint == 'Free':
        kernelsG = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsH = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsGT = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
        kernelsHT = [impLay.Kernel(kernelInit, trainKern=kernTrainable)(g) for _ in range(level)]
    else:
        raise ValueError('Unknown kernelsConstraint: %r. Expected one of '
                         '\'CQF\'/\'QMF\', \'PerLayer\', \'PerFilter\', \'Free\'.'
                         % (kernelsConstraint,))
    hl = []       # per-level detail (high-pass) coefficients
    inSizel = []  # per-level input shapes, needed by the transposed convolutions
    # Decomposition (analysis) cascade.
    for lev in range(level):
        inSizel.append(tf.shape(g))
        hl.append(impLay.HardThresholdAssym(init=initHT, trainBias=trainHT)(impLay.HighPassWave()([g, kernelsH[lev]])))
        g = impLay.LowPassWave()([g, kernelsG[lev]])
        g = impLay.HardThresholdAssym(init=initHT, trainBias=trainHT)(g)
    # save intermediate coefficients to output them
    gint = g
    # Reconstruction (synthesis) cascade.
    for lev in range(level - 1, -1, -1):
        h = impLay.HighPassTrans()([hl[lev], kernelsHT[lev], inSizel[lev]])
        g = impLay.LowPassTrans()([g, kernelsGT[lev], inSizel[lev]])
        g = keras.layers.Add()([g, h])
    # Compute specified loss on coefficients.
    if not lossCoeff:
        vLossCoeff = tf.zeros((1, 1, 1, 1))
    elif lossCoeff == 'l1':
        # L1-Sum over the concatenated approximation + detail coefficients.
        vLossCoeff = tf.math.reduce_mean(tf.math.abs(tf.concat([gint] + hl, axis=1)), axis=1, keepdims=True)
    else:
        raise ValueError('Could not understand value in \'lossCoeff\'. It should be either \'l1\' or \'None\'')
    return keras.models.Model(inputSig, [g, vLossCoeff]), keras.models.Model(inputSig, [g, gint, hl[::-1]])
#### /!\ In tf > 2.2.0 each output variable is 1 output. The second model above output 3 variables and not level+2 as is tf 2.1.0
| 6,679
| 43.238411
| 144
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/example.py
|
#!/bin/python
import gym, gym_mupen64plus
env = gym.make('Mario-Kart-Luigi-Raceway-v0')
env.reset()

# Controller states: [joystick X, joystick Y, A, B, RB]
NOOP = [0, 0, 0, 0, 0]
ACCELERATE = [0, 0, 1, 0, 0]
HARD_LEFT_ACCELERATE = [-80, 0, 1, 0, 0]
HARD_LEFT_COAST = [-80, 0, 0, 0, 0]

print("NOOP waiting for green light")
for _ in range(18):
    (obs, rew, end, info) = env.step(NOOP)  # NOOP until green light

print("GO! ...drive straight as fast as possible...")
for _ in range(50):
    (obs, rew, end, info) = env.step(ACCELERATE)  # Drive straight

print("Doughnuts!!")
for step_num in range(10000):
    if step_num % 100 == 0:
        print("Step " + str(step_num))
    # Hard-left doughnuts: alternate accelerating and coasting while steering.
    (obs, rew, end, info) = env.step(HARD_LEFT_ACCELERATE)
    (obs, rew, end, info) = env.step(HARD_LEFT_COAST)

raw_input("Press <enter> to exit... ")
env.close()
| 694
| 26.8
| 78
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/setup.py
|
from setuptools import setup
# Pinned runtime dependencies for the gym_mupen64plus package.
REQUIRED_PACKAGES = [
    'gym==0.7.4',
    'numpy==1.16.2',
    'PyYAML==5.1',
    'termcolor==1.1.0',
    'mss==4.0.2',  # 4.0.3 removes support for Python 2.7
    'opencv-python==4.1.0.25',
]

setup(
    name='gym_mupen64plus',
    version='0.0.3',
    install_requires=REQUIRED_PACKAGES,
)
| 374
| 33.090909
| 76
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/__init__.py
|
import logging
from gym_mupen64plus.envs.MarioKart64.mario_kart_env import MarioKartEnv
from gym_mupen64plus.envs.Smash.smash_env import SmashEnv
logger = logging.getLogger(__name__)
| 184
| 29.833333
| 72
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/mupen64plus_env.py
|
import sys
PY3_OR_LATER = sys.version_info[0] >= 3
if PY3_OR_LATER:
# Python 3 specific definitions
from http.server import BaseHTTPRequestHandler, HTTPServer
else:
# Python 2 specific definitions
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import abc
import array
from contextlib import contextmanager
import inspect
import itertools
import json
import os
import subprocess
import threading
import time
from termcolor import cprint
import yaml
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import numpy as np
import mss
###############################################
class ImageHelper:
    """Small helper for reading pixels out of a screen-capture array."""

    def GetPixelColor(self, image_array, x, y):
        """Return the (red, green, blue) tuple at column *x*, row *y*.

        The array is indexed row-first, so *y* selects the row.
        """
        pixel = image_array[y][x]
        return (pixel[0], pixel[1], pixel[2])
###############################################
### Variables & Constants ###
###############################################
# The width, height, and depth of the emulator window:
SCR_W = 640
SCR_H = 480
SCR_D = 3  # color depth: 3 channels (RGB)
# One millisecond expressed in seconds, for sleep-style timing.
MILLISECOND = 1.0 / 1000.0
# Shared module-level helper instance for pixel lookups.
IMAGE_HELPER = ImageHelper()
###############################################
class Mupen64PlusEnv(gym.Env):
    """Abstract gym environment that drives a Mupen64Plus emulator.

    Responsibilities handled here:
      * loading/validating configuration and (optionally) starting Xvfb and
        the emulator as subprocesses,
      * serving controller state to the emulator's input plugin over HTTP,
      * grabbing the screen with mss to produce observations.

    Subclasses implement the game-specific abstract methods
    (_load_config, _validate_config, _navigate_menu, _get_reward,
    _evaluate_end_state, _reset).
    """
    __metaclass__ = abc.ABCMeta
    metadata = {'render.modes': ['human']}

    def __init__(self):
        self.viewer = None
        self.reset_count = 0
        self.step_count = 0
        self.running = True
        self.episode_over = False
        self.pixel_array = None
        self._base_load_config()
        self._base_validate_config()
        self.frame_skip = self.config['FRAME_SKIP']
        if self.frame_skip < 1:
            self.frame_skip = 1
        self.controller_server, self.controller_server_thread = self._start_controller_server()

        initial_disp = os.environ["DISPLAY"]
        cprint('Initially on DISPLAY %s' % initial_disp, 'red')

        # If the EXTERNAL_EMULATOR environment variable is True, we are running the
        # emulator out-of-process (likely via docker/docker-compose). If not, we need
        # to start the emulator in-process here.
        # Fixed: os.environ.has_key() does not exist on Python 3 (this file
        # otherwise supports both); the 'in' operator works on both versions.
        external_emulator = ("EXTERNAL_EMULATOR" in os.environ and
                             os.environ["EXTERNAL_EMULATOR"] == 'True')
        if not external_emulator:
            self.xvfb_process, self.emulator_process = \
                self._start_emulator(rom_name=self.config['ROM_NAME'],
                                     gfx_plugin=self.config['GFX_PLUGIN'],
                                     input_driver_path=self.config['INPUT_DRIVER_PATH'])

        # TODO: Test and cleanup:
        # May need to initialize this after the DISPLAY env var has been set
        # so it attaches to the correct X display; otherwise screenshots may
        # come from the wrong place. This used to be true when we were using
        # wxPython for screenshots. Untested after switching to mss.
        cprint('Calling mss.mss() with DISPLAY %s' % os.environ["DISPLAY"], 'red')
        self.mss_grabber = mss.mss()
        time.sleep(2)  # Give mss a couple seconds to initialize; also may not be necessary

        # Restore the DISPLAY env var
        os.environ["DISPLAY"] = initial_disp
        cprint('Changed back to DISPLAY %s' % os.environ["DISPLAY"], 'red')

        with self.controller_server.frame_skip_disabled():
            self._navigate_menu()

        self.observation_space = \
            spaces.Box(low=0, high=255, shape=(SCR_H, SCR_W, SCR_D))

        # gym 0.7-style MultiDiscrete: one [min, max] range per control.
        self.action_space = spaces.MultiDiscrete([[-80, 80],  # Joystick X-axis
                                                  [-80, 80],  # Joystick Y-axis
                                                  [  0,  1],  # A Button
                                                  [  0,  1],  # B Button
                                                  [  0,  1],  # RB Button
                                                  [  0,  1],  # LB Button
                                                  [  0,  1],  # Z Button
                                                  [  0,  1],  # C Right Button
                                                  [  0,  1],  # C Left Button
                                                  [  0,  1],  # C Down Button
                                                  [  0,  1],  # C Up Button
                                                  [  0,  1],  # D-Pad Right Button
                                                  [  0,  1],  # D-Pad Left Button
                                                  [  0,  1],  # D-Pad Down Button
                                                  [  0,  1],  # D-Pad Up Button
                                                  [  0,  1],  # Start Button
                                                 ])

    def _base_load_config(self):
        """Load the shared config.yml, then let the subclass extend it."""
        self.config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "config.yml")))
        self._load_config()

    @abc.abstractmethod
    def _load_config(self):
        return

    def _base_validate_config(self):
        """Check required keys, then run subclass-specific validation."""
        if 'ROM_NAME' not in self.config:
            raise AssertionError('ROM_NAME configuration is required')
        if 'GFX_PLUGIN' not in self.config:
            raise AssertionError('GFX_PLUGIN configuration is required')
        self._validate_config()

    @abc.abstractmethod
    def _validate_config(self):
        return

    def _step(self, action):
        """Apply one action and return (observation, reward, done, info)."""
        #cprint('Step %i: %s' % (self.step_count, action), 'green')
        self._act(action)
        obs = self._observe()
        self.episode_over = self._evaluate_end_state()
        reward = self._get_reward()

        self.step_count += 1
        return obs, reward, self.episode_over, {}

    def _act(self, action, count=1):
        """Send the controller state to the input server 'count' times."""
        for _ in itertools.repeat(None, count):
            self.controller_server.send_controls(ControllerState(action))

    def _wait(self, count=1, wait_for='Unknown'):
        """Send NO_OP for 'count' steps; wait_for is a human-readable label."""
        self._act(ControllerState.NO_OP, count=count)

    def _press_button(self, button, times=1):
        """Press and release 'button' the given number of times."""
        for _ in itertools.repeat(None, times):
            self._act(button)  # Press
            self._act(ControllerState.NO_OP)  # and release

    def _observe(self):
        """Grab the emulator window as an (SCR_H, SCR_W, 3) RGB uint8 array."""
        #cprint('Observe called!', 'yellow')
        if self.config['USE_XVFB']:
            offset_x = 0
            offset_y = 0
        else:
            offset_x = self.config['OFFSET_X']
            offset_y = self.config['OFFSET_Y']
        image_array = \
            np.array(self.mss_grabber.grab({"top": offset_y,
                                            "left": offset_x,
                                            "width": SCR_W,
                                            "height": SCR_H}),
                     dtype=np.uint8)

        # drop the alpha channel and flip red and blue channels (BGRA -> RGB)
        self.pixel_array = np.flip(image_array[:, :, :3], 2)
        return self.pixel_array

    @abc.abstractmethod
    def _navigate_menu(self):
        return

    @abc.abstractmethod
    def _get_reward(self):
        #cprint('Get Reward called!', 'yellow')
        return 0

    @abc.abstractmethod
    def _evaluate_end_state(self):
        #cprint('Evaluate End State called!', 'yellow')
        return False

    @abc.abstractmethod
    def _reset(self):
        cprint('Reset called!', 'yellow')
        self.reset_count += 1
        self.step_count = 0
        return self._observe()

    def _render(self, mode='human', close=False):
        """Render the last observation; supports 'human' and 'rgb_array'."""
        if close:
            if hasattr(self, 'viewer') and self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        img = self.pixel_array
        if mode == 'rgb_array':
            return img
        elif mode == 'human':
            if not hasattr(self, 'viewer') or self.viewer is None:
                from gym.envs.classic_control import rendering
                self.viewer = rendering.SimpleImageViewer()
            self.viewer.imshow(img)

    def _close(self):
        cprint('Close called!', 'yellow')
        self.running = False
        self._kill_emulator()
        self._stop_controller_server()

    def _start_controller_server(self):
        """Start the HTTP server the emulator input plugin polls for controls."""
        server = ControllerHTTPServer(server_address=('', self.config['PORT_NUMBER']),
                                      control_timeout=self.config['ACTION_TIMEOUT'],
                                      frame_skip=self.frame_skip)  # TODO: Environment argument (with issue #26)
        server_thread = threading.Thread(target=server.serve_forever, args=())
        server_thread.daemon = True
        server_thread.start()
        print('ControllerHTTPServer started on port ', self.config['PORT_NUMBER'])
        return server, server_thread

    def _stop_controller_server(self):
        #cprint('Stop Controller Server called!', 'yellow')
        if hasattr(self, 'controller_server'):
            self.controller_server.shutdown()

    def _start_emulator(self,
                        rom_name,
                        gfx_plugin,
                        input_driver_path,
                        res_w=SCR_W,
                        res_h=SCR_H,
                        res_d=SCR_D):
        """Launch the emulator subprocess, optionally under Xvfb + VirtualGL."""
        rom_path = os.path.abspath(
            os.path.join(os.path.dirname(inspect.stack()[0][1]),
                         '../ROMs',
                         rom_name))

        if not os.path.isfile(rom_path):
            msg = "ROM not found: " + rom_path
            cprint(msg, 'red')
            raise Exception(msg)

        input_driver_path = os.path.abspath(os.path.expanduser(input_driver_path))
        if not os.path.isfile(input_driver_path):
            msg = "Input driver not found: " + input_driver_path
            cprint(msg, 'red')
            raise Exception(msg)

        cmd = [self.config['MUPEN_CMD'],
               "--nospeedlimit",
               "--nosaveoptions",
               "--resolution",
               "%ix%i" % (res_w, res_h),
               "--gfx", gfx_plugin,
               "--audio", "dummy",
               "--input", input_driver_path,
               rom_path]

        xvfb_proc = None
        if self.config['USE_XVFB']:
            display_num = -1
            success = False
            # If we couldn't find an open display number after 15 attempts, give up
            while not success and display_num <= 15:
                display_num += 1
                xvfb_cmd = [self.config['XVFB_CMD'],
                            ":" + str(display_num),
                            "-screen",
                            "0",
                            "%ix%ix%i" % (res_w, res_h, res_d * 8),
                            "-fbdir",
                            self.config['TMP_DIR']]

                cprint('Starting xvfb with command: %s' % xvfb_cmd, 'yellow')

                xvfb_proc = subprocess.Popen(xvfb_cmd, shell=False, stderr=subprocess.STDOUT)

                time.sleep(2)  # Give xvfb a couple seconds to start up

                # Poll the process to see if it exited early
                # (most likely due to a server already active on the display_num)
                if xvfb_proc.poll() is None:
                    success = True

            print('')  # new line

            if not success:
                msg = "Failed to initialize Xvfb!"
                cprint(msg, 'red')
                raise Exception(msg)

            os.environ["DISPLAY"] = ":" + str(display_num)
            cprint('Using DISPLAY %s' % os.environ["DISPLAY"], 'blue')
            cprint('Changed to DISPLAY %s' % os.environ["DISPLAY"], 'red')

            cmd = [self.config['VGLRUN_CMD'], "-d", ":" + str(display_num)] + cmd

        cprint('Starting emulator with comand: %s' % cmd, 'yellow')

        emulator_process = subprocess.Popen(cmd,
                                            env=os.environ.copy(),
                                            shell=False,
                                            stderr=subprocess.STDOUT)

        emu_mon = EmulatorMonitor()
        monitor_thread = threading.Thread(target=emu_mon.monitor_emulator,
                                          args=[emulator_process])
        monitor_thread.daemon = True
        monitor_thread.start()

        return xvfb_proc, emulator_process

    def _kill_emulator(self):
        #cprint('Kill Emulator called!', 'yellow')
        try:
            self._act(ControllerState.NO_OP)
            if self.emulator_process is not None:
                self.emulator_process.kill()
            if self.xvfb_process is not None:
                self.xvfb_process.terminate()
        except AttributeError:
            pass  # We may be shut down during initialization before these attributes have been set
###############################################
class EmulatorMonitor:
    """Watches the emulator subprocess and reports when it exits."""

    def monitor_emulator(self, emulator):
        """Block until *emulator* (a Popen-like object) terminates."""
        return_code = emulator.poll()
        while return_code is None:
            time.sleep(2)
            if emulator is None:
                # Defensive branch kept from the original; 'emulator' is a
                # local that is never rebound, so this should be unreachable.
                print('Emulator reference is no longer valid. Shutting down?')
                return
            return_code = emulator.poll()

        # TODO: this means our environment died... need to die too
        print('Emulator closed with code: ' + str(return_code))
###############################################
class ControllerState(object):
    """Value object holding one snapshot of N64 controller inputs.

    The 16-element control layout is:
    [JX, JY, A, B, RB, LB, Z, CR, CL, CD, CU, DR, DL, DD, DU, S]
    """
    NO_OP          = [   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    START_BUTTON   = [   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
    A_BUTTON       = [   0,    0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    B_BUTTON       = [   0,    0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    RB_BUTTON      = [   0,    0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    CR_BUTTON      = [   0,    0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
    CL_BUTTON      = [   0,    0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
    CD_BUTTON      = [   0,    0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
    CU_BUTTON      = [   0,    0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
    JOYSTICK_UP    = [   0,  127, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    JOYSTICK_DOWN  = [   0, -128, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    JOYSTICK_LEFT  = [-128,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    JOYSTICK_RIGHT = [ 127,    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]

    # Attribute names in the same order as the control list above. Assigning
    # in this order keeps __dict__ insertion order — and hence to_json() key
    # order — identical to the original explicit assignments.
    _FIELDS = ('X_AXIS', 'Y_AXIS', 'A_BUTTON', 'B_BUTTON', 'R_TRIG', 'L_TRIG',
               'Z_TRIG', 'R_CBUTTON', 'L_CBUTTON', 'D_CBUTTON', 'U_CBUTTON',
               'R_DPAD', 'L_DPAD', 'D_DPAD', 'U_DPAD', 'START_BUTTON')

    def __init__(self, controls=NO_OP):
        for index, field in enumerate(ControllerState._FIELDS):
            setattr(self, field, controls[index])

    def to_json(self):
        """Serialize the instance attributes as a JSON object string."""
        return json.dumps(self.__dict__)
###############################################
class ControllerHTTPServer(HTTPServer, object):
    """HTTP server handing controller state to the emulator's input plugin.

    The environment thread calls send_controls(); the emulator's input driver
    polls via GET. Two threading.Events form a handshake: 'controls_updated'
    releases the pending GET, and 'response_sent' releases the waiting
    environment thread once the controls have been served 'frame_skip' times.
    """
    def __init__(self, server_address, control_timeout, frame_skip):
        # NOTE(review): control_timeout is stored but not read anywhere in
        # this class as visible here — confirm whether it is still needed.
        self.control_timeout = control_timeout
        self.controls = ControllerState()
        self.controls_updated = threading.Event()
        self.response_sent = threading.Event()
        self.running = True
        self.responses_sent = 0
        self.frame_skip = frame_skip
        self.frame_skip_enabled = True
        super(ControllerHTTPServer, self).__init__(server_address, self.ControllerRequestHandler)
    def send_controls(self, controls):
        """Publish a new controller state and block until it has been served."""
        self.responses_sent = 0
        self.controls = controls
        # Tell the request handler that the controls have been updated so it can send the response now:
        self.controls_updated.set()
        # Wait for response to actually be sent before returning:
        if self.running:
            self.response_sent.wait()
            self.response_sent.clear()
    def shutdown(self):
        """Unblock any waiting threads, then shut the HTTP server down."""
        self.running = False
        # Make sure we aren't blocking on anything:
        self.response_sent.set()
        self.controls_updated.set()
        # Shutdown the server:
        if PY3_OR_LATER:
            super().shutdown()
            super().server_close()
        else:
            super(ControllerHTTPServer, self).shutdown()
            super(ControllerHTTPServer, self).server_close()
    # http://preshing.com/20110920/the-python-with-statement-by-example/#implementing-the-context-manager-as-a-generator
    @contextmanager
    def frame_skip_disabled(self):
        # Temporarily serve each control state exactly once per send_controls()
        # call (used while navigating menus, where skipping frames is unsafe).
        self.frame_skip_enabled = False
        yield True
        self.frame_skip_enabled = True
    class ControllerRequestHandler(BaseHTTPRequestHandler, object):
        # Request handler for the input driver's polling GETs.
        def log_message(self, fmt, *args):
            # Silence per-request logging; the driver polls continuously.
            pass
        def write_response(self, resp_code, resp_data):
            # Send a JSON response with the given HTTP status code.
            self.send_response(resp_code)
            self.send_header("Content-type", "application/json")
            self.end_headers()
            self.wfile.write(resp_data.encode())
        def do_GET(self):
            # Wait for the controls to be updated before responding:
            if self.server.running:
                self.server.controls_updated.wait()
            if not self.server.running:
                print('Sending SHUTDOWN response')
                # TODO: This sometimes fails with a broken pipe because
                # the emulator has already stopped. Should handle gracefully (Issue #4)
                self.write_response(500, "SHUTDOWN")
            else:
                ### respond with controller output
                self.write_response(200, self.server.controls.to_json())
                self.server.responses_sent += 1
                # If we have sent the controls 'n' times now...
                if self.server.responses_sent >= self.server.frame_skip or not self.server.frame_skip_enabled:
                    # ...we fire the response_sent event so the next action can happen:
                    self.server.controls_updated.clear()
                    self.server.response_sent.set()
###############################################
| 18,495
| 36.824131
| 120
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/__init__.py
| 0
| 0
| 0
|
py
|
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/Smash/smash_env.py
|
import abc
import inspect
import itertools
import os
import yaml
from termcolor import cprint
from gym import spaces
from gym_mupen64plus.envs.mupen64plus_env \
import Mupen64PlusEnv, ControllerState, IMAGE_HELPER
import numpy as np
from gym_mupen64plus.envs.Smash \
import damage_parser, damage_tracker
# NOTE(review): this module-level load looks redundant — _load_config below
# reloads the same smash_config.yml into self.config, and mk_config is not
# referenced elsewhere in this file. Confirm before removing.
mk_config = yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "smash_config.yml")))
# Smash runs at 60 frames per second; used to convert steps to frame counts.
FRAMES_PER_SECOND = 60
###############################################
class SmashEnv(Mupen64PlusEnv):
    """Environment for Super Smash Bros.

    Allows custom stage and characters for self and opponents.

    Attributes:
        _last_dmg_step: int, the last step either player took damage
            (initialised in _reset(), which runs before the first step).
        _my_damage_tracker: DamageTracker for the agent's character.
        _their_damage_tracker: DamageTracker for the opponent's character.
        _my_char_pos: (int, int), the (row, col) of agent character in the
          selection screen.
        _their_char_pos: (int, int), the (row, col) of opponent character in
          the selection screen.
        _my_char_color: [int], button to press for agent character color.
        _their_char_color: [int], button to press for opponent character color.
        _map_pos: (int, int), the (row, col) of the map in the selection screen.
        _action_space: MultiDiscrete gym action space, possible allowed actions.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(
            self, my_character='pikachu', their_character='dk',
            my_character_color='CUP', their_character_color='CLEFT',
            opponent_bot_level=10, map='DreamLand'):
        # TODO: Make player number configurable in the future.
        self._set_characters(my_character, their_character)
        self._set_characters_color(my_character_color, their_character_color)
        self._opponent_bot_level = opponent_bot_level
        # Agent and opponent cannot be the same character and color.
        assert (self._my_char_pos != self._their_char_pos or
                self._my_char_color != self._their_char_color)
        self._set_map(map)
        super(SmashEnv, self).__init__()
        self._my_damage_tracker = damage_tracker.DamageTracker(self.frame_skip, playernum=1)
        self._their_damage_tracker = damage_tracker.DamageTracker(self.frame_skip, playernum=2)
        # gym 0.7-style MultiDiscrete: one [min, max] range per control.
        self.action_space = spaces.MultiDiscrete([[-128, 127],  # Joystick X
                                                  [-128, 127],  # Joystick Y
                                                  [   0,   1],  # A
                                                  [   0,   1],  # B
                                                  [   0,   0],  # RB- unused
                                                  [   0,   1],  # LB
                                                  [   0,   1],  # Z
                                                  [   0,   1]])  # C

    def _step(self, action):
        """Pad the 8-control Smash action out to the full 16-control state."""
        # Append unneeded inputs.
        num_missing = len(ControllerState.A_BUTTON) - len(action)
        full_action = action + [0] * num_missing
        return super(SmashEnv, self)._step(full_action)

    def _reset(self):
        """Reset damage tracking; the match itself is not restarted."""
        self._my_damage_tracker = damage_tracker.DamageTracker(self.frame_skip, playernum=1)
        self._their_damage_tracker = damage_tracker.DamageTracker(self.frame_skip, playernum=2)
        self._last_dmg_step = 0
        # Nothing to do on the first call to reset()
        if self.reset_count > 0:
            # Make sure we don't skip frames while navigating the menus
            with self.controller_server.frame_skip_disabled():
                # TODO: Possibly allow exiting an in-progress map?
                pass
        return super(SmashEnv, self)._reset()

    # Agressiveness hyperparam- start applying if they go too long without
    # either taking or giving damage.
    def _get_aggressiveness_penalty(self):
        """Return a negative reward once no damage has occurred for 4 seconds."""
        frames_since_dmg = (self.step_count - self._last_dmg_step) * self.frame_skip
        # Apply if we've gone 4 seconds without any damage.
        if frames_since_dmg > 4 * FRAMES_PER_SECOND:
            # Penalty is tuned to be equal to taking 1 damage every 1 second.
            return -1.0 * self.frame_skip / FRAMES_PER_SECOND
        return 0.0

    def _get_dmg_reward(self):
        """Reward damage dealt / deaths caused, penalize damage/deaths taken."""
        self._my_damage_tracker.observe_damage(self.pixel_array)
        self._their_damage_tracker.observe_damage(self.pixel_array)
        dmg_factor = 1.0
        death_factor = 200.0
        reward = 0.0
        me_died, my_dmg_taken = (
            self._my_damage_tracker.get_death_and_delta_dmg_for_reward())
        they_died, their_dmg_taken = (
            self._their_damage_tracker.get_death_and_delta_dmg_for_reward())
        if my_dmg_taken > 0:
            reward -= my_dmg_taken * dmg_factor
        if their_dmg_taken > 0:
            reward += their_dmg_taken * dmg_factor
        if me_died:
            reward -= death_factor
        if they_died:
            reward += death_factor
        if (me_died or they_died or my_dmg_taken != 0 or their_dmg_taken != 0):
            self._last_dmg_step = self.step_count
        return reward

    def _get_reward(self):
        rew = self._get_dmg_reward() + self._get_aggressiveness_penalty()
        return rew

    def _navigate_menu(self):
        """Drive from the boot screens all the way into a loaded match."""
        self._navigate_start_menus()
        self._navigate_player_select()
        self._navigate_map_select()

    def _navigate_start_menus(self):
        """Skip intro screens, pick Versus mode, set infinite time, no items."""
        self._wait(count=150, wait_for='HAL Screen')
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=150, wait_for='Splash Screen')
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=30, wait_for='Load Main Menu')
        # Select Versus Mode
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.START_BUTTON)
        # Set time to infinity
        self._wait(count=30, wait_for='Load VS Menu')
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.JOYSTICK_LEFT)
        self._press_button(ControllerState.JOYSTICK_LEFT)
        self._press_button(ControllerState.JOYSTICK_LEFT)
        # Turn off items.
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=30, wait_for='Load Options Menu')
        self._press_button(ControllerState.JOYSTICK_UP)
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=30, wait_for='Load Items Menu')
        self._press_button(ControllerState.JOYSTICK_LEFT)
        self._press_button(ControllerState.JOYSTICK_LEFT)
        self._press_button(ControllerState.JOYSTICK_LEFT)
        self._press_button(ControllerState.B_BUTTON)
        self._wait(count=30, wait_for='Back From Items Menu')
        self._press_button(ControllerState.B_BUTTON)
        self._wait(count=30, wait_for='Back From Options Menu')
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=125, wait_for='Load Character Screen')

    def _navigate_player_select(self):
        """Pick both characters and the CPU opponent's level."""
        print('Agent player (row, col): ', self._my_char_pos)
        print('Opponent player (row, col): ', self._their_char_pos)
        # Enable computer opponent.
        self._press_button(ControllerState.JOYSTICK_UP, times=5)
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=13)
        self._press_button(ControllerState.A_BUTTON)
        # Set computer opponent's level.
        self._press_button(ControllerState.JOYSTICK_DOWN, times=10)
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=2)
        if self._opponent_bot_level > 3:
            self._press_button(ControllerState.A_BUTTON,
                               times=self._opponent_bot_level - 3)
        elif self._opponent_bot_level < 3:
            self._press_button(ControllerState.JOYSTICK_LEFT, times=8)
            self._press_button(ControllerState.A_BUTTON,
                               times=3 - self._opponent_bot_level)
        # Set player 1 to a default position- doesn't matter where, as long as
        # it's not the same as the default CP, or the desired CP.
        default_p1 = (0, 1)
        if default_p1 == self._their_char_pos:
            default_p1 = (1, 0)
        self._select_player(default_p1, self._my_char_color)
        self._wait(count=20, wait_for='P1 Selected')
        # Grab default CP button- should be at Yoshi. Note if the settings
        # above change, this may change as well.
        default_cp = (0, 0)
        self._select_player_from(default_p1, default_cp,
                                 ControllerState.A_BUTTON)
        self._wait(count=20, wait_for='CP Grabbed')
        self._select_player_from(default_cp, self._their_char_pos,
                                 self._their_char_color)
        self._wait(count=20, wait_for='CP Selected')
        # Set player 1 for real.
        self._press_button(ControllerState.B_BUTTON)
        self._wait(count=30, wait_for='P1 Unselected')
        self._select_player_from(self._their_char_pos, self._my_char_pos,
                                 self._my_char_color)
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=75, wait_for='Load Map Select')

    def _select_player(self, pos, color):
        """Move the cursor from the top-left corner to (row, col) and confirm."""
        # Ensure we are in the upper left corner.
        self._press_button(ControllerState.JOYSTICK_UP, times=35)
        self._press_button(ControllerState.JOYSTICK_LEFT, times=45)
        # Navigate to character
        self._press_button(ControllerState.JOYSTICK_DOWN,
                           times=5 + 7 * pos[0])
        self._press_button(ControllerState.JOYSTICK_RIGHT,
                           times=5 + 7 * pos[1])
        self._press_button(color)

    def _select_player_from(self, start_pos, pos, color):
        """Move the cursor from start_pos to pos (7 presses per cell), confirm."""
        # Navigate to character
        if pos[0] > start_pos[0]:
            self._press_button(ControllerState.JOYSTICK_DOWN,
                               times=7 * (pos[0] - start_pos[0]))
        elif pos[0] < start_pos[0]:
            self._press_button(ControllerState.JOYSTICK_UP,
                               times=7 * (start_pos[0] - pos[0]))
        if pos[1] > start_pos[1]:
            self._press_button(ControllerState.JOYSTICK_RIGHT,
                               times=7 * (pos[1] - start_pos[1]))
        elif pos[1] < start_pos[1]:
            self._press_button(ControllerState.JOYSTICK_LEFT,
                               times=7 * (start_pos[1] - pos[1]))
        self._press_button(color)

    def _navigate_map_select(self):
        """Move to the configured map position and start the match."""
        print('Map position: ', self._map_pos)
        # Select map.
        for i in range(self._map_pos[1]):
            self._press_button(ControllerState.JOYSTICK_RIGHT)
            self._wait(count=15, wait_for='Move Map Select Right')
        for i in range(self._map_pos[0]):
            self._press_button(ControllerState.JOYSTICK_DOWN)
            self._wait(count=15, wait_for='Move Map Select Down')
        # Press start.
        self._press_button(ControllerState.START_BUTTON)
        self._wait(count=450, wait_for='Load Level')

    def _navigate_pause_screen(self):
        # TODO: Possibly implement, if we want to allow exiting the map.
        pass

    def _evaluate_end_state(self):
        """Matches are untimed; episodes never end on their own."""
        return False

    def _load_config(self):
        self.config.update(yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "smash_config.yml"))))

    def _validate_config(self):
        """No Smash-specific validation; required keys are checked by the base
        class before this hook runs. (Removed an unused local that re-read
        GFX_PLUGIN to no effect.)"""
        print("validate sub")

    def _set_characters(self, my_character, their_character):
        """Translate character names into (row, col) selection-grid positions."""
        characters = {'luigi'      : (0, 0),
                      'mario'      : (0, 1),
                      'dk'         : (0, 2),
                      'link'       : (0, 3),
                      'samus'      : (0, 4),
                      'falcon'     : (0, 5),
                      'ness'       : (1, 0),
                      'yoshi'      : (1, 1),
                      'kirby'      : (1, 2),
                      'fox'        : (1, 3),
                      'pikachu'    : (1, 4),
                      'jigglypuff' : (1, 5)}
        self._my_char_pos = characters[my_character]
        self._their_char_pos = characters[their_character]

    def _set_characters_color(self, my_character_color, their_character_color):
        """Translate color names into the C-button press that selects them."""
        buttons = {'CUP'    : ControllerState.CU_BUTTON,
                   'CDOWN'  : ControllerState.CD_BUTTON,
                   'CLEFT'  : ControllerState.CL_BUTTON,
                   'CRIGHT' : ControllerState.CR_BUTTON}
        self._my_char_color = buttons[my_character_color]
        self._their_char_color = buttons[their_character_color]

    def _set_map(self, map):
        """Translate a map name into its (row, col) selection-grid position."""
        maps = {'PeachsCastle'    : (0, 0),
                'CongoJungle'     : (0, 1),
                'HyruleCastle'    : (0, 2),
                'PlanetZebes'     : (0, 3),
                'MushroomKingdom' : (0, 4),
                'YoshisIsland'    : (1, 0),
                'DreamLand'       : (1, 1),
                'SectorZ'         : (1, 2),
                'SaffronCity'     : (1, 3),
                'Random'          : (1, 4)}
        self._map_pos = maps[map]
| 13,419
| 44.185185
| 122
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/Smash/discrete_envs.py
|
import abc
from gym_mupen64plus.envs.Smash.smash_env import SmashEnv
from gym import spaces
def _create_action_map():
joystick_magnitudes = [
("HARDNEG", [-120]),
("MIDNEG", [-60]),
("NEUTRAL", [0]),
("MIDPOS", [60]),
("HARDPOS", [120]),
]
# Button orders are A, B, RB, LB, Z, CR
allowed_buttons = [
("NOBUTTONS", [0, 0, 0, 0, 0, 0]),
("ABUTTON", [1, 0, 0, 0, 0, 0]),
("BBUTTON", [0, 1, 0, 0, 0, 0]),
("ZBUTTON", [0, 0, 0, 0, 1, 0]),
("CBUTTON", [0, 0, 0, 0, 0, 1]),
]
actions = []
for xmag in joystick_magnitudes:
for ymag in joystick_magnitudes:
for button in allowed_buttons:
name = xmag[0] + "X_" + ymag[0] + "Y_" + button[0]
# Joystick X and Y preceed the buttons above.
state = xmag[1] + ymag[1] + button[1]
actions.append((name, state))
# These actions shouldn't be combined with joystick directions.
actions.append(("GRAB", [0, 0, 1, 0, 0, 0, 1, 0]))
actions.append(("TAUNT", [0, 0, 0, 0, 0, 1, 0, 0]))
return actions
class DiscreteActions:
    """Static lookup from a discrete action index to a raw controller state."""

    ACTION_MAP = _create_action_map()

    @staticmethod
    def get_action_space():
        """Return a gym Discrete space sized to the action table."""
        num_actions = len(DiscreteActions.ACTION_MAP)
        return spaces.Discrete(num_actions)

    @staticmethod
    def get_controls_from_action(action):
        """Return the controller state for the given action index."""
        _, controls = DiscreteActions.ACTION_MAP[action]
        return controls
class SmashDiscreteEnv(SmashEnv):
    """Smash Bros. environment exposing a discrete action space."""

    ENABLE_CHECKPOINTS = True

    def __init__(self, my_character='pikachu', their_character='dk',
                 my_character_color='CUP', their_character_color='CLEFT',
                 opponent_bot_level=10, map='DreamLand'):
        super(SmashDiscreteEnv, self).__init__(
            my_character=my_character,
            their_character=their_character,
            my_character_color=my_character_color,
            their_character_color=their_character_color,
            opponent_bot_level=opponent_bot_level,
            map=map)
        # Must run after the parent __init__ so it effectively overrides
        # the continuous action space set up there.
        self.action_space = DiscreteActions.get_action_space()

    def _step(self, action):
        """Translate the action index into controls and delegate."""
        return super(SmashDiscreteEnv, self)._step(
            DiscreteActions.get_controls_from_action(action))
| 2,402
| 34.865672
| 99
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/Smash/damage_parser.py
|
import numpy as np
import cv2
import os.path
# Pixel height of the HUD strip that contains the damage readout.
_HEIGHT = 38

# Status codes returned by DamageParser.GetDamage.
SUCCESS = 0
PERCENT_UNDETECTED = 1  # Couldn't detect the % character.
DIGIT_AFTER_PERCENT_UNDETECTED = 2  # Couldn't detect any digits after the % character.
ZERO_NOT_RIGHT_COLOR = 3  # We detected 0, but it was unexpectedly dark, so
                          # we likely just missed some digits to its left.
# Read ideal outlines of characters from saved files. These were generated
# using the same OpenCV method as below, but were picked as especially clean
# examples, ideal for comparisons.
def _initialize_character_pixels_from_files():
    """Load the reference black/white outlines for '%' and digits 0-9.

    Returns ``(percent_pixels, digit_to_pixels)`` where each entry is a
    boolean array of height ``_HEIGHT``: True for black pixels, False for
    white ones.
    """
    outline_dir = os.path.dirname(__file__)

    def _load_outline(relative_name):
        # Keep one color channel and threshold it: black -> True.
        img = np.asarray(cv2.imread(os.path.join(outline_dir, relative_name)))
        mask = np.squeeze(img[:, :, 0:1]) == 0
        assert len(mask) == _HEIGHT
        return mask

    percent_pixels = _load_outline('outlines/percent.png')
    digit_to_pixels = [_load_outline('outlines/%d.png' % digit)
                       for digit in range(10)]
    return (percent_pixels, digit_to_pixels)
PERCENT_PIXELS, DIGIT_TO_PIXELS = _initialize_character_pixels_from_files()
class DamageParser(object):
    """Reads a player's damage percentage off a Smash Bros. screenshot.

    Works by template-matching the saved '%' and digit outlines against a
    binarized crop of the HUD, scanning leftwards from the '%' sign.
    """

    def __init__(self):
        # Records the color of the inside of 0 when damage is 0%.
        self._zero_pixel = None

    # Returns the pixel index and score of the best match of digit_pixels in
    # damage_pixels. We start looking with the leftmost pixels of digit_pixels
    # at start_pixel, and stop when those leftmost pixels reach stop_pixel.
    # If it doesn't find a match, returns a negative pixel index.
    def _find_match(self, digit_pixels, damage_pixels, start_pixel,
                    stop_pixel):
        mask_len = len(digit_pixels[0])
        # Scan direction depends on whether stop is right or left of start.
        inc_or_dec = 1 if stop_pixel > start_pixel else -1
        best_score = -1e9
        best_idx = -1
        for i in range(start_pixel, stop_pixel, inc_or_dec):
            if i < 0 or i + mask_len > len(damage_pixels[0]):
                continue
            cut_pixels = damage_pixels[:, i:i + mask_len]
            intersection = np.sum(np.logical_and(digit_pixels, cut_pixels))
            union = np.sum(np.logical_or(digit_pixels, cut_pixels))
            score = intersection / float(union) # Jaccard overlap of the black areas.
            # 0.35 is the minimum overlap accepted as a match.
            if (score > 0.35 and score > best_score):
                best_score = score
                best_idx = i
        return (best_idx, best_score)

    # Slice the pixels to contain only the section which contains the
    # damage.
    def _get_damage_screen_section(self, player_num, pixels):
        # Hard-coded HUD crop; player 1's readout is left of player 2's.
        x_pixel_range = (45, 178) if player_num == 1 else (185, 318)
        y_pixel_range = (400, 400 + _HEIGHT)
        return pixels[y_pixel_range[0]:y_pixel_range[1],
                      x_pixel_range[0]:x_pixel_range[1], :]

    # Uses OpenCV to get the outline of the damage. Returned as a boolean array:
    # True if the image is black, False if it is white.
    def _get_damage_outline_from_pixels(self, player_num, pixels):
        assert player_num == 1 or player_num == 2
        pixels = self._get_damage_screen_section(player_num, pixels)
        # NOTE(review): x_len is assigned but never used in this method.
        x_len = len(pixels[0])
        assert len(pixels) == _HEIGHT
        # Use OpenCV to find the outlines of the numbers in black and white.
        bw = cv2.cvtColor(pixels, cv2.COLOR_BGR2GRAY)
        thresh = cv2.adaptiveThreshold(bw, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
                                       cv2.THRESH_BINARY, 3, 2)
        dilated = cv2.dilate(thresh, np.ones((2,2), np.uint8), iterations = 1)
        return dilated == 0 # True where the pixels are black, False where white.

    # The first time we detect a zero, record its inner pixels. Later, we
    # can determine whether a zero is a true zero or not based on whether
    # it is the correct color.
    def _set_zero_pixel(self, player_num, screen, zero_x_idx):
        pixels = self._get_damage_screen_section(player_num, screen)
        # (32, 13) is a point inside the '0' glyph, offset by where it matched.
        self._zero_pixel = pixels[32][13 + zero_x_idx]

    # If the zero pixel doesn't match the first pixel we recorded, it is
    # not a true zero- we missed some digits.
    def _is_zero_reasonable(self, player_num, screen, zero_x_idx):
        pixels = self._get_damage_screen_section(player_num, screen)
        zero_pixel = pixels[32][13 + zero_x_idx]
        if np.any(zero_pixel != self._zero_pixel):
            return False
        return True

    # Given the player number (1 and 2) and a screenshot of the game,
    # return the damage of the player. Returns a pair. The first value returned is
    # the damage if it is detected, or else -1. The second value returned is
    # an error code, one of the three above.
    def GetDamage(self, player_num, screen):
        pixels = self._get_damage_outline_from_pixels(player_num, screen)
        percent_len = len(PERCENT_PIXELS[0])
        x_len = len(pixels[1])
        # First find the %, and work left from there.
        # X range below hand tuned to properly find the % in the smallest case (1%).
        percent_match = self._find_match(
            PERCENT_PIXELS, pixels, x_len // 2 - 9, x_len - percent_len)
        if percent_match[0] == -1:
            return (-1, PERCENT_UNDETECTED)
        start_match_px = percent_match[0]
        multiplier = 1
        digits_found = 0
        first_digit_x = -1
        # Search for up to 3 digits. Look to the left of the most recently found
        # character.
        for i in range(3): # Need to find potentially 3 digits.
            best_digit_match = (-1, -1e9)
            best_digit = -1
            start_idx = 1 if i == 2 else 0 # Don't search for 0 in the final digit.
            for digit in range(start_idx, 10):
                digit_pixels = DIGIT_TO_PIXELS[digit]
                digit_len = len(digit_pixels[0])
                # These have been tuned a bit to give the best possible values.
                # We may see tiny decreases from making them too large as we have
                # false positive digits identified, but things get really bad
                # if they get much smaller.
                start_search = start_match_px - digit_len
                end_search = start_match_px - 4 - digit_len
                digit_match = self._find_match(
                    digit_pixels, pixels, start_search, end_search)
                if digit_match[1] > best_digit_match[1]:
                    best_digit_match = digit_match
                    best_digit = digit
            if i == 0:
                first_digit_x = best_digit_match[0]
            if best_digit_match[0] >= 0:
                digits_found += best_digit * multiplier
                start_match_px = best_digit_match[0]
            elif i == 0:
                return (-1, DIGIT_AFTER_PERCENT_UNDETECTED)
            else:
                break
            multiplier *= 10
        if self._zero_pixel is None and digits_found == 0:
            self._set_zero_pixel(player_num, screen, first_digit_x)
        elif (digits_found == 0 and
              not self._is_zero_reasonable(player_num, screen,
                                           first_digit_x)):
            return (-1, ZERO_NOT_RIGHT_COLOR)
        return (digits_found, SUCCESS)
def main(): # Can be run as a test on the screenshots_below
    """Score the damage parser against saved screenshots.

    Looks for files named ``screenshots/p<player>_health_<damage>.png``
    (damage 0-999, players 1-2), runs the parser on each, and prints
    accuracy statistics.
    """
    # Screenshots with damage identified correctly.
    correct = 0
    # Screenshots which returned no value.
    no_val_returned = 0
    # Screenshots which were incorrect, but would be easy to identify were
    # incorrect in the context of the game.
    incorrect_easy_to_identify = 0
    # Screenshots which were incorrect, but might be harder to identify in
    # gameplay
    incorrect_hard_to_identify = 0
    total = 0
    for p in [1, 2]:
        for d in range(0, 1000):
            screenshot_fname = "screenshots/p%d_health_%03d.png" % (p, d)
            img = cv2.imread(screenshot_fname)
            if img is not None:
                pixels = np.asarray(img)
                # NOTE(review): a fresh DamageParser per screenshot means the
                # zero-pixel sanity check never carries state between frames;
                # the screenshots are independent here, so that appears
                # intended — confirm.
                damage, error = DamageParser().GetDamage(p, pixels)
                total += 1
                if damage == -1:
                    no_val_returned += 1
                elif damage == d:
                    correct += 1
                else:
                    # If damage increases too much, or decreases to a nonzero
                    # value, this is easy to identify in gameplay.
                    if damage > 40 + d or (damage < d and damage != 0):
                        incorrect_easy_to_identify += 1
                    else:
                        incorrect_hard_to_identify += 1
    # Guard against ZeroDivisionError when no screenshots are on disk.
    if total == 0:
        print("No screenshots found under screenshots/; nothing to score.")
        return
    t = float(total) * 0.01
    print("Correct values: %d, %0.2f%%" % (correct, correct / t))
    print("No value returned: %d, %0.2f%%" % (
        no_val_returned, no_val_returned / t))
    print("Incorrect, but easy to account for in game: %d, %0.2f%%" % (
        incorrect_easy_to_identify, incorrect_easy_to_identify / t))
    print("Incorrect, harder to identify in game %d, %0.2f%%:" % (
        incorrect_hard_to_identify, incorrect_hard_to_identify / t))
if __name__ == '__main__':
    main()
| 9,396
| 45.519802
| 87
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/Smash/__init__.py
|
from gym.envs.registration import register
from gym_mupen64plus.envs.Smash.smash_env import SmashEnv
from gym_mupen64plus.envs.Smash.discrete_envs import SmashDiscreteEnv
# TODO: Add support for other oppenents, colors, maps.
characters = ['luigi', 'mario', 'dk', 'link', 'samus', 'falcon', 'ness',
              'yoshi', 'kirby', 'fox', 'pikachu', 'jigglypuff']

# Tags shared by every registered Smash environment.
_COMMON_TAGS = {
    'mupen': True,
    'wrapper_config.TimeLimit.max_episode_steps': 2147483647,
}

# Register a continuous-action and a discrete-action env per character.
for character in characters:
    for id_template, entry_point in (
            ('Smash-%s-v0', 'gym_mupen64plus.envs.Smash:SmashEnv'),
            ('Smash-Discrete-%s-v0',
             'gym_mupen64plus.envs.Smash:SmashDiscreteEnv')):
        register(
            id=id_template % character,
            entry_point=entry_point,
            kwargs={'my_character': character},
            tags=dict(_COMMON_TAGS),
            nondeterministic=True,
        )
| 1,105
| 31.529412
| 72
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/Smash/damage_tracker.py
|
from gym_mupen64plus.envs.Smash \
import damage_parser
_NUM_DMGS_TO_DETECT = 3  # How many times we need to detect a damage to update.
# When the player dies, his damage disappears for a few frames before
# resetting to zero. We detect this to detect deaths, which is necessary
# because you can die at 0% damage, and we still want to penalize this.
# Measured in frames (compared against missing-% count * frame_skip below).
_MISSING_PERCENTS_IN_ROW_THRESHOLD = 12
# This class tracks damage observations, making sure the observations
# make sense, and reporting the likely confident current damage. Note that
# reported damage may be slightly delayed to ensure we are confident in
# the reported damage value.
# Note: We recommend using a frame_skip no higher than 3, or it may be
# unreliable at detecting deaths at 0 damage.
class DamageTracker(object):
    """Tracks damage observations over time and reports confident values.

    Requires several consistent parser readings before accepting a new
    damage value, and uses runs of missing '%' signs to detect deaths
    (reported damage may therefore lag slightly behind the screen).
    """

    def __init__(self, frame_skip, playernum=1):
        self._damage_parser = damage_parser.DamageParser()
        # How many frames are skipped at every update.
        self._frame_skip = frame_skip
        self._playernum = playernum
        self._curr_dmg = 0
        # Value of damage last time reward was updated.
        self._dmg_at_last_reward = 0
        # Cache of recently detected damages.
        self._recent_damages = [0] * _NUM_DMGS_TO_DETECT
        # Counter for _MISSING_PERCENTS_IN_ROW_THRESHOLD.
        self._missing_percents_in_row = 0
        # Whether to report death to reward function on next update.
        self._has_processed_death = False
        # Whether we have met the _MISSING_PERCENTS_IN_ROW_THRESHOLD
        # and thus can process a death and resetting the damage to 0%.
        self._met_percent_threshold = False

    # Record a damage observation from the screen.
    def observe_damage(self, screen):
        """Parse one screenshot and fold the reading into tracker state."""
        dmg_observation, error = self._damage_parser.GetDamage(self._playernum, screen)
        if error == damage_parser.SUCCESS:
            assert dmg_observation >= 0 and dmg_observation <= 999
            # Reset this counter, since we have detected a % sign.
            self._missing_percents_in_row = 0
            if dmg_observation != 0:
                if self._recent_damages[-1] != 0:
                    # Detected two nonzeroes in a row. There likely
                    # wasn't an actual death, so reset
                    # _met_percent_threshold.
                    self._met_percent_threshold = False
            self._recent_damages.pop(0)
            self._recent_damages.append(dmg_observation)
            # See how if everything in the cache matches the most recent
            # observation
            num_match_measurements = sum(
                d == dmg_observation for d in self._recent_damages)
            # We can update the damage if the following conditions are met:
            # 1) We have _NUM_DMGS_TO_DETECT consistent observations
            # 2)
            #    a) The damage is nonzero and the damage increased
            #       OR
            #    b) Damage reset to zero and we have met the number of
            #       nondetected % threshold.
            if (num_match_measurements == _NUM_DMGS_TO_DETECT and
                ((dmg_observation != 0 and dmg_observation >= self._curr_dmg) or
                 (dmg_observation == 0 and self._met_percent_threshold))):
                old_dmg = self._curr_dmg
                self._curr_dmg = dmg_observation
                if dmg_observation == 0:
                    # If the damage is zero, we have processed a death.
                    self._met_percent_threshold = False
                    self._has_processed_death = True
                    # Any damage taken since the last reward update needs
                    # to be accounted for, potentially by setting it below
                    # 0.
                    self._dmg_at_last_reward -= old_dmg
        elif error == damage_parser.PERCENT_UNDETECTED:
            # We couldn't detect a % character. If this happens a lot,
            # it means the character likely died.
            self._missing_percents_in_row += 1
            if (self._missing_percents_in_row * self._frame_skip >=
                _MISSING_PERCENTS_IN_ROW_THRESHOLD):
                self._met_percent_threshold = True
        else:
            # The other two errors are that we couldn't detect any digit
            # to the left of the % sign, or that a zero with the wrong
            # color was returned. Both of these involve a detected % sign.
            self._missing_percents_in_row = 0

    # Return a pair. The first value is a bool, indicating whether a death
    # was processed since the last time this function was called. The
    # second is the total damage taken since the last time this function
    # was called.
    def get_death_and_delta_dmg_for_reward(self):
        """Consume and return (death_processed, damage_delta) since last call."""
        damage_taken = self._curr_dmg - self._dmg_at_last_reward
        has_died = self._has_processed_death
        self._has_processed_death = False
        self._dmg_at_last_reward = self._curr_dmg
        return (has_died, damage_taken)

    # Return the current damage.
    def get_curr_damage(self):
        return self._curr_dmg
| 5,085
| 48.862745
| 87
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/MarioKart64/discrete_envs.py
|
import abc
from gym_mupen64plus.envs.MarioKart64.mario_kart_env import MarioKartEnv
from gym import spaces
class DiscreteActions:
    """Fixed table mapping a discrete action index to controller values.

    Each entry is ``(name, [joystick_x, joystick_y, A, B, RB])``.
    """

    ACTION_MAP = [
        ("NO_OP", [0, 0, 0, 0, 0]),
        ("STRAIGHT", [0, 0, 1, 0, 0]),
        ("BRAKE", [0, 0, 0, 1, 0]),
        ("BACK_UP", [0, -80, 0, 1, 0]),
        ("SOFT_LEFT", [-20, 0, 1, 0, 0]),
        ("LEFT", [-40, 0, 1, 0, 0]),
        ("HARD_LEFT", [-60, 0, 1, 0, 0]),
        ("EXTREME_LEFT", [-80, 0, 1, 0, 0]),
        ("SOFT_RIGHT", [20, 0, 1, 0, 0]),
        ("RIGHT", [40, 0, 1, 0, 0]),
        ("HARD_RIGHT", [60, 0, 1, 0, 0]),
        ("EXTREME_RIGHT", [80, 0, 1, 0, 0]),
    ]

    @staticmethod
    def get_action_space():
        """Return a gym Discrete space covering every table entry."""
        num_actions = len(DiscreteActions.ACTION_MAP)
        return spaces.Discrete(num_actions)

    @staticmethod
    def get_controls_from_action(action):
        """Look up the raw controller values for the given action index."""
        _, controls = DiscreteActions.ACTION_MAP[action]
        return controls
class MarioKartDiscreteEnv(MarioKartEnv):
    """Mario Kart environment exposing a discrete action space."""

    ENABLE_CHECKPOINTS = True

    def __init__(self, character='mario', course='LuigiRaceway'):
        super(MarioKartDiscreteEnv, self).__init__(character=character,
                                                   course=course)
        # Must run after the parent __init__ so it effectively overrides
        # the continuous action space set up there.
        self.action_space = DiscreteActions.get_action_space()

    def _step(self, action):
        """Translate the action index into controls and delegate."""
        return super(MarioKartDiscreteEnv, self)._step(
            DiscreteActions.get_controls_from_action(action))
| 1,602
| 34.622222
| 99
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/MarioKart64/mario_kart_env.py
|
import abc
import inspect
import itertools
import os
import yaml
from termcolor import cprint
from gym import spaces
from gym_mupen64plus.envs.mupen64plus_env \
import Mupen64PlusEnv, ControllerState, IMAGE_HELPER
import numpy as np
###############################################
class MarioKartEnv(Mupen64PlusEnv):
    """Gym environment for Mario Kart 64 time trials on Mupen64PlusEnv.

    Handles menu navigation, lap/checkpoint rewards, and end-of-race
    detection by sampling known HUD pixel colors from the screen.
    """
    __metaclass__ = abc.ABCMeta

    # Indicates the color value of the pixel at point (203, 51)
    # This is where the lap number is present in the default HUD
    END_RACE_PIXEL_COLORS = {"mupen64plus-video-rice.so"       : ( 66,  49,  66),
                             "mupen64plus-video-glide64mk2.so" : (214, 148, 214),
                             "mupen64plus-video-glide64.so"    : (157, 112, 158)}

    # Map HUD progress-bar pixel color -> lap number.
    HUD_PROGRESS_COLOR_VALUES = {(000, 000, 255): 1, # Blue: Lap 1
                                 (255, 255, 000): 2, # Yellow: Lap 2
                                 (255, 000, 000): 3} # Red: Lap 3

    # Reward shaping constants.
    DEFAULT_STEP_REWARD = -0.1
    LAP_REWARD = 100
    CHECKPOINT_REWARD = 0.5
    BACKWARDS_PUNISHMENT = -1
    END_REWARD = 1000
    END_EPISODE_THRESHOLD = 0

    # Character-select menu position (row, column); set by _set_character.
    PLAYER_ROW = 0
    PLAYER_COL = 0

    # Course-select menu position (series, choice); set by _set_course.
    MAP_SERIES = 0
    MAP_CHOICE = 0

    ENABLE_CHECKPOINTS = False

    def __init__(self, character='mario', course='LuigiRaceway'):
        """Resolve character/course menu positions and build the action space."""
        self._set_character(character)
        self._set_course(course)
        super(MarioKartEnv, self).__init__()
        self.end_race_pixel_color = self.END_RACE_PIXEL_COLORS[self.config["GFX_PLUGIN"]]
        self.action_space = spaces.MultiDiscrete([[-80, 80], # Joystick X-axis
                                                  [-80, 80], # Joystick Y-axis
                                                  [  0,  1], # A Button
                                                  [  0,  1], # B Button
                                                  [  0,  1]]) # RB Button

    def _load_config(self):
        """Merge the Mario Kart-specific YAML config into self.config."""
        self.config.update(yaml.safe_load(open(os.path.join(os.path.dirname(inspect.stack()[0][1]), "mario_kart_config.yml"))))

    def _validate_config(self):
        """Fail fast if the configured video plugin is unsupported."""
        print("validate sub")
        gfx_plugin = self.config["GFX_PLUGIN"]
        if gfx_plugin not in self.END_RACE_PIXEL_COLORS:
            raise AssertionError("Video Plugin '" + gfx_plugin + "' not currently supported by MarioKart environment")

    def _step(self, action):
        """Pad the 5-value action with unused controller fields and delegate."""
        # Interpret the action choice and get the actual controller state for this step
        controls = action + [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
        return super(MarioKartEnv, self)._step(controls)

    def _reset_after_race(self):
        """Navigate the post-race menus back into a freshly loaded race."""
        self._wait(count=275, wait_for='times screen')
        self._navigate_post_race_menu()
        self._wait(count=40, wait_for='map select screen')
        self._navigate_map_select()
        self._wait(count=50, wait_for='race to load')

    def _reset_during_race(self):
        """Pause the in-progress race and restart it via the pause menu."""
        # Can't pause the race until the light turns green
        if (self.step_count * self.controller_server.frame_skip) < 120:
            # NOTE(review): the guard checks < 120 but waits up to step 100;
            # between 100 and 120 this yields a negative count — confirm
            # _wait tolerates that.
            steps_to_wait = 100 - (self.step_count * self.controller_server.frame_skip)
            self._wait(count=steps_to_wait, wait_for='green light so we can pause')
        self._press_button(ControllerState.START_BUTTON)
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=76, wait_for='race to load')

    def _reset(self):
        """Reset lap/checkpoint tracking and restart or reload the race."""
        self.lap = 1
        self.step_count_at_lap = 0
        self.last_known_lap = -1
        self.CHECKPOINT_LOCATIONS = list(self._generate_checkpoints(64, 36, 584, 444))
        if self.ENABLE_CHECKPOINTS:
            # One tracker row per lap (3 laps).
            self._checkpoint_tracker = [[False for i in range(len(self.CHECKPOINT_LOCATIONS))] for j in range(3)]
            self.last_known_ckpt = -1
        # Nothing to do on the first call to reset()
        if self.reset_count > 0:
            # Make sure we don't skip frames while navigating the menus
            with self.controller_server.frame_skip_disabled():
                if self.episode_over:
                    self._reset_after_race()
                    self.episode_over = False
                else:
                    self._reset_during_race()
        return super(MarioKartEnv, self)._reset()

    def _get_reward(self):
        """Compute the step reward from lap/checkpoint progress and race end."""
        #cprint('Get Reward called!','yellow')
        reward_to_return = 0
        cur_lap = self._get_lap()
        if self.ENABLE_CHECKPOINTS:
            cur_ckpt = self._get_current_checkpoint()
        if self.episode_over:
            # Scale out the end reward based on the total steps to get here; the fewer steps, the higher the reward
            reward_to_return = 5 * (1250 - self.step_count) + self.END_REWARD #self.END_REWARD * (5000 / self.step_count) - 3000
        else:
            if cur_lap > self.lap:
                self.lap = cur_lap
                cprint('Lap %s!' % self.lap, 'green')
                # Scale out the lap reward based on the steps to get here; the fewer steps, the higher the reward
                steps_this_lap = self.step_count - self.step_count_at_lap
                reward_to_return = self.LAP_REWARD # TODO: Figure out a good scale here... number of steps required per lap will vary depending on the course; don't want negative reward for completing a lap
                self.step_count_at_lap = self.step_count
            elif (self.ENABLE_CHECKPOINTS and cur_ckpt > -1 and
                  not self._checkpoint_tracker[self.last_known_lap - 1][cur_ckpt]):
                # TODO: Backwards across a lap boundary incorrectly grants a checkpoint reward
                # Need to investigate further. Might need to restore check for sequential checkpoints
                #cprint(str(self.step_count) + ': CHECKPOINT achieved!', 'green')
                self._checkpoint_tracker[self.lap - 1][cur_ckpt] = True
                reward_to_return = self.CHECKPOINT_REWARD # TODO: This should reward per progress made. It seems as though currently, by going too fast, you could end up skipping over some progress rewards, which would encourage driving around a bit to achieve those rewards. Should reward whatever progress was achieved during the step (perhaps multiple 'checkpoints')
            elif (self.ENABLE_CHECKPOINTS and ( cur_lap < self.last_known_lap or
                                                cur_ckpt < self.last_known_ckpt)):
                #cprint(str(self.step_count) + ': BACKWARDS!!', 'red')
                self._checkpoint_tracker[self.lap - 1][self.last_known_ckpt] = False
                reward_to_return = self.BACKWARDS_PUNISHMENT
            else:
                reward_to_return = self.DEFAULT_STEP_REWARD
        if self.ENABLE_CHECKPOINTS:
            self.last_known_ckpt = cur_ckpt
        self.last_known_lap = cur_lap
        return reward_to_return

    def _get_lap(self):
        """Infer the current lap from the first HUD checkpoint's color."""
        # The first checkpoint is the upper left corner. It's value should tell us the lap.
        ckpt_val = self._evaluate_checkpoint(self.CHECKPOINT_LOCATIONS[0])
        # If it is unknown, assume same lap (character icon is likely covering the corner)
        return ckpt_val if ckpt_val != -1 else self.lap

    def _generate_checkpoints(self, min_x, min_y, max_x, max_y):
        """Yield pixel-sample groups tracing the HUD progress-bar rectangle."""
        # TODO: I'm sure this can/should be more pythonic somehow
        # Sample 4 pixels for each checkpoint to reduce the
        # likelihood of a pixel matching the color by chance
        # Top
        for i in range((max_x - min_x) // 2):
            x_val = min_x + i*2
            y_val = min_y
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Right-side
        for i in range((max_y - min_y) // 2):
            x_val = max_x
            y_val = min_y + i*2
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Bottom
        for i in range((max_x - min_x) // 2):
            if i == 0: # Skip the bottom right corner (for some reason MK doesn't draw it)
                continue
            x_val = max_x - i*2
            y_val = max_y
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]
        # Left-side
        for i in range((max_y - min_y) // 2):
            x_val = min_x
            y_val = max_y - i*2
            yield [(x_val, y_val), (x_val + 1, y_val), (x_val, y_val + 1), (x_val + 1, y_val + 1)]

    def _get_current_checkpoint(self):
        """Return the index of the latest checkpoint achieved, or -1."""
        checkpoint_values = [self._evaluate_checkpoint(points)
                             for points in self.CHECKPOINT_LOCATIONS]
        # Check if we have achieved any checkpoints
        if any(val > -1 for val in checkpoint_values):
            # argmin tells us the first index with the lowest value
            index_of_lowest_val = np.argmin(checkpoint_values)
            if index_of_lowest_val != 0:
                # If the argmin is anything but 0, we have achieved
                # all the checkpoints up through the prior index
                checkpoint = index_of_lowest_val - 1
            else:
                # If the argmin is at index 0, they are all the same value,
                # which means we've hit all the checkpoints for this lap
                checkpoint = len(checkpoint_values) - 1
            #if self.last_known_ckpt != checkpoint:
            #    cprint('--------------------------------------------','red')
            #    cprint('Checkpoints: %s' % checkpoint_values, 'yellow')
            #    cprint('Checkpoint: %s' % checkpoint, 'cyan')
            return checkpoint
        else:
            # We haven't hit any checkpoint yet :(
            return -1

    # https://stackoverflow.com/a/3844948
    # Efficiently determines if all items in a list are equal by
    # counting the occurrences of the first item in the list and
    # checking if the count matches the length of the list:
    def all_equal(self, some_list):
        return some_list.count(some_list[0]) == len(some_list)

    def _evaluate_checkpoint(self, checkpoint_points):
        """Return the lap value shown at these pixels, or -1 if not drawn."""
        checkpoint_pixels = [IMAGE_HELPER.GetPixelColor(self.pixel_array, point[0], point[1])
                             for point in checkpoint_points]
        #print(checkpoint_pixels)
        # If the first pixel is not a valid color, no need to check the other three
        if not checkpoint_pixels[0] in self.HUD_PROGRESS_COLOR_VALUES:
            return -1
        # If the first pixel is good, make sure the other three match
        elif not self.all_equal(checkpoint_pixels):
            return -1
        # If all are good, return the corresponding value
        else:
            return self.HUD_PROGRESS_COLOR_VALUES[checkpoint_pixels[0]]

    def _evaluate_end_state(self):
        """True when the lap-counter pixel shows the end-of-race color."""
        #cprint('Evaluate End State called!','yellow')
        return self.end_race_pixel_color == IMAGE_HELPER.GetPixelColor(self.pixel_array, 203, 51)

    def _navigate_menu(self):
        """Drive the boot/title/selection menus into a loaded race."""
        self._wait(count=10, wait_for='Nintendo screen')
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=68, wait_for='Mario Kart splash screen')
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=68, wait_for='Game Select screen')
        self._navigate_game_select()
        self._wait(count=14, wait_for='Player Select screen')
        self._navigate_player_select()
        self._wait(count=31, wait_for='Map Select screen')
        self._navigate_map_select()
        self._wait(count=46, wait_for='race to load')
        # Change HUD View twice to get to the one we want:
        self._cycle_hud_view(times=2)
        # Now that we have the HUD as needed, reset the race so we have a consistent starting frame:
        self._reset_during_race()

    def _navigate_game_select(self):
        """Select 1-player TimeTrials and begin."""
        # Select number of players (1 player highlighted by default)
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=3, wait_for='animation')
        # Select GrandPrix or TimeTrials (GrandPrix highlighted by default - down to switch to TimeTrials)
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._wait(count=3, wait_for='animation')
        # Select TimeTrials
        self._press_button(ControllerState.A_BUTTON)
        # Select Begin
        self._press_button(ControllerState.A_BUTTON)
        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _navigate_player_select(self):
        """Move to and confirm the configured character."""
        print('Player row: ' + str(self.PLAYER_ROW))
        print('Player col: ' + str(self.PLAYER_COL))
        # Character selection is remembered each time, so ensure upper-left-most is selected
        self._press_button(ControllerState.JOYSTICK_UP)
        self._press_button(ControllerState.JOYSTICK_LEFT, times=3)
        # Navigate to character
        self._press_button(ControllerState.JOYSTICK_DOWN, times=self.PLAYER_ROW)
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=self.PLAYER_COL)
        # Select character
        self._press_button(ControllerState.A_BUTTON)
        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _navigate_map_select(self):
        """Move to and confirm the configured course."""
        print('Map series: ' + str(self.MAP_SERIES))
        print('Map choice: ' + str(self.MAP_CHOICE))
        # Map series selection is remembered each time, so ensure left-most is selected
        self._press_button(ControllerState.JOYSTICK_LEFT, times=3)
        # Select map series
        self._press_button(ControllerState.JOYSTICK_RIGHT, times=self.MAP_SERIES)
        self._press_button(ControllerState.A_BUTTON)
        # Map choice selection is remembered each time, so ensure top-most is selected
        self._press_button(ControllerState.JOYSTICK_UP, times=3)
        # Select map choice
        self._press_button(ControllerState.JOYSTICK_DOWN, times=self.MAP_CHOICE)
        self._press_button(ControllerState.A_BUTTON)
        # Press OK
        self._press_button(ControllerState.A_BUTTON)

    def _cycle_hud_view(self, times=1):
        """Press the C-right button `times` times to cycle the HUD layout."""
        for _ in itertools.repeat(None, times):
            self._press_button(ControllerState.CR_BUTTON)

    def _navigate_post_race_menu(self):
        """From the times screen, choose 'Course Change' in the post-race menu."""
        # Times screen
        self._press_button(ControllerState.A_BUTTON)
        self._wait(count=13, wait_for='Post race menu')
        # Post race menu (previous choice selected by default)
        # - Retry
        # - Course Change
        # - Driver Change
        # - Quit
        # - Replay
        # - Save Ghost
        # Because the previous choice is selected by default, we navigate to the top entry so our
        # navigation is consistent. The menu doesn't cycle top to bottom or bottom to top, so we can
        # just make sure we're at the top by hitting up a few times
        self._press_button(ControllerState.JOYSTICK_UP, times=5)
        # Now we are sure to have the top entry selected
        # Go down to 'course change'
        self._press_button(ControllerState.JOYSTICK_DOWN)
        self._press_button(ControllerState.A_BUTTON)

    def _set_character(self, character):
        """Resolve a character name to its (row, column) menu position."""
        characters = {'mario'  : (0, 0),
                      'luigi'  : (0, 1),
                      'peach'  : (0, 2),
                      'toad'   : (0, 3),
                      'yoshi'  : (1, 0),
                      'd.k.'   : (1, 1),
                      'wario'  : (1, 2),
                      'bowser' : (1, 3)}
        self.PLAYER_ROW, self.PLAYER_COL = characters[character]

    def _set_course(self, course):
        """Resolve a course name to its (series, choice) menu position."""
        courses = {'LuigiRaceway'     : (0, 0),
                   'MooMooFarm'       : (0, 1),
                   'KoopaTroopaBeach' : (0, 2),
                   'KalimariDesert'   : (0, 3),
                   'ToadsTurnpike'    : (1, 0),
                   'FrappeSnowland'   : (1, 1),
                   'ChocoMountain'    : (1, 2),
                   'MarioRaceway'     : (1, 3),
                   'WarioStadium'     : (2, 0),
                   'SherbetLand'      : (2, 1),
                   'RoyalRaceway'     : (2, 2),
                   'BowsersCastle'    : (2, 3),
                   'DKsJungleParkway' : (3, 0),
                   'YoshiValley'      : (3, 1),
                   'BansheeBoardwalk' : (3, 2),
                   'RainbowRoad'      : (3, 3)}
        self.MAP_SERIES, self.MAP_CHOICE = courses[course]
| 16,451
| 41.076726
| 369
|
py
|
gym-mupen64plus
|
gym-mupen64plus-master/gym_mupen64plus/envs/MarioKart64/__init__.py
|
from gym.envs.registration import register
from gym_mupen64plus.envs.MarioKart64.mario_kart_env import MarioKartEnv
from gym_mupen64plus.envs.MarioKart64.discrete_envs import MarioKartDiscreteEnv
courses = [
    {'name': 'Luigi-Raceway', 'cup': 'Mushroom', 'max_steps': 1250},
    {'name': 'Moo-Moo-Farm', 'cup': 'Mushroom', 'max_steps': 1250},
    {'name': 'Koopa-Troopa-Beach', 'cup': 'Mushroom', 'max_steps': 1250},
    {'name': 'Kalimari-Desert', 'cup': 'Mushroom', 'max_steps': 1250},
    {'name': 'Toads-Turnpike', 'cup': 'Flower', 'max_steps': 1250},
    {'name': 'Frappe-Snowland', 'cup': 'Flower', 'max_steps': 1250},
    {'name': 'Choco-Mountain', 'cup': 'Flower', 'max_steps': 1250},
    {'name': 'Mario-Raceway', 'cup': 'Flower', 'max_steps': 1250},
    {'name': 'Wario-Stadium', 'cup': 'Star', 'max_steps': 1250},
    {'name': 'Sherbet-Land', 'cup': 'Star', 'max_steps': 1250},
    {'name': 'Royal-Raceway', 'cup': 'Star', 'max_steps': 1250},
    {'name': 'Bowsers-Castle', 'cup': 'Star', 'max_steps': 1250},
    {'name': 'DKs-Jungle-Parkway', 'cup': 'Special', 'max_steps': 1250},
    {'name': 'Yoshi-Valley', 'cup': 'Special', 'max_steps': 1250},
    {'name': 'Banshee-Boardwalk', 'cup': 'Special', 'max_steps': 1250},
    {'name': 'Rainbow-Road', 'cup': 'Special', 'max_steps': 1250},
]

# Register a continuous-action and a discrete-action env per course.
for course in courses:
    course_tags = {
        'mupen': True,
        'cup': course['cup'],
        'wrapper_config.TimeLimit.max_episode_steps': course['max_steps'],
    }
    for id_template, entry_point in (
            ('Mario-Kart-%s-v0',
             'gym_mupen64plus.envs.MarioKart64:MarioKartEnv'),
            ('Mario-Kart-Discrete-%s-v0',
             'gym_mupen64plus.envs.MarioKart64:MarioKartDiscreteEnv')):
        register(
            id=id_template % course['name'],
            entry_point=entry_point,
            kwargs={'course': course['name'].replace('-', '')},
            tags=dict(course_tags),
            nondeterministic=True,
        )
| 2,335
| 43.923077
| 79
|
py
|
entropic_barrier
|
entropic_barrier-master/setup.py
|
from setuptools import setup, find_packages
# Build/install configuration for the `diffusion` package.
setup(
    name='diffusion',
    author='Guangyao Zhou',
    author_email='tczhouguangyao@gmail.com',
    license='MIT',
    # Package info
    packages=find_packages(),
    include_package_data=True,
    zip_safe=True,
)
| 267
| 19.615385
| 44
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/utils.py
|
import pickle
import numpy as np
# Module-level defaults; not referenced by the code visible in this file —
# presumably consumed by other golf_course modules (TODO confirm).
DEFAULT_THRESHOLD_MULTIPLIER = 4
DEFAULT_RELATIVE_SCALE = 0.1
def uniform_on_sphere(center, radius, num_samples=1, reflecting_boundary_radius=np.inf):
    """Draw points uniformly distributed on a sphere.

    Gaussian draws are projected onto the sphere of the given radius around
    ``center``; any point landing outside the reflecting boundary (measured
    from the origin) is rejected and redrawn.

    Parameters
    ----------
    center : np.ndarray
        Center of the sphere; its size fixes the dimension.
    radius : float
        Radius of the sphere.
    num_samples : int
        Number of points to return.
    reflecting_boundary_radius : float
        All returned points must lie strictly inside this radius.

    Returns
    -------
    np.ndarray
        Array of shape ``(num_samples, center.size)``; one point per row.
    """
    dim = center.size
    accepted_batches = []
    num_accepted = 0
    while num_accepted < num_samples:
        batch = np.random.randn(num_samples, dim)
        batch_norms = np.linalg.norm(batch, axis=1, keepdims=True)
        batch = radius * batch / batch_norms
        batch = batch + center.reshape((1, dim))
        # Rejection step: keep only draws inside the reflecting boundary.
        inside = batch[np.linalg.norm(batch, axis=1) < reflecting_boundary_radius]
        accepted_batches.append(inside)
        num_accepted += len(inside)
    result = np.concatenate(accepted_batches)
    assert np.all(np.linalg.norm(result, axis=1) < reflecting_boundary_radius)
    return result[:num_samples]
def sample_uniform_initial_location(centers, radiuses, boundary_radius):
    """Sample a point uniformly inside the boundary sphere, outside all targets.

    Rejection sampling: draw uniformly from the bounding cube until the draw
    lies strictly inside the reflecting boundary sphere and strictly outside
    every target sphere.

    Parameters
    ----------
    centers : np.ndarray
        Shape (num_spheres, n_dim); centers of all targets.
    radiuses : np.ndarray
        Shape (num_spheres,); radius of each target.
    boundary_radius : float
        Radius of the reflecting boundary (centered at the origin).

    Returns
    -------
    np.ndarray
        The sampled location, shape (n_dim,).
    """
    n_dim = centers.shape[1]
    while True:
        # Uniform draw from the cube [-boundary_radius, boundary_radius]^n.
        candidate = 2 * boundary_radius * np.random.rand(n_dim) - boundary_radius
        if np.linalg.norm(candidate, ord=2) >= boundary_radius:
            continue
        gaps = np.linalg.norm(candidate.reshape((1, n_dim)) - centers, ord=2, axis=1)
        if np.all(gaps > radiuses):
            return candidate
def sample_random_locations(center, radiuses, n_samples):
    """Sample points from the spherical shell between radiuses[0] and radiuses[1].

    Each sample first picks a radius uniformly from [radiuses[0], radiuses[1])
    and then a point uniformly on the sphere of that radius around ``center``.
    NOTE: this is uniform in radius, not uniform in shell volume.
    """
    assert radiuses[0] < radiuses[1]
    n_dim = center.size
    out = np.zeros((n_samples, n_dim))
    width = radiuses[1] - radiuses[0]
    for sample_idx in range(n_samples):
        shell_radius = width * np.random.rand(1) + radiuses[0]
        out[sample_idx] = uniform_on_sphere(center, shell_radius)
    return out
def load_model_params(model_params_fname):
    """Load a pickled model-parameter dict and unpack it.

    Parameters
    ----------
    model_params_fname : str
        Path to a pickle file containing a dict with keys
        ``'time_step'`` and ``'target_param_list'``.

    Returns
    -------
    tuple
        ``(time_step, target_param_list)``.
    """
    with open(model_params_fname, 'rb') as handle:
        params = pickle.load(handle)
    return params['time_step'], params['target_param_list']
| 3,260
| 30.355769
| 90
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/__init__.py
| 0
| 0
| 0
|
py
|
|
entropic_barrier
|
entropic_barrier-master/golf_course/core/target.py
|
from math import *
import numba
import numpy as np
import sympy
from scipy.special import gamma
from sympy.utilities.lambdify import lambdastr
import golf_course.estimate.numba as nestimate
class Target(object):
    """A spherical target region of the energy landscape.

    Each target is described by three concentric spheres with radii
    ``radiuses[0] < radiuses[1] < radiuses[2]``; outside the middle sphere
    the energy is constant (force is zero), inside it the energy is one of
    the supported analytic forms with optional Gaussian bumps.
    """

    def __init__(self, center, radiuses, energy_type, energy_params):
        """Validate parameters and build the compiled force-field stepper.

        Parameters
        ----------
        center : np.ndarray
            Location of the target center, shape (n_dim,).
        radiuses : sequence of float
            The three concentric radii; must be strictly increasing.
        energy_type : str
            One of ``'flat'``, ``'random_well'``, ``'random_crater'``.
        energy_params : dict
            Keyword arguments forwarded to the matching
            ``generate_*_sympy_expr`` builder.  For ``'random_well'``:
            ``depth``, ``locations``, ``standard_deviations``,
            ``multiplier``; ``'random_crater'`` additionally takes
            ``height``.  Unused for ``'flat'``.
        """
        assert len(radiuses) == 3
        assert (
            radiuses[0] < radiuses[1] and radiuses[1] < radiuses[2]
        ), 'Wrong radiuses.'
        assert energy_type in set(['random_well', 'random_crater', 'flat'])
        self.energy_type = energy_type
        self.center = center
        self.radiuses = radiuses
        self.energy_params = energy_params
        self.generate_force_field_function()

    def generate_force_field_function(self):
        """Compile the force field and the inner simulation loop with numba.

        The symbolic negative gradient is converted to a plain lambda string,
        rewritten to take a single array argument, eval'd, and jitted.  The
        generated code is trusted (it comes from our own sympy expression),
        so the use of ``eval`` here is deliberate.
        """
        if self.energy_type == 'flat':
            # No force anywhere inside the target.
            get_force_field = lambda x: list(np.zeros_like(x))
        else:
            expr_generation_func_dict = {
                'random_well': generate_random_well_sympy_expr,
                'random_crater': generate_random_crater_sympy_expr,
            }
            location, gradient_expr = expr_generation_func_dict[self.energy_type](
                self.center, self.radiuses, **self.energy_params
            )
            # Force = -grad(energy).  lambdastr emits f(x0, x1, ...); rewrite
            # the signature and body so the lambda takes one indexable array.
            force_field_lambda_str = lambdastr(location, -gradient_expr)
            n_dim = len(location)
            old_argument = ','.join(['x{}'.format(ii) for ii in range(n_dim)])
            force_field_lambda_str = force_field_lambda_str.replace(old_argument, 'x')
            for ii in range(n_dim):
                force_field_lambda_str = force_field_lambda_str.replace(
                    'x{}'.format(ii), 'x[{}]'.format(ii)
                )
            get_force_field = eval(force_field_lambda_str)
            get_force_field = numba.jit(get_force_field)

        @numba.jit(nopython=True, cache=True)
        def advance_within_concentric_spheres_numba(
            current_location,
            center,
            r1,
            boundary_radiuses,
            time_step,
            reflecting_boundary_radius,
        ):
            # Euler-Maruyama diffusion between the two boundary spheres.
            # Returns (previous_location, current_location, target_flag)
            # where target_flag is True iff the walk exited through the
            # inner boundary.
            origin = np.zeros_like(current_location)
            n_dim = center.size
            inner_boundary_squared = boundary_radiuses[0] ** 2
            outer_boundary_squared = boundary_radiuses[1] ** 2
            r1_squared = r1 ** 2
            scale = np.sqrt(time_step)
            previous_location = current_location
            target_flag = False
            while True:
                r_vector = current_location - center
                r_squared = np.sum(r_vector ** 2)
                if r_squared <= inner_boundary_squared:
                    target_flag = True
                    break
                elif r_squared >= outer_boundary_squared:
                    break
                # The force field only acts inside the middle sphere (r1).
                if r_squared >= r1_squared:
                    force_field = np.zeros_like(current_location)
                else:
                    force_field = np.array(get_force_field(current_location))
                previous_location = current_location
                random_component = scale * np.random.randn(n_dim)
                current_location = (
                    previous_location + force_field * time_step + random_component
                )
                # Keep the walk inside the global reflecting boundary.
                current_location = nestimate.simulate_reflecting_boundary(
                    origin,
                    reflecting_boundary_radius,
                    previous_location,
                    current_location,
                    scale,
                    time_step,
                    force_field,
                )
            return previous_location, current_location, target_flag

        self.advance_within_concentric_spheres_numba = (
            advance_within_concentric_spheres_numba
        )

    def get_constant(self):
        # Surface area of the unit (n-1)-sphere.
        n_dim = self.center.size
        constant = 2 * np.pi ** (n_dim / 2) / gamma(n_dim / 2)
        # NOTE(review): when the middle sphere pokes outside the unit
        # boundary, only a fraction of its surface is reachable; this
        # arccos factor presumably corrects for that — confirm derivation.
        if np.linalg.norm(self.center) + self.radiuses[1] > 1:
            constant *= np.arccos(self.radiuses[1] / 2) / np.pi
        return constant
def generate_random_well_sympy_expr(
    center,
    radiuses,
    depth=None,
    locations=None,
    standard_deviations=None,
    multiplier=None,
):
    """Build the symbolic gradient of a quartic well with Gaussian bumps.

    The base well is a fourth-order polynomial in r^2 reaching ``-depth`` at
    the center and 0 at ``radiuses[1]``.  Gaussian bumps are added on top,
    damped by a mollifier so that the perturbation vanishes at the middle
    sphere.

    Parameters
    ----------
    center : np.ndarray
        Target center, shape (n_dim,).
    radiuses : sequence of float
        The three target radii; only ``radiuses[1]`` is used here.
    depth : float
        Depth of the potential well.
    locations : np.ndarray
        Shape (n_bumps, n_dim); centers of the Gaussian random bumps.
    standard_deviations : np.ndarray
        Shape (n_bumps,); isotropic std of each bump.
    multiplier : float
        Weight balancing the bump term against the well term.

    Returns
    -------
    tuple
        ``(location, gradient_expr)`` — the symbol array and the symbolic
        gradient of the energy with respect to it.
    """
    assert locations.shape[1] == center.size
    assert standard_deviations.shape == (locations.shape[0],)
    n_dim = center.size
    n_bumps = locations.shape[0]
    location = sympy.Array(sympy.symbols('x:{}'.format(n_dim)), (n_dim,))
    center = sympy.Array(center, center.shape)
    r_squared = sympy_array_squared_norm(location - center)
    # Quartic well: -depth at r=0, 0 at r=radiuses[1].
    well_expr = (
        -(depth / radiuses[1] ** 4)
        * (r_squared ** 2 - 2 * radiuses[1] ** 2 * r_squared)
        - depth
    )
    # Bump-function mollifier, normalized to 1 at the center (exp(-1) factor);
    # it decays toward the middle sphere so the bumps don't leak outside.
    mollifier_expr = sympy.functions.exp(
        -radiuses[1] / (radiuses[1] - sympy_array_squared_norm(location - center) ** 10)
    ) / np.exp(-1)
    # One isotropic Gaussian per bump location.
    random_components = [
        sympy.functions.exp(
            -sympy_array_squared_norm(location - sympy.Array(locations[ii], (n_dim,)))
            / (2 * standard_deviations[ii] ** 2)
        )
        for ii in range(n_bumps)
    ]
    random_expr = 0
    for ii in range(n_bumps):
        random_expr += random_components[ii]
    sympy_expr = well_expr + multiplier * mollifier_expr * random_expr
    gradient_expr = sympy.derive_by_array(sympy_expr, location)
    return location, gradient_expr
def generate_random_crater_sympy_expr(
    center,
    radiuses,
    depth=None,
    height=None,
    locations=None,
    standard_deviations=None,
    multiplier=None,
):
    """Build the symbolic gradient of a crater energy with Gaussian bumps.

    The base crater is a sixth-order polynomial in r^2: depth ``-depth`` at
    the center, a rim of height ``height``, and 0 at ``radiuses[1]``.  The
    polynomial coefficients are solved in closed form (cubic-root formulas
    below).  Gaussian bumps are added as in the random well.

    Parameters
    ----------
    center : np.ndarray
        Target center, shape (n_dim,).
    radiuses : sequence of float
        The three target radii; only ``radiuses[1]`` is used here.
    depth : float
        Depth of the crater floor.
    height : float
        Height of the crater rim.
    locations : np.ndarray
        Shape (n_bumps, n_dim); centers of the Gaussian random bumps.
    standard_deviations : np.ndarray
        Shape (n_bumps,); isotropic std of each bump.
    multiplier : float
        Weight balancing the bump term against the crater term.

    Returns
    -------
    tuple
        ``(location, gradient_expr)`` — the symbol array and the symbolic
        gradient of the energy with respect to it.
    """
    assert locations.shape[1] == center.size
    assert standard_deviations.shape == (locations.shape[0],)
    n_dim = center.size
    n_bumps = locations.shape[0]
    location = sympy.Array(sympy.symbols('x:{}'.format(n_dim)), (n_dim,))
    center = sympy.Array(center, center.shape)
    r_squared = sympy_array_squared_norm(location - center)
    # Closed-form solution for the polynomial coefficients (Cardano-style);
    # NOTE(review): derivation not shown here — see accompanying notes.
    C = (
        3
        * radiuses[1] ** 2
        * sympy.cbrt(depth * height * (depth + sympy.sqrt(depth * (depth + height))))
    )
    Delta0 = -9 * depth * height * radiuses[1] ** 4
    b_squared = -(1 / (3 * depth)) * (-3 * depth * radiuses[1] ** 2 + C + Delta0 / C)
    a = depth / (3 * b_squared * radiuses[1] ** 4 - radiuses[1] ** 6)
    crater_expr = (
        a
        * (
            2 * r_squared ** 3
            - 3 * (b_squared + radiuses[1] ** 2) * r_squared ** 2
            + 6 * b_squared * radiuses[1] ** 2 * r_squared
        )
        - depth
    )
    # Same mollified Gaussian-bump construction as the random well.
    mollifier_expr = sympy.functions.exp(
        -radiuses[1] / (radiuses[1] - sympy_array_squared_norm(location - center) ** 10)
    ) / np.exp(-1)
    random_components = [
        sympy.functions.exp(
            -sympy_array_squared_norm(location - sympy.Array(locations[ii], (n_dim,)))
            / (2 * standard_deviations[ii] ** 2)
        )
        for ii in range(n_bumps)
    ]
    random_expr = 0
    for ii in range(n_bumps):
        random_expr += random_components[ii]
    sympy_expr = crater_expr + multiplier * mollifier_expr * random_expr
    gradient_expr = sympy.derive_by_array(sympy_expr, location)
    return location, gradient_expr
def sympy_array_squared_norm(sympy_array):
    """Return the sum of squared entries of a rank-1 sympy Array."""
    squared_entries = sympy_array.applyfunc(lambda entry: entry ** 2)
    return sympy.tensor.array.tensorcontraction(squared_entries, (0,))
| 10,808
| 37.466192
| 116
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/core/model.py
|
import numpy as np
import golf_course.estimate.numba as nestimate
from golf_course.core.target import Target
from golf_course.estimate.capacity import estimate_capacity
class ToyModel(object):
    """The toy diffusion model: a reflecting unit ball with spherical targets.

    Outside the targets the energy is constant (taken to be 0), so the
    dynamics there are plain Brownian motion.
    """

    def __init__(self, time_step, target_param_list):
        """Validate geometry and construct the targets.

        Parameters
        ----------
        time_step : float
            Time step used for all simulations.
        target_param_list : list of dict
            One keyword dict per target, forwarded to ``Target(**params)``.
        """
        self.time_step = time_step
        # Targets must fit inside the unit reflecting boundary and not overlap.
        assert _check_compatibility(target_param_list, 1)
        self.target_list = [Target(**params) for params in target_param_list]

    def do_naive_simulation(self, current_location):
        """Run brute-force diffusion until some target's inner sphere is hit.

        Returns the index (into ``target_list``) of the target that was hit.
        The starting point must lie outside every target's middle sphere.
        """
        radiuses = np.array([target.radiuses[1] for target in self.target_list])
        centers = np.array([target.center for target in self.target_list])
        distances = np.linalg.norm(
            current_location.reshape((1, -1)) - centers, ord=2, axis=1
        )
        assert all(distances > radiuses)
        while True:
            # Fast walk through the flat (force-free) region until a target's
            # middle sphere is entered.
            previous_location, current_location, index = nestimate.advance_flat_regions(
                current_location, centers, radiuses, self.time_step
            )
            target = self.target_list[index]
            boundary_radiuses = np.array([target.radiuses[0], target.radiuses[1]])
            # Diffuse inside the target; target_flag tells whether the inner
            # sphere was reached before escaping back out.
            previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
                current_location, target, boundary_radiuses, self.time_step, 1
            )
            if target_flag:
                break
        return index

    def estimate_hitting_prob(self, capacity_estimation_param_list):
        """Estimate per-target hitting probabilities via capacity estimation.

        Parameters
        ----------
        capacity_estimation_param_list : list of dict
            One keyword dict per target, forwarded to ``estimate_capacity``.

        Returns
        -------
        np.ndarray
            Capacities normalized to sum to 1.
        """
        n_targets = len(self.target_list)
        hitting_prob = np.zeros(n_targets)
        for ii in range(n_targets):
            target = self.target_list[ii]
            hitting_prob[ii], _ = estimate_capacity(
                target, **capacity_estimation_param_list[ii]
            )
        hitting_prob = hitting_prob / np.sum(hitting_prob)
        return hitting_prob
def _check_compatibility(target_param_list, boundary_radius):
n_spheres = len(target_param_list)
for target_param in target_param_list:
center = target_param['center']
radius = target_param['radiuses'][0]
if np.linalg.norm(center) - radius > boundary_radius:
return False
for ii in range(n_spheres - 1):
for jj in range(ii + 1, n_spheres):
center1 = target_param_list[ii]['center']
radius1 = target_param_list[ii]['radiuses'][2]
center2 = target_param_list[jj]['center']
radius2 = target_param_list[jj]['radiuses'][2]
if np.linalg.norm(center1 - center2) < radius1 + radius2:
return False
return True
| 3,065
| 36.390244
| 107
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/core/__init__.py
| 0
| 0
| 0
|
py
|
|
entropic_barrier
|
entropic_barrier-master/golf_course/estimate/numba.py
|
import numba
import numpy as np
from golf_course.utils import DEFAULT_THRESHOLD_MULTIPLIER
@numba.jit(nopython=True, nogil=True, cache=True)
def advance_flat_regions(current_location, centers, radiuses, time_step):
    """Advance a Brownian walk through the force-free region.

    Uses a walk-on-spheres acceleration: while the walker is far from every
    target middle sphere and from the unit reflecting boundary, it jumps
    directly to a uniform point on the largest sphere that fits; near a
    boundary it falls back to small Euler-Maruyama steps with reflection.
    Returns ``(previous_location, current_location, index)`` where ``index``
    is the target whose middle sphere was entered.
    """
    boundary_radius = 1.0
    scale = np.sqrt(time_step)
    n_dim = current_location.size
    n_targets = centers.shape[0]
    boundary_radius_squared = 1
    radiuses_squared = radiuses ** 2
    distances_squared = np.zeros((n_targets,))
    origin = np.zeros_like(current_location)
    # Jumps are only safe when all obstacles are several step lengths away.
    threshold = DEFAULT_THRESHOLD_MULTIPLIER * scale
    temp_step_size = np.zeros(n_targets + 1)
    previous_location = current_location
    while True:
        for jj in range(n_targets):
            distances_squared[jj] = np.sum((current_location - centers[jj]) ** 2)
        if not np.all(distances_squared > radiuses_squared):
            # Entered exactly one target's middle sphere — report which.
            in_spheres = distances_squared <= radiuses_squared
            temp = np.nonzero(in_spheres)
            assert len(temp) == 1 and len(temp[0]) == 1, "Too many nonzeros"
            index = temp[0][0]
            break
        previous_location = current_location
        r_squared = np.sum(current_location ** 2)
        assert r_squared < boundary_radius_squared, 'Outside reflecting boundary.'
        # Clearance to each target surface and to the reflecting boundary.
        temp_step_size[:n_targets] = np.sqrt(distances_squared) - radiuses
        temp_step_size[-1] = boundary_radius - np.sqrt(r_squared)
        maximum_step_size = np.min(temp_step_size)
        if maximum_step_size > threshold:
            # Walk-on-spheres jump: uniform point on the clearance sphere.
            temp_current_location = uniform_on_sphere(
                current_location, maximum_step_size, 1
            )
            current_location = temp_current_location[0, :]
        else:
            # Plain diffusion step (zero force in the flat region).
            random_component = scale * np.random.randn(n_dim)
            current_location = previous_location + random_component
        current_location = simulate_reflecting_boundary(
            origin,
            boundary_radius,
            previous_location,
            current_location,
            scale,
            time_step,
            np.zeros_like(current_location),
        )
    return previous_location, current_location, index
@numba.jit(nopython=True, nogil=True, cache=True)
def _interpolate(point1, point2, center, target_radius):
    """_interpolate

    Find the point where the segment [point1, point2] crosses the sphere with
    the given center and target_radius.  One of the two points must be inside
    the sphere and the other outside, so exactly one crossing exists.

    Parameters
    ----------
    point1 : np array
        point1 is of shape (n,), where n is the dimension of the system
    point2 : np array
        point2 is of shape (n,), where n is the dimension of the system
    center : np array
        center is of shape (n,), where n is the dimension of the system
    target_radius : float
        target_radius is the radius of the sphere

    Returns
    -------
    point_on_sphere : np array
        point_on_sphere is of shape (n,);
        point_on_sphere = point1 + lbda * (point2 - point1) for the unique
        lbda in (0, 1) that lands on the sphere, i.e.
        norm(point_on_sphere - center) == target_radius.
    """
    # Solve |point1 + lbda*(point2-point1) - center|^2 = target_radius^2,
    # a quadratic in lbda.
    a = np.sum((point2 - point1) ** 2)
    b = 2 * np.sum((point1 - center) * (point2 - point1))
    c = np.sum((point1 - center) ** 2) - target_radius ** 2
    roots = [
        (-b + np.sqrt(b ** 2 - 4 * a * c)) / (2 * a),
        (-b - np.sqrt(b ** 2 - 4 * a * c)) / (2 * a),
    ]
    # Exactly one root lies strictly inside the segment (0, 1).
    if roots[0] > 0 and roots[0] < 1:
        assert (
            roots[1] <= 0 or roots[1] >= 1
        ), 'Found 2 valid solutions. Should have only 1.'
        lbda = roots[0]
    else:
        assert roots[1] > 0 and roots[1] < 1, "Didn't find a valid solution"
        lbda = roots[1]
    point_on_sphere = point1 + lbda * (point2 - point1)
    # Sanity check (a, b reused as scratch): the result must lie on the
    # sphere up to the usual allclose tolerances.
    a = np.sum((point_on_sphere - center) ** 2)
    b = target_radius ** 2
    rtol = 1.0e-5
    atol = 1.0e-8
    assert np.abs(a - b) <= (
        atol + rtol * np.abs(b)
    ), 'Given point is not on the sphere.'
    return point_on_sphere
@numba.jit(nopython=True, nogil=True, cache=True)
def _reflecting_boundary(center, radius, previous_location, current_location):
    """Reflect a step that crossed the sphere back inside (mirror reflection).

    ``previous_location`` must be inside the sphere and ``current_location``
    outside; the overshoot beyond the crossing point is mirrored about the
    tangent plane at the crossing point.
    """
    radius_squared = radius ** 2
    assert np.sum((previous_location - center) ** 2) < radius_squared
    assert np.sum((current_location - center) ** 2) > radius_squared
    point_on_sphere = _interpolate(previous_location, current_location, center, radius)
    # Component of the overshoot along the outward normal, normalized.
    lbda = -np.sum(
        (point_on_sphere - center) * (current_location - point_on_sphere)
    ) / (2 * radius_squared)
    reflected_location = current_location + 4 * lbda * (point_on_sphere - center)
    return reflected_location
@numba.jit(nopython=True, nogil=True, cache=True)
def simulate_reflecting_boundary(
    center, radius, previous_location, current_location, scale, time_step, force_field
):
    """Enforce the reflecting boundary on one diffusion step.

    If the step left the boundary sphere, reflect it back.  If the reflection
    itself still lands outside (possible for large overshoots), redraw the
    random increment from ``previous_location`` until a valid point results.
    Returns the (possibly corrected) ``current_location``.

    NOTE(review): distances are measured from the origin, not from ``center``
    (all visible callers pass ``center`` == origin) — confirm before reusing
    with a non-origin center.
    """
    n_dim = center.size
    # BUG FIX: the squared distance must be compared with radius ** 2.  The
    # original compared it with ``radius``, which happens to be correct only
    # because every caller in this codebase passes radius == 1.
    radius_squared = radius ** 2
    r_squared_reflecting = np.sum(current_location ** 2)
    if r_squared_reflecting > radius_squared:
        current_location = _reflecting_boundary(
            center, radius, previous_location, current_location
        )
        if np.sum(current_location ** 2) > radius_squared:
            # Rare: the mirrored point is still outside — resample the step.
            while True:
                random_component = scale * np.random.randn(n_dim)
                current_location = (
                    previous_location + force_field * time_step + random_component
                )
                if np.sum(current_location ** 2) <= radius_squared:
                    break
                current_location = _reflecting_boundary(
                    center, radius, previous_location, current_location
                )
                if np.sum(current_location ** 2) <= radius_squared:
                    break
    return current_location
@numba.jit(nopython=True, cache=True)
def uniform_on_sphere(center, radius, num_samples=1):
    """uniform_on_sphere

    Uniform distribution on a sphere (numba-compatible variant: no rejection
    against a reflecting boundary, and norms computed in an explicit loop
    because keepdims is unsupported in nopython mode).

    Parameters
    ----------
    center : np array
        center is the center of the sphere
    radius : float
        radius is the radius of the sphere
    num_samples : int
        num_samples is the number of samples we are going to get

    Returns
    -------
    samples : np array
        samples is an np array of shape (num_samples, center.size).
        Each row is a sample
    """
    n = center.size
    # Normalized Gaussian draws are uniform on the sphere.
    samples = np.random.randn(num_samples, n)
    sample_norms = np.zeros((num_samples, 1))
    for ii in range(num_samples):
        sample_norms[ii, 0] = np.sqrt(np.sum(samples[ii, :] ** 2))
    samples = radius * samples / sample_norms
    samples = samples + center.reshape((1, n))
    return samples
def advance_within_concentric_spheres(
    current_location, target, boundary_radiuses, time_step, reflecting_boundary_radius
):
    """Validate arguments and run the target's compiled inner diffusion loop.

    Thin wrapper around ``target.advance_within_concentric_spheres_numba``:
    diffuses from ``current_location`` until the walk crosses either boundary
    sphere.  Returns ``(previous_location, current_location, target_flag)``,
    with ``target_flag`` True iff the inner boundary was crossed.
    """
    center = target.center
    radiuses = target.radiuses
    r1 = radiuses[1]
    # The requested boundaries must lie within the target's inner/outer radii.
    assert boundary_radiuses[0] >= radiuses[0] and boundary_radiuses[1] <= radiuses[2]
    assert type(current_location) == np.ndarray
    assert current_location.shape == center.shape
    assert type(boundary_radiuses) == np.ndarray
    assert len(boundary_radiuses) == 2
    assert boundary_radiuses[0] < boundary_radiuses[1]
    # The starting point must be strictly between the two boundaries.
    distance = np.linalg.norm(current_location - center)
    assert distance > boundary_radiuses[0] and distance < boundary_radiuses[1]
    advance_within_concentric_spheres_numba = (
        target.advance_within_concentric_spheres_numba
    )
    previous_location, current_location, target_flag = advance_within_concentric_spheres_numba(
        current_location,
        center,
        r1,
        boundary_radiuses,
        time_step,
        reflecting_boundary_radius,
    )
    return previous_location, current_location, target_flag
| 7,720
| 34.417431
| 95
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/estimate/__init__.py
| 0
| 0
| 0
|
py
|
|
entropic_barrier
|
entropic_barrier-master/golf_course/estimate/capacity.py
|
import multiprocessing as mp
import numpy as np
from scipy.cluster.vq import kmeans2
from scipy.optimize import linear_sum_assignment
from scipy.spatial.distance import cdist
import golf_course.estimate.numba as nestimate
from golf_course.utils import uniform_on_sphere
from tqdm import tqdm
def estimate_capacity(
    target,
    inner,
    outer,
    num_points,
    num_clusters,
    num_trials,
    time_step=1e-5,
    use_parallel=True,
    n_split=4,
    use_analytical_gradients=True,
    estimate_gradients=False,
    n_surfaces_gradients_estimation=15,
):
    """Estimate the capacity of a target via layered hitting probabilities.

    Parameters
    ----------
    target : Target
        The target whose capacity is estimated.
    inner : int
        The number of intermediate layers we are going to use for calculating
        the inner rate.
    outer : int
        The number of intermediate layers we are going to use for calculating
        the outer rate.
    num_points : int
        The number of points we are going to have on each layer. We are going
        to form clusters based on these points.
    num_clusters : int
        The number of clusters we are going to have on each layer.
    num_trials : int
        The number of trials we are going to run for each bin in order to
        decide the transition probabilities.
    time_step : float
        The time step we are going to use for the simulation. Default 1e-5.
    use_parallel : bool
        Whether we are going to make the code parallel or not.
    n_split : int
        The number of splits we are going to use for making things parallel.
        Default 4.
    use_analytical_gradients : bool
        Whether we want to use the analytically known radial gradient
        (valid for the flat outer region) instead of the numerical estimate.
    estimate_gradients : bool
        Whether to also estimate gradients numerically (required when
        ``use_analytical_gradients`` is False).
    n_surfaces_gradients_estimation : int
        The number of surfaces we are going to use for numerically estimating
        the gradients.

    Returns
    -------
    tuple
        ``(capacity, gradients)``; ``gradients`` is None unless
        ``estimate_gradients`` is True.
    """
    if not use_analytical_gradients:
        assert estimate_gradients
    hitting_prob, cluster_centers, cluster_labels = estimate_hitting_prob(
        target,
        target.radiuses,
        inner,
        outer,
        num_points,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
    )
    # Work with the clusters on the middle sphere (index outer + 1).
    middle_index = outer + 1
    cluster_labels = cluster_labels[middle_index]
    cluster_centers = cluster_centers[middle_index]
    n_points_in_clusters = np.array(
        [np.sum(cluster_labels == ii) for ii in range(num_clusters)]
    )
    n_dim = target.center.size
    dA = target.radiuses[1]
    if estimate_gradients:
        # Numerically estimate the radial gradient of the hitting probability
        # just outside the middle sphere, using a thin extra shell of layers.
        delta = (target.radiuses[2] - target.radiuses[1]) / (
            n_surfaces_gradients_estimation + 2
        )
        radiuses_gradients_estimation = np.array(
            [target.radiuses[1], target.radiuses[1] + delta, target.radiuses[2]]
        )
        hitting_prob_gradients, cluster_centers_gradients, cluster_labels_gradients = estimate_hitting_prob(
            target,
            radiuses_gradients_estimation,
            0,
            n_surfaces_gradients_estimation,
            num_points,
            num_clusters,
            num_trials,
            time_step,
            use_parallel,
            n_split,
        )
        cluster_centers_gradients = cluster_centers_gradients[
            n_surfaces_gradients_estimation + 1
        ]
        # Match the gradient-run clusters to the main-run clusters by
        # minimum-cost assignment on center distances.
        _, ind = linear_sum_assignment(
            cdist(cluster_centers, cluster_centers_gradients)
        )
        hitting_prob_gradients = hitting_prob_gradients[ind]
        # Finite-difference slope: probability is 1 on the middle sphere.
        gradients = np.abs(hitting_prob_gradients - 1) / delta
    else:
        gradients = None
    if use_analytical_gradients:
        # Closed-form radial solution for Brownian motion between spheres.
        rAtilde = target.radiuses[2]
        capacity = (
            (n_dim - 2)
            / (dA ** (2 - n_dim) - rAtilde ** (2 - n_dim))
            * np.sum(n_points_in_clusters * hitting_prob)
            / num_points
        )
    else:
        capacity = (
            dA ** (n_dim - 1)
            * np.sum(n_points_in_clusters * hitting_prob * gradients)
            / num_points
        )
    capacity *= target.get_constant()
    return capacity, gradients
def estimate_hitting_prob(
    target,
    radiuses,
    inner,
    outer,
    num_points,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
):
    """Estimate per-cluster hitting probabilities for one target.

    Pipeline: propagate points between the layered surfaces and cluster them,
    run extra simulations to estimate bin-to-bin transition probabilities,
    then solve for the hitting probabilities.  Returns
    ``(hitting_prob, cluster_centers, cluster_labels)``; the labels returned
    are for the middle surface only (see the helper below).
    """
    cluster_centers, cluster_labels, propagated_points, statistics_from_propagation = _propagate_and_cluster(
        target, radiuses, inner, outer, num_points, num_clusters, time_step
    )
    forward_probabilities, backward_probabilities, cluster_labels = _get_data_driven_binning_transition_probabilities(
        target,
        radiuses,
        inner,
        outer,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
        cluster_centers,
        cluster_labels,
        propagated_points,
        statistics_from_propagation,
    )
    print('Transition probabilities calculation done.')
    hitting_prob = _get_data_driven_binning_hitting_probability(
        forward_probabilities, backward_probabilities, inner, outer, num_clusters
    )
    return hitting_prob, cluster_centers, cluster_labels
def _get_data_driven_binning_transition_probabilities(
    target,
    radiuses,
    inner,
    outer,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
    cluster_centers,
    cluster_labels,
    propagated_points,
    statistics_from_propagation,
):
    """Run the additional per-bin simulations and compute transition matrices.

    Returns ``(forward_probabilities, backward_probabilities,
    cluster_labels)``; ``cluster_labels`` is passed through unchanged.
    """
    forward_probabilities, backward_probabilities = _additional_simulations_for_transition_probabilities(
        target,
        radiuses,
        cluster_centers,
        cluster_labels,
        propagated_points,
        statistics_from_propagation,
        inner,
        outer,
        num_clusters,
        num_trials,
        time_step,
        use_parallel,
        n_split,
    )
    return forward_probabilities, backward_probabilities, cluster_labels
def _propagate_and_cluster(
    target, radiuses, inner, outer, num_points, num_clusters, time_step
):
    """Populate every layered surface with points, then cluster each surface.

    Starts from uniform samples on the middle sphere, propagates them
    outward/inward surface by surface, k-means-clusters the points on every
    surface, and collects the surface/cluster transition statistics recorded
    during propagation.  Returns ``(cluster_centers, cluster_labels,
    propagated_points, statistics_from_propagation)``, each indexed by
    surface.
    """
    center = target.center
    initial_locations = uniform_on_sphere(
        center, radiuses[1], num_samples=num_points, reflecting_boundary_radius=1
    )
    # Surfaces: outer shell layers, then the middle sphere, then inner layers.
    num_surfaces = inner + outer + 3
    middle_index = outer + 1
    surfaces = _get_surfaces(radiuses, inner, outer)
    assert len(surfaces) == num_surfaces, 'The generated surfaces are not right.'
    # Propagate the points and gather information
    propagated_points = [[] for _ in range(num_surfaces)]
    propagated_points[middle_index] = initial_locations
    propagated_information = []
    extra_information = []
    print('Doing propagation.')
    # Do the initial propagation from the middle sphere
    _propagate_and_get_info(
        target,
        surfaces,
        propagated_points,
        propagated_information,
        extra_information,
        middle_index,
        num_points,
        time_step,
    )
    # Do the forward propagation, from the middle sphere to the inner sphere
    for index in range(middle_index + 1, num_surfaces - 1):
        _propagate_and_get_info(
            target,
            surfaces,
            propagated_points,
            propagated_information,
            extra_information,
            index,
            num_points,
            time_step,
        )
    # Do the backward propagation, from the middle sphere to the outer sphere
    for index in range(middle_index - 1, 0, -1):
        _propagate_and_get_info(
            target,
            surfaces,
            propagated_points,
            propagated_information,
            extra_information,
            index,
            num_points,
            time_step,
        )
    # Do the clustering (k-means on each surface's point cloud).
    cluster_centers = [[] for _ in range(num_surfaces)]
    cluster_labels = [[] for _ in range(num_surfaces)]
    print('Doing clustering.')
    for ii in tqdm(range(num_surfaces)):
        cluster_centers[ii], cluster_labels[ii] = kmeans2(
            propagated_points[ii], num_clusters, minit='points', missing='raise'
        )
    # Get the statistics
    print('Getting statistics.')
    statistics_from_propagation = _collect_statistics(
        cluster_centers,
        cluster_labels,
        propagated_information,
        extra_information,
        inner,
        outer,
        num_clusters,
    )
    return (
        cluster_centers,
        cluster_labels,
        propagated_points,
        statistics_from_propagation,
    )
def _get_surfaces(radiuses, inner, outer):
inner_surfaces = np.linspace(radiuses[1], radiuses[0], inner + 2)
outer_surfaces = np.linspace(radiuses[2], radiuses[1], outer + 2)
surfaces = np.concatenate((outer_surfaces, inner_surfaces[1:]))
return surfaces
def _propagate_and_get_info(
    target,
    surfaces,
    propagated_points,
    propagated_information,
    extra_information,
    index,
    num_points,
    time_step,
):
    """Propagate points from surface ``index`` to its two neighbor surfaces.

    Resamples starting points (with replacement) from surface ``index`` in
    batches and diffuses each one until it crosses a neighbor surface, until
    BOTH neighbors hold ``num_points`` points.  Mutates its list arguments in
    place: new neighbor points go into ``propagated_points`` (converted to
    arrays once full), bookkeeping records
    ``[src_surface, src_point, dst_surface, dst_point]`` go into
    ``propagated_information``, and crossings that land on an already-full
    neighbor go into ``extra_information`` together with the landing point.
    """
    assert (
        propagated_points[index].shape[0] == num_points
    ), 'Number of points not right.'
    # surfaces[] is ordered outermost-first, so index+1 is the inner neighbor.
    boundary_radiuses = np.array([surfaces[index + 1], surfaces[index - 1]])
    with tqdm() as pbar:
        batch_size = 500
        while True:
            flag = False
            random_indices = np.random.randint(0, num_points, size=(batch_size,))
            initial_locations = propagated_points[index][random_indices]
            ii = 0
            for initial_location in initial_locations:
                previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
                    initial_location, target, boundary_radiuses, time_step, 1
                )
                # target_flag True means the inner neighbor was crossed.
                if target_flag:
                    indicator = 1
                else:
                    indicator = -1
                # Snap the crossing onto the neighbor surface exactly.
                final_point = nestimate._interpolate(
                    previous_location,
                    current_location,
                    target.center,
                    surfaces[index + indicator],
                )
                if len(propagated_points[index + indicator]) == num_points:
                    # Neighbor already full: record the transition only.
                    extra_temp = np.concatenate(
                        (
                            np.array([index, random_indices[ii], index + indicator]),
                            final_point,
                        )
                    )
                    extra_information.append(extra_temp)
                else:
                    propagated_points[index + indicator].append(final_point)
                    index_temp = len(propagated_points[index + indicator]) - 1
                    propagated_information.append(
                        np.array(
                            [index, random_indices[ii], index + indicator, index_temp],
                            dtype=int,
                        )
                    )
                pbar.update()
                ii += 1
                if (
                    len(propagated_points[index + 1]) == num_points
                    and len(propagated_points[index - 1]) == num_points
                ):
                    # Both neighbors are full: freeze them as arrays and stop.
                    propagated_points[index + 1] = np.array(
                        propagated_points[index + 1]
                    )
                    propagated_points[index - 1] = np.array(
                        propagated_points[index - 1]
                    )
                    flag = True
                    break
            if flag:
                break
def _collect_statistics(
    cluster_centers,
    cluster_labels,
    propagated_information,
    extra_information,
    inner,
    outer,
    num_clusters,
):
    """Aggregate propagation records into per-surface, per-cluster statistics.

    Builds ``statistics_from_propagation[surface][cluster]`` as a list of
    ``(destination_surface, destination_cluster)`` tuples, filled first from
    the regular propagation records and then from the extra (overflow)
    records, whose landing points must be assigned to clusters by distance.
    """
    num_surfaces = inner + outer + 3
    statistics_from_propagation = [
        [[] for _ in range(num_clusters)] for _ in range(num_surfaces)
    ]
    _process_propagated_info(
        cluster_labels, statistics_from_propagation, propagated_information
    )
    _process_extra_info(
        cluster_centers, cluster_labels, statistics_from_propagation, extra_information
    )
    return statistics_from_propagation
def _process_extra_info(
    cluster_centers, cluster_labels, statistics_from_propagation, extra_information
):
    """Fold the overflow records into the per-cluster statistics, in place.

    Each record is ``[src_surface, src_point, dst_surface, *landing_point]``
    as a float array; the landing point gets a destination cluster by
    nearest-center assignment.
    """
    for info in extra_information:
        centers = cluster_centers[int(info[2])]
        point = info[3:]
        # The first three entries are indices stored as floats.
        info_temp = info[:3].astype(int)
        index = _assign_clusters(point, centers)
        statistics_from_propagation[info_temp[0]][
            cluster_labels[info_temp[0]][info_temp[1]]
        ].append((info_temp[2], index))
def _assign_clusters(point, centers):
distances = np.linalg.norm(point - centers, ord=2, axis=1)
index = np.argmin(distances)
return index
def _process_propagated_info(
cluster_labels, statistics_from_propagation, propagated_information
):
for info in propagated_information:
statistics_from_propagation[info[0]][cluster_labels[info[0]][info[1]]].append(
(info[2], cluster_labels[info[2]][info[3]])
)
def _additional_simulations_for_transition_probabilities(
    target,
    radiuses,
    cluster_centers,
    cluster_labels,
    propagated_points,
    statistics_from_propagation,
    inner,
    outer,
    num_clusters,
    num_trials,
    time_step,
    use_parallel,
    n_split,
):
    """Run extra simulations for every interior bin, then build the matrices.

    Mutates (a copy of) ``statistics_from_propagation`` with the outcomes of
    ``num_trials`` extra walks per (surface, cluster) bin and converts the
    accumulated tuples into forward/backward transition probability matrices.
    """
    surfaces = _get_surfaces(radiuses, inner, outer)
    num_surfaces = len(surfaces)
    if use_parallel:
        # Wrap the inner lists in manager proxies so worker processes can
        # append to them.
        manager = mp.Manager()
        statistics_from_propagation = [
            [manager.list(level3) for level3 in level2]
            for level2 in statistics_from_propagation
        ]
    print('Doing additional simulations.')
    # Do more simulations and update statistics_from_propagation
    # (surfaces 0 and num_surfaces - 1 are absorbing, so they are skipped).
    for ii in range(1, num_surfaces - 1):
        for jj in range(num_clusters):
            print('Doing simulations for surface {}, cluster {}.'.format(ii, jj))
            _do_additional_simulations(
                target,
                radiuses,
                ii,
                jj,
                cluster_centers,
                cluster_labels,
                propagated_points,
                statistics_from_propagation,
                inner,
                outer,
                num_trials,
                time_step,
                use_parallel,
                n_split,
            )
    if use_parallel:
        # Convert the manager proxies back to plain lists.
        for ii in range(len(statistics_from_propagation)):
            statistics_from_propagation[ii] = [
                list(level3) for level3 in statistics_from_propagation[ii]
            ]
    # Use statistics_from_propagation to calculate forward and backward probabilities
    forward_probabilities, backward_probabilities = _process_statistics_from_propagation(
        statistics_from_propagation, num_clusters
    )
    return forward_probabilities, backward_probabilities
def _do_additional_simulations(
    target,
    radiuses,
    surface_index,
    cluster_index,
    cluster_centers,
    cluster_labels,
    propagated_points,
    statistics_from_propagation,
    inner,
    outer,
    num_trials,
    time_step,
    use_parallel,
    n_split,
):
    """Run ``num_trials`` extra walks for one (surface, cluster) bin.

    Starting points are resampled (with replacement) from the bin's existing
    points; each walk is diffused to a neighbor surface and the resulting
    ``(neighbor_surface, neighbor_cluster)`` tuple is appended to
    ``statistics_from_propagation[surface_index][cluster_index]`` in place.
    Runs either serially or fanned out over ``n_split`` worker processes.
    """
    surfaces = _get_surfaces(radiuses, inner, outer)
    cluster_points_indices = np.flatnonzero(
        cluster_labels[surface_index] == cluster_index
    )
    cluster_size = cluster_points_indices.size
    random_indices = np.random.randint(0, cluster_size, size=(num_trials,))
    initial_locations = propagated_points[surface_index][
        cluster_points_indices[random_indices]
    ]
    boundary_radiuses = np.array(
        [surfaces[surface_index + 1], surfaces[surface_index - 1]]
    )
    if use_parallel:
        n_locations = initial_locations.shape[0]

        def worker(indices, q):
            # Each worker handles one slice of the trials; appends go to the
            # shared manager-backed list, and each finished trial posts a
            # token on q for the progress listener.
            for index in indices:
                initial_location = initial_locations[index]
                previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
                    initial_location, target, boundary_radiuses, time_step, 1
                )
                if target_flag:
                    indicator = 1
                else:
                    indicator = -1
                final_point = nestimate._interpolate(
                    previous_location,
                    current_location,
                    target.center,
                    surfaces[surface_index + indicator],
                )
                centers = cluster_centers[surface_index + indicator]
                # NOTE: rebinds the loop variable ``index`` — harmless here
                # since it is last use in this iteration.
                index = _assign_clusters(final_point, centers)
                statistics_from_propagation[surface_index][cluster_index].append(
                    (surface_index + indicator, index)
                )
                q.put(1)

        process_list = []
        q = mp.Queue()

        def listener(q):
            # Drains progress tokens until the None sentinel arrives.
            with tqdm(total=n_locations) as pbar:
                for item in iter(q.get, None):
                    pbar.update()

        listener_process = mp.Process(target=listener, args=(q,))
        listener_process.start()
        # kfold_split partitions range(n_locations) into n_split chunks —
        # defined elsewhere in this package (not visible in this file chunk).
        for indices in kfold_split(n_locations, n_split):
            process = mp.Process(target=worker, args=(indices, q))
            process_list.append(process)
            process.start()
        for process in process_list:
            process.join()
        q.put(None)
        listener_process.join()
    else:
        # Serial fallback: same per-trial logic as the worker above.
        for initial_location in tqdm(initial_locations):
            previous_location, current_location, target_flag = nestimate.advance_within_concentric_spheres(
                initial_location, target, boundary_radiuses, time_step, 1
            )
            if target_flag:
                indicator = 1
            else:
                indicator = -1
            final_point = nestimate._interpolate(
                previous_location,
                current_location,
                target.center,
                surfaces[surface_index + indicator],
            )
            centers = cluster_centers[surface_index + indicator]
            index = _assign_clusters(final_point, centers)
            statistics_from_propagation[surface_index][cluster_index].append(
                (surface_index + indicator, index)
            )
def _process_statistics_from_propagation(statistics_from_propagation, num_clusters):
num_surfaces = len(statistics_from_propagation)
forward_probabilities = [
np.zeros((num_clusters, num_clusters), dtype=float)
for _ in range(num_surfaces - 1)
]
backward_probabilities = [
np.zeros((num_clusters, num_clusters), dtype=float)
for _ in range(num_surfaces - 1)
]
for ii in range(1, num_surfaces - 1):
for jj in range(num_clusters):
statistics_temp = np.array(statistics_from_propagation[ii][jj])
forward_transitions = statistics_temp[statistics_temp[:, 0] == ii + 1, 1]
backward_transitions = statistics_temp[statistics_temp[:, 0] == ii - 1, 1]
forward_frequencies = np.bincount(
forward_transitions, minlength=num_clusters
)
backward_frequencies = np.bincount(
backward_transitions, minlength=num_clusters
)
assert (
forward_frequencies.size == num_clusters
and backward_frequencies.size == num_clusters
), "Frequencies not right. ff : {}, bf: {}, nc: {}, ft: {}, bt: {}".format(
forward_frequencies.size,
backward_frequencies.size,
num_clusters,
forward_transitions,
backward_transitions,
)
total_transitions = float(
np.sum(forward_frequencies) + np.sum(backward_frequencies)
)
assert (
total_transitions == statistics_temp.shape[0]
), '#transitions: {}, forward_frequencies: {}, backward_frequencies: {}, sum: {}'.format(
total_transitions, forward_frequencies, backward_frequencies
)
forward_frequencies = forward_frequencies.astype(float)
backward_frequencies = backward_frequencies.astype(float)
forward_probabilities[ii][jj, :] = forward_frequencies / total_transitions
backward_probabilities[ii][jj, :] = backward_frequencies / total_transitions
return forward_probabilities, backward_probabilities
def _get_data_driven_binning_hitting_probability(
forward_probabilities, backward_probabilities, inner, outer, num_clusters
):
num_surfaces = inner + outer + 3
middle_index = outer + 1
un = np.ones((num_clusters,))
u0 = np.zeros((num_clusters,))
Q_matrices = [[] for _ in range(num_surfaces)]
for jj in range(1, num_surfaces - 1):
try:
inverse_forward = np.linalg.inv(forward_probabilities[jj])
except Exception:
epsilon = 1e-3 * np.min(
forward_probabilities[jj][forward_probabilities[jj] > 0]
)
temp_sum = np.sum(forward_probabilities[jj], axis=1, keepdims=True)
forward_probabilities[jj] = forward_probabilities[jj] + epsilon * np.eye(
num_clusters
)
forward_probabilities[jj] = (
temp_sum
* forward_probabilities[jj]
/ np.sum(forward_probabilities[jj], axis=1, keepdims=True)
)
assert np.alltrue(np.sum(forward_probabilities[jj], axis=1) == temp_sum)
inverse_forward = np.linalg.inv(forward_probabilities[jj])
matrix_A = inverse_forward
matrix_B = -np.dot(inverse_forward, backward_probabilities[jj])
matrix_C = np.eye(num_clusters)
matrix_D = np.zeros((num_clusters, num_clusters))
matrix_temp = np.concatenate(
(
np.concatenate((matrix_A, matrix_B), axis=1),
np.concatenate((matrix_C, matrix_D), axis=1),
),
axis=0,
)
Q_matrices[jj] = matrix_temp
Q_product = np.eye(2 * num_clusters)
for jj in range(1, num_surfaces - 1):
Q_product = np.dot(Q_matrices[jj], Q_product)
if jj == middle_index - 1:
Q_middle = Q_product
u1 = np.linalg.solve(
Q_product[:num_clusters, :num_clusters],
un - np.dot(Q_product[:num_clusters, num_clusters:], u0),
)
if num_surfaces == 3:
probability = u1
else:
temp = np.dot(Q_middle, np.concatenate((u1, u0)))
probability = temp[:num_clusters]
return probability
def kfold_split(n_locations, n_fold):
    """Split ``range(n_locations)`` into ``n_fold`` contiguous index ranges.

    The first ``n_fold - 1`` folds each contain ``n_locations // n_fold``
    indices; the last fold absorbs the remainder.  Uses integer floor division
    directly instead of the original ``int(np.floor(...))`` float round-trip.

    Returns a list of ``range`` objects.
    """
    fold_length = n_locations // n_fold
    folds = [
        range(ii * fold_length, (ii + 1) * fold_length) for ii in range(n_fold - 1)
    ]
    # Last fold runs to the end, picking up any remainder.
    folds.append(range((n_fold - 1) * fold_length, n_locations))
    return folds
| 22,543
| 32.201767
| 118
|
py
|
entropic_barrier
|
entropic_barrier-master/golf_course/estimate/hitprob.py
|
import functools
import multiprocessing as mp
import timeit
import numpy as np
import golf_course.estimate.numba as nestimate
import joblib
from golf_course.utils import sample_uniform_initial_location
from tqdm import tqdm
def get_simple_hitprob_parallelize(
    centers, target_radiuses, time_step, initial_location_list, n_simulations
):
    """Estimate hitting probabilities for flat (potential-free) targets.

    Parameters
    ----------
    centers : (n_targets, n_dim) array of target centers.
    target_radiuses : (n_targets,) array of target radiuses.
    time_step : float step size passed to the propagator.
    initial_location_list : sequence of (n_dim,) starting points.
    n_simulations : trajectories per initial location.  When 1, work is
        parallelized over initial locations with joblib; otherwise over
        simulations for each initial location in turn with multiprocessing.

    Returns
    -------
    hitting_prob_list : (n_initial_locations, n_targets) array.
    time_taken : total wall time (scalar) when ``n_simulations == 1``,
        otherwise per-initial-location wall times (array).
    """
    n_initial_locations = len(initial_location_list)
    # Hoisted once; the original recomputed this inside the per-location loop.
    n_targets = centers.shape[0]
    worker = functools.partial(
        nestimate.advance_flat_regions,
        centers=centers,
        radiuses=target_radiuses,
        time_step=time_step,
    )
    hitting_prob_list = np.zeros((n_initial_locations, n_targets))
    if n_simulations == 1:
        # Parallelize over initial locations; each run is a single indicator.
        start_time = timeit.default_timer()
        output = joblib.Parallel(n_jobs=joblib.cpu_count())(
            joblib.delayed(worker)(location) for location in tqdm(initial_location_list)
        )
        for ii, (_, _, index) in enumerate(output):
            hitting_prob_list[ii][index] = 1
        end_time = timeit.default_timer()
        time_taken = end_time - start_time
    else:
        # Parallelize over simulations for each initial location in turn.
        time_taken = np.zeros(n_initial_locations)
        for ii in range(n_initial_locations):
            initial_location = initial_location_list[ii]
            print('Working on initial location: {}'.format(initial_location))
            start_time = timeit.default_timer()
            indices = []
            with mp.Pool(processes=mp.cpu_count()) as p:
                for _, _, index in tqdm(
                    p.imap_unordered(
                        worker, np.tile(initial_location, (n_simulations, 1))
                    )
                ):
                    indices.append(index)
            indices = np.array(indices)
            hitting_prob = np.zeros(n_targets)
            for tt in range(n_targets):
                hitting_prob[tt] = np.sum(indices == tt) / n_simulations
            end_time = timeit.default_timer()
            print(
                'Run {} finished. Hitting probability {}, time taken {}'.format(
                    ii, hitting_prob, end_time - start_time
                )
            )
            hitting_prob_list[ii] = hitting_prob
            time_taken[ii] = end_time - start_time
    return hitting_prob_list, time_taken
def get_simple_hitprob(
    centers,
    target_radiuses,
    outer_radiuses,
    time_step,
    n_initial_locations,
    n_simulations,
):
    """Sample initial points, validate the geometry, and estimate hitting
    probabilities for flat targets.

    Also computes the analytical expectation from the (n_dim - 2)-power law
    visible below, for comparison against the simulation estimates.

    Returns ``(initial_location_list, hitting_prob_list, time_taken,
    expected_hitting_prob)``.  Validation uses ``assert`` (as in the rest of
    this module), so it is skipped under ``python -O``.
    """
    # Process parameters
    n_dim = centers.shape[1]
    initial_location_list = np.zeros((n_initial_locations, n_dim))
    for ii in tqdm(range(n_initial_locations)):
        initial_location_list[ii] = sample_uniform_initial_location(
            centers, outer_radiuses, 1.0
        )
    # (The original had a no-op ``initial_location_list = initial_location_list``
    # here; removed.)
    n_targets = centers.shape[0]
    # Ensure parameters are valid
    assert target_radiuses.ndim == 1
    assert target_radiuses.size == n_targets
    assert outer_radiuses.ndim == 1
    assert outer_radiuses.size == n_targets
    assert np.sum(target_radiuses > 0) == n_targets
    for ii in range(n_targets):
        # Every target ball must lie inside the unit ball.
        assert np.linalg.norm(centers[ii]) + outer_radiuses[ii] < 1
        assert outer_radiuses[ii] > target_radiuses[ii]
    for ii in range(n_targets - 1):
        for jj in range(ii + 1, n_targets):
            # Outer shells of distinct targets must not overlap.
            assert (
                np.linalg.norm(centers[ii] - centers[jj])
                > outer_radiuses[ii] + outer_radiuses[jj]
            )
    expected_hitting_prob = (
        1 / (target_radiuses ** (2 - n_dim) - outer_radiuses ** (2 - n_dim))
    ) / np.sum(1 / (target_radiuses ** (2 - n_dim) - outer_radiuses ** (2 - n_dim)))
    hitting_prob_list, time_taken = get_simple_hitprob_parallelize(
        centers, target_radiuses, time_step, initial_location_list, n_simulations
    )
    return initial_location_list, hitting_prob_list, time_taken, expected_hitting_prob
def get_nontrivial_hitprob(toy_model, n_initial_locations, n_simulations):
    """Estimate hitting probabilities for ``toy_model``'s (non-flat) targets.

    Samples ``n_initial_locations`` uniform starting points outside all target
    outer shells, then runs ``toy_model.do_naive_simulation`` from each.  When
    ``n_simulations == 1`` the runs are parallelized over initial locations
    with joblib; otherwise ``n_simulations`` trajectories are run per initial
    location with a multiprocessing pool.

    Returns ``(initial_location_list, hitting_prob_list, time_taken)`` where
    ``time_taken`` is a scalar in the first mode and a per-location array in
    the second.
    """
    n_targets = len(toy_model.target_list)
    n_dim = toy_model.target_list[0].center.size
    initial_location_list = np.zeros((n_initial_locations, n_dim))
    centers = np.zeros((n_targets, n_dim))
    outer_radiuses = np.zeros(n_targets)
    for ii in range(n_targets):
        centers[ii] = toy_model.target_list[ii].center
        # radiuses[2] is the outermost of each target's three radiuses.
        outer_radiuses[ii] = toy_model.target_list[ii].radiuses[2]
    for ii in tqdm(range(n_initial_locations)):
        initial_location_list[ii] = sample_uniform_initial_location(
            centers, outer_radiuses, 1.0
        )
    # NOTE(review): n_targets is recomputed here; it is identical to the value
    # computed above.
    n_targets = len(toy_model.target_list)
    hitting_prob_list = np.zeros((n_initial_locations, n_targets))
    if n_simulations == 1:
        # Parallelize over initial locations
        start_time = timeit.default_timer()
        output = joblib.Parallel(n_jobs=joblib.cpu_count())(
            joblib.delayed(toy_model.do_naive_simulation)(location)
            for location in tqdm(initial_location_list)
        )
        for ii, index in enumerate(output):
            # Single run per location: a one-hot indicator of the hit target.
            hitting_prob_list[ii][index] = 1
        end_time = timeit.default_timer()
        time_taken = end_time - start_time
    else:
        # Parallelize over simulations
        time_taken = np.zeros(n_initial_locations)
        for run_idx in range(n_initial_locations):
            initial_location = initial_location_list[run_idx]
            print('Working on initial location: {}'.format(initial_location))
            indices = []
            with mp.Pool(processes=mp.cpu_count()) as p:
                for index in tqdm(
                    p.imap_unordered(
                        toy_model.do_naive_simulation,
                        np.tile(initial_location, (n_simulations, 1)),
                    )
                ):
                    indices.append(index)
            indices = np.array(indices)
            hitting_prob = np.zeros(n_targets)
            for target_idx in range(n_targets):
                hitting_prob[target_idx] = np.sum(indices == target_idx) / n_simulations
            end_time = timeit.default_timer()
            # NOTE(review): start_time is not reset inside this loop, so the
            # printed/recorded per-run time is measured from before the loop
            # only if a start_time exists; here it relies on timing values set
            # outside — confirm intent against the single-simulation branch.
            print(
                'Run {} finished. Hitting probability {}, time taken {}'.format(
                    run_idx, hitting_prob, end_time - start_time
                )
            )
            hitting_prob_list[run_idx] = hitting_prob
            time_taken[run_idx] = end_time - start_time
    return initial_location_list, hitting_prob_list, time_taken
| 6,584
| 37.063584
| 88
|
py
|
entropic_barrier
|
entropic_barrier-master/scripts/simulation_based_hitprob.py
|
import os
import pickle
import tempfile
import numpy as np
import sacred
from golf_course.core.model import ToyModel
from golf_course.estimate.hitprob import get_nontrivial_hitprob
from golf_course.utils import (DEFAULT_RELATIVE_SCALE, load_model_params,
sample_random_locations)
from sacred.observers import FileStorageObserver
# Sacred experiment setup: every run (config, artifacts, stdout) is recorded
# under log_folder by a FileStorageObserver.
log_folder = os.path.expanduser('~/logs/diffusion/simulation_based_hitprob')
ex = sacred.Experiment('simulation_based_hitprob')
ex.observers.append(FileStorageObserver.create(log_folder))
def generate_model_params(
    centers, radiuses, n_bumps=10, relative_scale=DEFAULT_RELATIVE_SCALE
):
    """Build the default two-target model specification.

    Target 0 is a ``random_well`` and target 1 a ``random_crater``; each gets
    ``n_bumps`` Gaussian bumps at random locations within its two inner
    radiuses.  Each target's energy ``multiplier`` is set to
    ``relative_scale`` times the target's total energy range (depth, plus
    height for craters).

    Returns ``(time_step, target_param_list)``.
    """
    centers = np.array(centers)
    radiuses = np.array(radiuses)
    time_step = 1e-05
    target_param_list = [
        {
            "center": centers[0],
            "radiuses": radiuses[0],
            "energy_type": "random_well",
            "energy_params": {
                "depth": 10.0,
                "locations": sample_random_locations(
                    centers[0], radiuses[0][:2], n_bumps
                ),
                "standard_deviations": 0.01 * np.ones(n_bumps),
            },
        },
        {
            "center": centers[1],
            "radiuses": radiuses[1],
            "energy_type": "random_crater",
            "energy_params": {
                "depth": 6.0,
                "height": 1.0,
                "locations": sample_random_locations(
                    centers[1], radiuses[1][:2], n_bumps
                ),
                "standard_deviations": 0.01 * np.ones(n_bumps),
            },
        },
    ]
    for target_param in target_param_list:
        if target_param['energy_type'] == 'random_well':
            energy_range = target_param['energy_params']['depth']
        elif target_param['energy_type'] == 'random_crater':
            energy_range = (
                target_param['energy_params']['depth']
                + target_param['energy_params']['height']
            )
        else:
            # Fix: the original used ``assert False``, which is silently
            # skipped under ``python -O``; raise explicitly instead.
            raise ValueError(
                'Wrong energy type: {}'.format(target_param['energy_type'])
            )
        target_param['energy_params']['multiplier'] = float(
            relative_scale * energy_range
        )
    return time_step, target_param_list
@ex.config
def config():
    # Sacred config: every local below becomes an experiment parameter that
    # can be overridden via config_updates (see the ex.run calls at the end).
    n_initial_locations = 100
    n_simulations = 2000
    centers = [[0.5, 0.6, 0, 0, 0], [-0.7, 0, 0, 0, 0]]
    radiuses = [[0.2, 0.4, 0.5], [0.4, 0.45, 0.5]]
    model_params_fname = None
    # Either generate a fresh model or reload a previously pickled one.
    if model_params_fname is None:
        time_step, target_param_list = generate_model_params(centers, radiuses)
    else:
        time_step, target_param_list = load_model_params(model_params_fname)
@ex.main
def run(n_initial_locations, n_simulations, time_step, target_param_list):
    """Run the direct-simulation hitting-probability experiment and attach
    the model parameters and results as Sacred artifacts."""
    # Artifacts are written to a temp dir first; ex.add_artifact copies them
    # into the observer's run folder.
    temp_folder = tempfile.TemporaryDirectory()
    folder_name = temp_folder.name
    model_params = {'time_step': time_step, 'target_param_list': target_param_list}
    model_params_fname = os.path.join(folder_name, 'model_params.pkl')
    with open(model_params_fname, 'wb') as f:
        pickle.dump(model_params, f)
    ex.add_artifact(model_params_fname)
    model = ToyModel(time_step, target_param_list)
    initial_location_list, hitting_prob_list, time_taken = get_nontrivial_hitprob(
        model, n_initial_locations, n_simulations
    )
    results = {
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'initial_location_list': initial_location_list,
        'hitting_prob_list': hitting_prob_list,
        'time_taken': time_taken,
    }
    results_fname = os.path.join(folder_name, 'results.pkl')
    with open(results_fname, 'wb') as f:
        pickle.dump(results, f)
    ex.add_artifact(results_fname)
    temp_folder.cleanup()
# Script body: launch one Sacred run per target-size scenario.
# Centers are normalized onto the unit sphere before use.
centers = np.array([[0.5, 0.6, 0, 0, 0], [-0.7, 0, 0, 0, 0]])
centers /= np.linalg.norm(centers, axis=1, keepdims=True)
centers = centers.tolist()
# Large target
radiuses = [[0.45, 0.475, 0.5], [0.45, 0.475, 0.5]]
ex.run(config_updates={'centers': centers, 'radiuses': radiuses})
# Med target
radiuses = [[0.1, 0.15, 0.5], [0.2, 0.25, 0.5]]
ex.run(config_updates={'centers': centers, 'radiuses': radiuses})
# Small target
radiuses = [[0.02, 0.05, 0.5], [0.04, 0.075, 0.5]]
ex.run(config_updates={'centers': centers, 'radiuses': radiuses})
# Accurate estimate of mean hitting probabilities
# (many single-trajectory runs, reusing the models pickled by the runs above).
n_initial_locations = 200000
n_simulations = 1
# Large target
model_params_fname = os.path.expanduser(
    '~/entropic_barrier/results/simulation_based_hitprob/large_target/model_params.pkl'
)
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'model_params_fname': model_params_fname,
    }
)
# Med target
model_params_fname = os.path.expanduser(
    '~/entropic_barrier/results/simulation_based_hitprob/med_target/model_params.pkl'
)
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'model_params_fname': model_params_fname,
    }
)
# Small target
model_params_fname = os.path.expanduser(
    '~/entropic_barrier/results/simulation_based_hitprob/small_target/model_params.pkl'
)
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'model_params_fname': model_params_fname,
    }
)
| 5,335
| 32.35
| 87
|
py
|
entropic_barrier
|
entropic_barrier-master/scripts/capacity_based_hitprob.py
|
import os
import pickle
import tempfile
import timeit
import sacred
from golf_course.core.model import ToyModel
from golf_course.utils import load_model_params
from sacred.observers import FileStorageObserver
# Sacred experiment setup: runs are recorded under log_folder by a
# FileStorageObserver.
log_folder = os.path.expanduser('~/logs/diffusion/capacity_based_hitprob')
ex = sacred.Experiment('capacity_based_hitprob')
ex.observers.append(FileStorageObserver.create(log_folder))
@ex.config
def config():
    # Sacred config: locals become overridable experiment parameters.
    # 'case' selects which simulation-based result folder to compare against.
    case = 'med'
    time_step = 1e-6
    # One entry per target in the *_list parameters below.
    inner_list = [1, 1]
    outer_list = [1, 1]
    num_points_list = [5000, 5000]
    num_clusters_list = [10, 10]
    num_trials_list = [5000, 5000]
    simulation_log_folder = os.path.expanduser(
        '~/entropic_barrier/results/simulation_based_hitprob/{}_target'.format(case)
    )
    capacity_estimation_param_list = [
        {
            "inner": inner_list[0],
            "outer": outer_list[0],
            "num_points": num_points_list[0],
            "num_clusters": num_clusters_list[0],
            "num_trials": num_trials_list[0],
            "use_parallel": False,
            "n_split": 1,
            "use_analytical_gradients": True,
            "estimate_gradients": False,
            "n_surfaces_gradients_estimation": 10,
            "time_step": time_step,
        },
        {
            "inner": inner_list[1],
            "outer": outer_list[1],
            "num_points": num_points_list[1],
            "num_clusters": num_clusters_list[1],
            "num_trials": num_trials_list[1],
            "use_parallel": False,
            "n_split": 1,
            "use_analytical_gradients": True,
            "estimate_gradients": False,
            "n_surfaces_gradients_estimation": 10,
            "time_step": time_step,
        },
    ]
@ex.main
def run(simulation_log_folder, time_step, capacity_estimation_param_list):
    """Estimate capacity-based hitting probabilities for the model pickled by
    a previous simulation run, and record both as Sacred artifacts."""
    temp_folder = tempfile.TemporaryDirectory()
    folder_name = temp_folder.name
    # Construct model
    model_params_fname = os.path.join(simulation_log_folder, 'model_params.pkl')
    ex.add_artifact(model_params_fname)
    _, target_param_list = load_model_params(model_params_fname)
    assert len(capacity_estimation_param_list) == len(target_param_list)
    # Make compatible with existing results
    # (drop any keys that are now supplied via capacity_estimation_param_list).
    for ii in range(len(target_param_list)):
        target_param_list[ii] = {
            key: target_param_list[ii][key]
            for key in target_param_list[ii]
            if key not in capacity_estimation_param_list[ii]
        }
    model = ToyModel(time_step, target_param_list)
    # Save simulation results
    results_fname = os.path.join(simulation_log_folder, 'results.pkl')
    ex.add_artifact(results_fname, 'simulation_results.pkl')
    # Estimate capacity-based hitting probability
    start = timeit.default_timer()
    hitting_prob = model.estimate_hitting_prob(capacity_estimation_param_list)
    end = timeit.default_timer()
    time_taken = end - start
    results = {'hitting_prob': hitting_prob, 'time_taken': time_taken}
    print(results)
    results_fname = os.path.join(folder_name, 'capacity_results.pkl')
    with open(results_fname, 'wb') as f:
        pickle.dump(results, f)
    ex.add_artifact(results_fname)
    temp_folder.cleanup()
# Script body: one run per target-size case, each with its own time step and
# coarser binning parameters than the config defaults.
ex.run(
    config_updates={
        'case': 'large',
        'time_step': 1e-7,
        'outer_list': [5, 5],
        'num_points_list': [1000, 1000],
        'num_clusters_list': [3, 3],
        'num_trials_list': [1000, 1000],
    }
)
ex.run(
    config_updates={
        'case': 'med',
        'time_step': 1e-5,
        'outer_list': [7, 7],
        'num_points_list': [1000, 1000],
        'num_clusters_list': [3, 3],
        'num_trials_list': [1000, 1000],
    }
)
ex.run(
    config_updates={
        'case': 'small',
        'time_step': 1e-6,
        'outer_list': [7, 7],
        'num_points_list': [1000, 1000],
        'num_clusters_list': [3, 3],
        'num_trials_list': [1000, 1000],
    }
)
| 3,917
| 30.596774
| 84
|
py
|
entropic_barrier
|
entropic_barrier-master/scripts/sanity_checks.py
|
import os
import pickle
import tempfile
from pprint import pprint
import numpy as np
import sacred
from golf_course.core.target import Target
from golf_course.estimate.capacity import estimate_capacity
from golf_course.estimate.hitprob import get_simple_hitprob
from sacred.observers import FileStorageObserver
# Sacred experiment setup: runs are recorded under log_folder by a
# FileStorageObserver.
log_folder = os.path.expanduser('~/logs/diffusion/sanity_checks')
ex = sacred.Experiment('sanity_checks')
ex.observers.append(FileStorageObserver.create(log_folder))
@ex.config
def config():
    # Sacred config: locals become overridable experiment parameters.
    # Flags selecting which sanity checks to perform in run().
    do_direct_simulations = True
    do_capacity_estimation = True
    do_gradients_estimation = False
    n_initial_locations = 100
    n_simulations = 2000
    time_step = 1e-5
    centers = [[0.5, 0.6, 0, 0, 0], [-0.7, 0, 0, 0, 0]]
    # Per-target [target, middle, outer] radiuses.
    radiuses = [[0.02, 0.05, 0.1], [0.04, 0.075, 0.15]]
    capacity_estimation_param = {
        'num_points': int(5e2),
        'time_step': 1e-06,
        'inner': 1,
        'outer': 1,
        'num_clusters': 5,
        'num_trials': int(5e3),
        'use_parallel': False,
        'n_split': 1,
        'use_analytical_gradients': True,
        'estimate_gradients': do_gradients_estimation,
        "n_surfaces_gradients_estimation": None,
    }
@ex.main
def run(
    do_direct_simulations,
    do_capacity_estimation,
    do_gradients_estimation,
    n_initial_locations,
    n_simulations,
    time_step,
    centers,
    radiuses,
    capacity_estimation_param,
):
    """Compare simulated and analytical quantities on flat targets.

    For flat (potential-free) targets the hitting probability, capacity, and
    gradients have closed forms, so the estimators can be checked against
    them.  Results are pprinted and pickled as a Sacred artifact.
    """
    centers = np.array(centers)
    radiuses = np.array(radiuses)
    if do_gradients_estimation:
        # Gradients come out of estimate_capacity, so capacity estimation is
        # required for gradient checks.
        assert do_capacity_estimation
    temp_folder = tempfile.TemporaryDirectory()
    folder_name = temp_folder.name
    results = {}
    if do_direct_simulations:
        # radiuses columns: [0] target radius, [2] outer radius.
        target_radiuses = np.array([radius[0] for radius in radiuses])
        outer_radiuses = np.array([radius[2] for radius in radiuses])
        initial_location_list, hitting_prob_list, time_taken, expected_hitting_prob = get_simple_hitprob(
            centers,
            target_radiuses,
            outer_radiuses,
            time_step,
            n_initial_locations,
            n_simulations,
        )
        results.update(
            {
                'initial_location_list': initial_location_list,
                'hitting_prob_list': hitting_prob_list,
                'time_taken': time_taken,
                'expected_hitting_prob': expected_hitting_prob,
            }
        )
    if do_capacity_estimation:
        # 'flat' energy type with empty params: no potential inside the shell.
        target_list = [
            Target(center, radius, 'flat', {})
            for center, radius in zip(centers, radiuses)
        ]
        expected_capacity = np.zeros(len(target_list))
        estimated_capacity = np.zeros(len(target_list))
        if do_gradients_estimation:
            expected_gradients = np.zeros(len(target_list))
            estimated_gradients = np.zeros(
                (len(target_list), capacity_estimation_param['num_clusters'])
            )
        for tt, target in enumerate(target_list):
            n_dim = target.center.size
            estimated_capacity[tt], gradients = estimate_capacity(
                target, **capacity_estimation_param
            )
            # Closed-form capacity of a flat spherical shell in n_dim dims.
            expected_capacity[tt] = (
                target.get_constant()
                * (n_dim - 2)
                / (
                    target.radiuses[0] ** (2 - n_dim)
                    - target.radiuses[2] ** (2 - n_dim)
                )
            )
            if do_gradients_estimation:
                estimated_gradients[tt] = gradients
                # Closed-form radial gradient on the middle surface.
                expected_gradients[tt] = (n_dim - 2) / (
                    target.radiuses[1] ** (n_dim - 1)
                    * (
                        target.radiuses[1] ** (2 - n_dim)
                        - target.radiuses[2] ** (2 - n_dim)
                    )
                )
        results.update(
            {
                'expected_capacity': expected_capacity,
                'estimate_capacity': estimated_capacity,
            }
        )
        if do_gradients_estimation:
            results.update(
                {
                    'expected_gradients': expected_gradients,
                    'estimated_gradients': estimated_gradients,
                }
            )
    pprint(results)
    results_fname = os.path.join(folder_name, 'results.pkl')
    with open(results_fname, 'wb') as f:
        pickle.dump(results, f)
    ex.add_artifact(results_fname)
    temp_folder.cleanup()
# Script body: first full sanity checks per target size, then high-accuracy
# simulation-only runs (200k single trajectories, capacity checks disabled).
# Large target
radiuses = [[0.15, 0.175, 0.2], [0.15, 0.175, 0.2]]
ex.run(config_updates={'radiuses': radiuses})
# Med target
radiuses = [[0.05, 0.1, 0.2], [0.075, 0.125, 0.2]]
ex.run(config_updates={'radiuses': radiuses})
# Small target
radiuses = [[0.02, 0.05, 0.2], [0.04, 0.075, 0.2]]
ex.run(config_updates={'radiuses': radiuses})
# Accurate estimate of mean hitting probabilities
n_initial_locations = 200000
n_simulations = 1
# Large target
radiuses = [[0.15, 0.175, 0.2], [0.15, 0.175, 0.2]]
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'radiuses': radiuses,
        'do_capacity_estimation': False,
    }
)
# Med target
radiuses = [[0.05, 0.1, 0.2], [0.075, 0.125, 0.2]]
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'radiuses': radiuses,
        'do_capacity_estimation': False,
    }
)
# Small target
radiuses = [[0.02, 0.05, 0.2], [0.04, 0.075, 0.2]]
ex.run(
    config_updates={
        'n_initial_locations': n_initial_locations,
        'n_simulations': n_simulations,
        'radiuses': radiuses,
        'do_capacity_estimation': False,
    }
)
| 5,646
| 29.524324
| 105
|
py
|
TTS
|
TTS-master/setup.py
|
#!/usr/bin/env python
import argparse
import os
import shutil
import subprocess
import sys
import numpy
import setuptools.command.build_py
import setuptools.command.develop
from setuptools import find_packages, setup
from distutils.extension import Extension
from Cython.Build import cythonize
# parameters for wheeling server.
# Custom CLI flags consumed by this setup.py itself (not by setuptools): they
# allow embedding a trained model inside the built wheel.
parser = argparse.ArgumentParser(add_help=False, allow_abbrev=False)
parser.add_argument('--checkpoint',
                    type=str,
                    help='Path to checkpoint file to embed in wheel.')
parser.add_argument('--model_config',
                    type=str,
                    help='Path to model configuration file to embed in wheel.')
args, unknown_args = parser.parse_known_args()
# Remove our arguments from argv so that setuptools doesn't see them
sys.argv = [sys.argv[0]] + unknown_args
version = '0.0.9.2'
# Directory containing this setup.py; used for version.py and requirements.txt.
cwd = os.path.dirname(os.path.abspath(__file__))
# Handle Cython code
# def find_pyx(path='.'):
# pyx_files = []
# for root, _, filenames in os.walk(path):
# for fname in filenames:
# if fname.endswith('.pyx'):
# pyx_files.append(os.path.join(root, fname))
# return pyx_files
# def find_cython_extensions(path="."):
# exts = cythonize(find_pyx(path), language_level=3)
# for ext in exts:
# ext.include_dirs = [numpy.get_include()]
# return exts
class build_py(setuptools.command.build_py.build_py):  # pylint: disable=too-many-ancestors
    """``build_py`` command that writes ``version.py`` before building."""

    def run(self):
        self.create_version_file()
        super().run()

    @staticmethod
    def create_version_file():
        """Write the package version into ``version.py`` next to setup.py."""
        print('-- Building version ' + version)
        target_path = os.path.join(cwd, 'version.py')
        with open(target_path, 'w') as handle:
            handle.write("__version__ = '{}'\n".format(version))
class develop(setuptools.command.develop.develop):
    """``develop`` command that also writes ``version.py`` first."""

    def run(self):
        build_py.create_version_file()
        super().run()
# The documentation for this feature is in server/README.md
package_data = ['TTS/server/templates/*']
# When building a wheel with both --checkpoint and --model_config given, copy
# the model files into the package tree so they ship inside the wheel.
if 'bdist_wheel' in unknown_args and args.checkpoint and args.model_config:
    print('Embedding model in wheel file...')
    model_dir = os.path.join('TTS', 'server', 'model')
    tts_dir = os.path.join(model_dir, 'tts')
    os.makedirs(tts_dir, exist_ok=True)
    embedded_checkpoint_path = os.path.join(tts_dir, 'checkpoint.pth.tar')
    shutil.copy(args.checkpoint, embedded_checkpoint_path)
    embedded_config_path = os.path.join(tts_dir, 'config.json')
    shutil.copy(args.model_config, embedded_config_path)
    package_data.extend([embedded_checkpoint_path, embedded_config_path])
def pip_install(package_name):
    """Install ``package_name`` into the current interpreter via pip.

    Returns the pip process exit code (0 on success); the original discarded
    ``subprocess.call``'s return value, so callers could not detect failures.
    """
    return subprocess.call([sys.executable, '-m', 'pip', 'install', package_name])
# Read install requirements with a context manager so the file handle is
# closed deterministically (the original leaked the open requirements.txt
# handle).
with open(os.path.join(cwd, 'requirements.txt'), 'r') as requirements_file:
    requirements = requirements_file.readlines()
with open('README.md', "r", encoding="utf-8") as readme_file:
    README = readme_file.read()
# Cython extension for the monotonic-align routine used by Glow-TTS.
exts = [Extension(name='TTS.tts.layers.glow_tts.monotonic_align.core',
                  sources=["TTS/tts/layers/glow_tts/monotonic_align/core.pyx"])]
setup(
    name='TTS',
    version=version,
    url='https://github.com/mozilla/TTS',
    author='Eren Gölge',
    author_email='egolge@mozilla.com',
    description='Text to Speech with Deep Learning',
    long_description=README,
    long_description_content_type="text/markdown",
    license='MPL-2.0',
    # cython
    include_dirs=numpy.get_include(),
    ext_modules=cythonize(exts, language_level=3),
    # ext_modules=find_cython_extensions(),
    # package
    include_package_data=True,
    packages=find_packages(include=['TTS*']),
    project_urls={
        'Documentation': 'https://github.com/mozilla/TTS/wiki',
        'Tracker': 'https://github.com/mozilla/TTS/issues',
        'Repository': 'https://github.com/mozilla/TTS',
        'Discussions': 'https://discourse.mozilla.org/c/tts',
    },
    cmdclass={
        'build_py': build_py,
        'develop': develop,
        # 'build_ext': build_ext
    },
    install_requires=requirements,
    python_requires='>=3.6.0, <3.9',
    entry_points={
        'console_scripts': [
            'tts=TTS.bin.synthesize:main',
            'tts-server = TTS.server.server:main'
        ]
    },
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        'Development Status :: 3 - Alpha',
        "Intended Audience :: Science/Research",
        "Intended Audience :: Developers",
        "Operating System :: POSIX :: Linux",
        'License :: OSI Approved :: Mozilla Public License 2.0 (MPL 2.0)',
        "Topic :: Software Development",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "Topic :: Multimedia :: Sound/Audio :: Speech",
        "Topic :: Multimedia :: Sound/Audio",
        "Topic :: Multimedia",
        "Topic :: Scientific/Engineering :: Artificial Intelligence"
    ],
    zip_safe=False
)
| 5,141
| 33.05298
| 91
|
py
|
TTS
|
TTS-master/TTS/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/speaker_encoder/losses.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# adapted from https://github.com/cvqluu/GE2E-Loss
class GE2ELoss(nn.Module):
    def __init__(self, init_w=10.0, init_b=-5.0, loss_method="softmax"):
        """
        Implementation of the Generalized End-to-End loss defined in https://arxiv.org/abs/1710.10467 [1]
        Accepts an input of size (N, M, D)
            where N is the number of speakers in the batch,
            M is the number of utterances per speaker,
            and D is the dimensionality of the embedding vector (e.g. d-vector)
        Args:
            - init_w (float): defines the initial value of w in Equation (5) of [1]
            - init_b (float): defines the initial value of b in Equation (5) of [1]
            - loss_method (str): "softmax" or "contrast" (Section 2.1 of [1])
        """
        super(GE2ELoss, self).__init__()
        # pylint: disable=E1102
        self.w = nn.Parameter(torch.tensor(init_w))
        # pylint: disable=E1102
        self.b = nn.Parameter(torch.tensor(init_b))
        self.loss_method = loss_method
        print(' > Initialised Generalized End-to-End loss')
        assert self.loss_method in ["softmax", "contrast"]
        if self.loss_method == "softmax":
            self.embed_loss = self.embed_loss_softmax
        if self.loss_method == "contrast":
            self.embed_loss = self.embed_loss_contrast

    # pylint: disable=R0201
    def calc_new_centroids(self, dvecs, centroids, spkr, utt):
        """
        Calculates the new centroids excluding the reference utterance
        """
        excl = torch.cat((dvecs[spkr, :utt], dvecs[spkr, utt + 1 :]))
        excl = torch.mean(excl, 0)
        new_centroids = []
        for i, centroid in enumerate(centroids):
            if i == spkr:
                new_centroids.append(excl)
            else:
                new_centroids.append(centroid)
        return torch.stack(new_centroids)

    def calc_cosine_sim(self, dvecs, centroids):
        """
        Make the cosine similarity matrix with dims (N,M,N)
        """
        cos_sim_matrix = []
        for spkr_idx, speaker in enumerate(dvecs):
            cs_row = []
            for utt_idx, utterance in enumerate(speaker):
                new_centroids = self.calc_new_centroids(
                    dvecs, centroids, spkr_idx, utt_idx
                )
                # vector based cosine similarity for speed;
                # clamped below at 1e-6 to keep the similarity positive.
                cs_row.append(
                    torch.clamp(
                        torch.mm(
                            utterance.unsqueeze(1).transpose(0, 1),
                            new_centroids.transpose(0, 1),
                        )
                        / (torch.norm(utterance) * torch.norm(new_centroids, dim=1)),
                        1e-6,
                    )
                )
            cs_row = torch.cat(cs_row, dim=0)
            cos_sim_matrix.append(cs_row)
        return torch.stack(cos_sim_matrix)

    # pylint: disable=R0201
    def embed_loss_softmax(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by taking softmax
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                L_row.append(-F.log_softmax(cos_sim_matrix[j, i], 0)[j])
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    # pylint: disable=R0201
    def embed_loss_contrast(self, dvecs, cos_sim_matrix):
        """
        Calculates the loss on each embedding $L(e_{ji})$ by contrast loss with closest centroid
        """
        N, M, _ = dvecs.shape
        L = []
        for j in range(N):
            L_row = []
            for i in range(M):
                centroids_sigmoids = torch.sigmoid(cos_sim_matrix[j, i])
                excl_centroids_sigmoids = torch.cat(
                    (centroids_sigmoids[:j], centroids_sigmoids[j + 1 :])
                )
                L_row.append(
                    1.0
                    - torch.sigmoid(cos_sim_matrix[j, i, j])
                    + torch.max(excl_centroids_sigmoids)
                )
            L_row = torch.stack(L_row)
            L.append(L_row)
        return torch.stack(L)

    def forward(self, dvecs):
        """
        Calculates the GE2E loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats)
        """
        centroids = torch.mean(dvecs, 1)
        cos_sim_matrix = self.calc_cosine_sim(dvecs, centroids)
        # Bug fix: the original called torch.clamp(self.w, 1e-6) and discarded
        # the result (torch.clamp is not in-place), so w was never constrained.
        # Clamp the parameter data in place to keep w positive, as required by
        # Eq. (5) of [1].
        self.w.data.clamp_(min=1e-6)
        cos_sim_matrix = self.w * cos_sim_matrix + self.b
        L = self.embed_loss(dvecs, cos_sim_matrix)
        return L.mean()
# adapted from https://github.com/clovaai/voxceleb_trainer/blob/master/loss/angleproto.py
class AngleProtoLoss(nn.Module):
    """
    Implementation of the Angular Prototypical loss defined in https://arxiv.org/abs/2003.11982
    Accepts an input of size (N, M, D)
        where N is the number of speakers in the batch,
        M is the number of utterances per speaker,
        and D is the dimensionality of the embedding vector
    Args:
        - init_w (float): defines the initial value of w
        - init_b (float): defines the initial value of b
    """

    def __init__(self, init_w=10.0, init_b=-5.0):
        super(AngleProtoLoss, self).__init__()
        # pylint: disable=E1102
        self.w = nn.Parameter(torch.tensor(init_w))
        # pylint: disable=E1102
        self.b = nn.Parameter(torch.tensor(init_b))
        self.criterion = torch.nn.CrossEntropyLoss()
        print(' > Initialised Angular Prototypical loss')

    def forward(self, x):
        """
        Calculates the AngleProto loss for an input of dimensions (num_speakers, num_utts_per_speaker, dvec_feats)
        """
        # Anchor: mean of all-but-first utterances; positive: first utterance.
        out_anchor = torch.mean(x[:, 1:, :], 1)
        out_positive = x[:, 0, :]
        num_speakers = out_anchor.size()[0]
        cos_sim_matrix = F.cosine_similarity(out_positive.unsqueeze(-1).expand(-1, -1, num_speakers), out_anchor.unsqueeze(-1).expand(-1, -1, num_speakers).transpose(0, 2))
        # Bug fix: the original called torch.clamp(self.w, 1e-6) and discarded
        # the result (torch.clamp is not in-place), so w was never constrained.
        # Clamp the parameter data in place to keep the scale positive.
        self.w.data.clamp_(min=1e-6)
        cos_sim_matrix = cos_sim_matrix * self.w + self.b
        label = torch.from_numpy(np.asarray(range(0, num_speakers))).to(cos_sim_matrix.device)
        L = self.criterion(cos_sim_matrix, label)
        return L
| 6,369
| 38.565217
| 172
|
py
|
TTS
|
TTS-master/TTS/speaker_encoder/model.py
|
import torch
from torch import nn
class LSTMWithProjection(nn.Module):
    """A single LSTM layer whose per-step outputs are linearly projected to ``proj_size``."""

    def __init__(self, input_size, hidden_size, proj_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.proj_size = proj_size
        self.lstm = nn.LSTM(input_size, hidden_size, batch_first=True)
        self.linear = nn.Linear(hidden_size, proj_size, bias=False)

    def forward(self, x):
        """Run the LSTM over ``x`` (B, T, input_size) and project to (B, T, proj_size)."""
        # Keep weights contiguous for cuDNN after potential DataParallel scatter.
        self.lstm.flatten_parameters()
        outputs, _ = self.lstm(x)
        return self.linear(outputs)
class LSTMWithoutProjection(nn.Module):
    """Multi-layer LSTM; the last layer's final hidden state is mapped through Linear + ReLU."""

    def __init__(self, input_dim, lstm_dim, proj_dim, num_lstm_layers):
        super().__init__()
        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=lstm_dim,
            num_layers=num_lstm_layers,
            batch_first=True,
        )
        self.linear = nn.Linear(lstm_dim, proj_dim, bias=True)
        self.relu = nn.ReLU()

    def forward(self, x):
        """Return ReLU(Linear(h_T)) where h_T is the final hidden state of the top layer."""
        _, (hidden_states, _) = self.lstm(x)
        last_hidden = hidden_states[-1]
        return self.relu(self.linear(last_hidden))
class SpeakerEncoder(nn.Module):
    """d-vector speaker encoder: stacked LSTM layers producing an L2-normalized embedding.

    Args:
        input_dim (int): feature dimension of the input frames (e.g. mel bands).
        proj_dim (int): dimensionality of the output embedding.
        lstm_dim (int): LSTM hidden size.
        num_lstm_layers (int): number of stacked LSTM layers.
        use_lstm_with_projection (bool): if True, every LSTM layer is followed by a
            linear projection (LSTMWithProjection); otherwise a plain multi-layer
            LSTM (LSTMWithoutProjection) is used.
    """

    def __init__(self, input_dim, proj_dim=256, lstm_dim=768, num_lstm_layers=3, use_lstm_with_projection=True):
        super().__init__()
        self.use_lstm_with_projection = use_lstm_with_projection
        layers = []
        # choose LSTM layer type
        if use_lstm_with_projection:
            layers.append(LSTMWithProjection(input_dim, lstm_dim, proj_dim))
            for _ in range(num_lstm_layers - 1):
                layers.append(LSTMWithProjection(proj_dim, lstm_dim, proj_dim))
            self.layers = nn.Sequential(*layers)
        else:
            self.layers = LSTMWithoutProjection(input_dim, lstm_dim, proj_dim, num_lstm_layers)
        self._init_layers()

    def _init_layers(self):
        # Zero all biases; Xavier-normal init for all weight matrices.
        for name, param in self.layers.named_parameters():
            if "bias" in name:
                nn.init.constant_(param, 0.0)
            elif "weight" in name:
                nn.init.xavier_normal_(param)

    def forward(self, x):
        # TODO: implement state passing for lstms
        d = self.layers(x)
        if self.use_lstm_with_projection:
            # Projection path returns per-step outputs; take the last step.
            d = torch.nn.functional.normalize(d[:, -1], p=2, dim=1)
        else:
            d = torch.nn.functional.normalize(d, p=2, dim=1)
        return d

    @torch.no_grad()
    def inference(self, x):
        """Same as forward() but without gradient tracking."""
        d = self.layers.forward(x)
        if self.use_lstm_with_projection:
            d = torch.nn.functional.normalize(d[:, -1], p=2, dim=1)
        else:
            d = torch.nn.functional.normalize(d, p=2, dim=1)
        return d

    def compute_embedding(self, x, num_frames=160, overlap=0.5):
        """
        Generate embeddings for a batch of utterances
        x: 1xTxD
        """
        num_overlap = int(num_frames * overlap)
        max_len = x.shape[1]
        embed = None
        cur_iter = 0
        # Average the embeddings of overlapping sliding windows.
        # NOTE(review): x must contain at least one frame, otherwise embed stays
        # None and the final division fails — confirm callers guarantee this.
        for offset in range(0, max_len, num_frames - num_overlap):
            cur_iter += 1
            end_offset = min(x.shape[1], offset + num_frames)
            frames = x[:, offset:end_offset]
            if embed is None:
                embed = self.inference(frames)
            else:
                embed += self.inference(frames)
        return embed / cur_iter

    def batch_compute_embedding(self, x, seq_lens, num_frames=160, overlap=0.5):
        """
        Generate embeddings for a batch of utterances
        x: BxTxD
        """
        # BUGFIX: the overlap must be converted to int — range() rejects a float
        # step, so the original `num_frames * overlap` made this method raise
        # TypeError on every call (compute_embedding already used int()).
        num_overlap = int(num_frames * overlap)
        max_len = x.shape[1]
        embed = None
        # Per-utterance number of windows that actually contain real audio.
        num_iters = seq_lens / (num_frames - num_overlap)
        cur_iter = 0
        for offset in range(0, max_len, num_frames - num_overlap):
            cur_iter += 1
            end_offset = min(x.shape[1], offset + num_frames)
            frames = x[:, offset:end_offset]
            if embed is None:
                embed = self.inference(frames)
            else:
                # Only accumulate for utterances whose sequence still covers
                # this window.
                embed[cur_iter <= num_iters, :] += self.inference(
                    frames[cur_iter <= num_iters, :, :]
                )
        # NOTE(review): assumes seq_lens broadcasts against the (B, proj_dim)
        # embedding tensor — confirm callers pass a compatible shape.
        return embed / num_iters
| 4,118
| 35.451327
| 112
|
py
|
TTS
|
TTS-master/TTS/speaker_encoder/dataset.py
|
import numpy
import numpy as np
import queue
import torch
import random
from torch.utils.data import Dataset
from tqdm import tqdm
class MyDataset(Dataset):
    """Speaker-verification dataset: each batch groups utterances by speaker.

    ``__getitem__`` only picks a speaker; the actual audio is loaded (or drawn
    from an in-memory storage queue) in ``collate_fn``.
    """

    def __init__(self, ap, meta_data, voice_len=1.6, num_speakers_in_batch=64,
                 storage_size=1, sample_from_storage_p=0.5, additive_noise=0,
                 num_utter_per_speaker=10, skip_speakers=False, verbose=False):
        """
        Args:
            ap (TTS.tts.utils.AudioProcessor): audio processor object.
            meta_data (list): list of dataset instances.
            seq_len (int): voice segment length in seconds.
            verbose (bool): print diagnostic information.
        """
        self.items = meta_data
        self.sample_rate = ap.sample_rate
        self.voice_len = voice_len
        # Segment length converted from seconds to samples.
        self.seq_len = int(voice_len * self.sample_rate)
        self.num_speakers_in_batch = num_speakers_in_batch
        self.num_utter_per_speaker = num_utter_per_speaker
        self.skip_speakers = skip_speakers
        self.ap = ap
        self.verbose = verbose
        self.__parse_items()
        # FIFO of recently loaded (wavs, labels) tuples, reused to avoid disk I/O.
        self.storage = queue.Queue(maxsize=storage_size*num_speakers_in_batch)
        self.sample_from_storage_p = float(sample_from_storage_p)
        self.additive_noise = float(additive_noise)
        if self.verbose:
            print("\n > DataLoader initialization")
            print(f" | > Speakers per Batch: {num_speakers_in_batch}")
            print(f" | > Storage Size: {self.storage.maxsize} speakers, each with {num_utter_per_speaker} utters")
            print(f" | > Sample_from_storage_p : {self.sample_from_storage_p}")
            print(f" | > Noise added : {self.additive_noise}")
            print(f" | > Number of instances : {len(self.items)}")
            print(f" | > Sequence length: {self.seq_len}")
            print(f" | > Num speakers: {len(self.speakers)}")

    def load_wav(self, filename):
        # Delegate decoding/resampling to the audio processor.
        audio = self.ap.load_wav(filename, sr=self.ap.sample_rate)
        return audio

    def load_data(self, idx):
        # Load one item as a dict with its mel spectrogram; items are
        # (text, wav_file, speaker_name) tuples.
        text, wav_file, speaker_name = self.items[idx]
        wav = np.asarray(self.load_wav(wav_file), dtype=np.float32)
        mel = self.ap.melspectrogram(wav).astype("float32")
        # sample seq_len
        assert text.size > 0, self.items[idx][1]
        assert wav.size > 0, self.items[idx][1]
        sample = {
            "mel": mel,
            "item_idx": self.items[idx][1],
            "speaker_name": speaker_name,
        }
        return sample

    def __parse_items(self):
        # Build speaker -> [utterance paths] mapping; optionally drop speakers
        # with fewer than num_utter_per_speaker utterances.
        self.speaker_to_utters = {}
        for i in self.items:
            path_ = i[1]
            speaker_ = i[2]
            if speaker_ in self.speaker_to_utters.keys():
                self.speaker_to_utters[speaker_].append(path_)
            else:
                self.speaker_to_utters[speaker_] = [path_, ]
        if self.skip_speakers:
            self.speaker_to_utters = {k: v for (k, v) in self.speaker_to_utters.items() if
                                      len(v) >= self.num_utter_per_speaker}
        self.speakers = [k for (k, v) in self.speaker_to_utters.items()]

    # NOTE: earlier implementation kept for reference.
    # def __parse_items(self):
    #     """
    #     Find unique speaker ids and create a dict mapping utterances from speaker id
    #     """
    #     speakers = list({item[-1] for item in self.items})
    #     self.speaker_to_utters = {}
    #     self.speakers = []
    #     for speaker in speakers:
    #         speaker_utters = [item[1] for item in self.items if item[2] == speaker]
    #         if len(speaker_utters) < self.num_utter_per_speaker and self.skip_speakers:
    #             print(
    #                 f" [!] Skipped speaker {speaker}. Not enough utterances {self.num_utter_per_speaker} vs {len(speaker_utters)}."
    #             )
    #         else:
    #             self.speakers.append(speaker)
    #             self.speaker_to_utters[speaker] = speaker_utters

    def __len__(self):
        # Effectively infinite; epoch length is controlled by the sampler.
        return int(1e10)

    def __sample_speaker(self):
        # Pick one speaker and num_utter_per_speaker utterance paths for them
        # (with replacement when the speaker has too few utterances).
        speaker = random.sample(self.speakers, 1)[0]
        if self.num_utter_per_speaker > len(self.speaker_to_utters[speaker]):
            utters = random.choices(
                self.speaker_to_utters[speaker], k=self.num_utter_per_speaker
            )
        else:
            utters = random.sample(
                self.speaker_to_utters[speaker], self.num_utter_per_speaker
            )
        return speaker, utters

    def __sample_speaker_utterances(self, speaker):
        """
        Sample all M utterances for the given speaker.
        """
        wavs = []
        labels = []
        for _ in range(self.num_utter_per_speaker):
            # TODO:dummy but works
            # Retry until a wav longer than seq_len is found; speakers that run
            # out of utterances are removed and a new speaker is drawn.
            while True:
                if len(self.speaker_to_utters[speaker]) > 0:
                    utter = random.sample(self.speaker_to_utters[speaker], 1)[0]
                else:
                    self.speakers.remove(speaker)
                    speaker, _ = self.__sample_speaker()
                    continue
                wav = self.load_wav(utter)
                if wav.shape[0] - self.seq_len > 0:
                    break
                # Too-short utterance: drop it so it is never drawn again.
                self.speaker_to_utters[speaker].remove(utter)
            wavs.append(wav)
            labels.append(speaker)
        return wavs, labels

    def __getitem__(self, idx):
        # Only the speaker identity is chosen here; audio is loaded in collate_fn.
        speaker, _ = self.__sample_speaker()
        return speaker

    def collate_fn(self, batch):
        # Turn a batch of speaker names into (B*M, T, mel) features and labels.
        labels = []
        feats = []
        for speaker in batch:
            if random.random() < self.sample_from_storage_p and self.storage.full():
                # sample from storage (if full), ignoring the speaker
                wavs_, labels_ = random.choice(self.storage.queue)
            else:
                # don't sample from storage, but from HDD
                wavs_, labels_ = self.__sample_speaker_utterances(speaker)
                # if storage is full, remove an item
                if self.storage.full():
                    _ = self.storage.get_nowait()
                # put the newly loaded item into storage
                self.storage.put_nowait((wavs_, labels_))
            # add random gaussian noise
            if self.additive_noise > 0:
                noises_ = [numpy.random.normal(0, self.additive_noise, size=len(w)) for w in wavs_]
                wavs_ = [wavs_[i] + noises_[i] for i in range(len(wavs_))]
            # get a random subset of each of the wavs and convert to MFCC.
            offsets_ = [random.randint(0, wav.shape[0] - self.seq_len) for wav in wavs_]
            mels_ = [self.ap.melspectrogram(wavs_[i][offsets_[i]: offsets_[i] + self.seq_len]) for i in range(len(wavs_))]
            feats_ = [torch.FloatTensor(mel) for mel in mels_]
            labels.append(labels_)
            feats.extend(feats_)
        feats = torch.stack(feats)
        return feats.transpose(1, 2), labels
| 6,882
| 39.488235
| 133
|
py
|
TTS
|
TTS-master/TTS/speaker_encoder/__init__.py
| 0
| 0
| 0
|
py
|
|
TTS
|
TTS-master/TTS/speaker_encoder/utils/generic_utils.py
|
import datetime
import os
import re
import torch
from TTS.speaker_encoder.model import SpeakerEncoder
from TTS.utils.generic_utils import check_argument
def to_camel(text):
    """Convert a snake_case string to CamelCase (e.g. "speaker_encoder" -> "SpeakerEncoder")."""
    capitalized = text.capitalize()
    # Replace every non-leading "_x" with "X".
    return re.sub(r'(?!^)_([a-zA-Z])', lambda match: match.group(1).upper(), capitalized)
def setup_model(c):
    """Instantiate a SpeakerEncoder from the ``model`` section of config ``c``."""
    return SpeakerEncoder(
        c.model['input_dim'],
        c.model['proj_dim'],
        c.model['lstm_dim'],
        c.model['num_lstm_layers'],
    )
def save_checkpoint(model, optimizer, model_loss, out_path,
                    current_step, epoch):
    """Serialize model/optimizer state to ``out_path`` as ``checkpoint_<step>.pth.tar``."""
    checkpoint_path = os.path.join(
        out_path, 'checkpoint_{}.pth.tar'.format(current_step))
    print(" | | > Checkpoint saving : {}".format(checkpoint_path))
    state = {
        'model': model.state_dict(),
        # Optimizer state is optional (e.g. when saving for inference only).
        'optimizer': optimizer.state_dict() if optimizer is not None else None,
        'step': current_step,
        'epoch': epoch,
        'loss': model_loss,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    torch.save(state, checkpoint_path)
def save_best_model(model, optimizer, model_loss, best_loss, out_path,
                    current_step):
    """Persist ``model`` as ``best_model.pth.tar`` when ``model_loss`` beats ``best_loss``.

    Returns the (possibly updated) best loss.
    """
    # Guard clause: nothing to do when the loss did not improve.
    if not model_loss < best_loss:
        return best_loss
    state = {
        'model': model.state_dict(),
        'optimizer': optimizer.state_dict(),
        'step': current_step,
        'loss': model_loss,
        'date': datetime.date.today().strftime("%B %d, %Y"),
    }
    bestmodel_path = os.path.join(out_path, 'best_model.pth.tar')
    print("\n > BEST MODEL ({0:.5f}) : {1:}".format(
        model_loss, bestmodel_path))
    torch.save(state, bestmodel_path)
    return model_loss
def check_config_speaker_encoder(c):
    """Check the config.json file of the speaker encoder.

    Validates presence, type and value ranges of every expected field in the
    config dict ``c``; ``check_argument`` raises on violations. Nothing is
    returned on success.
    """
    check_argument('run_name', c, restricted=True, val_type=str)
    check_argument('run_description', c, val_type=str)

    # audio processing parameters
    check_argument('audio', c, restricted=True, val_type=dict)
    check_argument('num_mels', c['audio'], restricted=True, val_type=int, min_val=10, max_val=2056)
    check_argument('fft_size', c['audio'], restricted=True, val_type=int, min_val=128, max_val=4058)
    check_argument('sample_rate', c['audio'], restricted=True, val_type=int, min_val=512, max_val=100000)
    # frame_length_ms/frame_shift_ms may alternatively be given as
    # win_length/hop_length (see `alternative=`).
    check_argument('frame_length_ms', c['audio'], restricted=True, val_type=float, min_val=10, max_val=1000, alternative='win_length')
    check_argument('frame_shift_ms', c['audio'], restricted=True, val_type=float, min_val=1, max_val=1000, alternative='hop_length')
    check_argument('preemphasis', c['audio'], restricted=True, val_type=float, min_val=0, max_val=1)
    check_argument('min_level_db', c['audio'], restricted=True, val_type=int, min_val=-1000, max_val=10)
    check_argument('ref_level_db', c['audio'], restricted=True, val_type=int, min_val=0, max_val=1000)
    check_argument('power', c['audio'], restricted=True, val_type=float, min_val=1, max_val=5)
    check_argument('griffin_lim_iters', c['audio'], restricted=True, val_type=int, min_val=10, max_val=1000)

    # training parameters
    check_argument('loss', c, enum_list=['ge2e', 'angleproto'], restricted=True, val_type=str)
    check_argument('grad_clip', c, restricted=True, val_type=float)
    check_argument('epochs', c, restricted=True, val_type=int, min_val=1)
    check_argument('lr', c, restricted=True, val_type=float, min_val=0)
    check_argument('lr_decay', c, restricted=True, val_type=bool)
    check_argument('warmup_steps', c, restricted=True, val_type=int, min_val=0)
    check_argument('tb_model_param_stats', c, restricted=True, val_type=bool)
    check_argument('num_speakers_in_batch', c, restricted=True, val_type=int)
    check_argument('num_loader_workers', c, restricted=True, val_type=int)
    check_argument('wd', c, restricted=True, val_type=float, min_val=0.0, max_val=1.0)

    # checkpoint and output parameters
    check_argument('steps_plot_stats', c, restricted=True, val_type=int)
    check_argument('checkpoint', c, restricted=True, val_type=bool)
    check_argument('save_step', c, restricted=True, val_type=int)
    check_argument('print_step', c, restricted=True, val_type=int)
    check_argument('output_path', c, restricted=True, val_type=str)

    # model parameters
    check_argument('model', c, restricted=True, val_type=dict)
    check_argument('input_dim', c['model'], restricted=True, val_type=int)
    check_argument('proj_dim', c['model'], restricted=True, val_type=int)
    check_argument('lstm_dim', c['model'], restricted=True, val_type=int)
    check_argument('num_lstm_layers', c['model'], restricted=True, val_type=int)
    check_argument('use_lstm_with_projection', c['model'], restricted=True, val_type=bool)

    # in-memory storage parameters
    check_argument('storage', c, restricted=True, val_type=dict)
    check_argument('sample_from_storage_p', c['storage'], restricted=True, val_type=float, min_val=0.0, max_val=1.0)
    check_argument('storage_size', c['storage'], restricted=True, val_type=int, min_val=1, max_val=100)
    check_argument('additive_noise', c['storage'], restricted=True, val_type=float, min_val=0.0, max_val=1.0)

    # datasets - checking only the first entry
    check_argument('datasets', c, restricted=True, val_type=list)
    for dataset_entry in c['datasets']:
        check_argument('name', dataset_entry, restricted=True, val_type=str)
        check_argument('path', dataset_entry, restricted=True, val_type=str)
        check_argument('meta_file_train', dataset_entry, restricted=True, val_type=[str, list])
        check_argument('meta_file_val', dataset_entry, restricted=True, val_type=str)
| 5,843
| 48.525424
| 134
|
py
|
TTS
|
TTS-master/TTS/speaker_encoder/utils/prepare_voxceleb.py
|
# coding=utf-8
# Copyright (C) 2020 ATHENA AUTHORS; Yiping Peng; Ne Luo
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Only support eager mode and TF>=2.0.0
# pylint: disable=no-member, invalid-name, relative-beyond-top-level
# pylint: disable=too-many-locals, too-many-statements, too-many-arguments, too-many-instance-attributes
''' voxceleb 1 & 2 '''
import os
import sys
import zipfile
import subprocess
import hashlib
import pandas
from absl import logging
import tensorflow as tf
import soundfile as sf
# Alias for the TF1-style gfile API used for filesystem operations below.
gfile = tf.compat.v1.gfile

# Download URLs for every VoxCeleb 1 & 2 subset. Multi-part archives
# (*_parta? files) are concatenated into one zip after download.
SUBSETS = {
    "vox1_dev_wav":
        ["http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partaa",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partab",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partac",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_dev_wav_partad"],
    "vox1_test_wav":
        ["http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox1_test_wav.zip"],
    "vox2_dev_aac":
        ["http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaa",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partab",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partac",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partad",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partae",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partaf",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partag",
         "http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_dev_aac_partah"],
    "vox2_test_aac":
        ["http://www.robots.ox.ac.uk/~vgg/data/voxceleb/vox1a/vox2_test_aac.zip"]
}

# Expected md5 checksum of each (concatenated) subset archive.
MD5SUM = {
    "vox1_dev_wav": "ae63e55b951748cc486645f532ba230b",
    "vox2_dev_aac": "bbc063c46078a602ca71605645c2a402",
    "vox1_test_wav": "185fdc63c3c739954633d50379a3d102",
    "vox2_test_aac": "0d2b3ea430a821c33263b5ea37ede312"
}

# Download credentials; filled in from the command line in __main__.
USER = {
    "user": "",
    "password": ""
}

# Global speaker-name -> integer-id mapping, mutated while building the csvs.
speaker_id_dict = {}
def download_and_extract(directory, subset, urls):
    """Download and extract the given split of dataset.

    Args:
        directory: the directory where to put the downloaded data.
        subset: subset name of the corpus.
        urls: the list of urls to download the data file.
    """
    if not gfile.Exists(directory):
        gfile.MakeDirs(directory)
    try:
        for url in urls:
            zip_filepath = os.path.join(directory, url.split("/")[-1])
            if os.path.exists(zip_filepath):
                continue
            logging.info("Downloading %s to %s" % (url, zip_filepath))
            # NOTE(review): command is built by string interpolation with
            # shell=True; user/password come from argv — acceptable for a
            # manually run script, but not for untrusted input.
            subprocess.call('wget %s --user %s --password %s -O %s' %
                            (url, USER["user"], USER["password"], zip_filepath), shell=True)
            statinfo = os.stat(zip_filepath)
            logging.info(
                "Successfully downloaded %s, size(bytes): %d" % (url, statinfo.st_size)
            )
        # concatenate all parts into zip files
        if ".zip" not in zip_filepath:
            zip_filepath = "_".join(zip_filepath.split("_")[:-1])
            subprocess.call('cat %s* > %s.zip' %
                            (zip_filepath, zip_filepath), shell=True)
            zip_filepath += ".zip"
        # BUGFIX: the original used zip_filepath.strip(".zip"), but str.strip
        # removes any of the characters '.', 'z', 'i', 'p' from BOTH ends (not
        # the suffix), which can mangle paths; drop the extension explicitly.
        extract_path = zip_filepath[:-len(".zip")]
        # check zip file md5sum
        md5 = hashlib.md5(open(zip_filepath, 'rb').read()).hexdigest()
        if md5 != MD5SUM[subset]:
            raise ValueError("md5sum of %s mismatch" % zip_filepath)
        with zipfile.ZipFile(zip_filepath, "r") as zfile:
            zfile.extractall(directory)
            extract_path_ori = os.path.join(directory, zfile.infolist()[0].filename)
            subprocess.call('mv %s %s' % (extract_path_ori, extract_path), shell=True)
    finally:
        # gfile.Remove(zip_filepath)
        pass
def exec_cmd(cmd):
    """Run a command in a subprocess.

    Args:
        cmd: command line to be executed.
    Return:
        int, the return code (-999 when the command could not be started).
    """
    try:
        returncode = subprocess.call(cmd, shell=True)
    except OSError as e:
        logging.info(f"Execution failed: {e}")
        return -999
    # Negative return codes mean the child was killed by a signal.
    if returncode < 0:
        logging.info(f"Child was terminated by signal {returncode}")
    return returncode
def decode_aac_with_ffmpeg(aac_file, wav_file):
    """Decode a given AAC file into WAV using ffmpeg.

    Args:
        aac_file: file path to input AAC file.
        wav_file: file path to output WAV file.
    Return:
        bool, True if success.
    """
    cmd = f"ffmpeg -i {aac_file} {wav_file}"
    logging.info(f"Decoding aac file using command line: {cmd}")
    status = exec_cmd(cmd)
    if status == 0:
        return True
    logging.error(f"Failed to decode aac file with retcode {status}")
    logging.error("Please check your ffmpeg installation.")
    return False
def convert_audio_and_make_label(input_dir, subset,
                                 output_dir, output_file):
    """Optionally convert AAC to WAV and make speaker labels.

    Args:
        input_dir: the directory which holds the input dataset.
        subset: the name of the specified subset. e.g. vox1_dev_wav
        output_dir: the directory to place the newly generated csv files.
        output_file: the name of the newly generated csv file. e.g. vox1_dev_wav.csv
    """
    logging.info("Preprocessing audio and label for subset %s" % subset)
    source_dir = os.path.join(input_dir, subset)
    files = []
    # Convert all AAC file into WAV format. At the same time, generate the csv
    for root, _, filenames in gfile.Walk(source_dir):
        for filename in filenames:
            name, ext = os.path.splitext(filename)
            if ext.lower() == ".wav":
                # Skip wavs that were themselves produced from .m4a files
                # (they carry a double extension like foo.m4a.wav).
                _, ext2 = (os.path.splitext(name))
                if ext2:
                    continue
                wav_file = os.path.join(root, filename)
            elif ext.lower() == ".m4a":
                # Convert AAC to WAV.
                aac_file = os.path.join(root, filename)
                wav_file = aac_file + ".wav"
                if not gfile.Exists(wav_file):
                    if not decode_aac_with_ffmpeg(aac_file, wav_file):
                        raise RuntimeError("Audio decoding failed.")
            else:
                continue
            # Speaker name is taken from the second-to-last path component;
            # assign it a stable integer id on first sight.
            speaker_name = root.split(os.path.sep)[-2]
            if speaker_name not in speaker_id_dict:
                num = len(speaker_id_dict)
                speaker_id_dict[speaker_name] = num
            # wav_filesize = os.path.getsize(wav_file)
            # NOTE(review): this is the length in SAMPLES, despite the
            # "wav_length_ms" column name below — confirm downstream usage.
            wav_length = len(sf.read(wav_file)[0])
            files.append(
                (os.path.abspath(wav_file), wav_length, speaker_id_dict[speaker_name], speaker_name)
            )
    # Write to CSV file which contains four columns:
    # "wav_filename", "wav_length_ms", "speaker_id", "speaker_name".
    csv_file_path = os.path.join(output_dir, output_file)
    df = pandas.DataFrame(
        data=files, columns=["wav_filename", "wav_length_ms", "speaker_id", "speaker_name"])
    df.to_csv(csv_file_path, index=False, sep="\t")
    logging.info("Successfully generated csv file {}".format(csv_file_path))
def processor(directory, subset, force_process):
    """Download and process one VoxCeleb subset; return the path of its csv file."""
    if subset not in SUBSETS:
        raise ValueError(subset, "is not in voxceleb")
    subset_csv = os.path.join(directory, subset + '.csv')
    # Reuse an existing csv unless re-processing is forced.
    if os.path.exists(subset_csv) and not force_process:
        return subset_csv
    logging.info("Downloading and process the voxceleb in %s", directory)
    logging.info("Preparing subset %s", subset)
    download_and_extract(directory, subset, SUBSETS[subset])
    convert_audio_and_make_label(
        directory,
        subset,
        directory,
        subset + ".csv"
    )
    logging.info("Finished downloading and processing")
    return subset_csv
if __name__ == "__main__":
    logging.set_verbosity(logging.INFO)
    # Expect exactly three CLI arguments: save directory, username, password.
    if len(sys.argv) != 4:
        print("Usage: python prepare_data.py save_directory user password")
        sys.exit()
    # Credentials are used by the wget download commands.
    DIR, USER["user"], USER["password"] = sys.argv[1], sys.argv[2], sys.argv[3]
    # Download and preprocess every known subset (without forcing re-processing).
    for SUBSET in SUBSETS:
        processor(DIR, SUBSET, False)
| 8,952
| 37.260684
| 104
|
py
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.