after_merge
stringlengths 28
79.6k
| before_merge
stringlengths 20
79.6k
| url
stringlengths 38
71
| full_traceback
stringlengths 43
922k
| traceback_type
stringclasses 555
values |
|---|---|---|---|---|
def __rsub__(self, other):
return _ensure_poly(other) - self
|
def __rsub__(self, other):
return self + -other
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __divmod__(self, divisor):
if self.is_constant:
return divmod(int(self), divisor)
else:
def divided(count):
q, r = divmod(count, divisor)
if r != 0:
raise ValueError(
"shapecheck currently only supports strides "
"that exactly divide the strided axis length."
)
return q
return Poly(
{
k: coeff // divisor if k.degree == 0 else divided(coeff)
for k, coeff in self.items()
}
), self.get(Mon(), 0) % divisor
|
def __divmod__(self, divisor):
if self.is_constant:
q, r = divmod(int(self), divisor)
return constant_poly(q), r
def divided(count):
q, r = divmod(count, divisor)
if r != 0:
raise ValueError(
"shapecheck currently only supports strides "
"that exactly divide the strided axis length."
)
return q
return Poly(
{
k: coeff // divisor if k.degree == 0 else divided(coeff)
for k, coeff in self.items()
}
), self[Mon()] % divisor
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __hash__(self):
return hash(tuple(sorted(self.items())))
|
def __hash__(self):
return hash(super())
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __eq__(self, other):
return super().__eq__(_ensure_poly(other))
|
def __eq__(self, other):
return super().__eq__(ensure_poly(other))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __ge__(self, other):
other = _ensure_poly(other)
if other.is_constant and self.is_constant:
return int(self) >= int(other)
elif other.is_constant and int(other) <= 1:
# Assume nonzero polynomials are positive, allows use in shape rules
return True
elif self.is_constant and int(self) <= 0:
return False # See above.
elif self == other:
return True
raise ValueError(
'Polynomials comparison "{} >= {}" is inconclusive.'.format(self, other)
)
|
def __ge__(self, other):
other = ensure_poly(other)
if other.is_constant and self.is_constant:
return int(self) >= int(other)
if other.is_constant and int(other) <= 1:
# Assume polynomials > 0, allowing to use shape rules of binops, conv:
return True
if self.is_constant and int(self) <= 0:
return False # See above.
if self == other:
return True
raise ValueError(
'Polynomials comparison "{} >= {}" is inconclusive.'.format(self, other)
)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __le__(self, other):
return _ensure_poly(other) >= self
|
def __le__(self, other):
return ensure_poly(other) >= self
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __gt__(self, other):
return not (_ensure_poly(other) >= self)
|
def __gt__(self, other):
return not (ensure_poly(other) >= self)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __int__(self):
assert self.is_constant
return op.index(next(iter(self.values())))
|
def __int__(self):
assert self.is_constant
return int(next(iter(self.values())))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def finalize_spec(spec, shape):
return tuple(
_parse_lit(d) if e is _monomorphic_dim else e for e, d in zip(spec, shape)
)
|
def finalize_spec(spec, shape):
return tuple(
parse_lit(d) if e is monomorphic_dim else e for e, d in zip(spec, shape)
)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def parse_spec(spec=""):
if not spec:
return ShapeSpec(())
if spec[0] == "(":
if spec[-1] != ")":
raise ShapeSyntaxError(spec)
spec = spec[1:-1]
dims = map(_parse_dim, spec.replace(" ", "").strip(",").split(","))
return ShapeSpec(dims)
|
def parse_spec(spec=""):
if not spec:
return ShapeSpec(())
if spec[0] == "(":
if spec[-1] != ")":
raise ShapeSyntaxError(spec)
spec = spec[1:-1]
dims = map(parse_dim, spec.replace(" ", "").strip(",").split(","))
return ShapeSpec(dims)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __getitem__(self, idx):
return parse_spec(
("(" + ",".join(map(str, idx)) + ")") if type(idx) is tuple else str(idx)
)
|
def __getitem__(self, idx):
if type(idx) is tuple:
return parse_spec("(" + ",".join(map(str, idx)) + ")")
else:
return parse_spec(str(idx))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def is_pure(self):
return all(type(poly) is not Poly or poly.is_constant for poly in self.shape_expr)
|
def is_pure(self):
return all(ensure_poly(poly).is_constant for poly in self.shape_expr)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def process_primitive(self, primitive, tracers, params):
vals, shape_exprs = unzip2((t.val, t.shape_expr) for t in tracers)
if primitive in shape_parameterized_primitive_rules:
rule = shape_parameterized_primitive_rules[primitive]
out, out_shape = rule(shape_envs, vals, shape_exprs, **params)
else:
avals = [t.aval for t in tracers]
out = primitive.abstract_eval(*avals, **params)
out_shape = [o.shape for o in out] if primitive.multiple_results else out.shape
logical_shapes = map(
partial(eval_polymorphic_shape, values_dict=shape_envs.logical), shape_exprs
)
out = masking_rules[primitive](vals, logical_shapes, **params)
if not primitive.multiple_results:
return MaskTracer(self, out, out_shape)
else:
return map(partial(MaskTracer, self), out, out_shape)
|
def process_primitive(self, primitive, tracers, params):
vals, shape_exprs = unzip2((t.val, t.shape_expr) for t in tracers)
if primitive in shape_parameterized_primitive_rules:
rule = shape_parameterized_primitive_rules[primitive]
out, out_shape = rule(shape_envs, vals, shape_exprs, **params)
else:
out_shape = shape_rules[primitive](*(t.aval for t in tracers), **params)
logical_shapes = map(partial(eval_shape_expr, shape_envs.logical), shape_exprs)
out = masking_rules[primitive](vals, logical_shapes, **params)
if not primitive.multiple_results:
return MaskTracer(self, out, out_shape)
else:
return map(partial(MaskTracer, self), out, out_shape)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def __init__(self, trace, val, shape_expr):
self._trace = trace
self.val = val
self.shape_expr = shape_expr
|
def __init__(self, trace, shape_expr):
self._trace = trace
self.shape_expr = shape_expr
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def aval(self):
return ShapedArray(self.shape_expr, self.val.dtype)
|
def aval(self):
return ShapedArray(self.shape_expr, None)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def full_lower(self):
if self.is_pure():
return core.full_lower(self.val)
else:
return self
|
def full_lower(self):
return self
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def pure(self, val):
return MaskTracer(self, val, onp.shape(val))
|
def pure(self, val):
return ShapeCheckTracer(self, onp.shape(val))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def lift(self, val):
return MaskTracer(self, val, onp.shape(val))
|
def lift(self, val):
return ShapeCheckTracer(self, onp.shape(val))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def sublift(self, val):
return MaskTracer(self, val.val, val.shape_expr)
|
def sublift(self, val):
return ShapeCheckTracer(self, val.shape_expr)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def process_primitive(self, primitive, tracers, params):
vals, shape_exprs = unzip2((t.val, t.shape_expr) for t in tracers)
if primitive in shape_parameterized_primitive_rules:
rule = shape_parameterized_primitive_rules[primitive]
out, out_shape = rule(shape_envs, vals, shape_exprs, **params)
else:
avals = [t.aval for t in tracers]
out = primitive.abstract_eval(*avals, **params)
out_shape = [o.shape for o in out] if primitive.multiple_results else out.shape
logical_shapes = map(
partial(eval_polymorphic_shape, values_dict=shape_envs.logical), shape_exprs
)
out = masking_rules[primitive](vals, logical_shapes, **params)
if not primitive.multiple_results:
return MaskTracer(self, out, out_shape)
else:
return map(partial(MaskTracer, self), out, out_shape)
|
def process_primitive(self, primitive, tracers, params):
avals = [t.aval for t in tracers]
shape_rule = shape_rules.get(primitive)
if shape_rule is None:
raise NotImplementedError(
"Shape rule for {} not implemented yet.".format(primitive)
)
out_shape = shape_rule(*avals, **params)
return ShapeCheckTracer(self, out_shape)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):
raise NotImplementedError # TODO mask-of-jit
|
def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):
# TODO apply proper subtrace:
return map(self.full_raise, f.call_wrapped(*tracers))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def broadcast_shapes(*shapes):
"""Returns the shape that results from NumPy broadcasting of `shapes`."""
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
raise ValueError(
"Incompatible shapes for broadcasting: {}".format(tuple(map(tuple, shapes)))
)
return canonicalize_shape(result_shape)
|
def broadcast_shapes(*shapes):
"""Returns the shape that results from NumPy broadcasting of `shapes`."""
if len(shapes) == 1:
return shapes[0]
ndim = _max(len(shape) for shape in shapes)
shapes = onp.array([(1,) * (ndim - len(shape)) + shape for shape in shapes])
is_zero = onp.any(shapes == 0, axis=0)
max_shape = onp.max(shapes, axis=0)
result_shape = onp.where(is_zero, 0, max_shape)
if not onp.all((shapes == result_shape) | (shapes == 1)):
raise ValueError(
"Incompatible shapes for broadcasting: {}".format(tuple(map(tuple, shapes)))
)
return tuple(map(_canonicalize_dimension, result_shape))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def iota(dtype: DType, size: int) -> Array:
"""Wraps XLA's `Iota
<https://www.tensorflow.org/xla/operation_semantics#iota>`_
operator.
"""
size = size if type(size) is masking.Poly else int(size)
shape = canonicalize_shape((size,))
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, shape[0])
aval = ShapedArray(shape, dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
|
def iota(dtype: DType, size: int) -> Array:
"""Wraps XLA's `Iota
<https://www.tensorflow.org/xla/operation_semantics#iota>`_
operator.
"""
size = int(size)
dtype = dtypes.canonicalize_dtype(dtype)
lazy_expr = lazy.iota(dtype, size)
aval = ShapedArray((size,), dtype)
return xla.DeviceArray(aval, None, lazy_expr, xla.DeviceConstant())
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
prim = Primitive(name)
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(
partial(standard_abstract_eval, prim, shape_rule, dtype_rule)
)
xla.translations[prim] = translation_rule or partial(standard_translate, name)
return prim
|
def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):
prim = Primitive(name)
prim.def_impl(partial(xla.apply_primitive, prim))
prim.def_abstract_eval(
partial(standard_abstract_eval, prim, shape_rule, dtype_rule)
)
xla.translations[prim] = translation_rule or partial(standard_translate, name)
masking.shape_rules[prim] = shape_rule
return prim
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _check_shapelike(fun_name, arg_name, obj):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if not isinstance(obj, (tuple, list, onp.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = onp.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
raise TypeError(msg.format(obj_arr.ndim))
try:
canonicalize_shape(obj_arr)
except TypeError:
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
if not (obj_arr >= 0).all():
msg = "{} {} must have every element be nonnegative, got {}."
raise TypeError(msg.format(fun_name, arg_name, obj))
|
def _check_shapelike(fun_name, arg_name, obj):
"""Check that `obj` is a shape-like value (e.g. tuple of nonnegative ints)."""
if type(obj) is tuple and masking.is_polymorphic(obj):
return obj
if not isinstance(obj, (tuple, list, onp.ndarray)):
msg = "{} {} must be of type tuple/list/ndarray, got {}."
raise TypeError(msg.format(fun_name, arg_name, type(obj)))
# bool(obj) for an ndarray raises an error, so we check len
if not len(obj): # pylint: disable=g-explicit-length-test
return
obj_arr = onp.array(obj)
if obj_arr.ndim != 1:
msg = "{} {} must be rank 1, got {}."
raise TypeError(msg.format(obj_arr.ndim))
if not dtypes.issubdtype(obj_arr.dtype, onp.integer):
msg = "{} {} must have every element be an integer type, got {}."
raise TypeError(msg.format(fun_name, arg_name, tuple(map(type, obj))))
if not (obj_arr >= 0).all():
msg = "{} {} must have every element be nonnegative, got {}."
raise TypeError(msg.format(fun_name, arg_name, obj))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _scan_masking_rule(
shape_envs,
padded_vals,
shape_exprs,
forward,
length,
jaxpr,
num_consts,
num_carry,
linear,
):
out_shape = _scan_shape_rule(
shape_exprs, forward, length, jaxpr, num_consts, num_carry, linear
)
dynamic_length = length.evaluate(shape_envs.logical)
masked_jaxpr = _masked_scan_jaxpr(jaxpr, num_consts, num_carry)
consts, init, xs = split_list(padded_vals, [num_consts, num_carry])
(max_length,) = {x.shape[0] for x in xs}
const_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
out_vals = scan_p.bind(
*itertools.chain([dynamic_length] + consts, [0], init, xs),
forward=forward,
length=max_length,
jaxpr=masked_jaxpr,
num_consts=1 + num_consts,
num_carry=1 + num_carry,
linear=tuple([False] + const_linear + [False] + init_linear + xs_linear),
)
return out_vals[1:], out_shape
|
def _scan_masking_rule(
shape_envs,
padded_vals,
shape_exprs,
forward,
length,
jaxpr,
num_consts,
num_carry,
linear,
):
out_shape = _scan_shape_rule(
shape_exprs, forward, length, jaxpr, num_consts, num_carry, linear
)
dynamic_length = masking.eval_dim_expr(shape_envs.logical, length)
masked_jaxpr = _masked_scan_jaxpr(jaxpr, num_consts, num_carry)
consts, init, xs = split_list(padded_vals, [num_consts, num_carry])
(max_length,) = {x.shape[0] for x in xs}
const_linear, init_linear, xs_linear = split_list(linear, [num_consts, num_carry])
out_vals = scan_p.bind(
*itertools.chain([dynamic_length] + consts, [0], init, xs),
forward=forward,
length=max_length,
jaxpr=masked_jaxpr,
num_consts=1 + num_consts,
num_carry=1 + num_carry,
linear=tuple([False] + const_linear + [False] + init_linear + xs_linear),
)
return out_vals[1:], out_shape
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def broadcast_to(arr, shape):
"""Like Numpy's broadcast_to but doesn't necessarily return views."""
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = canonicalize_shape(shape) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1)
if nlead < 0 or not onp.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
(diff,) = onp.where(onp.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims)
|
def broadcast_to(arr, shape):
"""Like Numpy's broadcast_to but doesn't necessarily return views."""
arr = arr if isinstance(arr, ndarray) else array(arr)
shape = tuple(map(int, shape)) # check that shape is concrete
arr_shape = _shape(arr)
if arr_shape == shape:
return arr
else:
nlead = len(shape) - len(arr_shape)
compatible = onp.equal(arr_shape, shape[nlead:]) | onp.equal(arr_shape, 1)
if nlead < 0 or not onp.all(compatible):
msg = "Incompatible shapes for broadcasting: {} and requested shape {}"
raise ValueError(msg.format(arr_shape, shape))
(diff,) = onp.where(onp.not_equal(shape[nlead:], arr_shape))
new_dims = tuple(range(nlead)) + tuple(nlead + diff)
kept_dims = tuple(onp.delete(onp.arange(len(shape)), new_dims))
return lax.broadcast_in_dim(squeeze(arr, diff), shape, kept_dims)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
if type(axis_size) is Poly:
return index + axis_size if index < 0 else index
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index,
)
|
def _normalize_index(index, axis_size):
"""Normalizes an index value in the range [-N, N) to the range [0, N)."""
return lax.select(
lax.lt(index, _constant_like(index, 0)),
lax.add(index, _constant_like(index, axis_size)),
index,
)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j)
for j, (i, e) in enumerate(idx_no_nones)
if (isinstance(e, Sequence) or isinstance(e, ndarray))
)
advanced_pairs = (
(_normalize_index(e, x_shape[j]), i, j) for e, i, j in advanced_pairs
)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
use_64bit_index = _any([type(d) is Poly or d >= (1 << 31) for d in x_shape])
index_dtype = int64 if use_64bit_index else int32
gather_indices = onp.zeros((0,), dtype=index_dtype) # use onp to save a compilation
# We perform three transformations to y before the scatter op, in order:
# First, y is broadcast to slice_shape. In general `y` only need broadcast to
# the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
# * the advanced indices were not contiguous and we are the start.
# * we are at the position of the first advanced index.
if advanced_indexes is not None and (
advanced_axes_are_contiguous
and idx_pos == idx_advanced_axes[0]
or not advanced_axes_are_contiguous
and idx_pos == 0
):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)
for a in advanced_indexes
]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices,
onp.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1))
+ (gather_indices.ndim + ndim - 1,),
)
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if (
isinstance(abstract_i, ConcreteArray) or isinstance(abstract_i, ShapedArray)
) and _int(abstract_i):
if x_shape[x_axis] == 0:
# XLA gives error when indexing into an axis of size 0
raise IndexError(
f"index is out of bounds for axis {x_axis} with size 0"
)
i = _normalize_index(i, x_shape[x_axis])
if type(i) is Poly:
# dummy index if i is polynomial, doesn't matter for shape inference
# TODO(mattjj,j-towns,juliuskunze): revise this logic
i = 0
i = lax.convert_element_type(i, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(
elt is None
or type(elt) is Poly
or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)
):
msg = (
"Array slice indices must have static start/stop/step to be used "
"with Numpy indexing syntax. Try lax.dynamic_slice/"
"dynamic_update_slice instead."
)
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=index_dtype)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i,
shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,),
)
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1))
+ (len(gather_indices_shape),)
),
)
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape)
)
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if abstract_i is not None and not (
issubdtype(abstract_i.dtype, integer)
or issubdtype(abstract_i.dtype, bool_)
):
msg = (
"Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}"
)
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(sorted(collapsed_slice_dims)),
start_index_map=tuple(start_index_map),
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices,
)
|
def _index_to_gather(x_shape, idx):
# Remove ellipses and add trailing slice(None)s.
idx = _canonicalize_tuple_index(len(x_shape), idx)
# Check for advanced indexing:
# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing
# Do the advanced indexing axes appear contiguously? If not, NumPy semantics
# move the advanced axes to the front.
advanced_axes_are_contiguous = False
advanced_indexes = None
# The positions of the advanced indexing axes in `idx`.
idx_advanced_axes = []
# The positions of the advanced indexes in x's shape.
# collapsed, after None axes have been removed. See below.
x_advanced_axes = None
if _is_advanced_int_indexer(idx):
idx_no_nones = [(i, d) for i, d in enumerate(idx) if d is not None]
advanced_pairs = (
(asarray(e), i, j)
for j, (i, e) in enumerate(idx_no_nones)
if (isinstance(e, Sequence) or isinstance(e, ndarray))
)
advanced_pairs = (
(_normalize_index(e, x_shape[j]), i, j) for e, i, j in advanced_pairs
)
advanced_indexes, idx_advanced_axes, x_advanced_axes = zip(*advanced_pairs)
advanced_axes_are_contiguous = onp.all(onp.diff(idx_advanced_axes) == 1)
x_axis = 0 # Current axis in x.
y_axis = 0 # Current axis in y, before collapsing. See below.
collapsed_y_axis = 0 # Current axis in y, after collapsing.
# Scatter dimension numbers.
offset_dims = []
collapsed_slice_dims = []
start_index_map = []
index_dtype = int64 if _max(x_shape, default=0) >= (1 << 31) else int32
gather_indices = onp.zeros((0,), dtype=index_dtype) # use onp to save a compilation
# We perform three transformations to y before the scatter op, in order:
# First, y is broadcast to slice_shape. In general `y` only need broadcast to
# the right shape.
slice_shape = []
# Next, y is squeezed to remove newaxis_dims. This removes np.newaxis/`None`
# indices, which the scatter cannot remove itself.
newaxis_dims = []
# Finally, we reverse reversed_y_dims to handle slices with negative strides.
reversed_y_dims = []
gather_slice_shape = []
for idx_pos, i in enumerate(idx):
# Handle the advanced indices here if:
# * the advanced indices were not contiguous and we are the start.
# * we are at the position of the first advanced index.
if advanced_indexes is not None and (
advanced_axes_are_contiguous
and idx_pos == idx_advanced_axes[0]
or not advanced_axes_are_contiguous
and idx_pos == 0
):
advanced_indexes = broadcast_arrays(*advanced_indexes)
shape = advanced_indexes[0].shape
ndim = len(shape)
advanced_indexes = [
lax.convert_element_type(lax.reshape(a, shape + (1,)), index_dtype)
for a in advanced_indexes
]
# Broadcast gather_indices from [..., k] to [..., 1, 1, ..., 1, k].
gather_indices = lax.broadcast_in_dim(
gather_indices,
onp.insert(gather_indices.shape, -1, shape),
tuple(range(gather_indices.ndim - 1))
+ (gather_indices.ndim + ndim - 1,),
)
gather_indices = concatenate([gather_indices] + advanced_indexes, -1)
start_index_map.extend(x_advanced_axes)
collapsed_slice_dims.extend(x_advanced_axes)
slice_shape.extend(shape)
y_axis += ndim
collapsed_y_axis += ndim
# Per-index bookkeeping for advanced indexes.
if idx_pos in idx_advanced_axes:
x_axis += 1
gather_slice_shape.append(1)
continue
try:
abstract_i = core.get_aval(i)
except TypeError:
abstract_i = None
# Handle basic int indexes.
if (
isinstance(abstract_i, ConcreteArray) or isinstance(abstract_i, ShapedArray)
) and _int(abstract_i):
if x_shape[x_axis] == 0:
# XLA gives error when indexing into an axis of size 0
raise IndexError(
f"index is out of bounds for axis {x_axis} with size 0"
)
i = _normalize_index(i, x_shape[x_axis])
i = lax.convert_element_type(i, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
collapsed_slice_dims.append(x_axis)
gather_slice_shape.append(1)
start_index_map.append(x_axis)
x_axis += 1
# Handle np.newaxis (None)
elif i is None:
slice_shape.append(1)
newaxis_dims.append(y_axis)
y_axis += 1
# Handle slice(None)
elif _is_slice_none(i):
slice_shape.append(x_shape[x_axis])
gather_slice_shape.append(x_shape[x_axis])
offset_dims.append(collapsed_y_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
# Handle slice index (only static, otherwise an error is raised)
elif isinstance(i, slice):
if not _all(
elt is None or type(core.get_aval(elt)) is ConcreteArray
for elt in (i.start, i.stop, i.step)
):
msg = (
"Array slice indices must have static start/stop/step to be used "
"with Numpy indexing syntax. Try lax.dynamic_slice/"
"dynamic_update_slice instead."
)
raise IndexError(msg)
start, limit, stride, needs_rev = _static_idx(i, x_shape[x_axis])
if needs_rev:
reversed_y_dims.append(collapsed_y_axis)
if stride == 1:
i = lax.convert_element_type(start, index_dtype)
i = broadcast_to(i, tuple(gather_indices.shape[:-1]) + (1,))
gather_indices = concatenate((gather_indices, i), -1)
slice_shape.append(limit - start)
gather_slice_shape.append(limit - start)
offset_dims.append(collapsed_y_axis)
start_index_map.append(x_axis)
else:
i = arange(start, limit, stride, dtype=index_dtype)
size = i.shape[0]
slice_shape.append(size)
gather_slice_shape.append(1)
gather_indices_shape = tuple(gather_indices.shape[:-1]) + (size,)
i = lax.broadcast_in_dim(
i,
shape=gather_indices_shape + (1,),
broadcast_dimensions=(len(gather_indices_shape) - 1,),
)
gather_indices = lax.broadcast_in_dim(
gather_indices,
shape=gather_indices_shape + (len(start_index_map),),
broadcast_dimensions=(
tuple(range(len(gather_indices_shape) - 1))
+ (len(gather_indices_shape),)
),
)
gather_indices = concatenate(
(gather_indices, i), len(gather_indices_shape)
)
start_index_map.append(x_axis)
collapsed_slice_dims.append(x_axis)
collapsed_y_axis += 1
y_axis += 1
x_axis += 1
else:
if abstract_i is not None and not (
issubdtype(abstract_i.dtype, integer)
or issubdtype(abstract_i.dtype, bool_)
):
msg = (
"Indexer must have integer or boolean type, got indexer "
"with type {} at position {}, indexer value {}"
)
raise TypeError(msg.format(abstract_i.dtype.name, idx_pos, i))
msg = "Indexing mode not yet supported. Open a feature request!\n{}"
raise IndexError(msg.format(idx))
dnums = lax.GatherDimensionNumbers(
offset_dims=tuple(offset_dims),
collapsed_slice_dims=tuple(sorted(collapsed_slice_dims)),
start_index_map=tuple(start_index_map),
)
return _Indexer(
slice_shape=slice_shape,
newaxis_dims=tuple(newaxis_dims),
gather_slice_shape=gather_slice_shape,
reversed_y_dims=reversed_y_dims,
dnums=dnums,
gather_indices=gather_indices,
)
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _static_idx(idx: slice, size: Union[int, Poly]):
"""Helper function to compute the static slice start/limit/stride values."""
if _any(type(s) is Poly for s in (idx.start, idx.stop, idx.step, size)):
start, stop, step = _polymorphic_slice_indices(idx, size)
elif isinstance(size, int):
start, stop, step = idx.indices(size)
else:
raise TypeError(size)
if type(start) is not Poly and type(stop) is not Poly:
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
|
def _static_idx(idx, size):
"""Helper function to compute the static slice start/limit/stride values."""
assert isinstance(idx, slice)
start, stop, step = idx.indices(size)
if (step < 0 and stop >= start) or (step > 0 and start >= stop):
return 0, 0, 1, False # sliced to size zero
if step > 0:
return start, stop, step, False
else:
k = (start - stop - 1) % (-step)
return stop + k + 1, start + 1, -step, True
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def _check_shape(name, shape, *param_shapes):
shape = abstract_arrays.canonicalize_shape(shape)
if param_shapes:
shape_ = lax.broadcast_shapes(shape, *param_shapes)
if shape != shape_:
msg = (
"{} parameter shapes must be broadcast-compatible with shape "
"argument, and the result of broadcasting the shapes must equal "
"the shape argument, but got result {} for shape argument {}."
)
raise ValueError(msg.format(name, shape_, shape))
|
def _check_shape(name, shape, *param_shapes):
try:
shape = tuple(map(int, shape))
except TypeError as err:
msg = "{} requires a concrete tuple of integers as shape argument, got {}."
raise ValueError(msg.format(name, shape)) from err
if param_shapes:
shape_ = lax.broadcast_shapes(shape, *param_shapes)
if shape != shape_:
msg = (
"{} parameter shapes must be broadcast-compatible with shape "
"argument, and the result of broadcasting the shapes must equal "
"the shape argument, but got result {} for shape argument {}."
)
raise ValueError(msg.format(name, shape_, shape))
|
https://github.com/google/jax/issues/2245
|
Traceback (most recent call last):
File "/Users/necula/Source/jax/jax/interpreters/xla.py", line 230, in primitive_computation
return c.Build()
File "/Users/necula/Source/jax/jax/lib/xla_bridge.py", line 281, in Build
*args, **kwargs)
File "/Users/necula/Source/jax/build/jaxlib/xla_client.py", line 734, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Slice size at index 0 in gather op is out of range, must be within [0, 6), got 10.:
|
RuntimeError
|
def custom_jvp(fwd, jvp):
@wraps(fwd)
def fun_(*args, **kwargs):
args_flat, in_tree = tree_flatten((args, kwargs))
flat_fun, out_data = _flatten_fun_and_count_res(lu.wrap_init(fwd), in_tree)
out_flat = custom_jvp_call(
flat_fun,
*args_flat,
out_data=out_data,
jvp=jvp,
in_tree=in_tree,
keep_res=False,
)
ans_tree, _, _ = out_data()
return tree_unflatten(ans_tree, out_flat)
return fun_
|
def custom_jvp(primals, tangents):
ans = fun(*primals)
tangents_out = [
rule(t, ans, *primals)
for rule, t in zip(jvprules, tangents)
if rule is not None and t is not ad_util.zero
]
return ans, functools.reduce(ad.add_tangents, tangents_out, ad_util.zero)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def custom_vjp(fwd, bwd):
@wraps(fwd)
def fun_(*args, **kwargs):
args_flat, in_tree = tree_flatten((args, kwargs))
flat_fun, out_data = _flatten_fun_and_count_res(lu.wrap_init(fwd), in_tree)
out_flat = custom_vjp_call(
flat_fun, *args_flat, bwd=bwd, out_data=out_data, keep_res=False
)
ans_tree, _, _ = out_data()
return tree_unflatten(ans_tree, out_flat)
return fun_
|
def custom_vjp(*primals):
ans = fun(*primals)
# TODO(mattjj): avoid instantiating zeros?
def vjpfun(ct):
return tuple(
vjp(ct, ans, *primals) if vjp else ad_util.zeros_like_jaxval(x)
for x, vjp in zip(primals, vjprules)
)
return ans, vjpfun
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def __repr__(self):
return "<axis {}>".format(hex(id(self.obj)))
|
def __repr__(self):
return "<jax.custom_transforms function {fun}>".format(fun=self.__name__)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def optimal_step_size(
last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5.0
):
"""Compute optimal Runge-Kutta stepsize."""
mean_error_ratio = np.max(mean_error_ratio)
dfactor = np.where(mean_error_ratio < 1, 1.0, dfactor)
err_ratio = np.sqrt(mean_error_ratio)
factor = np.maximum(
1.0 / ifactor, np.minimum(err_ratio ** (1.0 / order) / safety, 1.0 / dfactor)
)
return np.where(mean_error_ratio == 0, last_step * ifactor, last_step / factor)
|
def optimal_step_size(
last_step, mean_error_ratio, safety=0.9, ifactor=10.0, dfactor=0.2, order=5.0
):
"""Compute optimal Runge-Kutta stepsize."""
mean_error_ratio = np.max(mean_error_ratio)
dfactor = np.where(mean_error_ratio < 1, 1.0, dfactor)
err_ratio = np.sqrt(mean_error_ratio)
factor = np.maximum(
1.0 / ifactor, np.minimum(err_ratio ** (1.0 / order) / safety, 1.0 / dfactor)
)
return np.where(
mean_error_ratio == 0,
last_step * ifactor,
last_step / factor,
)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def odeint(ofunc, y0, t, *args, **kwargs):
"""Adaptive stepsize (Dormand-Prince) Runge-Kutta odeint implementation.
Args:
ofunc: Function to evaluate `yt = ofunc(y, t, *args)` that
returns the time derivative of `y`.
y0: initial value for the state.
t: Timespan for `ofunc` evaluation like `np.linspace(0., 10., 101)`.
*args: Additional arguments to `ofunc` beyond y0 and t.
**kwargs: Two relevant keyword arguments:
'rtol': Relative local error tolerance for solver.
'atol': Absolute local error tolerance for solver.
'mxstep': Maximum number of steps to take for each timepoint.
Returns:
Integrated system values at each timepoint.
"""
rtol = kwargs.get("rtol", 1.4e-8)
atol = kwargs.get("atol", 1.4e-8)
mxstep = kwargs.get("mxstep", np.inf)
func = lambda y, t: ofunc(y, t, *args)
def _fori_body_fun(i, val):
"""Internal fori_loop body to interpolate an integral at each timestep."""
t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution = val
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, _ = jax.lax.while_loop(
lambda x: (x[2] < t[i]) & (x[-1] < mxstep),
_while_body_fun,
(cur_y, cur_f, cur_t, dt, last_t, interp_coeff, 0.0),
)
relative_output_time = (t[i] - last_t) / (cur_t - last_t)
out_x = np.polyval(interp_coeff, relative_output_time)
solution = jax.ops.index_update(solution, jax.ops.index[i, :], out_x)
return (t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution)
def _while_body_fun(x):
"""Internal while_loop body to determine interpolation coefficients."""
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, j = x
next_t = cur_t + dt
next_y, next_f, next_y_error, k = runge_kutta_step(
func, cur_y, cur_f, cur_t, dt
)
error_ratios = error_ratio(next_y_error, rtol, atol, cur_y, next_y)
new_interp_coeff = interp_fit_dopri(cur_y, next_y, k, dt)
dt = optimal_step_size(dt, error_ratios)
next_j = j + 1
new = (next_y, next_f, next_t, dt, cur_t, new_interp_coeff, next_j)
old = (cur_y, cur_f, cur_t, dt, last_t, interp_coeff, next_j)
return tuple(map(partial(np.where, np.all(error_ratios <= 1.0)), new, old))
f0 = func(y0, t[0])
dt = initial_step_size(func, t[0], y0, 4, rtol, atol, f0)
interp_coeff = np.array([y0] * 5)
solution = jax.ops.index_update(
np.zeros((t.shape[0], y0.shape[0])), jax.ops.index[0, :], y0
)
init_carry = (t, y0, f0, t[0], dt, t[0], interp_coeff, solution)
*_, solution = jax.lax.fori_loop(1, t.shape[0], _fori_body_fun, init_carry)
return solution
|
def odeint(ofunc, y0, t, *args, **kwargs):
"""Adaptive stepsize (Dormand-Prince) Runge-Kutta odeint implementation.
Args:
ofunc: Function to evaluate `yt = ofunc(y, t, *args)` that
returns the time derivative of `y`.
y0: initial value for the state.
t: Timespan for `ofunc` evaluation like `np.linspace(0., 10., 101)`.
*args: Additional arguments to `ofunc` beyond y0 and t.
**kwargs: Two relevant keyword arguments:
'rtol': Relative local error tolerance for solver.
'atol': Absolute local error tolerance for solver.
'mxstep': Maximum number of steps to take for each timepoint.
Returns:
Integrated system values at each timepoint.
"""
rtol = kwargs.get("rtol", 1.4e-8)
atol = kwargs.get("atol", 1.4e-8)
mxstep = kwargs.get("mxstep", np.inf)
@functools.partial(jax.jit, static_argnums=(0,))
def _fori_body_fun(func, i, val):
"""Internal fori_loop body to interpolate an integral at each timestep."""
t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution = val
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, _ = jax.lax.while_loop(
lambda x: (x[2] < t[i]) & (x[-1] < mxstep),
functools.partial(_while_body_fun, func),
(cur_y, cur_f, cur_t, dt, last_t, interp_coeff, 0.0),
)
relative_output_time = (t[i] - last_t) / (cur_t - last_t)
out_x = np.polyval(interp_coeff, relative_output_time)
return (
t,
cur_y,
cur_f,
cur_t,
dt,
last_t,
interp_coeff,
jax.ops.index_update(solution, jax.ops.index[i, :], out_x),
)
@functools.partial(jax.jit, static_argnums=(0,))
def _while_body_fun(func, x):
"""Internal while_loop body to determine interpolation coefficients."""
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, j = x
next_t = cur_t + dt
next_y, next_f, next_y_error, k = runge_kutta_step(
func, cur_y, cur_f, cur_t, dt
)
error_ratios = error_ratio(next_y_error, rtol, atol, cur_y, next_y)
new_interp_coeff = interp_fit_dopri(cur_y, next_y, k, dt)
dt = optimal_step_size(dt, error_ratios)
next_j = j + 1
new_rav, unravel = ravel_pytree(
(next_y, next_f, next_t, dt, cur_t, new_interp_coeff, next_j)
)
old_rav, _ = ravel_pytree(
(cur_y, cur_f, cur_t, dt, last_t, interp_coeff, next_j)
)
return unravel(np.where(np.all(error_ratios <= 1.0), new_rav, old_rav))
func = lambda y, t: ofunc(y, t, *args)
f0 = func(y0, t[0])
dt = initial_step_size(func, t[0], y0, 4, rtol, atol, f0)
interp_coeff = np.array([y0] * 5)
return jax.lax.fori_loop(
1,
t.shape[0],
functools.partial(_fori_body_fun, func),
(
t,
y0,
f0,
t[0],
dt,
t[0],
interp_coeff,
jax.ops.index_update(
np.zeros((t.shape[0], y0.shape[0])), jax.ops.index[0, :], y0
),
),
)[-1]
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def _fori_body_fun(i, val):
"""Internal fori_loop body to interpolate an integral at each timestep."""
t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution = val
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, _ = jax.lax.while_loop(
lambda x: (x[2] < t[i]) & (x[-1] < mxstep),
_while_body_fun,
(cur_y, cur_f, cur_t, dt, last_t, interp_coeff, 0.0),
)
relative_output_time = (t[i] - last_t) / (cur_t - last_t)
out_x = np.polyval(interp_coeff, relative_output_time)
solution = jax.ops.index_update(solution, jax.ops.index[i, :], out_x)
return (t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution)
|
def _fori_body_fun(func, i, val):
"""Internal fori_loop body to interpolate an integral at each timestep."""
t, cur_y, cur_f, cur_t, dt, last_t, interp_coeff, solution = val
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, _ = jax.lax.while_loop(
lambda x: (x[2] < t[i]) & (x[-1] < mxstep),
functools.partial(_while_body_fun, func),
(cur_y, cur_f, cur_t, dt, last_t, interp_coeff, 0.0),
)
relative_output_time = (t[i] - last_t) / (cur_t - last_t)
out_x = np.polyval(interp_coeff, relative_output_time)
return (
t,
cur_y,
cur_f,
cur_t,
dt,
last_t,
interp_coeff,
jax.ops.index_update(solution, jax.ops.index[i, :], out_x),
)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def _while_body_fun(x):
"""Internal while_loop body to determine interpolation coefficients."""
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, j = x
next_t = cur_t + dt
next_y, next_f, next_y_error, k = runge_kutta_step(func, cur_y, cur_f, cur_t, dt)
error_ratios = error_ratio(next_y_error, rtol, atol, cur_y, next_y)
new_interp_coeff = interp_fit_dopri(cur_y, next_y, k, dt)
dt = optimal_step_size(dt, error_ratios)
next_j = j + 1
new = (next_y, next_f, next_t, dt, cur_t, new_interp_coeff, next_j)
old = (cur_y, cur_f, cur_t, dt, last_t, interp_coeff, next_j)
return tuple(map(partial(np.where, np.all(error_ratios <= 1.0)), new, old))
|
def _while_body_fun(func, x):
"""Internal while_loop body to determine interpolation coefficients."""
cur_y, cur_f, cur_t, dt, last_t, interp_coeff, j = x
next_t = cur_t + dt
next_y, next_f, next_y_error, k = runge_kutta_step(func, cur_y, cur_f, cur_t, dt)
error_ratios = error_ratio(next_y_error, rtol, atol, cur_y, next_y)
new_interp_coeff = interp_fit_dopri(cur_y, next_y, k, dt)
dt = optimal_step_size(dt, error_ratios)
next_j = j + 1
new_rav, unravel = ravel_pytree(
(next_y, next_f, next_t, dt, cur_t, new_interp_coeff, next_j)
)
old_rav, _ = ravel_pytree((cur_y, cur_f, cur_t, dt, last_t, interp_coeff, next_j))
return unravel(np.where(np.all(error_ratios <= 1.0), new_rav, old_rav))
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def vjp_odeint(ofunc, y0, t, *args, **kwargs):
"""Return a function that calculates `vjp(odeint(func(y, t, *args))`.
Args:
ofunc: Function `ydot = ofunc(y, t, *args)` to compute the time
derivative of `y`.
y0: initial value for the state.
t: Timespan for `ofunc` evaluation like `np.linspace(0., 10., 101)`.
*args: Additional arguments to `ofunc` beyond y0 and t.
**kwargs: Two relevant keyword arguments:
'rtol': Relative local error tolerance for solver.
'atol': Absolute local error tolerance for solver.
'mxstep': Maximum number of steps to take for each timepoint.
Returns:
VJP function `vjp = vjp_all(g)` where `yt = ofunc(y, t, *args)`
and g is used for VJP calculation. To evaluate the gradient w/ the VJP,
supply `g = np.ones_like(yt)`. To evaluate the reverse Jacobian do a vmap
over the standard basis of yt.
"""
rtol = kwargs.get("rtol", 1.4e-8)
atol = kwargs.get("atol", 1.4e-8)
mxstep = kwargs.get("mxstep", np.inf)
flat_args, unravel_args = ravel_pytree(args)
flat_func = lambda y, t, flat_args: ofunc(y, t, *unravel_args(flat_args))
@jax.jit
def aug_dynamics(augmented_state, t, flat_args):
"""Original system augmented with vjp_y, vjp_t and vjp_args."""
state_len = int(
np.floor_divide(augmented_state.shape[0] - flat_args.shape[0] - 1, 2)
)
y = augmented_state[:state_len]
adjoint = augmented_state[state_len : 2 * state_len]
dy_dt, vjpfun = jax.vjp(flat_func, y, t, flat_args)
return np.hstack([np.ravel(dy_dt), np.hstack(vjpfun(-adjoint))])
rev_aug_dynamics = lambda y, t, flat_args: -aug_dynamics(y, -t, flat_args)
@jax.jit
def _fori_body_fun(i, val):
"""fori_loop function for VJP calculation."""
rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list = val
this_yt = rev_yt[i, :]
this_t = rev_t[i]
this_tarray = rev_tarray[i, :]
this_gi = rev_gi[i, :]
# this is g[i-1, :] when g has been reversed
this_gim1 = rev_gi[i + 1, :]
state_len = this_yt.shape[0]
vjp_cur_t = np.dot(flat_func(this_yt, this_t, flat_args), this_gi)
vjp_t0 = vjp_t0 - vjp_cur_t
# Run augmented system backwards to the previous observation.
aug_y0 = np.hstack((this_yt, vjp_y, vjp_t0, vjp_args))
aug_ans = odeint(
rev_aug_dynamics,
aug_y0,
this_tarray,
flat_args,
rtol=rtol,
atol=atol,
mxstep=mxstep,
)
vjp_y = aug_ans[1][state_len : 2 * state_len] + this_gim1
vjp_t0 = aug_ans[1][2 * state_len]
vjp_args = aug_ans[1][2 * state_len + 1 :]
time_vjp_list = jax.ops.index_update(time_vjp_list, i, vjp_cur_t)
return rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list
@jax.jit
def vjp_all(g, yt, t):
"""Calculate the VJP g * Jac(odeint(ofunc, y0, t, *args))."""
rev_yt = yt[-1::-1, :]
rev_t = t[-1::-1]
rev_tarray = -np.array([t[-1:0:-1], t[-2::-1]]).T
rev_gi = g[-1::-1, :]
vjp_y = g[-1, :]
vjp_t0 = 0.0
vjp_args = np.zeros_like(flat_args)
time_vjp_list = np.zeros_like(t)
init = (
rev_yt,
rev_t,
rev_tarray,
rev_gi,
vjp_y,
vjp_t0,
vjp_args,
time_vjp_list,
)
result = jax.lax.fori_loop(0, rev_t.shape[0] - 1, _fori_body_fun, init)
time_vjp_list = jax.ops.index_update(result[-1], -1, result[-3])
vjp_times = np.hstack(time_vjp_list)[::-1]
return tuple([result[-4], vjp_times] + list(result[-2]))
primals_out = odeint(
flat_func, y0, t, flat_args, rtol=rtol, atol=atol, mxstep=mxstep
)
vjp_fun = lambda g: vjp_all(g, primals_out, t)
return primals_out, vjp_fun
|
def vjp_odeint(ofunc, y0, t, *args, **kwargs):
"""Return a function that calculates `vjp(odeint(func(y, t, *args))`.
Args:
ofunc: Function `ydot = ofunc(y, t, *args)` to compute the time
derivative of `y`.
y0: initial value for the state.
t: Timespan for `ofunc` evaluation like `np.linspace(0., 10., 101)`.
*args: Additional arguments to `ofunc` beyond y0 and t.
**kwargs: Two relevant keyword arguments:
'rtol': Relative local error tolerance for solver.
'atol': Absolute local error tolerance for solver.
'mxstep': Maximum number of steps to take for each timepoint.
Returns:
VJP function `vjp = vjp_all(g)` where `yt = ofunc(y, t, *args)`
and g is used for VJP calculation. To evaluate the gradient w/ the VJP,
supply `g = np.ones_like(yt)`. To evaluate the reverse Jacobian do a vmap
over the standard basis of yt.
"""
rtol = kwargs.get("rtol", 1.4e-8)
atol = kwargs.get("atol", 1.4e-8)
mxstep = kwargs.get("mxstep", np.inf)
flat_args, unravel_args = ravel_pytree(args)
flat_func = lambda y, t, flat_args: ofunc(y, t, *unravel_args(flat_args))
@jax.jit
def aug_dynamics(augmented_state, t, flat_args):
"""Original system augmented with vjp_y, vjp_t and vjp_args."""
state_len = int(
np.floor_divide(augmented_state.shape[0] - flat_args.shape[0] - 1, 2)
)
y = augmented_state[:state_len]
adjoint = augmented_state[state_len : 2 * state_len]
dy_dt, vjpfun = jax.vjp(flat_func, y, t, flat_args)
return np.hstack([np.ravel(dy_dt), np.hstack(vjpfun(-adjoint))])
rev_aug_dynamics = lambda y, t, flat_args: -aug_dynamics(y, -t, flat_args)
@jax.jit
def _fori_body_fun(i, val):
"""fori_loop function for VJP calculation."""
rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list = val
this_yt = rev_yt[i, :]
this_t = rev_t[i]
this_tarray = rev_tarray[i, :]
this_gi = rev_gi[i, :]
# this is g[i-1, :] when g has been reversed
this_gim1 = rev_gi[i + 1, :]
state_len = this_yt.shape[0]
vjp_cur_t = np.dot(flat_func(this_yt, this_t, flat_args), this_gi)
vjp_t0 = vjp_t0 - vjp_cur_t
# Run augmented system backwards to the previous observation.
aug_y0 = np.hstack((this_yt, vjp_y, vjp_t0, vjp_args))
aug_ans = odeint(
rev_aug_dynamics,
aug_y0,
this_tarray,
flat_args,
rtol=rtol,
atol=atol,
mxstep=mxstep,
)
vjp_y = aug_ans[1][state_len : 2 * state_len] + this_gim1
vjp_t0 = aug_ans[1][2 * state_len]
vjp_args = aug_ans[1][2 * state_len + 1 :]
time_vjp_list = jax.ops.index_update(time_vjp_list, i, vjp_cur_t)
return rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list
@jax.jit
def vjp_all(g, yt, t):
"""Calculate the VJP g * Jac(odeint(ofunc, y0, t, *args))."""
rev_yt = yt[-1::-1, :]
rev_t = t[-1::-1]
rev_tarray = -np.array([t[-1:0:-1], t[-2::-1]]).T
rev_gi = g[-1::-1, :]
vjp_y = g[-1, :]
vjp_t0 = 0.0
vjp_args = np.zeros_like(flat_args)
time_vjp_list = np.zeros_like(t)
result = jax.lax.fori_loop(
0,
rev_t.shape[0] - 1,
_fori_body_fun,
(rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list),
)
time_vjp_list = jax.ops.index_update(result[-1], -1, result[-3])
vjp_times = np.hstack(time_vjp_list)[::-1]
return tuple([result[-4], vjp_times] + list(result[-2]))
primals_out = odeint(
flat_func, y0, t, flat_args, rtol=rtol, atol=atol, mxstep=mxstep
)
vjp_fun = lambda g: vjp_all(g, primals_out, t)
return primals_out, vjp_fun
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def vjp_all(g, yt, t):
"""Calculate the VJP g * Jac(odeint(ofunc, y0, t, *args))."""
rev_yt = yt[-1::-1, :]
rev_t = t[-1::-1]
rev_tarray = -np.array([t[-1:0:-1], t[-2::-1]]).T
rev_gi = g[-1::-1, :]
vjp_y = g[-1, :]
vjp_t0 = 0.0
vjp_args = np.zeros_like(flat_args)
time_vjp_list = np.zeros_like(t)
init = (rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list)
result = jax.lax.fori_loop(0, rev_t.shape[0] - 1, _fori_body_fun, init)
time_vjp_list = jax.ops.index_update(result[-1], -1, result[-3])
vjp_times = np.hstack(time_vjp_list)[::-1]
return tuple([result[-4], vjp_times] + list(result[-2]))
|
def vjp_all(g, yt, t):
"""Calculate the VJP g * Jac(odeint(ofunc, y0, t, *args))."""
rev_yt = yt[-1::-1, :]
rev_t = t[-1::-1]
rev_tarray = -np.array([t[-1:0:-1], t[-2::-1]]).T
rev_gi = g[-1::-1, :]
vjp_y = g[-1, :]
vjp_t0 = 0.0
vjp_args = np.zeros_like(flat_args)
time_vjp_list = np.zeros_like(t)
result = jax.lax.fori_loop(
0,
rev_t.shape[0] - 1,
_fori_body_fun,
(rev_yt, rev_t, rev_tarray, rev_gi, vjp_y, vjp_t0, vjp_args, time_vjp_list),
)
time_vjp_list = jax.ops.index_update(result[-1], -1, result[-3])
vjp_times = np.hstack(time_vjp_list)[::-1]
return tuple([result[-4], vjp_times] + list(result[-2]))
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def build_odeint(ofunc, rtol=1.4e-8, atol=1.4e-8, mxstep=np.inf):
"""Return `f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)`.
Given the function ofunc(y, t, *args), return the jitted function
`f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)` with
the VJP of `f` defined using `vjp_odeint`, where:
`y0` is the initial condition of the ODE integration,
`t` is the time course of the integration, and
`*args` are all other arguments to `ofunc`.
Args:
ofunc: The function to be wrapped into an ODE integration.
rtol: relative local error tolerance for solver.
atol: absolute local error tolerance for solver.
mxstep: Maximum number of steps to take for each timepoint.
Returns:
`f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)`
"""
fwd = partial(odeint, ofunc, rtol=rtol, atol=atol, mxstep=mxstep)
bwd = partial(vjp_odeint, ofunc, rtol=rtol, atol=atol, mxstep=mxstep)
return custom_gradient(fwd, bwd)
|
def build_odeint(ofunc, rtol=1.4e-8, atol=1.4e-8, mxstep=onp.inf):
"""Return `f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)`.
Given the function ofunc(y, t, *args), return the jitted function
`f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)` with
the VJP of `f` defined using `vjp_odeint`, where:
`y0` is the initial condition of the ODE integration,
`t` is the time course of the integration, and
`*args` are all other arguments to `ofunc`.
Args:
ofunc: The function to be wrapped into an ODE integration.
rtol: relative local error tolerance for solver.
atol: absolute local error tolerance for solver.
mxstep: Maximum number of steps to take for each timepoint.
Returns:
`f(y0, t, args) = odeint(ofunc(y, t, *args), y0, t, args)`
"""
ct_odeint = jax.custom_transforms(
lambda y0, t, *args: odeint(
ofunc, y0, t, *args, rtol=rtol, atol=atol, mxstep=mxstep
)
)
v = lambda y0, t, *args: vjp_odeint(
ofunc, y0, t, *args, rtol=rtol, atol=atol, mxstep=mxstep
)
jax.defvjp_all(ct_odeint, v)
return jax.jit(ct_odeint)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def my_odeint_jacrev(fun):
"""Calculate the Jacobian of an odeint."""
@jax.jit
def _jacfun(*args, **kwargs):
ys, pullback = vjp_odeint(fun, *args, **kwargs)
my_jac = jax.vmap(pullback)(jax.api._std_basis(ys))
my_jac = jax.api.tree_map(
partial(jax.api._unravel_array_into_pytree, ys, 0), my_jac
)
my_jac = jax.api.tree_transpose(
jax.api.tree_structure(args), jax.api.tree_structure(ys), my_jac
)
return my_jac
return _jacfun
|
def my_odeint_jacrev(fun):
"""Calculate the Jacobian of an odeint."""
@jax.jit
def _jacfun(*args, **kwargs):
ys, pullback = vjp_odeint(fun, *args, **kwargs)
my_jac = jax.vmap(pullback)(jax.api._std_basis(ys))
my_jac = jax.api.tree_map(
functools.partial(jax.api._unravel_array_into_pytree, ys, 0), my_jac
)
my_jac = jax.api.tree_transpose(
jax.api.tree_structure(args), jax.api.tree_structure(ys), my_jac
)
return my_jac
return _jacfun
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def _jacfun(*args, **kwargs):
ys, pullback = vjp_odeint(fun, *args, **kwargs)
my_jac = jax.vmap(pullback)(jax.api._std_basis(ys))
my_jac = jax.api.tree_map(
partial(jax.api._unravel_array_into_pytree, ys, 0), my_jac
)
my_jac = jax.api.tree_transpose(
jax.api.tree_structure(args), jax.api.tree_structure(ys), my_jac
)
return my_jac
|
def _jacfun(*args, **kwargs):
ys, pullback = vjp_odeint(fun, *args, **kwargs)
my_jac = jax.vmap(pullback)(jax.api._std_basis(ys))
my_jac = jax.api.tree_map(
functools.partial(jax.api._unravel_array_into_pytree, ys, 0), my_jac
)
my_jac = jax.api.tree_transpose(
jax.api.tree_structure(args), jax.api.tree_structure(ys), my_jac
)
return my_jac
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def pend_benchmark_odeint():
_, _ = benchmark_odeint(
pend, (np.pi - 0.1, 0.0), np.linspace(0.0, 10.0, 101), 0.25, 9.8
)
|
def pend_benchmark_odeint():
_, _ = benchmark_odeint(
pend, (onp.pi - 0.1, 0.0), onp.linspace(0.0, 10.0, 101), 0.25, 9.8
)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def batch(fun, in_vals, in_dims, out_dim_dests):
# executes a batched version of `fun` following out_dim_dests
batched_fun = batch_fun(fun, in_dims, out_dim_dests)
return batched_fun.call_wrapped(*in_vals)
|
def batch(fun, in_vals, in_dims, out_dim_dests):
(size,) = {x.shape[d] for x, d in zip(in_vals, in_dims) if d is not not_mapped}
out_vals, out_dims = batch_fun(fun, in_vals, in_dims)
return map(partial(matchaxis, size), out_dims, out_dim_dests(), out_vals)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def batch_subtrace(master, in_dims, *in_vals, **params):
trace = BatchTrace(master, core.cur_sublevel())
in_tracers = [
BatchTracer(trace, val, dim) if dim is not None else val
for val, dim in zip(in_vals, in_dims)
]
outs = yield in_tracers, params
out_tracers = map(trace.full_raise, outs)
out_vals, out_dims = unzip2((t.val, t.batch_dim) for t in out_tracers)
yield out_vals, out_dims
|
def batch_subtrace(master, in_dims, *in_vals):
trace = BatchTrace(master, core.cur_sublevel())
in_tracers = [
BatchTracer(trace, val, dim) if dim is not None else val
for val, dim in zip(in_vals, in_dims)
]
outs = yield in_tracers, {}
out_tracers = map(trace.full_raise, outs)
out_vals, out_dims = unzip2((t.val, t.batch_dim) for t in out_tracers)
yield out_vals, out_dims
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def batch_fun(fun, in_dims, out_dim_dests):
# transformation version of batch, which doesn't call the function
fun, out_dims = batch_subtrace(fun)
return _batch_fun(fun, in_dims, out_dims, out_dim_dests)
|
def batch_fun(fun, in_vals, in_dims):
with new_master(BatchTrace) as master:
fun, out_dims = batch_subtrace(fun, master, in_dims)
out_vals = fun.call_wrapped(*in_vals)
del master
return out_vals, out_dims()
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def partial_eval_wrapper(avals, *consts):
py_args = (map(PartialVal, zip(avals, consts)),)
jaxpr, (out_pvals, consts, env) = yield py_args, {}
out_pvs, out_consts = unzip2(out_pvals)
out = tuple(out_consts) + tuple(consts)
yield out, (out_pvs, jaxpr, env)
|
def partial_eval_wrapper(avals, *consts):
py_args = (map(PartialVal, zip(avals, consts)),)
jaxpr, (out_pvals, consts, env) = yield py_args, {}
out_pvs, out_consts = unzip2(out_pvals)
out = tuple(out_consts) + tuple(consts) # TODO: can consts be traced?
yield out, (out_pvs, jaxpr, env)
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def _jvp_slogdet(g, ans, x):
if np.issubdtype(np._dtype(x), np.complexfloating):
raise NotImplementedError # TODO(pfau): make this work for complex types
jvp_logdet = np.trace(solve(x, g), axis1=-1, axis2=-2)
return ad_util.zero, jvp_logdet
|
def _jvp_slogdet(g, ans, x):
jvp_sign = np.zeros(x.shape[:-2])
jvp_logdet = np.trace(solve(x, g), axis1=-1, axis2=-2)
return jvp_sign, jvp_logdet
|
https://github.com/google/jax/issues/1097
|
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-91-eb0f4b094a7d> in <module>()
12 args = ([1.0, {}],)
13 print(identity(args))
---> 14 jax.jvp(identity, args, args)
/usr/local/lib/python3.6/dist-packages/jax/api.py in jvp(fun, primals, tangents)
866 ps_flat, ts_flat, in_trees = unzip3(map(trim_arg, primals, tangents))
867 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 868 out_primal, out_tangent = ad.jvp(jaxtree_fun).call_wrapped(ps_flat, ts_flat)
869 return (build_tree(out_tree(), out_primal), build_tree(out_tree(), out_tangent))
870
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
147
148 del gen
--> 149 ans = self.f(*args, **dict(self.params, **kwargs))
150 del args
151 while stack:
/usr/local/lib/python3.6/dist-packages/jax/api.py in __call__(self, *args, **kwargs)
1177 jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals_in, instantiate=True)
1178 ans = self.prim.bind(core.pack(consts), jax_kwargs, *jax_args,
-> 1179 in_trees=in_trees, jaxpr=jaxpr)
1180 return build_tree(out_tree(), ans)
1181
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/ad.py in process_primitive(self, primitive, tracers, params)
250 "Forward-mode differentiation rule for '{}' not implemented"
251 .format(primitive))
--> 252 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
253 return JVPTracer(self, primal_out, tangent_out)
254
/usr/local/lib/python3.6/dist-packages/jax/api.py in custom_transforms_jvp(primals, tangents, **params)
1321 in_trees = params['in_trees']
1322 args = tuple(map(build_tree, in_trees, jax_args))
-> 1323 args_dot = tuple(map(build_tree, in_trees, jax_args_dot))
1324 pytree_out, pytree_out_dot = custom_jvp(args, args_dot)
1325 out, out_tree = pytree_to_jaxtupletree(pytree_out)
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
/usr/local/lib/python3.6/dist-packages/jax/util.py in safe_map(f, *args)
39 for arg in args[1:]:
40 assert len(arg) == n, 'length mismatch: {}'.format(list(map(len, args)))
---> 41 return list(map(f, *args))
42
43
/usr/local/lib/python3.6/dist-packages/jax/tree_util.py in build_tree(treedef, xs)
202 else:
203 # We use 'iter' for clearer error messages
--> 204 children = safe_map(build_tree, iter(treedef.children), iter(xs))
205 return treedef.node_type.from_iterable(treedef.node_data, children)
206
TypeError: 'Zero' object is not iterable
|
TypeError
|
def vmap(fun: Callable, in_axes=0, out_axes=0):
"""Vectorizing map. Creates a function which maps `fun` over argument axes.
Args:
fun: Function to be mapped over additional axes.
in_axes: A nonnegative integer, None, or (nested) standard Python container
(tuple/list/dict) thereof specifying which input array axes to map over.
If each positional argument to ``fun`` is an array, then ``in_axes`` can
be a nonnegative integer, a None, or a tuple of integers and Nones with
length equal to the number of positional arguments to ``fun``. An integer
or None indicates which array axis to map over for all arguments (with
None indicating not to map any axis), and a tuple indicates which axis to
map for each corresponding positional argument. If the positional
arguments to ``fun`` are container types, the corresponding element of
``in_axes`` can itself be a matching container, so that distinct array
axes can be mapped for different container elements. ``in_axes`` must be a
container tree prefix of the positional argument tuple passed to ``fun``.
out_axes: A nonnegative integer, None, or (nested) standard Python container
(tuple/list/dict) thereof indicating where the mapped axis should appear
in the output.
Returns:
Batched/vectorized version of ``fun`` with arguments that correspond to
those of ``fun``, but with extra array axes at positions indicated by
``in_axes``, and a return value that corresponds to that of ``fun``, but
with extra array axes at positions indicated by ``out_axes``.
For example, we can implement a matrix-matrix product using a vector dot
product:
>>> vv = lambda x, y: np.vdot(x, y) # ([a], [a]) -> []
>>> mv = vmap(vv, (0, None), 0) # ([b,a], [a]) -> [b] (b is the mapped axis)
>>> mm = vmap(mv, (None, 1), 1) # ([b,a], [a,c]) -> [b,c] (c is the mapped axis)
Here we use ``[a,b]`` to indicate an array with shape (a,b). Here are some
variants:
>>> mv1 = vmap(vv, (0, 0), 0) # ([b,a], [b,a]) -> [b] (b is the mapped axis)
>>> mv2 = vmap(vv, (0, 1), 0) # ([b,a], [a,b]) -> [b] (b is the mapped axis)
>>> mm2 = vmap(mv2, (1, 1), 0) # ([b,c,a], [a,c,b]) -> [c,b] (c is the mapped axis)
Here's an example of using container types in ``in_axes`` to specify which
axes of the container elements to map over:
>>> A, B, C, D = 2, 3, 4, 5
>>> x = np.ones((A, B))
>>> y = np.ones((B, C))
>>> z = np.ones((C, D))
>>> def foo(tree_arg):
... x, (y, z) = tree_arg
... return np.dot(x, np.dot(y, z))
>>> tree = (x, (y, z))
>>> print(foo(tree))
[[12. 12. 12. 12. 12.]
[12. 12. 12. 12. 12.]]
>>> from jax import vmap
>>> K = 6 # batch size
>>> x = np.ones((K, A, B)) # batch axis in different locations
>>> y = np.ones((B, K, C))
>>> z = np.ones((C, D, K))
>>> tree = (x, (y, z))
>>> vfoo = vmap(foo, in_axes=((0, (1, 2)),))
>>> print(vfoo(tree)).shape
(6, 2, 5)
"""
docstr = (
"Vectorized version of {fun}. Takes similar arguments as {fun} "
"but with additional array axes over which {fun} is mapped."
)
if isinstance(in_axes, list):
# To be a tree prefix of the positional args tuple, in_axes can never be a
# list: if in_axes is not a leaf, it must be a tuple of trees. However,
# in cases like these users expect tuples and lists to be treated
# essentially interchangeably, so we canonicalize lists to tuples here
# rather than raising an error. https://github.com/google/jax/issues/2367
in_axes = tuple(in_axes)
_check_callable(fun)
if not isinstance(in_axes, (list, tuple, type(None), int)) or not isinstance(
out_axes, (list, tuple, type(None), int)
):
msg = (
"vmap arguments in_axes and out_axes must each be an integer, None, "
"or a (nested) tuple of those types, got {} and {} respectively."
)
raise TypeError(msg.format(type(in_axes), type(out_axes)))
def _check_axis_sizes(tree, vals, dims):
mapped_axis_sizes = {x.shape[d] for x, d in zip(vals, dims) if d is not None}
try:
(sizes,) = mapped_axis_sizes
except ValueError as e:
msg = "vmap got inconsistent sizes for array axes to be mapped:\n{}"
# we switch the error message based on whether args is a tuple of arrays,
# in which case we can produce an error message based on argument indices,
# or if it has nested containers.
# TODO(mattjj,phawkins): add a way to inspect pytree kind more directly
if tree == tree_flatten((core.unit,) * tree.num_leaves)[1]:
lines1 = [
"arg {} has shape {} and axis {} is to be mapped".format(
i, x.shape, d
)
for i, (x, d) in enumerate(zip(vals, dims))
]
sizes = collections.defaultdict(list)
for i, (x, d) in enumerate(zip(vals, dims)):
if d is not None:
sizes[x.shape[d]].append(i)
lines2 = [
"{} {} {} {} to be mapped of size {}".format(
"args" if len(idxs) > 1 else "arg",
", ".join(map(str, idxs)),
"have" if len(idxs) > 1 else "has",
"axes" if len(idxs) > 1 else "an axis",
size,
)
for size, idxs in sizes.items()
]
raise ValueError(msg.format("\n".join(lines1 + ["so"] + lines2))) from e
else:
sizes = [
x.shape[d] if d is not None else None for x, d in zip(vals, dims)
]
sizes = tree_unflatten(tree, sizes)
raise ValueError(
msg.format("the tree of axis sizes is:\n{}".format(sizes))
) from e
@wraps(fun, docstr=docstr)
def batched_fun(*args):
args_flat, in_tree = tree_flatten(args)
f = lu.wrap_init(fun)
flat_fun, out_tree = flatten_fun_nokwargs(f, in_tree)
in_axes_flat = _flatten_axes(in_tree, in_axes)
_check_axis_sizes(in_tree, args_flat, in_axes_flat)
out_flat = batching.batch(
flat_fun,
args_flat,
in_axes_flat,
lambda: _flatten_axes(out_tree(), out_axes),
)
return tree_unflatten(out_tree(), out_flat)
return batched_fun
|
def vmap(fun: Callable, in_axes=0, out_axes=0):
"""Vectorizing map. Creates a function which maps `fun` over argument axes.
Args:
fun: Function to be mapped over additional axes.
in_axes: A nonnegative integer, None, or (nested) standard Python container
(tuple/list/dict) thereof specifying which input array axes to map over.
If each positional argument to ``fun`` is an array, then ``in_axes`` can
be a nonnegative integer, a None, or a tuple of integers and Nones with
length equal to the number of positional arguments to ``fun``. An integer
or None indicates which array axis to map over for all arguments (with
None indicating not to map any axis), and a tuple indicates which axis to
map for each corresponding positional argument. If the positional
arguments to ``fun`` are container types, the corresponding element of
``in_axes`` can itself be a matching container, so that distinct array
axes can be mapped for different container elements. ``in_axes`` must be a
container tree prefix of the positional argument tuple passed to ``fun``.
out_axes: A nonnegative integer, None, or (nested) standard Python container
(tuple/list/dict) thereof indicating where the mapped axis should appear
in the output.
Returns:
Batched/vectorized version of ``fun`` with arguments that correspond to
those of ``fun``, but with extra array axes at positions indicated by
``in_axes``, and a return value that corresponds to that of ``fun``, but
with extra array axes at positions indicated by ``out_axes``.
For example, we can implement a matrix-matrix product using a vector dot
product:
>>> vv = lambda x, y: np.vdot(x, y) # ([a], [a]) -> []
>>> mv = vmap(vv, (0, None), 0) # ([b,a], [a]) -> [b] (b is the mapped axis)
>>> mm = vmap(mv, (None, 1), 1) # ([b,a], [a,c]) -> [b,c] (c is the mapped axis)
Here we use ``[a,b]`` to indicate an array with shape (a,b). Here are some
variants:
>>> mv1 = vmap(vv, (0, 0), 0) # ([b,a], [b,a]) -> [b] (b is the mapped axis)
>>> mv2 = vmap(vv, (0, 1), 0) # ([b,a], [a,b]) -> [b] (b is the mapped axis)
>>> mm2 = vmap(mv2, (1, 1), 0) # ([b,c,a], [a,c,b]) -> [c,b] (c is the mapped axis)
Here's an example of using container types in ``in_axes`` to specify which
axes of the container elements to map over:
>>> A, B, C, D = 2, 3, 4, 5
>>> x = np.ones((A, B))
>>> y = np.ones((B, C))
>>> z = np.ones((C, D))
>>> def foo(tree_arg):
... x, (y, z) = tree_arg
... return np.dot(x, np.dot(y, z))
>>> tree = (x, (y, z))
>>> print(foo(tree))
[[12. 12. 12. 12. 12.]
[12. 12. 12. 12. 12.]]
>>> from jax import vmap
>>> K = 6 # batch size
>>> x = np.ones((K, A, B)) # batch axis in different locations
>>> y = np.ones((B, K, C))
>>> z = np.ones((C, D, K))
>>> tree = (x, (y, z))
>>> vfoo = vmap(foo, in_axes=((0, (1, 2)),))
>>> print(vfoo(tree)).shape
(6, 2, 5)
"""
docstr = (
"Vectorized version of {fun}. Takes similar arguments as {fun} "
"but with additional array axes over which {fun} is mapped."
)
_check_callable(fun)
if not isinstance(in_axes, (list, tuple, type(None), int)) or not isinstance(
out_axes, (list, tuple, type(None), int)
):
msg = (
"vmap arguments in_axes and out_axes must each be an integer, None, "
"or a (nested) tuple of those types, got {} and {} respectively."
)
raise TypeError(msg.format(type(in_axes), type(out_axes)))
def _check_axis_sizes(tree, vals, dims):
mapped_axis_sizes = {x.shape[d] for x, d in zip(vals, dims) if d is not None}
try:
(sizes,) = mapped_axis_sizes
except ValueError as e:
msg = "vmap got inconsistent sizes for array axes to be mapped:\n{}"
# we switch the error message based on whether args is a tuple of arrays,
# in which case we can produce an error message based on argument indices,
# or if it has nested containers.
# TODO(mattjj,phawkins): add a way to inspect pytree kind more directly
if tree == tree_flatten((core.unit,) * tree.num_leaves)[1]:
lines1 = [
"arg {} has shape {} and axis {} is to be mapped".format(
i, x.shape, d
)
for i, (x, d) in enumerate(zip(vals, dims))
]
sizes = collections.defaultdict(list)
for i, (x, d) in enumerate(zip(vals, dims)):
if d is not None:
sizes[x.shape[d]].append(i)
lines2 = [
"{} {} {} {} to be mapped of size {}".format(
"args" if len(idxs) > 1 else "arg",
", ".join(map(str, idxs)),
"have" if len(idxs) > 1 else "has",
"axes" if len(idxs) > 1 else "an axis",
size,
)
for size, idxs in sizes.items()
]
raise ValueError(msg.format("\n".join(lines1 + ["so"] + lines2))) from e
else:
sizes = [
x.shape[d] if d is not None else None for x, d in zip(vals, dims)
]
sizes = tree_unflatten(tree, sizes)
raise ValueError(
msg.format("the tree of axis sizes is:\n{}".format(sizes))
) from e
@wraps(fun, docstr=docstr)
def batched_fun(*args):
args_flat, in_tree = tree_flatten(args)
f = lu.wrap_init(fun)
flat_fun, out_tree = flatten_fun_nokwargs(f, in_tree)
in_axes_flat = _flatten_axes(in_tree, in_axes)
_check_axis_sizes(in_tree, args_flat, in_axes_flat)
out_flat = batching.batch(
flat_fun,
args_flat,
in_axes_flat,
lambda: _flatten_axes(out_tree(), out_axes),
)
return tree_unflatten(out_tree(), out_flat)
return batched_fun
|
https://github.com/google/jax/issues/2367
|
ValueError: Expected list, got (([<object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>], <object object at 0x7fcb21ef58b0>), <object object at 0x7fcb21ef58b0>, <object object at 0x7fcb21ef58b0>).
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/jax/api.py in _flatten_axes(treedef, axis_tree)
714 msg = ("axes specification must be a tree prefix of the corresponding "
715 "value, got specification {} for value {}.")
--> 716 raise ValueError(msg.format(axis_tree, treedef))
717 axes = [None if a is proxy else a for a in axes]
718 assert len(axes) == treedef.num_leaves
ValueError: axes specification must be a tree prefix of the corresponding value, got specification [([], PyTreeDef(dict[['dense1', 'dense2', 'dense3']], [PyTreeDef(dict[[]], []),PyTreeDef(dict[[]], []),PyTreeDef(dict[[]], [])])), 0, 0] for value PyTreeDef(tuple, [PyTreeDef(tuple, [PyTreeDef(list, [*,*,*,*,*,*]),*]),*,*]).
|
ValueError
|
def backward_pass(jaxpr: core.Jaxpr, consts, args, cotangents_in):
if all(ct is zero for ct in cotangents_in):
return [zero] * len(jaxpr.invars)
def write_cotangent(v, ct):
# assert v not in primal_env
if ct is not None and type(v) is not Literal:
ct_env[v] = add_tangents(ct_env[v], ct) if v in ct_env else ct
def read_cotangent(v):
return ct_env.get(v, zero)
def read_primal(v):
if type(v) is Literal:
return v.val
else:
return primal_env.get(v, undefined_primal)
def write_primal(v, val):
if val is not undefined_primal:
primal_env[v] = val
primal_env = {}
write_primal(core.unitvar, core.unit)
map(write_primal, jaxpr.constvars, consts)
map(write_primal, jaxpr.invars, args)
def is_linear(var):
if type(var) is Literal:
return False
else:
return primal_env.get(var, undefined_primal) is undefined_primal
linear_eqns = []
for eqn in jaxpr.eqns:
if not eqn.primitive.call_primitive:
if any(is_linear(v) for v in eqn.invars):
linear_eqns.append(eqn)
else:
in_vals = map(read_primal, eqn.invars)
ans = eqn.primitive.bind(*in_vals, **eqn.params)
if eqn.primitive.multiple_results:
map(write_primal, eqn.outvars, ans)
else:
write_primal(eqn.outvars[0], ans)
else:
call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)
if any(is_linear(v) for v in eqn.invars):
linear_eqns.append(eqn)
if any(not is_linear(v) for v in eqn.invars):
ans = _eval_subjaxpr_primals(
eqn.primitive, call_jaxpr, map(read_primal, eqn.invars), params
)
map(write_primal, eqn.outvars, ans)
ct_env = {}
map(write_cotangent, jaxpr.outvars, cotangents_in)
for eqn in linear_eqns[::-1]:
invals = map(read_primal, eqn.invars)
if eqn.primitive.multiple_results:
cts_in = map(read_cotangent, eqn.outvars)
else:
(cts_in,) = map(read_cotangent, eqn.outvars)
if eqn.primitive.call_primitive:
call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)
cts_out = get_primitive_transpose(eqn.primitive)(
params, call_jaxpr, invals, cts_in
)
else:
cts_out = get_primitive_transpose(eqn.primitive)(
cts_in, *invals, **eqn.params
)
cts_out = [zero] * len(eqn.invars) if cts_out is zero else cts_out
map(write_cotangent, eqn.invars, cts_out)
cotangents_out = map(read_cotangent, jaxpr.invars)
return cotangents_out
|
def backward_pass(jaxpr: core.Jaxpr, consts, args, cotangents_in):
if all(ct is zero for ct in cotangents_in):
return [zero] * len(jaxpr.invars)
def write_cotangent(v, ct):
# assert v not in primal_env
if ct is not None and type(v) is not Literal:
ct_env[v] = add_tangents(ct_env[v], ct) if v in ct_env else ct
def read_cotangent(v):
return ct_env.get(v, zero)
def read_primal(v):
if type(v) is Literal:
return v.val
else:
return primal_env.get(v, undefined_primal)
def write_primal(v, val):
if val is not undefined_primal:
primal_env[v] = val
primal_env = {}
write_primal(core.unitvar, core.unit)
map(write_primal, jaxpr.constvars, consts)
map(write_primal, jaxpr.invars, args)
def is_linear(var):
if type(var) is Literal:
return False
else:
return primal_env.get(var, undefined_primal) is undefined_primal
linear_eqns = []
for eqn in jaxpr.eqns:
if not eqn.primitive.call_primitive:
if any(is_linear(v) for v in eqn.invars):
linear_eqns.append(eqn)
else:
in_vals = map(read_primal, eqn.invars)
ans = eqn.primitive.bind(*in_vals, **eqn.params)
if eqn.primitive.multiple_results:
map(write_primal, eqn.outvars, ans)
else:
write_primal(eqn.outvars[0], ans)
else:
call_jaxpr = eqn.params["call_jaxpr"]
if any(is_linear(v) for v in eqn.invars):
linear_eqns.append(eqn)
elif eqn.primitive is not pe.remat_call_p:
ans = _eval_subjaxpr_primals(
eqn.primitive, call_jaxpr, map(read_primal, eqn.invars), eqn.params
)
map(write_primal, eqn.outvars, ans)
# we special-case remat_call here because it can be mixed linear /
# nonlinear, so we always evaluate it even if it has a linear part
if eqn.primitive is pe.remat_call_p:
ans = _eval_subjaxpr_primals(
eqn.primitive, call_jaxpr, map(read_primal, eqn.invars), eqn.params
)
map(write_primal, eqn.outvars, ans)
ct_env = {}
map(write_cotangent, jaxpr.outvars, cotangents_in)
for eqn in linear_eqns[::-1]:
invals = map(read_primal, eqn.invars)
if eqn.primitive.multiple_results:
cts_in = map(read_cotangent, eqn.outvars)
else:
(cts_in,) = map(read_cotangent, eqn.outvars)
if eqn.primitive.call_primitive:
call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)
cts_out = get_primitive_transpose(eqn.primitive)(
params, call_jaxpr, invals, cts_in
)
else:
cts_out = get_primitive_transpose(eqn.primitive)(
cts_in, *invals, **eqn.params
)
cts_out = [zero] * len(eqn.invars) if cts_out is zero else cts_out
map(write_cotangent, eqn.invars, cts_out)
cotangents_out = map(read_cotangent, jaxpr.invars)
return cotangents_out
|
https://github.com/google/jax/issues/2180
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-16-3143f2d8c2b3> in <module>()
11 x = jnp.ones([1, 1, 1])
12 f = jax.remat(f)
---> 13 jax.grad(f)(w, x)
17 frames
google3/third_party/py/jax/interpreters/ad.py in bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs)
503
504 def bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs):
--> 505 assert (x is undefined_primal) ^ (y is undefined_primal)
506 if x is undefined_primal:
507 out = zero if cotangent is zero else lhs_rule(cotangent, y, **kwargs)
AssertionError:
|
AssertionError
|
def _eval_primals(jaxpr, args):
primal_env = {}
def read_primal(v):
if type(v) is Literal:
return v.val
else:
return primal_env.get(v, undefined_primal)
def write_primal(v, val):
if val is not undefined_primal:
primal_env[v] = val
def is_linear(var):
if type(var) is Literal:
return False
else:
return primal_env.get(var, undefined_primal) is undefined_primal
write_primal(core.unitvar, core.unit)
assert not jaxpr.constvars
map(write_primal, jaxpr.invars, args)
for eqn in jaxpr.eqns:
if not eqn.primitive.call_primitive:
if not any(is_linear(v) for v in eqn.invars):
in_vals = map(read_primal, eqn.invars)
ans = eqn.primitive.bind(*in_vals, **eqn.params)
if eqn.primitive.multiple_results:
map(write_primal, eqn.outvars, ans)
else:
write_primal(eqn.outvars[0], ans)
else:
call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)
if any(not is_linear(v) for v in eqn.invars):
ans = _eval_subjaxpr_primals(
eqn.primitive, call_jaxpr, map(read_primal, eqn.invars), params
)
map(write_primal, eqn.outvars, ans)
return map(read_primal, jaxpr.outvars)
|
def _eval_primals(jaxpr, args):
primal_env = {}
def read_primal(v):
if type(v) is Literal:
return v.val
else:
return primal_env.get(v, undefined_primal)
def write_primal(v, val):
if val is not undefined_primal:
primal_env[v] = val
def is_linear(var):
if type(var) is Literal:
return False
else:
return primal_env.get(var, undefined_primal) is undefined_primal
write_primal(core.unitvar, core.unit)
assert not jaxpr.constvars
map(write_primal, jaxpr.invars, args)
for eqn in jaxpr.eqns:
if not eqn.primitive.call_primitive:
if not any(is_linear(v) for v in eqn.invars):
in_vals = map(read_primal, eqn.invars)
ans = eqn.primitive.bind(*in_vals, **eqn.params)
if eqn.primitive.multiple_results:
map(write_primal, eqn.outvars, ans)
else:
write_primal(eqn.outvars[0], ans)
else:
call_jaxpr = eqn.params["call_jaxpr"]
if eqn.primitive is pe.remat_call_p or not any(
is_linear(v) for v in eqn.invars
):
ans = _eval_subjaxpr_primals(
eqn.primitive, call_jaxpr, map(read_primal, eqn.invars), eqn.params
)
map(write_primal, eqn.outvars, ans)
return map(read_primal, jaxpr.outvars)
|
https://github.com/google/jax/issues/2180
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-16-3143f2d8c2b3> in <module>()
11 x = jnp.ones([1, 1, 1])
12 f = jax.remat(f)
---> 13 jax.grad(f)(w, x)
17 frames
google3/third_party/py/jax/interpreters/ad.py in bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs)
503
504 def bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs):
--> 505 assert (x is undefined_primal) ^ (y is undefined_primal)
506 if x is undefined_primal:
507 out = zero if cotangent is zero else lhs_rule(cotangent, y, **kwargs)
AssertionError:
|
AssertionError
|
def _scan_transpose(cts, *args, forward, length, num_consts, num_carry, jaxpr, linear):
# we've only implemented transposing scans with specific lin/nonlin patterns
consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry])
num_ires = len(consts_lin) - sum(consts_lin)
num_eres = len(xs_lin) - sum(xs_lin)
if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires):
raise NotImplementedError
if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:
raise NotImplementedError
if not all(init_lin):
pass # TODO(mattjj): error check https://github.com/google/jax/issues/1963
consts, _, xs = split_list(args, [num_consts, num_carry])
ires, _ = split_list(consts, [num_ires])
_, eres = split_list(xs, [sum(xs_lin)])
assert not any(r is ad.undefined_primal for r in ires)
assert not any(r is ad.undefined_primal for r in eres)
carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
ys_avals = _map(partial(_promote_aval_rank, length), y_avals)
ct_carry, ct_ys = split_list(cts, [num_carry])
ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry)
ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys)
ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts])
# jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])
# jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])
jaxpr_trans = _transpose_scan_jaxpr(
num_ires, num_consts - num_ires, num_eres, jaxpr
)
linear_trans = (
[False] * num_ires
+ [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys))
+ [False] * num_eres
)
outs = scan_p.bind(
*(ires + ct_consts + ct_carry + ct_ys + eres),
forward=not forward,
length=length,
jaxpr=jaxpr_trans,
num_consts=num_ires,
num_carry=num_consts - num_ires + num_carry,
linear=tuple(linear_trans),
)
ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])
return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres
|
def _scan_transpose(cts, *args, forward, length, num_consts, num_carry, jaxpr, linear):
# we've only implemented transposing scans with specific lin/nonlin patterns
consts_lin, init_lin, xs_lin = split_list(linear, [num_consts, num_carry])
num_ires = len(consts_lin) - sum(consts_lin)
num_eres = len(xs_lin) - sum(xs_lin)
if consts_lin != [False] * num_ires + [True] * (len(consts_lin) - num_ires):
raise NotImplementedError
if xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:
raise NotImplementedError
if not all(init_lin):
raise NotImplementedError
consts, init, xs = split_list(args, [num_consts, num_carry])
ires, consts = split_list(consts, [num_ires])
xs, eres = split_list(xs, [sum(xs_lin)])
assert not any(r is ad.undefined_primal for r in ires)
assert not any(r is ad.undefined_primal for r in eres)
carry_avals, y_avals = split_list(jaxpr.out_avals, [num_carry])
ys_avals = _map(partial(_promote_aval_rank, length), y_avals)
ct_carry, ct_ys = split_list(cts, [num_carry])
ct_carry = _map(ad.instantiate_zeros_aval, carry_avals, ct_carry)
ct_ys = _map(ad.instantiate_zeros_aval, ys_avals, ct_ys)
ct_consts = _map(ad_util.zeros_like_aval, jaxpr.in_avals[num_ires:num_consts])
# jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])
# jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])
jaxpr_trans = _transpose_scan_jaxpr(
num_ires, num_consts - num_ires, num_eres, jaxpr
)
linear_trans = (
[False] * num_ires
+ [True] * (len(ct_consts) + len(ct_carry) + len(ct_ys))
+ [False] * num_eres
)
outs = scan_p.bind(
*(ires + ct_consts + ct_carry + ct_ys + eres),
forward=not forward,
length=length,
jaxpr=jaxpr_trans,
num_consts=num_ires,
num_carry=num_consts - num_ires + num_carry,
linear=tuple(linear_trans),
)
ct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])
return [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres
|
https://github.com/google/jax/issues/2180
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-16-3143f2d8c2b3> in <module>()
11 x = jnp.ones([1, 1, 1])
12 f = jax.remat(f)
---> 13 jax.grad(f)(w, x)
17 frames
google3/third_party/py/jax/interpreters/ad.py in bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs)
503
504 def bilinear_transpose(lhs_rule, rhs_rule, cotangent, x, y, **kwargs):
--> 505 assert (x is undefined_primal) ^ (y is undefined_primal)
506 if x is undefined_primal:
507 out = zero if cotangent is zero else lhs_rule(cotangent, y, **kwargs)
AssertionError:
|
AssertionError
|
def sign(x):
r"""Elementwise sign.
For floating-point inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`
For signed integer inputs, returns
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
0 & x = 0\\
1 & x > 0
\end{cases}`
For complex inputs, returns the complex phase, i.e.
:math:`\mathrm{sign}(x) = \frac{x}{|x|}`.
"""
return sign_p.bind(x)
|
def sign(x):
r"""Elementwise sign.
:math:`\mathrm{sign}(x) = \begin{cases}
-1 & x < 0\\
-0 & x = -0\\
\mathit{NaN} & x = \mathit{NaN}\\
+0 & x = +0\\
1 & x > 0
\end{cases}`.
"""
return sign_p.bind(x)
|
https://github.com/google/jax/issues/1933
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params)
179 try:
--> 180 return c.Build()
181 except RuntimeError as e:
8 frames
/usr/local/lib/python3.6/dist-packages/jax/lib/xla_bridge.py in Build(self, *args, **kwargs)
256 return super(_JaxComputationBuilder, self).Build(
--> 257 *args, **kwargs)
258
/usr/local/lib/python3.6/dist-packages/jaxlib/xla_client.py in Build(self, root, backend)
729 else:
--> 730 return Computation(self._builder.Build(), backend=backend)
731
RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-12-3db71aa0f40b> in <module>()
1 from jax import numpy as np
2
----> 3 np.ones((1,), np.uint32) // 2
/usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in floor_divide(x1, x2)
459 if issubdtype(dtype, integer):
460 quotient = lax.div(x1, x2)
--> 461 select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
462 # TODO(mattjj): investigate why subtracting a scalar was causing promotion
463 return where(select, quotient - onp.array(1, _dtype(quotient)), quotient)
/usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in sign(x)
117 \end{cases}`.
118 """
--> 119 return sign_p.bind(x)
120
121 def floor(x):
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
150 top_trace = find_top_trace(args)
151 if top_trace is None:
--> 152 return self.impl(*args, **kwargs)
153
154 tracers = map(top_trace.full_raise, args)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in apply_primitive(prim, *args, **params)
138 """Impl rule that compiles and runs a single primitive 'prim' using XLA."""
139 abstract_args = map(abstractify, args)
--> 140 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)
141 return compiled_fun(*args)
142
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params)
150 else:
151 handle_result = aval_to_result_handler(aval_out)
--> 152 built_c = primitive_computation(prim, *abstract_args, **params)
153 compiled = built_c.Compile(compile_options=xb.get_compile_options(),
154 backend=xb.get_backend(backend))
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params)
183 "This is a bug in JAX's shape-checking rules; please report it!\n"
184 "https://github.com/google/jax/issues\n")
--> 185 raise RuntimeError(msg)
186
187 def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):
RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.:
This is a bug in JAX's shape-checking rules; please report it!
https://github.com/google/jax/issues
|
RuntimeError
|
def unop(result_dtype, accepted_dtypes, name, translation_rule=None):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
prim = standard_primitive(
_attrgetter("shape"), dtype_rule, name, translation_rule=translation_rule
)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
|
def unop(result_dtype, accepted_dtypes, name):
dtype_rule = partial(unop_dtype_rule, result_dtype, accepted_dtypes, name)
prim = standard_primitive(_attrgetter("shape"), dtype_rule, name)
batching.defvectorized(prim)
masking.defvectorized(prim)
return prim
|
https://github.com/google/jax/issues/1933
|
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params)
179 try:
--> 180 return c.Build()
181 except RuntimeError as e:
8 frames
/usr/local/lib/python3.6/dist-packages/jax/lib/xla_bridge.py in Build(self, *args, **kwargs)
256 return super(_JaxComputationBuilder, self).Build(
--> 257 *args, **kwargs)
258
/usr/local/lib/python3.6/dist-packages/jaxlib/xla_client.py in Build(self, root, backend)
729 else:
--> 730 return Computation(self._builder.Build(), backend=backend)
731
RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.:
During handling of the above exception, another exception occurred:
RuntimeError Traceback (most recent call last)
<ipython-input-12-3db71aa0f40b> in <module>()
1 from jax import numpy as np
2
----> 3 np.ones((1,), np.uint32) // 2
/usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in floor_divide(x1, x2)
459 if issubdtype(dtype, integer):
460 quotient = lax.div(x1, x2)
--> 461 select = logical_and(lax.sign(x1) != lax.sign(x2), lax.rem(x1, x2) != 0)
462 # TODO(mattjj): investigate why subtracting a scalar was causing promotion
463 return where(select, quotient - onp.array(1, _dtype(quotient)), quotient)
/usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in sign(x)
117 \end{cases}`.
118 """
--> 119 return sign_p.bind(x)
120
121 def floor(x):
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
150 top_trace = find_top_trace(args)
151 if top_trace is None:
--> 152 return self.impl(*args, **kwargs)
153
154 tracers = map(top_trace.full_raise, args)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in apply_primitive(prim, *args, **params)
138 """Impl rule that compiles and runs a single primitive 'prim' using XLA."""
139 abstract_args = map(abstractify, args)
--> 140 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)
141 return compiled_fun(*args)
142
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in xla_primitive_callable(prim, *abstract_args, **params)
150 else:
151 handle_result = aval_to_result_handler(aval_out)
--> 152 built_c = primitive_computation(prim, *abstract_args, **params)
153 compiled = built_c.Compile(compile_options=xb.get_compile_options(),
154 backend=xb.get_backend(backend))
/usr/local/lib/python3.6/dist-packages/jax/interpreters/xla.py in primitive_computation(prim, *avals, **params)
183 "This is a bug in JAX's shape-checking rules; please report it!\n"
184 "https://github.com/google/jax/issues\n")
--> 185 raise RuntimeError(msg)
186
187 def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):
RuntimeError: Invalid argument: Expected element type in shape to be signed or complex for sign operation; got U32.:
This is a bug in JAX's shape-checking rules; please report it!
https://github.com/google/jax/issues
|
RuntimeError
|
def __getitem__(self, idx):
if self._npy_value is None and type(idx) is int:
ids = self._ids()
device_buffer = self.device_buffers[ids[idx]]
aval = ShapedArray(self.aval.shape[1:], self.aval.dtype)
handler = xla.aval_to_result_handler(None, aval)
return handler(device_buffer)
else:
return super(ShardedDeviceArray, self).__getitem__(idx)
|
def __getitem__(self, idx):
if self._npy_value is None and type(idx) is int:
ids = self._ids()
device_buffer = self.device_buffers[ids[idx]]
aval = ShapedArray(self.aval.shape[1:], self.aval.dtype)
handler = xla.aval_to_result_handler(aval)
return handler(device_buffer)
else:
return super(ShardedDeviceArray, self).__getitem__(idx)
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def aval_to_result_handler(device, aval):
try:
return xla_result_handlers[type(aval)](device, aval)
except KeyError:
raise TypeError("No xla_result_handler for type: {}".format(type(aval)))
|
def aval_to_result_handler(aval):
try:
return xla_result_handlers[type(aval)](aval)
except KeyError:
raise TypeError("No xla_result_handler for type: {}".format(type(aval)))
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def array_result_handler(device, aval):
return partial(DeviceArray, raise_to_shaped(aval), device)
|
def array_result_handler(aval):
return partial(DeviceArray, raise_to_shaped(aval))
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def xla_primitive_callable(prim, *arg_specs, **params):
avals, arg_devices = unzip2(arg_specs)
device = _device_from_arg_devices(arg_devices)
backend = xb.get_device_backend(device)
aval_out = prim.abstract_eval(*avals, **params)
if not prim.multiple_results:
handle_result = aval_to_result_handler(device, aval_out)
else:
handlers = tuple(map(partial(aval_to_result_handler, device), aval_out))
handle_result = lambda xs: tuple(
h(x) for h, x in zip(handlers, xs.destructure())
)
tuple_args = len(avals) > 100
built_c = primitive_computation(prim, backend, tuple_args, *avals, **params)
options = xb.get_compile_options(device_assignment=device and (device.id,))
compiled = built_c.Compile(compile_options=options, backend=backend)
return partial(
_execute_compiled_primitive, prim, compiled, backend, tuple_args, handle_result
)
|
def xla_primitive_callable(prim, *arg_specs, **params):
avals, devices = unzip2(arg_specs)
# TODO(mattjj): make Device hashable instead of handling pairs here
try:
(device,) = set(d for d in devices if d is not None) or (None,)
except ValueError:
msg = "primitive arguments must be colocated on the same device, got {}"
names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
raise ValueError(msg.format(", ".join(names)))
else:
all_devices = it.chain(xb.devices(), xb.devices("cpu"))
device = device and next(d for d in all_devices if (type(d), d.id) == device)
backend = xb.get_device_backend(device)
aval_out = prim.abstract_eval(*avals, **params)
if prim.multiple_results:
handlers = tuple(map(aval_to_result_handler, aval_out))
handle_result = lambda xs: tuple(
h(x) for h, x in zip(handlers, xs.destructure())
)
else:
handle_result = aval_to_result_handler(aval_out)
tuple_args = len(avals) > 100
built_c = primitive_computation(prim, backend, tuple_args, *avals, **params)
options = xb.get_compile_options(device_assignment=(device.id,) if device else None)
compiled = built_c.Compile(compile_options=options, backend=backend)
return partial(
_execute_compiled_primitive, prim, compiled, backend, tuple_args, handle_result
)
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def _xla_call_impl(fun, *args, **params):
device = params["device"]
backend = params["backend"]
compiled_fun = _xla_callable(fun, device, backend, *map(arg_spec, args))
try:
return compiled_fun(*args)
except FloatingPointError:
print(
"Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version."
)
return fun.call_wrapped(*args) # probably won't return
|
def _xla_call_impl(fun, *args, **params):
device = params["device"]
backend = params["backend"]
compiled_fun = _xla_callable(fun, device, backend, *map(abstractify, args))
try:
return compiled_fun(*args)
except FloatingPointError:
print(
"Invalid value encountered in the output of a jit function. "
"Calling the de-optimized version."
)
return fun.call_wrapped(*args) # probably won't return
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def _xla_callable(fun, device, backend, *arg_specs):
if device is not None and backend is not None:
raise ValueError(
"can't specify both a device and a backend for jit, "
"got device={} and backend={}".format(device, backend)
)
abstract_args, arg_devices = unzip2(arg_specs)
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.StagingJaxprTrace, True) as master:
jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(
fun, master, False
).call_wrapped(pvals)
assert not env # no subtraces here
del master, env
_map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
nreps = jaxpr_replicas(jaxpr)
device = _xla_callable_device(nreps, backend, device, arg_devices)
result_handlers = tuple(map(partial(_pval_to_result_handler, device), pvals))
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to force their (potentially lazy) arguments.
if not jaxpr.eqns:
device = device or xb.get_backend(None).get_default_device_assignment(1)[0]
return partial(_execute_trivial, jaxpr, device, consts, result_handlers)
log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
logging.log(
log_priority, "Compiling {} for args {}.".format(fun.__name__, abstract_args)
)
if nreps > xb.device_count(backend):
msg = (
"compiling computation that requires {} replicas, but only {} XLA "
"devices are available"
)
raise ValueError(msg.format(nreps, xb.device_count(backend)))
if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all)."
)
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _map(c.Constant, consts)
xla_args = _xla_callable_args(c, abstract_args, tuple_args)
out_nodes = jaxpr_subcomp(
c, jaxpr, backend, AxisEnv(nreps, [], []), xla_consts, (), *xla_args
)
built = c.Build(c.Tuple(*out_nodes))
options = xb.get_compile_options(
num_replicas=nreps, device_assignment=(device.id,) if device else None
)
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
if nreps == 1:
return partial(
_execute_compiled, compiled, backend, result_handlers, tuple_args
)
else:
return partial(
_execute_replicated, compiled, backend, result_handlers, tuple_args
)
|
def _xla_callable(fun, device, backend, *abstract_args):
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.StagingJaxprTrace, True) as master:
jaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(
fun, master, False
).call_wrapped(pvals)
assert not env # no subtraces here
del master, env
_map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))
result_handlers = tuple(map(_pval_to_result_handler, pvals))
# Computations that only produce constants and/or only rearrange their inputs,
# which are often produced from partial evaluation, don't need compilation,
# and don't need to force their (potentially lazy) arguments.
if not jaxpr.eqns:
device = _get_device(device, backend)
return partial(_execute_trivial, jaxpr, device, consts, result_handlers)
log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG
logging.log(
log_priority, "Compiling {} for args {}.".format(fun.__name__, abstract_args)
)
nreps = jaxpr_replicas(jaxpr)
if nreps > xb.device_count(backend):
msg = (
"compiling computation that requires {} replicas, but only {} XLA "
"devices are available"
)
raise ValueError(msg.format(nreps, xb.device_count(backend)))
axis_env = AxisEnv(nreps, [], [])
if xb.host_count() > 1 and (nreps > 1 or jaxpr_has_pmap(jaxpr)):
raise NotImplementedError(
"jit of multi-host pmap not implemented (and jit-of-pmap can cause "
"extra data movement anyway, so maybe you don't want it after all)."
)
tuple_args = len(abstract_args) > 100 # pass long arg lists as tuple for TPU
c = xb.make_computation_builder("jit_{}".format(fun.__name__))
xla_consts = _map(c.Constant, consts)
xla_args = _xla_callable_args(c, abstract_args, tuple_args)
out_nodes = jaxpr_subcomp(c, jaxpr, backend, axis_env, xla_consts, (), *xla_args)
built = c.Build(c.Tuple(*out_nodes))
if device is not None and nreps > 1:
raise ValueError("can't specify device assignment for jit-of-pmap")
options = xb.get_compile_options(
num_replicas=nreps, device_assignment=(device.id,) if device else None
)
compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))
if nreps == 1:
return partial(
_execute_compiled, compiled, backend, result_handlers, tuple_args
)
else:
return partial(
_execute_replicated, compiled, backend, result_handlers, tuple_args
)
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def _pval_to_result_handler(device, pval):
pv, const = pval
if pv is None:
return lambda _: const
else:
return aval_to_result_handler(device, pv)
|
def _pval_to_result_handler(pval):
pv, const = pval
if pv is None:
return lambda _: const
else:
return aval_to_result_handler(pv)
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def __init__(self, aval, device, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
self._device = device and (type(device), device.id)
self._npy_value = None
if not core.skip_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
|
def __init__(self, aval, device_buffer):
self.aval = aval
self.device_buffer = device_buffer
# TODO(mattjj): make Device hashable
device = device_buffer.device()
self._device = device and (type(device), device.id)
self._npy_value = None
if not core.skip_checks:
assert type(aval) is ShapedArray
npy_value = self._value
assert npy_value.dtype == aval.dtype and npy_value.shape == aval.shape
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def _device_put_impl(x, device=None):
try:
a = abstractify(x)
except TypeError:
raise TypeError(
"Argument '{}' of type {} is not a valid JAX type".format(x, type(x))
)
handler = aval_to_result_handler(device, a)
return handler(device_put(x, device))
|
def _device_put_impl(x, device=None):
try:
a = abstractify(x)
except TypeError:
raise TypeError(
"Argument '{}' of type {} is not a valid JAX type".format(x, type(x))
)
handler = aval_to_result_handler(a)
return handler(device_put(x, device))
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def _instantiate_device_constant(const, device=None, backend=None, cutoff=1e6):
# dispatch an XLA Computation to build the constant on the device if it's
# large, or alternatively build it on the host and transfer it if it's small
assert isinstance(const, DeviceConstant)
backend = xb.get_backend(device.platform) if device else xb.get_backend(backend)
if const.size > cutoff:
c = xb.make_computation_builder("constant_instantiating_computation")
xla_const = const.constant_handler(c, const)
device_assignment = (device.id,) if device else None
opts = xb.get_compile_options(device_assignment=device_assignment)
compiled = c.Build(xla_const).Compile((), opts, backend=backend)
return compiled.Execute(())
else:
return xc.Buffer.from_pyval(onp.asarray(const), device, backend=backend)
|
def _instantiate_device_constant(const, device=None, backend=None, cutoff=1e6):
# dispatch an XLA Computation to build the constant on the device if it's
# large, or alternatively build it on the host and transfer it if it's small
assert isinstance(const, DeviceConstant)
if const.size > cutoff:
c = xb.make_computation_builder("constant_instantiating_computation")
xla_const = const.constant_handler(c, const)
device_assignment = (device.id,) if device else None
opts = xb.get_compile_options(device_assignment=device_assignment)
compiled = c.Build(xla_const).Compile((), opts, backend=xb.get_backend(backend))
return compiled.Execute(())
else:
return xc.Buffer.from_pyval(
onp.asarray(const), device, backend=xb.get_backend(backend)
)
|
https://github.com/google/jax/issues/1914
|
---------------------------------------------------------------------------
ValueError Traceback (most recent call last)
<ipython-input-20-ca9775de72ea> in <module>()
----> 1 x / x.shape[0]
4 frames
google3/third_party/py/jax/interpreters/xla.py in xla_primitive_callable(prim, *arg_specs, **params)
169 msg = "primitive arguments must be colocated on the same device, got {}"
170 names = ("{}({})".format(d[0].__name__, d[1]) for d in devices if d is not None)
--> 171 raise ValueError(msg.format(", ".join(names)))
172 else:
173 all_devices = it.chain(xb.devices(), xb.devices('cpu'))
ValueError: primitive arguments must be colocated on the same device, got CpuDevice(0), GpuDevice(0)
|
ValueError
|
def update(self, name, val):
if self.use_absl:
setattr(self.absl_flags.FLAGS, name, val)
else:
self.check_exists(name)
if name not in self.values:
raise Exception("Unrecognized config option: {}".format(name))
self.values[name] = val
|
def update(self, name, val):
self.check_exists(name)
if name not in self.values:
raise Exception("Unrecognized config option: {}".format(name))
self.values[name] = val
|
https://github.com/google/jax/issues/1401
|
TypeError Traceback (most recent call last)
<ipython-input-171-c4c8dbbffe17> in <module>
1 A = np.array([[1.,2.],[2.,3.]])
----> 2 V,D = np.linalg.eig(A)
~/RESEARCH/jax/jax/numpy/linalg.py in eig(a)
95 def eig(a):
96 a = _promote_arg_dtypes(np.asarray(a))
---> 97 w, vl, vr = lax_linalg.eig(a)
98 return w, vr
99
~/RESEARCH/jax/jax/lax_linalg.py in eig(x)
47
48 def eig(x):
---> 49 w, vl, vr = eig_p.bind(x)
50 return w, vl, vr
51
~/RESEARCH/jax/jax/core.py in bind(self, *args, **kwargs)
128 top_trace = find_top_trace(args)
129 if top_trace is None:
--> 130 return self.impl(*args, **kwargs)
131
132 tracers = map(top_trace.full_raise, args)
~/RESEARCH/jax/jax/lax_linalg.py in eig_impl(operand)
153
154 def eig_impl(operand):
--> 155 return xla.apply_primitive(eig_p, operand)
156
157 def eig_translation_rule(c, operand):
~/RESEARCH/jax/jax/interpreters/xla.py in apply_primitive(prim, *args, **params)
122 abstract_args = map(abstractify, args)
123 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)
--> 124 return compiled_fun(*args)
125
126 @cache()
~/RESEARCH/jax/jax/interpreters/xla.py in _execute_compiled_primitive(prim, compiled, backend, result_handler, *args)
172 input_bufs = [device_put(x, device_num, backend=backend) for x in args]
173 out_buf = compiled.Execute(input_bufs)
--> 174 if FLAGS.jax_debug_nans: check_nans(prim, out_buf)
175 return result_handler(out_buf)
176
~/RESEARCH/jax/jax/interpreters/xla.py in check_nans(prim, bufs)
177 def check_nans(prim, bufs):
178 if prim.multiple_results:
--> 179 for buf in bufs:
180 _check_nans(prim.name, buf.shape(), buf)
181 else:
TypeError: 'jaxlib.xla_extension.PyLocalBuffer' object is not iterable
|
TypeError
|
def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):
(device_num,) = compiled.DeviceOrdinals()
input_bufs = [device_put(x, device_num, backend=backend) for x in args]
out_buf = compiled.Execute(input_bufs)
if FLAGS.jax_debug_nans:
check_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf)
return result_handler(out_buf)
|
def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):
(device_num,) = compiled.DeviceOrdinals()
input_bufs = [device_put(x, device_num, backend=backend) for x in args]
out_buf = compiled.Execute(input_bufs)
if FLAGS.jax_debug_nans:
check_nans(prim, out_buf)
return result_handler(out_buf)
|
https://github.com/google/jax/issues/1401
|
TypeError Traceback (most recent call last)
<ipython-input-171-c4c8dbbffe17> in <module>
1 A = np.array([[1.,2.],[2.,3.]])
----> 2 V,D = np.linalg.eig(A)
~/RESEARCH/jax/jax/numpy/linalg.py in eig(a)
95 def eig(a):
96 a = _promote_arg_dtypes(np.asarray(a))
---> 97 w, vl, vr = lax_linalg.eig(a)
98 return w, vr
99
~/RESEARCH/jax/jax/lax_linalg.py in eig(x)
47
48 def eig(x):
---> 49 w, vl, vr = eig_p.bind(x)
50 return w, vl, vr
51
~/RESEARCH/jax/jax/core.py in bind(self, *args, **kwargs)
128 top_trace = find_top_trace(args)
129 if top_trace is None:
--> 130 return self.impl(*args, **kwargs)
131
132 tracers = map(top_trace.full_raise, args)
~/RESEARCH/jax/jax/lax_linalg.py in eig_impl(operand)
153
154 def eig_impl(operand):
--> 155 return xla.apply_primitive(eig_p, operand)
156
157 def eig_translation_rule(c, operand):
~/RESEARCH/jax/jax/interpreters/xla.py in apply_primitive(prim, *args, **params)
122 abstract_args = map(abstractify, args)
123 compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)
--> 124 return compiled_fun(*args)
125
126 @cache()
~/RESEARCH/jax/jax/interpreters/xla.py in _execute_compiled_primitive(prim, compiled, backend, result_handler, *args)
172 input_bufs = [device_put(x, device_num, backend=backend) for x in args]
173 out_buf = compiled.Execute(input_bufs)
--> 174 if FLAGS.jax_debug_nans: check_nans(prim, out_buf)
175 return result_handler(out_buf)
176
~/RESEARCH/jax/jax/interpreters/xla.py in check_nans(prim, bufs)
177 def check_nans(prim, bufs):
178 if prim.multiple_results:
--> 179 for buf in bufs:
180 _check_nans(prim.name, buf.shape(), buf)
181 else:
TypeError: 'jaxlib.xla_extension.PyLocalBuffer' object is not iterable
|
TypeError
|
def _reduction_init_val(a, init_val):
a_dtype = xla_bridge.canonicalize_dtype(_dtype(a))
if a_dtype == "bool":
return onp.array(init_val > 0, dtype=a_dtype)
try:
return onp.array(init_val, dtype=a_dtype)
except OverflowError:
assert onp.issubdtype(a_dtype, onp.integer)
sign, iinfo = onp.sign(init_val), onp.iinfo(a_dtype)
return onp.array(iinfo.min if sign < 0 else iinfo.max, dtype=a_dtype)
|
def _reduction_init_val(a, init_val):
a_dtype = xla_bridge.canonicalize_dtype(_dtype(a))
try:
return onp.array(init_val, dtype=a_dtype)
except OverflowError:
assert onp.issubdtype(a_dtype, onp.integer)
sign, iinfo = onp.sign(init_val), onp.iinfo(a_dtype)
return onp.array(iinfo.min if sign < 0 else iinfo.max, dtype=a_dtype)
|
https://github.com/google/jax/issues/1101
|
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in get_primitive_batcher(p)
231 try:
--> 232 return primitive_batchers[p]
233 except KeyError:
KeyError: reduce
During handling of the above exception, another exception occurred:
NotImplementedError Traceback (most recent call last)
10 frames
<ipython-input-32-f78d0b39613e> in <module>()
----> 1 jax.vmap(np.argmax)(m > 0.5)
/usr/local/lib/python3.6/dist-packages/jax/api.py in batched_fun(*args, **kwargs)
491 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
492 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 493 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
494 return build_tree(out_tree(), out_flat)
495
/usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in batch(fun, in_vals, in_dims, out_dim_dst)
43 elif len(sizes) == 1:
44 sz = sizes.pop()
---> 45 return batch_transform(fun, sz, in_dims, out_dim_dst).call_wrapped(in_vals)
46 else:
47 raise TypeError("got inconsistent map dimension sizes: {}".format(sizes))
/usr/local/lib/python3.6/dist-packages/jax/linear_util.py in call_wrapped(self, *args, **kwargs)
145
146 del gen
--> 147 ans = self.f(*args, **dict(self.params, **kwargs))
148 del args
149 while stack:
/usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in argmax(a, axis)
2011 a = ravel(a)
2012 axis = 0
-> 2013 return _argminmax(max, a, axis)
2014
2015
/usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in _argminmax(op, a, axis)
2028 idxs = onp.arange(a.shape[axis]).reshape(shape)
2029 maxval = onp.iinfo(xla_bridge.canonicalize_dtype(idxs.dtype)).max
-> 2030 mask_idxs = where(lax._eq_meet(a, op(a, axis, keepdims=True)), idxs, maxval)
2031 return min(mask_idxs, axis)
2032
/usr/local/lib/python3.6/dist-packages/jax/numpy/lax_numpy.py in reduction(a, axis, dtype, out, keepdims)
956 if _dtype(a) != result_dtype:
957 a = lax.convert_element_type(a, result_dtype)
--> 958 result = lax.reduce(a, _reduction_init_val(a, init_val), op, dims)
959 if keepdims:
960 shape_with_singletons = lax.subvals(shape(a), zip(dims, (1,) * len(dims)))
/usr/local/lib/python3.6/dist-packages/jax/lax/lax.py in reduce(operand, init_value, computation, dimensions)
791 jaxpr, consts = _reduction_jaxpr(computation, init_value)
792 return reduce_p.bind(operand, init_value, computation=computation,
--> 793 jaxpr=jaxpr, consts=consts, dimensions=tuple(dimensions))
794
795 def _reduction_jaxpr(computation, init_value):
/usr/local/lib/python3.6/dist-packages/jax/core.py in bind(self, *args, **kwargs)
145
146 tracers = map(top_trace.full_raise, args)
--> 147 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
148 return full_lower(out_tracer)
149
/usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in process_primitive(self, primitive, tracers, params)
121 else:
122 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
--> 123 batched_primitive = get_primitive_batcher(primitive)
124 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
125 return BatchTracer(self, val_out, dim_out)
/usr/local/lib/python3.6/dist-packages/jax/interpreters/batching.py in get_primitive_batcher(p)
233 except KeyError:
234 raise NotImplementedError(
--> 235 "Batching rule for '{}' not implemented".format(p))
236
237 def defvectorized(prim):
NotImplementedError: Batching rule for 'reduce' not implemented
|
KeyError
|
def jaxpr_replicas(jaxpr):
return max(it.chain([1], (eqn_replicas(eqn) for eqn in jaxpr.eqns)))
|
def jaxpr_replicas(jaxpr):
nums = (eqn_replicas(eqn) for eqn in jaxpr.eqns if eqn.bound_subjaxprs)
return max(it.chain([1], nums)) # max(itr, default=1)
|
https://github.com/google/jax/issues/1065
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-4-9a3737aed707> in <module>()
----> 1 multi_step_pmap(np.zeros((8,)), count=1)
13 frames
<ipython-input-3-8106aee94f86> in multi_step_pmap(state, count)
23 return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)
24
---> 25 return time_evolution(state)
google3/third_party/py/jax/api.py in f_jitted(*args, **kwargs)
129 _check_args(args_flat)
130 flat_fun, out_tree = flatten_fun_leafout(f, in_tree)
--> 131 out = xla.xla_call(flat_fun, *args_flat, device_values=device_values)
132 return out if out_tree() is leaf else tree_unflatten(out_tree(), out)
133
google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params)
661 if top_trace is None:
662 with new_sublevel():
--> 663 ans = primitive.impl(f, *args, **params)
664 else:
665 tracers = map(top_trace.full_raise, args)
google3/third_party/py/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params)
672 def _xla_call_impl(fun, *args, **params):
673 device_values = FLAGS.jax_device_values and params.pop('device_values')
--> 674 compiled_fun = _xla_callable(fun, device_values, *map(abstractify, args))
675 try:
676 return compiled_fun(*args)
google3/third_party/py/jax/linear_util.py in memoized_fun(f, *args)
203
204 def memoized_fun(f, *args):
--> 205 ans, f_prev = memoized_fun_body(f, args)
206 if id(f_prev) != id(f):
207 f.populate_stores(f_prev)
google3/third_party/py/jax/linear_util.py in memoized_fun_body(f, args)
200 @fastcache.clru_cache(maxsize=max_size)
201 def memoized_fun_body(f, args):
--> 202 return call(f, *args), f
203
204 def memoized_fun(f, *args):
google3/third_party/py/jax/interpreters/xla.py in _xla_callable(fun, device_values, *abstract_args)
687 assert not env # no subtraces here (though cond might eventually need them)
688 axis_env = AxisEnv(jaxpr_replicas(jaxpr), [], [])
--> 689 compiled, result_shape = _compile_jaxpr(jaxpr, axis_env, consts, *abstract_args)
690 del master, consts, jaxpr, env
691 if device_values:
google3/third_party/py/jax/interpreters/xla.py in _compile_jaxpr(jaxpr, axis_env, const_vals, *abstract_args)
223 raise ValueErrr(msg.format(axis_env.nreps, xb.device_count()))
224 arg_shapes = list(map(xla_shape, abstract_args))
--> 225 built_c = _jaxpr_computation(jaxpr, axis_env, const_vals, (), *arg_shapes)
226 result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())
227 return built_c.Compile(arg_shapes, xb.get_compile_options(axis_env.nreps),
google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes)
291 elif eqn.primitive in initial_style_translations:
292 rule = initial_style_translations[eqn.primitive]
--> 293 ans = rule(c, axis_env, *in_nodes, **eqn.params)
294 elif eqn.primitive in parallel_translations:
295 replica_groups = axis_groups(axis_env, eqn.params['axis_name'])
google3/third_party/py/jax/lax/lax_control_flow.py in _while_loop_translation_rule(c, axis_env, init_val, cond_consts, body_consts, aval_out, cond_jaxpr, body_jaxpr)
202
203 cond_c = xla._jaxpr_computation(cond_jaxpr_converted, axis_env, (), (), shape)
--> 204 body_c = xla._jaxpr_computation(body_jaxpr_converted, axis_env, (), (), shape)
205 full_ans = c.While(cond_c, body_c, loop_carry)
206 return c.GetTupleElement(full_ans, 0)
google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes)
301 env_nodes = list(map(read, const_bindings + freevar_bindings))
302 rule = call_translations[eqn.primitive]
--> 303 ans = rule(c, subjaxpr, axis_env, env_nodes, in_nodes, **eqn.params)
304 else:
305 msg = "XLA translation rule for primitive '{}' not found"
google3/third_party/py/jax/interpreters/pxla.py in _xla_pmap_translation_rule(c, jaxpr, axis_env, env_nodes, in_nodes, axis_name, axis_size)
591 *map(c.GetShape, in_nodes_sharded))
592 sharded_result = c.Call(subc, env_nodes + in_nodes_sharded)
--> 593 return xla_unshard(c, xla.axis_groups(new_env, axis_name), sharded_result)
594 xla.call_translations[xla_pmap_p] = _xla_pmap_translation_rule
595 ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)
google3/third_party/py/jax/interpreters/xla.py in axis_groups(axis_env, name)
338 else:
339 mesh_axes = (axis_read(axis_env, name),)
--> 340 return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes)
341
342 def _axis_groups(nrep, mesh_spec, mesh_axes):
google3/third_party/py/jax/interpreters/xla.py in _axis_groups(nrep, mesh_spec, mesh_axes)
342 def _axis_groups(nrep, mesh_spec, mesh_axes):
343 trailing_size, ragged = divmod(nrep, prod(mesh_spec))
--> 344 assert not ragged
345 full_spec = list(mesh_spec) + [trailing_size]
346 iota = onp.arange(prod(full_spec)).reshape(full_spec)
AssertionError:
|
AssertionError
|
def eqn_replicas(eqn):
if eqn.bound_subjaxprs:
((subjaxpr, _, _),) = eqn.bound_subjaxprs
return eqn.params.get("axis_size", 1) * jaxpr_replicas(subjaxpr)
elif eqn.primitive in initial_style_translations:
nums = (
jaxpr_replicas(param if type(param) is core.Jaxpr else param.jaxpr)
for param in eqn.params.values()
if type(param) in (core.Jaxpr, core.TypedJaxpr)
)
return max(it.chain([1], nums))
else:
return 1
|
def eqn_replicas(eqn):
((subjaxpr, _, _),) = eqn.bound_subjaxprs
return eqn.params.get("axis_size", 1) * jaxpr_replicas(subjaxpr)
|
https://github.com/google/jax/issues/1065
|
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-4-9a3737aed707> in <module>()
----> 1 multi_step_pmap(np.zeros((8,)), count=1)
13 frames
<ipython-input-3-8106aee94f86> in multi_step_pmap(state, count)
23 return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)
24
---> 25 return time_evolution(state)
google3/third_party/py/jax/api.py in f_jitted(*args, **kwargs)
129 _check_args(args_flat)
130 flat_fun, out_tree = flatten_fun_leafout(f, in_tree)
--> 131 out = xla.xla_call(flat_fun, *args_flat, device_values=device_values)
132 return out if out_tree() is leaf else tree_unflatten(out_tree(), out)
133
google3/third_party/py/jax/core.py in call_bind(primitive, f, *args, **params)
661 if top_trace is None:
662 with new_sublevel():
--> 663 ans = primitive.impl(f, *args, **params)
664 else:
665 tracers = map(top_trace.full_raise, args)
google3/third_party/py/jax/interpreters/xla.py in _xla_call_impl(fun, *args, **params)
672 def _xla_call_impl(fun, *args, **params):
673 device_values = FLAGS.jax_device_values and params.pop('device_values')
--> 674 compiled_fun = _xla_callable(fun, device_values, *map(abstractify, args))
675 try:
676 return compiled_fun(*args)
google3/third_party/py/jax/linear_util.py in memoized_fun(f, *args)
203
204 def memoized_fun(f, *args):
--> 205 ans, f_prev = memoized_fun_body(f, args)
206 if id(f_prev) != id(f):
207 f.populate_stores(f_prev)
google3/third_party/py/jax/linear_util.py in memoized_fun_body(f, args)
200 @fastcache.clru_cache(maxsize=max_size)
201 def memoized_fun_body(f, args):
--> 202 return call(f, *args), f
203
204 def memoized_fun(f, *args):
google3/third_party/py/jax/interpreters/xla.py in _xla_callable(fun, device_values, *abstract_args)
687 assert not env # no subtraces here (though cond might eventually need them)
688 axis_env = AxisEnv(jaxpr_replicas(jaxpr), [], [])
--> 689 compiled, result_shape = _compile_jaxpr(jaxpr, axis_env, consts, *abstract_args)
690 del master, consts, jaxpr, env
691 if device_values:
google3/third_party/py/jax/interpreters/xla.py in _compile_jaxpr(jaxpr, axis_env, const_vals, *abstract_args)
223 raise ValueErrr(msg.format(axis_env.nreps, xb.device_count()))
224 arg_shapes = list(map(xla_shape, abstract_args))
--> 225 built_c = _jaxpr_computation(jaxpr, axis_env, const_vals, (), *arg_shapes)
226 result_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())
227 return built_c.Compile(arg_shapes, xb.get_compile_options(axis_env.nreps),
google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes)
291 elif eqn.primitive in initial_style_translations:
292 rule = initial_style_translations[eqn.primitive]
--> 293 ans = rule(c, axis_env, *in_nodes, **eqn.params)
294 elif eqn.primitive in parallel_translations:
295 replica_groups = axis_groups(axis_env, eqn.params['axis_name'])
google3/third_party/py/jax/lax/lax_control_flow.py in _while_loop_translation_rule(c, axis_env, init_val, cond_consts, body_consts, aval_out, cond_jaxpr, body_jaxpr)
202
203 cond_c = xla._jaxpr_computation(cond_jaxpr_converted, axis_env, (), (), shape)
--> 204 body_c = xla._jaxpr_computation(body_jaxpr_converted, axis_env, (), (), shape)
205 full_ans = c.While(cond_c, body_c, loop_carry)
206 return c.GetTupleElement(full_ans, 0)
google3/third_party/py/jax/interpreters/xla.py in _jaxpr_computation(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes)
301 env_nodes = list(map(read, const_bindings + freevar_bindings))
302 rule = call_translations[eqn.primitive]
--> 303 ans = rule(c, subjaxpr, axis_env, env_nodes, in_nodes, **eqn.params)
304 else:
305 msg = "XLA translation rule for primitive '{}' not found"
google3/third_party/py/jax/interpreters/pxla.py in _xla_pmap_translation_rule(c, jaxpr, axis_env, env_nodes, in_nodes, axis_name, axis_size)
591 *map(c.GetShape, in_nodes_sharded))
592 sharded_result = c.Call(subc, env_nodes + in_nodes_sharded)
--> 593 return xla_unshard(c, xla.axis_groups(new_env, axis_name), sharded_result)
594 xla.call_translations[xla_pmap_p] = _xla_pmap_translation_rule
595 ad.primitive_transposes[xla_pmap_p] = partial(ad.map_transpose, xla_pmap_p)
google3/third_party/py/jax/interpreters/xla.py in axis_groups(axis_env, name)
338 else:
339 mesh_axes = (axis_read(axis_env, name),)
--> 340 return _axis_groups(axis_env.nreps, axis_env.sizes, mesh_axes)
341
342 def _axis_groups(nrep, mesh_spec, mesh_axes):
google3/third_party/py/jax/interpreters/xla.py in _axis_groups(nrep, mesh_spec, mesh_axes)
342 def _axis_groups(nrep, mesh_spec, mesh_axes):
343 trailing_size, ragged = divmod(nrep, prod(mesh_spec))
--> 344 assert not ragged
345 full_spec = list(mesh_spec) + [trailing_size]
346 iota = onp.arange(prod(full_spec)).reshape(full_spec)
AssertionError:
|
AssertionError
|
def cholesky_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
potrf_output = lapack.jax_potrf(c, operand, lower=True)
result = c.GetTupleElement(potrf_output, 0)
info = c.GetTupleElement(potrf_output, 1)
return c.Select(
c.Eq(info, c.ConstantS32Scalar(0)), result, _nan_like(c, result)
)
else:
# Fall back to the HLO implementation for batched Cholesky decomposition or
# unsupported types.
# TODO(phawkins): support LAPACK primitives in batched mode.
return c.Cholesky(operand)
|
def cholesky_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
return c.GetTupleElement(lapack.jax_potrf(c, operand, lower=True), 0)
else:
# Fall back to the HLO implementation for batched Cholesky decomposition or
# unsupported types.
# TODO(phawkins): support LAPACK primitives in batched mode.
return c.Cholesky(operand)
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def eig_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
batch_dims = shape.dimensions()[:-2]
geev_out = lapack.jax_geev(c, operand)
w = c.GetTupleElement(geev_out, 0)
vl = c.GetTupleElement(geev_out, 1)
vr = c.GetTupleElement(geev_out, 2)
ok = c.Eq(c.GetTupleElement(geev_out, 3), c.ConstantS32Scalar(0))
w = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1,)), w, _nan_like(c, w)
)
vl = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1, 1)), vl, _nan_like(c, vl)
)
vr = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1, 1)), vr, _nan_like(c, vr)
)
return c.Tuple(w, vl, vr)
|
def eig_cpu_translation_rule(c, operand):
out = lapack.jax_geev(c, operand)
return c.Tuple(
c.GetTupleElement(out, 0), c.GetTupleElement(out, 1), c.GetTupleElement(out, 2)
)
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def eigh_cpu_translation_rule(c, operand, lower):
shape = c.GetShape(operand)
batch_dims = shape.dimensions()[:-2]
syevd_out = lapack.jax_syevd(c, operand, lower=lower)
v = c.GetTupleElement(syevd_out, 0)
w = c.GetTupleElement(syevd_out, 1)
ok = c.Eq(c.GetTupleElement(syevd_out, 2), c.ConstantS32Scalar(0))
v = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1, 1)), v, _nan_like(c, v)
)
w = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1,)), w, _nan_like(c, w)
)
return c.Tuple(v, w)
|
def eigh_cpu_translation_rule(c, operand, lower):
out = lapack.jax_syevd(c, operand, lower=lower)
return c.Tuple(c.GetTupleElement(out, 0), c.GetTupleElement(out, 1))
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def lu_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
batch_dims = shape.dimensions()[:-2]
getrf_out = lapack.jax_getrf(c, operand)
lu = c.GetTupleElement(getrf_out, 0)
# Subtract 1 from the pivot to get 0-based indices.
pivot = c.Sub(c.GetTupleElement(getrf_out, 1), c.ConstantS32Scalar(1))
ok = c.Eq(c.GetTupleElement(getrf_out, 2), c.ConstantS32Scalar(0))
lu = _broadcasting_select(
c, c.Reshape(ok, None, batch_dims + (1, 1)), lu, _nan_like(c, lu)
)
return c.Tuple(lu, pivot)
|
def lu_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
dtype = shape.element_type().type
out = lapack.jax_getrf(c, operand)
lu = c.GetTupleElement(out, 0)
# Subtract 1 from the pivot to get 0-based indices.
pivot = c.Sub(c.GetTupleElement(out, 1), c.ConstantS32Scalar(1))
# Throw away the `info` value, because we have no way to report errors.
return c.Tuple(lu, pivot)
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
gesdd_out = lapack.jax_gesdd(
c, operand, full_matrices=full_matrices, compute_uv=compute_uv
)
s = c.GetTupleElement(gesdd_out, 0)
u = c.GetTupleElement(gesdd_out, 1)
vt = c.GetTupleElement(gesdd_out, 2)
ok = c.Eq(c.GetTupleElement(gesdd_out, 3), c.ConstantS32Scalar(0))
s = _broadcasting_select(c, c.Reshape(ok, None, (1,)), s, _nan_like(c, s))
u = _broadcasting_select(c, c.Reshape(ok, None, (1, 1)), u, _nan_like(c, u))
vt = _broadcasting_select(c, c.Reshape(ok, None, (1, 1)), vt, _nan_like(c, vt))
return c.Tuple(s, u, vt)
else:
raise NotImplementedError(
"Only unbatched singular value decomposition is implemented on CPU"
)
|
def svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
out = lapack.jax_gesdd(
c, operand, full_matrices=full_matrices, compute_uv=compute_uv
)
return c.Tuple(
c.GetTupleElement(out, 0),
c.GetTupleElement(out, 1),
c.GetTupleElement(out, 2),
)
else:
raise NotImplementedError(
"Only unbatched singular value decomposition is implemented on CPU"
)
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def cholesky_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
potrf_output = lapack.jax_potrf(c, operand, lower=True)
result = c.GetTupleElement(potrf_output, 0)
info = c.GetTupleElement(potrf_output, 1)
return c.Select(
c.Eq(info, c.ConstantS32Scalar(0)), result, _nan_like(c, result)
)
else:
# Fall back to the HLO implementation for batched Cholesky decomposition or
# unsupported types.
# TODO(phawkins): support LAPACK primitives in batched mode.
return c.Cholesky(operand)
|
def cholesky_cpu_translation_rule(c, operand):
shape = c.GetShape(operand)
dtype = shape.element_type().type
if len(shape.dimensions()) == 2 and dtype in _cpu_lapack_types:
potrf_output = lapack.jax_potrf(c, operand, lower=True)
result = c.GetTupleElement(potrf_output, 0)
info = c.GetTupleElement(potrf_output, 1)
return c.Select(
c.Eq(info, c.ConstantS32Scalar(0)),
result,
c.Broadcast(
c.Constant(onp.array(onp.nan, dtype=dtype)), shape.dimensions()
),
)
else:
# Fall back to the HLO implementation for batched Cholesky decomposition or
# unsupported types.
# TODO(phawkins): support LAPACK primitives in batched mode.
return c.Cholesky(operand)
|
https://github.com/google/jax/issues/775
|
/home/luke/.local/lib/python3.5/site-packages/jax/scipy/linalg.py:36: UserWarning: scipy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
[[0. 1. 2.]
[0. 4. 5.]
[0. 0. 8.]]
From Scipy
Traceback (most recent call last):
File "tmp.py", line 11, in <module>
print(linalg.cholesky(x))
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 91, in cholesky
check_finite=check_finite)
File "/home/luke/.local/lib/python3.5/site-packages/scipy/linalg/decomp_cholesky.py", line 40, in _cholesky
"definite" % info)
numpy.linalg.LinAlgError: 1-th leading minor of the array is not positive definite
|
numpy.linalg.LinAlgError
|
def _wrap_hashably(arg):
try:
hash(arg)
except TypeError:
return WrapHashably(arg) # e.g. ndarrays, DeviceArrays
else:
return Hashable(arg)
|
def _wrap_hashably(arg):
try:
hash(arg)
except TypeError:
return WrapHashably(arg)
else:
return Hashable(arg)
|
https://github.com/google/jax/issues/883
|
Traceback (most recent call last):
File "<ipython-input-7-563ff9ef5fe4>", line 1, in <module>
runfile('/home/hpc/capm/sn0523/SIR/minExample.py', wdir='/home/hpc/capm/sn0523/SIR')
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 25, in <module>
main()
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 21, in main
secondCall = f(x,v)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/api.py", line 123, in f_jitted
out = xla.xla_call(flat_fun, *args_flat, device_values=device_values)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/core.py", line 658, in call_bind
ans = primitive.impl(f, *args, **params)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 653, in xla_call_impl
compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 201, in memoized_fun
if key in cache:
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 174, in __eq__
return self.hashable_payload() == other.hashable_payload()
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 489, in forward_method
return fun(getattr(self, attrname), *args)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def __hash__(self):
raise TypeError("JAX DeviceArray, like numpy.ndarray, is not hashable.")
|
def __hash__(self):
# TODO(mattjj): this is not semantically correct because it is possible
# __eq__ is true for values with unequal __hash__ values. However, the
# main use case at the moment is memoization for which false negatives are
# fine.
return id(self)
|
https://github.com/google/jax/issues/883
|
Traceback (most recent call last):
File "<ipython-input-7-563ff9ef5fe4>", line 1, in <module>
runfile('/home/hpc/capm/sn0523/SIR/minExample.py', wdir='/home/hpc/capm/sn0523/SIR')
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 25, in <module>
main()
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 21, in main
secondCall = f(x,v)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/api.py", line 123, in f_jitted
out = xla.xla_call(flat_fun, *args_flat, device_values=device_values)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/core.py", line 658, in call_bind
ans = primitive.impl(f, *args, **params)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 653, in xla_call_impl
compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 201, in memoized_fun
if key in cache:
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 174, in __eq__
return self.hashable_payload() == other.hashable_payload()
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 489, in forward_method
return fun(getattr(self, attrname), *args)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == "SAME":
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == "VALID":
pad_len = k + s - 2 + _max(k - s, 0)
pad_a = k - 1
else:
raise ValueError("Padding mode must be `SAME` or `VALID`.")
pad_b = pad_len - pad_a
return pad_a, pad_b
|
def _conv_transpose_padding(k, s, padding):
"""Calculate before and after padding for a dim of transposed convolution.
Args:
k: int: kernel dimension.
s: int: dimension stride value.
padding: 'same' or 'valid' padding mode for original forward conv.
Returns:
2-tuple: ints: before and after padding for transposed convolution.
"""
if padding == "SAME":
pad_len = k + s - 2
if s > k - 1:
pad_a = k - 1
else:
pad_a = int(onp.ceil(pad_len / 2))
elif padding == "VALID":
pad_len = k + s - 2 + max(k - s, 0)
pad_a = k - 1
else:
raise ValueError("Padding mode must be `SAME` or `VALID`.")
pad_b = pad_len - pad_a
return pad_a, pad_b
|
https://github.com/google/jax/issues/883
|
Traceback (most recent call last):
File "<ipython-input-7-563ff9ef5fe4>", line 1, in <module>
runfile('/home/hpc/capm/sn0523/SIR/minExample.py', wdir='/home/hpc/capm/sn0523/SIR')
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 827, in runfile
execfile(filename, namespace)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 110, in execfile
exec(compile(f.read(), filename, 'exec'), namespace)
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 25, in <module>
main()
File "/home/hpc/capm/sn0523/SIR/minExample.py", line 21, in main
secondCall = f(x,v)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/api.py", line 123, in f_jitted
out = xla.xla_call(flat_fun, *args_flat, device_values=device_values)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/core.py", line 658, in call_bind
ans = primitive.impl(f, *args, **params)
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 653, in xla_call_impl
compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 201, in memoized_fun
if key in cache:
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/linear_util.py", line 174, in __eq__
return self.hashable_payload() == other.hashable_payload()
File "/home/hpc/capm/sn0523/jax/lib/python3.6/site-packages/jax/interpreters/xla.py", line 489, in forward_method
return fun(getattr(self, attrname), *args)
ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
|
ValueError
|
def arange(*args, **kwargs):
dtype = kwargs.get("dtype", None)
if not args:
raise TypeError(
"Required argument 'start' (pos 1) not found"
) # same as numpy error
# If called like np.arange(N), we create a lazy lax._IotaConstant.
if len(args) == 1 and not kwargs:
(stop,) = args
dtype = dtype or _dtype(stop)
if onp.issubdtype(dtype, onp.integer):
return lax.iota(dtype, stop) # avoids materializing
# Fall back to instantiating an ndarray in host memory
return onp.arange(*args, **kwargs)
|
def arange(*args, **kwargs):
dtype = kwargs.pop("dtype", None)
if not args:
raise TypeError(
"Required argument 'start' (pos 1) not found"
) # same as numpy error
# If called like np.arange(N), we create a lazy lax._IotaConstant.
if len(args) == 1 and not kwargs:
(stop,) = args
dtype = dtype or _dtype(stop)
if onp.issubdtype(dtype, onp.integer):
return lax.iota(dtype, stop) # avoids materializing
# Fall back to instantiating an ndarray in host memory
return onp.arange(*args, **kwargs)
|
https://github.com/google/jax/issues/830
|
int64
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
<ipython-input-12-5b101f76f4c3> in <module>()
1 a = np.arange(4, dtype=np.complex64)
2 print(a.dtype)
----> 3 assert a.dtype is np.complex64
AssertionError:
|
AssertionError
|
def _scatter_add_transpose_rule(
t,
operand,
scatter_indices,
updates,
update_jaxpr,
update_consts,
dimension_numbers,
updates_shape,
):
assert scatter_indices is not None
if t is ad_util.zero:
return [ad_util.zero, None, ad_util.zero]
operand_t = update_t = None
if operand is None:
operand_t = t
if updates is None:
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims,
)
slice_sizes = []
pos = 0
for i in xrange(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(
updates_shape[dimension_numbers.update_window_dims[pos]]
)
pos += 1
update_t = gather(
t, scatter_indices, dimension_numbers=gather_dnums, slice_sizes=slice_sizes
)
return [operand_t, None, update_t]
|
def _scatter_add_transpose_rule(
t,
operand,
scatter_indices,
updates,
update_jaxpr,
update_consts,
dimension_numbers,
updates_shape,
):
assert scatter_indices is not None
operand_t = update_t = None
if operand is None:
operand_t = t
if updates is None:
gather_dnums = GatherDimensionNumbers(
offset_dims=dimension_numbers.update_window_dims,
collapsed_slice_dims=dimension_numbers.inserted_window_dims,
start_index_map=dimension_numbers.scatter_dims_to_operand_dims,
)
slice_sizes = []
pos = 0
for i in xrange(len(t.shape)):
if i in dimension_numbers.inserted_window_dims:
slice_sizes.append(1)
else:
slice_sizes.append(
updates_shape[dimension_numbers.update_window_dims[pos]]
)
pos += 1
update_t = gather(
t, scatter_indices, dimension_numbers=gather_dnums, slice_sizes=slice_sizes
)
return [operand_t, None, update_t]
|
https://github.com/google/jax/issues/776
|
Traceback (most recent call last):
File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 37, in testPartExecutor
yield
File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/third_party/unittest3_backport/case.py", line 162, in run
testMethod()
File "/usr/local/google/home/schsam/.local/lib/python2.7/site-packages/absl/testing/parameterized.py", line 262, in bound_param_test
test_method(self, **testcase_params)
File "simulate_test.py", line 164, in test_grad_through_nvt
assert grad(do_sim)(1.0) > 0.0
File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 235, in grad_f
_, g = value_and_grad_f(*args, **kwargs)
File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 289, in value_and_grad_f
g = vjp_py(onp.ones((), dtype=dtype))
File "/usr/local/google/home/schsam/Source/jax/jax/api_util.py", line 62, in apply_jaxtree_fun
ans = fun(*args)
File "/usr/local/google/home/schsam/Source/jax/jax/api.py", line 822, in out_vjp_packed
return out_vjp(cotangent_in)
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 112, in vjp_
_, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct)
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 180, in backward_pass
eqn.params, subjaxprs, sub_consts, sub_freevar_vals, invals, ct_in)
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 536, in call_transpose
ans = primitive.bind(fun, all_args, **params)
File "/usr/local/google/home/schsam/Source/jax/jax/core.py", line 636, in call_bind
ans = primitive.impl(f, *args, **params)
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/xla.py", line 591, in xla_call_impl
compiled_fun = xla_callable(fun, device_values, *map(abstractify, args))
File "/usr/local/google/home/schsam/Source/jax/jax/linear_util.py", line 208, in memoized_fun
ans = call(f, *args)
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/xla.py", line 604, in xla_callable
jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)
File "/usr/local/google/home/schsam/Source/jax/jax/linear_util.py", line 147, in call_wrapped
ans = self.f(*args, **dict(self.params, **kwargs))
File "/usr/local/google/home/schsam/Source/jax/jax/interpreters/ad.py", line 186, in backward_pass
cts_out = get_primitive_transpose(eqn.primitive)(ct_in, *invals, **eqn.params)
File "/usr/local/google/home/schsam/Source/jax/jax/lax/lax.py", line 2818, in _scatter_add_transpose_rule
for i in xrange(len(t.shape)):
AttributeError: 'Zero' object has no attribute 'shape'
|
AttributeError
|
def tensordot(a, b, axes=2):
_check_arraylike("tensordot", a, b)
if not (ndim(a) >= 1 and ndim(b) >= 1):
msg = "tensordot requires a.ndim and b.dim to be at least 1, got {} and {}."
raise TypeError(msg.format(ndim(a), ndim(b)))
if type(axes) is int:
if axes == 0:
a, b = _promote_dtypes(a, b)
return lax.mul(
lax.reshape(a, shape(a) + (1,) * ndim(b)),
lax.reshape(b, (1,) * ndim(a) + shape(b)),
)
else:
a, b = _promote_dtypes(a, b)
a_reshape = lax.reshape(a, (_prod(a.shape[:-axes]), _prod(a.shape[-axes:])))
b_reshape = lax.reshape(b, (_prod(b.shape[:axes]), _prod(b.shape[axes:])))
out_reshape = lax.dot(a_reshape, b_reshape)
return lax.reshape(out_reshape, a.shape[:-axes] + b.shape[axes:])
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
a_transposed = moveaxis(a, ax1, -1) if ax1 != a.ndim - 1 else a
b_transposed = moveaxis(b, ax2, 0) if ax2 != 0 else b
return tensordot(a_transposed, b_transposed, 1)
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = (
"tensordot requires axes lists to have equal length, got {} and {}."
)
raise TypeError(msg.format(ax1, ax2))
num_axes = len(ax1)
a_transposed = moveaxis(a, ax1, tuple(range(a.ndim - num_axes, a.ndim)))
b_transposed = moveaxis(b, ax2, tuple(range(num_axes)))
return tensordot(a_transposed, b_transposed, num_axes)
msg = (
"tensordot axes argument must be an int, a pair of ints, or a pair of "
"lists/tuples of ints."
)
raise TypeError(msg)
|
def tensordot(a, b, axes=2):
_check_arraylike("tensordot", a, b)
if not (ndim(a) >= 1 and ndim(b) >= 1):
msg = "tensordot requires a.ndim and b.dim to be at least 1, got {} and {}."
raise TypeError(msg.format(ndim(a), ndim(b)))
if type(axes) is int:
a, b = _promote_dtypes(a, b)
a_reshape = lax.reshape(a, (_prod(a.shape[:-axes]), _prod(a.shape[-axes:])))
b_reshape = lax.reshape(b, (_prod(b.shape[:axes]), _prod(b.shape[axes:])))
out_reshape = lax.dot(a_reshape, b_reshape)
return lax.reshape(out_reshape, a.shape[:-axes] + b.shape[axes:])
elif type(axes) in (list, tuple) and len(axes) == 2:
ax1, ax2 = axes
if type(ax1) == type(ax2) == int:
a_transposed = moveaxis(a, ax1, -1) if ax1 != a.ndim - 1 else a
b_transposed = moveaxis(b, ax2, 0) if ax2 != 0 else b
return tensordot(a_transposed, b_transposed, 1)
elif type(ax1) in (list, tuple) and type(ax2) in (list, tuple):
if len(ax1) != len(ax2):
msg = (
"tensordot requires axes lists to have equal length, got {} and {}."
)
raise TypeError(msg.format(ax1, ax2))
num_axes = len(ax1)
a_transposed = moveaxis(a, ax1, tuple(range(a.ndim - num_axes, a.ndim)))
b_transposed = moveaxis(b, ax2, tuple(range(num_axes)))
return tensordot(a_transposed, b_transposed, num_axes)
msg = (
"tensordot axes argument must be an int, a pair of ints, or a pair of "
"lists/tuples of ints."
)
raise TypeError(msg)
|
https://github.com/google/jax/issues/740
|
result = np.tensordot(np.ones((2, 3, 4)), np.ones((5, 6, 7)), 0)
/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py:130: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py", line 267, in Build
*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jaxlib/xla_client.py", line 640, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Cannot infer shape for dot operation: f32[1,24] <dot> f32[1,210]. Contracting dimension sizes do not match.:
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/numpy/lax_numpy.py", line 1598, in tensordot
out_reshape = lax.dot(a_reshape, b_reshape)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 462, in dot
return dot_p.bind(lhs, rhs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/core.py", line 117, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 73, in primitive_computation
prim.abstract_eval(*map(aval_from_xla_shape, shapes), **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1295, in standard_abstract_eval
return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1824, in _dot_shape_rule
require(lhs.shape[1] == rhs.shape[0])
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1818, in require
raise TypeError(msg.format(lhs.shape, rhs.shape))
TypeError: Incompatible shapes for dot: got (1, 24) and (1, 210).
|
RuntimeError
|
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if out:
raise NotImplementedError("The 'out' argument to trace is not supported.")
axis1 = axis1 % ndim(a)
axis2 = axis2 % ndim(a)
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = xla_bridge.canonicalize_dtype(onp.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
# Move the axis? dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce.
a = where(
eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool), a, zeros_like(a)
)
return sum(a, axis=(-2, -1), dtype=dtype)
|
def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if out:
raise NotImplementedError("The 'out' argument to trace is not supported.")
a_shape = shape(a)
if dtype is None:
dtype = _dtype(a)
if issubdtype(dtype, integer):
default_int = xla_bridge.canonicalize_dtype(onp.int_)
if iinfo(dtype).bits < iinfo(default_int).bits:
dtype = default_int
# Move the axis? dimensions to the end.
perm = [i for i in range(len(a_shape)) if i != axis1 and i != axis2]
perm = perm + [axis1, axis2]
a = lax.transpose(a, perm)
# Mask out the diagonal and reduce.
a = where(
eye(a_shape[axis1], a_shape[axis2], k=offset, dtype=bool), a, zeros_like(a)
)
return sum(a, axis=(-2, -1), dtype=dtype)
|
https://github.com/google/jax/issues/738
|
print(np.trace(np.ones((2, 3, 4, 4)), axis1=-1, axis2=-2))
Traceback (most recent call last):
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lib/xla_bridge.py", line 267, in Build
*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jaxlib/xla_client.py", line 640, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: Transpose dimensions [0,1,2,3,-1,-2] are not a permutation of the operand dimensions (operand shape is f32[2,3,4,4]).:
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/numpy/lax_numpy.py", line 1465, in trace
a = lax.transpose(a, perm)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 685, in transpose
return transpose_p.bind(operand, permutation=permutation)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/core.py", line 117, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/interpreters/xla.py", line 73, in primitive_computation
prim.abstract_eval(*map(aval_from_xla_shape, shapes), **kwargs)
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 1295, in standard_abstract_eval
return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
File "/usr/local/google/home/chaseriley/.local/lib/python3.6/site-packages/jax/lax/lax.py", line 2268, in _transpose_shape_rule
raise TypeError(msg.format(permutation, operand.shape))
TypeError: transpose permutation isn't a permutation of operand dimensions, got permutation (0, 1, 2, 3, -1, -2) for operand shape (2, 3, 4, 4).
|
RuntimeError
|
def partial_eval(f, trace, pvs):
f = trace_to_subjaxpr(f, trace.master, False)
return partial_eval_wrapper(f, tuple(pvs))
|
def partial_eval(f, trace, pvs):
f = trace_to_subjaxpr(f, trace.master)
return partial_eval_wrapper(f, tuple(pvs))
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def trace_to_jaxpr(fun, pvals, **kwargs):
"""Traces a function, given abstract inputs, to a jaxpr."""
instantiate = kwargs.pop("instantiate", False)
with new_master(JaxprTrace) as master:
fun = trace_to_subjaxpr(fun, master, instantiate)
jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals)
assert not env
del master
return jaxpr, out_pval, consts
|
def trace_to_jaxpr(fun, pvals):
"""Traces a function, given abstract inputs, to a jaxpr."""
with new_master(JaxprTrace) as master:
fun = trace_to_subjaxpr(fun, master)
jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals)
assert not env
del master
return jaxpr, out_pval, consts
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def trace_to_subjaxpr(master, instantiate, pvals):
assert all([isinstance(pv, PartialVal) for pv in pvals]), pvals
trace = JaxprTrace(master, core.cur_sublevel())
in_tracers = map(trace.new_arg, pvals)
out_tracer = yield in_tracers, {}
out_tracer = trace.full_raise(out_tracer)
if instantiate:
out_tracer = trace.instantiate_const(out_tracer)
jaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracer)
out_pval = out_tracer.pval
del trace, in_tracers, out_tracer
yield jaxpr, (out_pval, consts, env)
|
def trace_to_subjaxpr(master, pvals):
assert all([isinstance(pv, PartialVal) for pv in pvals]), pvals
trace = JaxprTrace(master, core.cur_sublevel())
in_tracers = map(trace.new_arg, pvals)
out_tracer = yield in_tracers, {}
out_tracer = trace.full_raise(out_tracer)
jaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracer)
out_pval = out_tracer.pval
del trace, in_tracers, out_tracer
yield jaxpr, (out_pval, consts, env)
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def compiled_call_impl(fun, *args):
with new_master(JaxprTrace, True) as master:
pvals = map(abstractify, args)
jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master, False).call_wrapped(
pvals
)
jaxpr_ans = eval_jaxpr_raw(jaxpr, consts, env, *args)
ans = merge_pvals(jaxpr_ans, pval)
del master, pvals, pval, consts, env, jaxpr_ans, jaxpr
return ans
|
def compiled_call_impl(fun, *args):
with new_master(JaxprTrace, True) as master:
pvals = map(abstractify, args)
jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)
jaxpr_ans = eval_jaxpr_raw(jaxpr, consts, env, *args)
ans = merge_pvals(jaxpr_ans, pval)
del master, pvals, pval, consts, env, jaxpr_ans, jaxpr
return ans
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def parallel_callable(fun, axis_name, axis_size, *avals):
pvals = [PartialVal((aval, core.unit)) for aval in avals]
with core.new_master(JaxprTrace, True) as master:
jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master, False).call_wrapped(
pvals
)
assert not env
out = compile_replicated(jaxpr, axis_name, axis_size, consts, *avals)
compiled, nrep, result_shape = out
del master, consts, jaxpr, env
handle_arg = partial(shard_arg, compiled._device_ordinals)
handle_result = xla.result_handler(result_shape)
return partial(
execute_replicated, compiled, pval, axis_size, nrep, handle_arg, handle_result
)
|
def parallel_callable(fun, axis_name, axis_size, *avals):
pvals = [PartialVal((aval, core.unit)) for aval in avals]
with core.new_master(JaxprTrace, True) as master:
jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)
assert not env
out = compile_replicated(jaxpr, axis_name, axis_size, consts, *avals)
compiled, nrep, result_shape = out
del master, consts, jaxpr, env
handle_arg = partial(shard_arg, compiled._device_ordinals)
handle_result = xla.result_handler(result_shape)
return partial(
execute_replicated, compiled, pval, axis_size, nrep, handle_arg, handle_result
)
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def xla_callable(fun, *abstract_args):
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.JaxprTrace, True) as master:
jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(
fun, master, False
).call_wrapped(pvals)
assert not env # no subtraces here (though cond might eventually need them)
compiled, result_shape = compile_jaxpr(jaxpr, consts, *abstract_args)
del master, consts, jaxpr, env
handle_result = result_handler(result_shape)
return partial(execute_compiled, compiled, pval, handle_result)
|
def xla_callable(fun, *abstract_args):
pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]
with core.new_master(pe.JaxprTrace, True) as master:
jaxpr, (pval, consts, env) = pe.trace_to_subjaxpr(fun, master).call_wrapped(
pvals
)
assert not env # no subtraces here (though cond might eventually need them)
compiled, result_shape = compile_jaxpr(jaxpr, consts, *abstract_args)
del master, consts, jaxpr, env
handle_result = result_handler(result_shape)
return partial(execute_compiled, compiled, pval, handle_result)
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def while_loop(cond_fun, body_fun, init_val):
"""Call `body_fun` repeatedly in a loop while `cond_fun` is True.
Arguments:
cond_fun: pure function of type `T -> Bool`.
body_fun: pure function of type `T -> T`.
init_val: value of type `T`, a type that can be a scalar, array, or any
(nested) Python tuple/list/dict thereof.
Returns:
The output from the final iteration of body_fun, of type `T`.
The semantics of `while_loop` are given by this Python implementation::
def while_loop(cond_fun, body_fun, init_val):
val = init_val
while cond_fun(val):
val = body_fun(val)
return val
Unlike that pure Python version, `while_loop` is a JAX primitive and is
lowered to a single XLA While HLO. That makes it useful for reducing
compilation times for jit-compiled functions, since native Python loop
constructs in an `@jit` function are unrolled, leading to large XLA
computations.
Another difference from using Python-native loop constructs is that
`while_loop` is not (yet) reverse-mode differentiable because XLA computations
require static bounds on memory requirements.
"""
init_val_flat, in_tree = pytree_to_jaxtupletree(init_val)
flat_body_fun, out_tree = pytree_fun_to_jaxtupletree_fun(
lu.wrap_init(body_fun), (in_tree,)
)
flat_cond_fun, _ = pytree_fun_to_jaxtupletree_fun(
lu.wrap_init(cond_fun), (in_tree,)
)
carry_pval_flat = carry_aval, _ = lax._abstractify(init_val_flat)
cond_jaxpr, cond_pval_out, cond_consts = pe.trace_to_jaxpr(
flat_cond_fun, (carry_pval_flat,)
)
body_jaxpr, body_pval_out, body_consts = pe.trace_to_jaxpr(
flat_body_fun, (carry_pval_flat,), instantiate=True
)
carry_aval_out, _ = body_pval_out
assert isinstance(carry_aval_out, core.AbstractValue)
assert carry_aval == core.lattice_join(carry_aval, carry_aval_out)
cond_pv, cond_const = cond_pval_out
if cond_pv is None:
# cond_fun evaluates to a constant, so don't need to generate a while_loop
if cond_const:
raise ValueError("infinite loop with no effects")
else:
return init_val
else:
assert isinstance(cond_pv, core.AbstractValue)
if (
not isinstance(cond_pv, ShapedArray)
or cond_pv.shape
or cond_pv.dtype != onp.bool_
):
msg = "while_loop cond_fun must return a scalar boolean, got {}."
raise TypeError(msg.format(cond_pv))
# We don't want to promote literal constants as loop arguments; there are
# sometimes many of them. We pass tracers as loop arguments, but leave
# nontracers as constants. We also sort the constants so the nontracers are
# first.
def split_tracers_and_nontracers(jaxpr, consts):
tracer = []
nontracer = []
for x in zip(jaxpr.constvars, consts):
# TODO(phawkins): We avoid treating DeviceArrays as constant literals so
# we don't copy large arrays back to the host. We probably should relax
# this and either always copy small constants, or opportunistically use
# DeviceArray values for which we already know npy_value.
not_literal_const = isinstance(x[1], (core.Tracer, xla.DeviceArray))
(tracer if not_literal_const else nontracer).append(x)
tracer_vars, tracer_consts = unzip2(tracer)
nontracer_vars, nontracer_consts = unzip2(nontracer)
return nontracer_vars + tracer_vars, nontracer_consts, tracer_consts
cond_split = split_tracers_and_nontracers(cond_jaxpr, cond_consts)
cond_jaxpr.constvars, cond_nontracer_consts, cond_tracer_consts = cond_split
body_split = split_tracers_and_nontracers(body_jaxpr, body_consts)
body_jaxpr.constvars, body_nontracer_consts, body_tracer_consts = body_split
if out_tree() != in_tree:
raise TypeError("body_fun input and output must have identical structure")
out_flat = while_p.bind(
init_val_flat,
core.pack(cond_tracer_consts),
core.pack(body_tracer_consts),
cond_consts=lax._OpaqueParam(cond_nontracer_consts),
body_consts=lax._OpaqueParam(body_nontracer_consts),
aval_out=carry_aval_out,
cond_jaxpr=cond_jaxpr,
body_jaxpr=body_jaxpr,
)
return build_tree(out_tree(), out_flat)
|
def while_loop(cond_fun, body_fun, init_val):
"""Call `body_fun` repeatedly in a loop while `cond_fun` is True.
Arguments:
cond_fun: pure function of type `T -> Bool`.
body_fun: pure function of type `T -> T`.
init_val: value of type `T`, a type that can be a scalar, array, or any
(nested) Python tuple/list/dict thereof.
Returns:
The output from the final iteration of body_fun, of type `T`.
The semantics of `while_loop` are given by this Python implementation::
def while_loop(cond_fun, body_fun, init_val):
val = init_val
while cond_fun(val):
val = body_fun(val)
return val
Unlike that pure Python version, `while_loop` is a JAX primitive and is
lowered to a single XLA While HLO. That makes it useful for reducing
compilation times for jit-compiled functions, since native Python loop
constructs in an `@jit` function are unrolled, leading to large XLA
computations.
Another difference from using Python-native loop constructs is that
`while_loop` is not (yet) reverse-mode differentiable because XLA computations
require static bounds on memory requirements.
"""
init_val_flat, in_tree = pytree_to_jaxtupletree(init_val)
flat_body_fun, out_tree = pytree_fun_to_jaxtupletree_fun(
lu.wrap_init(body_fun), (in_tree,)
)
flat_cond_fun, _ = pytree_fun_to_jaxtupletree_fun(
lu.wrap_init(cond_fun), (in_tree,)
)
pval_flat = lax._abstractify(init_val_flat)
cond_jaxpr, _, cond_consts = pe.trace_to_jaxpr(flat_cond_fun, (pval_flat,))
body_jaxpr, pval_out, body_consts = pe.trace_to_jaxpr(flat_body_fun, (pval_flat,))
aval_out, _ = pval_out
# We don't want to promote literal constants as loop arguments; there are
# sometimes many of them. We pass tracers as loop arguments, but leave
# nontracers as constants. We also sort the constants so the nontracers are
# first.
def split_tracers_and_nontracers(jaxpr, consts):
tracer = []
nontracer = []
for x in zip(jaxpr.constvars, consts):
# TODO(phawkins): We avoid treating DeviceArrays as constant literals so
# we don't copy large arrays back to the host. We probably should relax
# this and either always copy small constants, or opportunistically use
# DeviceArray values for which we already know npy_value.
not_literal_const = isinstance(x[1], (core.Tracer, xla.DeviceArray))
(tracer if not_literal_const else nontracer).append(x)
tracer_vars, tracer_consts = unzip2(tracer)
nontracer_vars, nontracer_consts = unzip2(nontracer)
return nontracer_vars + tracer_vars, nontracer_consts, tracer_consts
cond_split = split_tracers_and_nontracers(cond_jaxpr, cond_consts)
cond_jaxpr.constvars, cond_nontracer_consts, cond_tracer_consts = cond_split
body_split = split_tracers_and_nontracers(body_jaxpr, body_consts)
body_jaxpr.constvars, body_nontracer_consts, body_tracer_consts = body_split
if out_tree() != in_tree:
raise TypeError("body_fun input and output must have identical structure")
out_flat = while_p.bind(
init_val_flat,
core.pack(cond_tracer_consts),
core.pack(body_tracer_consts),
cond_consts=lax._OpaqueParam(cond_nontracer_consts),
body_consts=lax._OpaqueParam(body_nontracer_consts),
aval_out=aval_out,
cond_jaxpr=cond_jaxpr,
body_jaxpr=body_jaxpr,
)
return build_tree(out_tree(), out_flat)
|
https://github.com/google/jax/issues/649
|
$ python loop.py
/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py:144: UserWarning: No GPU/TPU found, falling back to CPU.
warnings.warn('No GPU/TPU found, falling back to CPU.')
Traceback (most recent call last):
File "loop.py", line 11, in <module>
print(lax.while_loop(cond, body, (33, 4)))
File "/usr/local/google/home/phawkins/p/jax/jax/lax/lax_control_flow.py", line 108, in while_loop
aval_out=aval_out, cond_jaxpr=cond_jaxpr, body_jaxpr=body_jaxpr)
File "/usr/local/google/home/phawkins/p/jax/jax/core.py", line 75, in bind
return self.impl(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 51, in apply_primitive
compiled_fun = xla_primitive_callable(prim, *abstract_args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 57, in xla_primitive_callable
built_c = primitive_computation(prim, *shapes, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/util.py", line 174, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 74, in primitive_computation
raise e
File "/usr/local/google/home/phawkins/p/jax/jax/interpreters/xla.py", line 70, in primitive_computation
return c.Build()
File "/usr/local/google/home/phawkins/p/jax/jax/lib/xla_bridge.py", line 283, in Build
*args, **kwargs)
File "/usr/local/google/home/phawkins/.pyenv/versions/3.7.3/lib/python3.7/site-packages/jaxlib/xla_client.py", line 875, in Build
return Computation(self._builder.Build(), backend=backend)
RuntimeError: Invalid argument: The parameter of condition and body, the result of the body, and init must all have the same shape; got Condition: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> pred[]; body: (parameter: ((s32[], s32[]), (s32[]), (s32[]))) -> (((), s32[]), (s32[]), (s32[])); init: ((s32[], s32[]), (s32[]), (s32[]))..:
|
RuntimeError
|
def atleast_1d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 1 else reshape(arr, -1)
else:
return [atleast_1d(arr) for arr in arys]
|
def atleast_1d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if arr.ndim >= 1 else arr.reshape(-1)
else:
return [atleast_1d(arr) for arr in arys]
|
https://github.com/google/jax/issues/634
|
In [3]: onp.atleast_1d(1)
Out[3]: array([1])
In [4]: np.atleast_1d(1)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-84084c6642da> in <module>
----> 1 np.atleast_1d(1)
~/src/jax/jax/numpy/lax_numpy.py in atleast_1d(*arys)
1182 arr = array(arys[0])
-> 1183 return arr if arr.ndim >= 1 else arr.reshape(-1)
1184 else:
1185 return [atleast_1d(arr) for arr in arys]
AttributeError: 'int' object has no attribute 'ndim'
In [5]: np.atleast_1d(np.int64(1))
Out[5]: array([1])
|
AttributeError
|
def atleast_2d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if ndim(arr) >= 2 else reshape(arr, (1, -1))
else:
return [atleast_2d(arr) for arr in arys]
|
def atleast_2d(*arys):
if len(arys) == 1:
arr = array(arys[0])
return arr if arr.ndim >= 2 else arr.reshape((1, -1))
else:
return [atleast_2d(arr) for arr in arys]
|
https://github.com/google/jax/issues/634
|
In [3]: onp.atleast_1d(1)
Out[3]: array([1])
In [4]: np.atleast_1d(1)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-84084c6642da> in <module>
----> 1 np.atleast_1d(1)
~/src/jax/jax/numpy/lax_numpy.py in atleast_1d(*arys)
1182 arr = array(arys[0])
-> 1183 return arr if arr.ndim >= 1 else arr.reshape(-1)
1184 else:
1185 return [atleast_1d(arr) for arr in arys]
AttributeError: 'int' object has no attribute 'ndim'
In [5]: np.atleast_1d(np.int64(1))
Out[5]: array([1])
|
AttributeError
|
def atleast_3d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) <= 1:
arr = reshape(arr, (1, -1, 1))
elif ndim(arr) == 2:
arr = reshape(arr, shape(arr) + (1,))
return arr
else:
return [atleast_3d(arr) for arr in arys]
|
def atleast_3d(*arys):
if len(arys) == 1:
arr = array(arys[0])
if ndim(arr) <= 1:
arr = arr.reshape((1, -1, 1))
elif ndim(arr) == 2:
arr = arr.reshape(shape(arr) + (1,))
return arr
else:
return [atleast_3d(arr) for arr in arys]
|
https://github.com/google/jax/issues/634
|
In [3]: onp.atleast_1d(1)
Out[3]: array([1])
In [4]: np.atleast_1d(1)
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-4-84084c6642da> in <module>
----> 1 np.atleast_1d(1)
~/src/jax/jax/numpy/lax_numpy.py in atleast_1d(*arys)
1182 arr = array(arys[0])
-> 1183 return arr if arr.ndim >= 1 else arr.reshape(-1)
1184 else:
1185 return [atleast_1d(arr) for arr in arys]
AttributeError: 'int' object has no attribute 'ndim'
In [5]: np.atleast_1d(np.int64(1))
Out[5]: array([1])
|
AttributeError
|
def _brcast(x, *others):
# Used in jvprules to make binop broadcasting explicit for transposability.
# Requires shape info during jvp tracing, which isn't strictly necessary.
# We don't need full numpy broadcasting, but otherwise the logic is the same
# so we reuse the broadcast_shapes function after filtering out scalars.
shapes = tuple(filter(None, map(onp.shape, (x,) + others)))
shape = shapes and broadcast_shapes(*shapes)
if onp.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
|
def _brcast(x, *others):
# used in jvprules to make binop broadcasting explicit for transposability.
# requires shape info during jvp tracing, which isn't strictly necessary.
shapes = list(filter(None, map(onp.shape, (x,) + others)))
shape = tuple(shapes and onp.max(shapes, axis=0))
if onp.shape(x) != shape:
return _brcast_to(x, shape)
else:
return x
|
https://github.com/google/jax/issues/354
|
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:53: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
-8.891344
[-8.891344 -8.891344 -8.891344 -8.891344 -8.891344]
TypeErrorTraceback (most recent call last)
<ipython-input-3-73aa1b00356c> in <module>()
14
15 vmapped_f_grad = jax.grad(vmapped_f)
---> 16 print(vmapped_f_grad(0.1 + onp.zeros((5, 1))))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in grad_f(*args, **kwargs)
112 @wraps(fun, docstr=docstr, argnums=argnums)
113 def grad_f(*args, **kwargs):
--> 114 ans, g = value_and_grad_f(*args, **kwargs)
115 return g
116
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in value_and_grad_f(*args, **kwargs)
147 f = lu.wrap_init(fun, kwargs)
148 f_partial, dyn_args = argnums_partial(f, argnums, args)
--> 149 ans, vjp_py = vjp(f_partial, *dyn_args)
150 check_scalar(ans)
151 g = vjp_py(onp.ones((), onp.result_type(ans)))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in vjp(fun, *primals)
358 check_args(primals_flat)
359 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 360 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
361 out_tree = out_tree()
362 out_primal_py = build_tree(out_tree, out_primal)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in vjp(traceable, primals)
72
73 def vjp(traceable, primals):
---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals)
75 def vjp_(ct):
76 ct = ignore_consts(ct, pval)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in linearize(traceable, *primals)
65 in_pvals = (pe.PartialVal((None, pack(primals))),
66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit)))
---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals)
68 pval_primal, pval_tangent = unpair_pval(out_pval)
69 aval_primal, const_primal = pval_primal
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in trace_to_jaxpr(fun, pvals, **kwargs)
254 with new_master(JaxprTrace) as master:
255 fun = trace_to_subjaxpr(fun, master)
--> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)
257 assert not env
258 del master
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in batched_fun(*args, **kwargs)
253 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
254 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 255 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
256 return build_tree(out_tree(), out_flat)
257
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in batch(fun, in_vals, in_dims, out_dim_target)
41 return fun.call_wrapped(*in_vals), None # no mapped dimensions
42 elif len(sizes) == 1:
---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val)
45 else:
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
<ipython-input-3-73aa1b00356c> in f(scale)
5 def f(scale):
6 scaled_mat = scale * psd_mat
----> 7 chol = np.linalg.cholesky(scaled_mat)
8 return -0.5 * np.sum((np.dot(chol, vec))**2)
9
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.pyc in cholesky(a)
53 warnings.warn(_EXPERIMENTAL_WARNING)
54 a = _promote_arg_dtypes(np.asarray(a))
---> 55 return lax_linalg.cholesky(a)
56
57
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in process_primitive(self, primitive, tracers, params)
120 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
121 batched_primitive = get_primitive_batcher(primitive)
--> 122 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
123 return BatchTracer(self, val_out, dim_out)
124
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_batching_rule(batched_args, batch_dims)
89 bd, = batch_dims
90 x = batching.bdim_at_front(x, bd)
---> 91 return cholesky(x), 0
92
93 cholesky_p = standard_unop(_float | _complex, 'cholesky')
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in process_primitive(self, primitive, tracers, params)
178 "Forward-mode differentiation rule for '{}' not implemented"
179 .format(primitive))
--> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
181 return JVPTracer(self, primal_out, tangent_out)
182
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_jvp_rule(primals, tangents)
82 left_side=False, transpose_a=True, lower=True)
83 L_dot = lax.dot(L, phi(triangular_solve(
---> 84 L, tmp, left_side=True, transpose_a=False, lower=True)))
85 return L, L_dot
86
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in dot(lhs, rhs)
190 rhs_shape=rhs.shape)
191
--> 192 def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
193
194 def dot_general(lhs, rhs, dimension_numbers):
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in process_primitive(self, primitive, tracers, params)
67 tracers = map(self.instantiate_const, tracers)
68 avals = [t.aval for t in tracers]
---> 69 out_aval = primitive.abstract_eval(*avals, **params)
70 eqn = JaxprEqn(tracers, None, primitive, (), False, params)
71 return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs)
753 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
754 elif least_specialized is ShapedArray:
--> 755 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
756 elif least_specialized is UnshapedArray:
757 return UnshapedArray(dtype_rule(*args, **kwargs))
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in _dot_shape_rule(lhs, rhs)
1227 if lhs.ndim > 2 or rhs.ndim > 2:
1228 msg = "Dot only supports rank 2 or less, got shapes {} and {}."
-> 1229 raise TypeError(msg.format(lhs.shape, rhs.shape))
1230
1231 def require(shape_cond):
TypeError: Dot only supports rank 2 or less, got shapes (5, 10, 10) and (5, 10, 10).
|
TypeError
|
def _dot_general_shape_rule(lhs, rhs, dimension_numbers):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if len(lhs_batch) != len(rhs_batch):
msg = (
"dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}."
)
raise TypeError(msg.format(lhs_batch, rhs_batch))
if not onp.all(onp.equal(lhs_batch, rhs_batch)):
msg = (
"dot_general requires same lhs and rhs batch dimension numbers, "
"got {} and {}."
)
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
msg = (
"dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}."
)
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
msg = (
"dot_general requires lhs batch dimensions to precede contracting "
"and non-contracting dimensions, got lhs_batch {}."
)
raise TypeError(msg.format(lhs_batch))
if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
msg = (
"dot_general requires rhs batch dimensions to precede contracting "
"and non-contracting dimensions, got rhs_batch {}."
)
raise TypeError(msg.format(rhs_batch))
lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = (
"dot_general requires contracting dimensions to have the same "
"shape, got {} and {}."
)
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
if lhs.ndim > len(lhs_batch) + len(lhs_contracting) + 1:
msg = (
"dot_general requires either one or zero non-batch non-contracting "
"lhs dimension, got {}."
)
diff = lhs.ndim - len(lhs_batch) - len(lhs_contracting)
raise TypeError(msg.format(diff))
if rhs.ndim > len(rhs_batch) + len(rhs_contracting) + 1:
msg = (
"dot_general requires either one or zero non-batch non-contracting "
"rhs dimension, got {}."
)
diff = rhs.ndim - len(rhs_batch) - len(rhs_contracting)
raise TypeError(msg.format(diff))
batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
|
def _dot_general_shape_rule(lhs, rhs, dimension_numbers):
(lhs_contracting, rhs_contracting), (lhs_batch, rhs_batch) = dimension_numbers
if len(lhs_batch) != len(rhs_batch):
msg = (
"dot_general requires equal numbers of lhs_batch and rhs_batch "
"dimensions, got lhs_batch {} and rhs_batch {}."
)
raise TypeError(msg.format(lhs_batch, rhs_batch))
if not onp.all(onp.equal(lhs_batch, rhs_batch)):
msg = (
"dot_general requires same lhs and rhs batch dimension numbers, "
"got {} and {}."
)
raise TypeError(msg.format(lhs_batch, rhs_batch))
lhs_batch_shape = onp.take(lhs.shape, lhs_batch)
rhs_batch_shape = onp.take(rhs.shape, rhs_batch)
if not onp.all(onp.equal(lhs_batch_shape, rhs_batch_shape)):
msg = (
"dot_general requires lhs batch dimensions and rhs batch dimensions "
"to have the same shape, got {} and {}."
)
raise TypeError(msg.format(lhs_batch_shape, rhs_batch_shape))
if tuple(sorted(lhs_batch)) != tuple(range(len(lhs_batch))):
msg = (
"dot_general requires lhs batch dimensions to precede contracting "
"and non-contracting dimensions, got lhs_batch {}."
)
raise TypeError(msg.format(lhs_batch))
if tuple(sorted(rhs_batch)) != tuple(range(len(rhs_batch))):
msg = (
"dot_general requires rhs batch dimensions to precede contracting "
"and non-contracting dimensions, got rhs_batch {}."
)
raise TypeError(msg.format(rhs_batch))
if not len(lhs_contracting) == len(rhs_contracting) == 1:
msg = (
"dot_general accepts exactly one lhs_contracting and "
"rhs_contracting dimension, got {} and {}."
)
raise TypeError(msg.format(lhs_contracting, rhs_contracting))
lhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)
rhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)
if not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):
msg = (
"dot_general requires contracting dimensions to have the same "
"shape, got {} and {}."
)
raise TypeError(msg.format(lhs_contracting_shape, rhs_contracting_shape))
if lhs.ndim > len(lhs_batch) + len(lhs_contracting) + 1:
msg = (
"dot_general requires either one or zero non-batch non-contracting "
"lhs dimension, got {}."
)
diff = lhs.ndim - len(lhs_batch) - len(lhs_contracting)
raise TypeError(msg.format(diff))
if rhs.ndim > len(rhs_batch) + len(rhs_contracting) + 1:
msg = (
"dot_general requires either one or zero non-batch non-contracting "
"rhs dimension, got {}."
)
diff = rhs.ndim - len(rhs_batch) - len(rhs_contracting)
raise TypeError(msg.format(diff))
batch_shape = tuple(onp.take(lhs.shape, lhs_batch))
lhs_contract_or_batch = tuple(lhs_contracting) + tuple(lhs_batch)
lhs_tensored_shape = tuple(onp.delete(lhs.shape, lhs_contract_or_batch))
rhs_contract_or_batch = tuple(rhs_contracting) + tuple(rhs_batch)
rhs_tensored_shape = tuple(onp.delete(rhs.shape, rhs_contract_or_batch))
return batch_shape + lhs_tensored_shape + rhs_tensored_shape
|
https://github.com/google/jax/issues/354
|
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:53: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
-8.891344
[-8.891344 -8.891344 -8.891344 -8.891344 -8.891344]
TypeErrorTraceback (most recent call last)
<ipython-input-3-73aa1b00356c> in <module>()
14
15 vmapped_f_grad = jax.grad(vmapped_f)
---> 16 print(vmapped_f_grad(0.1 + onp.zeros((5, 1))))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in grad_f(*args, **kwargs)
112 @wraps(fun, docstr=docstr, argnums=argnums)
113 def grad_f(*args, **kwargs):
--> 114 ans, g = value_and_grad_f(*args, **kwargs)
115 return g
116
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in value_and_grad_f(*args, **kwargs)
147 f = lu.wrap_init(fun, kwargs)
148 f_partial, dyn_args = argnums_partial(f, argnums, args)
--> 149 ans, vjp_py = vjp(f_partial, *dyn_args)
150 check_scalar(ans)
151 g = vjp_py(onp.ones((), onp.result_type(ans)))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in vjp(fun, *primals)
358 check_args(primals_flat)
359 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 360 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
361 out_tree = out_tree()
362 out_primal_py = build_tree(out_tree, out_primal)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in vjp(traceable, primals)
72
73 def vjp(traceable, primals):
---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals)
75 def vjp_(ct):
76 ct = ignore_consts(ct, pval)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in linearize(traceable, *primals)
65 in_pvals = (pe.PartialVal((None, pack(primals))),
66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit)))
---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals)
68 pval_primal, pval_tangent = unpair_pval(out_pval)
69 aval_primal, const_primal = pval_primal
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in trace_to_jaxpr(fun, pvals, **kwargs)
254 with new_master(JaxprTrace) as master:
255 fun = trace_to_subjaxpr(fun, master)
--> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)
257 assert not env
258 del master
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in batched_fun(*args, **kwargs)
253 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
254 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 255 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
256 return build_tree(out_tree(), out_flat)
257
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in batch(fun, in_vals, in_dims, out_dim_target)
41 return fun.call_wrapped(*in_vals), None # no mapped dimensions
42 elif len(sizes) == 1:
---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val)
45 else:
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
<ipython-input-3-73aa1b00356c> in f(scale)
5 def f(scale):
6 scaled_mat = scale * psd_mat
----> 7 chol = np.linalg.cholesky(scaled_mat)
8 return -0.5 * np.sum((np.dot(chol, vec))**2)
9
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.pyc in cholesky(a)
53 warnings.warn(_EXPERIMENTAL_WARNING)
54 a = _promote_arg_dtypes(np.asarray(a))
---> 55 return lax_linalg.cholesky(a)
56
57
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in process_primitive(self, primitive, tracers, params)
120 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
121 batched_primitive = get_primitive_batcher(primitive)
--> 122 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
123 return BatchTracer(self, val_out, dim_out)
124
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_batching_rule(batched_args, batch_dims)
89 bd, = batch_dims
90 x = batching.bdim_at_front(x, bd)
---> 91 return cholesky(x), 0
92
93 cholesky_p = standard_unop(_float | _complex, 'cholesky')
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in process_primitive(self, primitive, tracers, params)
178 "Forward-mode differentiation rule for '{}' not implemented"
179 .format(primitive))
--> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
181 return JVPTracer(self, primal_out, tangent_out)
182
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_jvp_rule(primals, tangents)
82 left_side=False, transpose_a=True, lower=True)
83 L_dot = lax.dot(L, phi(triangular_solve(
---> 84 L, tmp, left_side=True, transpose_a=False, lower=True)))
85 return L, L_dot
86
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in dot(lhs, rhs)
190 rhs_shape=rhs.shape)
191
--> 192 def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
193
194 def dot_general(lhs, rhs, dimension_numbers):
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in process_primitive(self, primitive, tracers, params)
67 tracers = map(self.instantiate_const, tracers)
68 avals = [t.aval for t in tracers]
---> 69 out_aval = primitive.abstract_eval(*avals, **params)
70 eqn = JaxprEqn(tracers, None, primitive, (), False, params)
71 return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs)
753 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
754 elif least_specialized is ShapedArray:
--> 755 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
756 elif least_specialized is UnshapedArray:
757 return UnshapedArray(dtype_rule(*args, **kwargs))
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in _dot_shape_rule(lhs, rhs)
1227 if lhs.ndim > 2 or rhs.ndim > 2:
1228 msg = "Dot only supports rank 2 or less, got shapes {} and {}."
-> 1229 raise TypeError(msg.format(lhs.shape, rhs.shape))
1230
1231 def require(shape_cond):
TypeError: Dot only supports rank 2 or less, got shapes (5, 10, 10) and (5, 10, 10).
|
TypeError
|
def cholesky(x, symmetrize_input=True):
if symmetrize_input:
x = symmetrize(x)
return cholesky_p.bind(x)
|
def cholesky(x):
return cholesky_p.bind(x)
|
https://github.com/google/jax/issues/354
|
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:53: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
-8.891344
[-8.891344 -8.891344 -8.891344 -8.891344 -8.891344]
TypeErrorTraceback (most recent call last)
<ipython-input-3-73aa1b00356c> in <module>()
14
15 vmapped_f_grad = jax.grad(vmapped_f)
---> 16 print(vmapped_f_grad(0.1 + onp.zeros((5, 1))))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in grad_f(*args, **kwargs)
112 @wraps(fun, docstr=docstr, argnums=argnums)
113 def grad_f(*args, **kwargs):
--> 114 ans, g = value_and_grad_f(*args, **kwargs)
115 return g
116
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in value_and_grad_f(*args, **kwargs)
147 f = lu.wrap_init(fun, kwargs)
148 f_partial, dyn_args = argnums_partial(f, argnums, args)
--> 149 ans, vjp_py = vjp(f_partial, *dyn_args)
150 check_scalar(ans)
151 g = vjp_py(onp.ones((), onp.result_type(ans)))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in vjp(fun, *primals)
358 check_args(primals_flat)
359 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 360 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
361 out_tree = out_tree()
362 out_primal_py = build_tree(out_tree, out_primal)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in vjp(traceable, primals)
72
73 def vjp(traceable, primals):
---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals)
75 def vjp_(ct):
76 ct = ignore_consts(ct, pval)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in linearize(traceable, *primals)
65 in_pvals = (pe.PartialVal((None, pack(primals))),
66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit)))
---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals)
68 pval_primal, pval_tangent = unpair_pval(out_pval)
69 aval_primal, const_primal = pval_primal
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in trace_to_jaxpr(fun, pvals, **kwargs)
254 with new_master(JaxprTrace) as master:
255 fun = trace_to_subjaxpr(fun, master)
--> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)
257 assert not env
258 del master
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in batched_fun(*args, **kwargs)
253 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
254 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 255 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
256 return build_tree(out_tree(), out_flat)
257
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in batch(fun, in_vals, in_dims, out_dim_target)
41 return fun.call_wrapped(*in_vals), None # no mapped dimensions
42 elif len(sizes) == 1:
---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val)
45 else:
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
<ipython-input-3-73aa1b00356c> in f(scale)
5 def f(scale):
6 scaled_mat = scale * psd_mat
----> 7 chol = np.linalg.cholesky(scaled_mat)
8 return -0.5 * np.sum((np.dot(chol, vec))**2)
9
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.pyc in cholesky(a)
53 warnings.warn(_EXPERIMENTAL_WARNING)
54 a = _promote_arg_dtypes(np.asarray(a))
---> 55 return lax_linalg.cholesky(a)
56
57
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in process_primitive(self, primitive, tracers, params)
120 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
121 batched_primitive = get_primitive_batcher(primitive)
--> 122 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
123 return BatchTracer(self, val_out, dim_out)
124
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_batching_rule(batched_args, batch_dims)
89 bd, = batch_dims
90 x = batching.bdim_at_front(x, bd)
---> 91 return cholesky(x), 0
92
93 cholesky_p = standard_unop(_float | _complex, 'cholesky')
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in process_primitive(self, primitive, tracers, params)
178 "Forward-mode differentiation rule for '{}' not implemented"
179 .format(primitive))
--> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
181 return JVPTracer(self, primal_out, tangent_out)
182
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_jvp_rule(primals, tangents)
82 left_side=False, transpose_a=True, lower=True)
83 L_dot = lax.dot(L, phi(triangular_solve(
---> 84 L, tmp, left_side=True, transpose_a=False, lower=True)))
85 return L, L_dot
86
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in dot(lhs, rhs)
190 rhs_shape=rhs.shape)
191
--> 192 def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
193
194 def dot_general(lhs, rhs, dimension_numbers):
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in process_primitive(self, primitive, tracers, params)
67 tracers = map(self.instantiate_const, tracers)
68 avals = [t.aval for t in tracers]
---> 69 out_aval = primitive.abstract_eval(*avals, **params)
70 eqn = JaxprEqn(tracers, None, primitive, (), False, params)
71 return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs)
753 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
754 elif least_specialized is ShapedArray:
--> 755 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
756 elif least_specialized is UnshapedArray:
757 return UnshapedArray(dtype_rule(*args, **kwargs))
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in _dot_shape_rule(lhs, rhs)
1227 if lhs.ndim > 2 or rhs.ndim > 2:
1228 msg = "Dot only supports rank 2 or less, got shapes {} and {}."
-> 1229 raise TypeError(msg.format(lhs.shape, rhs.shape))
1230
1231 def require(shape_cond):
TypeError: Dot only supports rank 2 or less, got shapes (5, 10, 10) and (5, 10, 10).
|
TypeError
|
def cholesky_jvp_rule(primals, tangents):
(x,) = primals
(sigma_dot,) = tangents
L = cholesky_p.bind(x)
# Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf
phi = lambda X: np.tril(X) / (1 + np.eye(X.shape[-1], dtype=X.dtype))
tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True, lower=True)
L_dot = lax.batch_matmul(
L, phi(triangular_solve(L, tmp, left_side=True, transpose_a=False, lower=True))
)
return L, L_dot
|
def cholesky_jvp_rule(primals, tangents):
(x,) = primals
(sigma_dot,) = tangents
L = cholesky_p.bind(x)
# Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf
sigma_dot = (sigma_dot + _T(sigma_dot)) / 2
phi = lambda X: np.tril(X) / (1 + np.eye(x.shape[-1]))
tmp = triangular_solve(L, sigma_dot, left_side=False, transpose_a=True, lower=True)
L_dot = lax.dot(
L, phi(triangular_solve(L, tmp, left_side=True, transpose_a=False, lower=True))
)
return L, L_dot
|
https://github.com/google/jax/issues/354
|
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:53: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
-8.891344
[-8.891344 -8.891344 -8.891344 -8.891344 -8.891344]
TypeErrorTraceback (most recent call last)
<ipython-input-3-73aa1b00356c> in <module>()
14
15 vmapped_f_grad = jax.grad(vmapped_f)
---> 16 print(vmapped_f_grad(0.1 + onp.zeros((5, 1))))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in grad_f(*args, **kwargs)
112 @wraps(fun, docstr=docstr, argnums=argnums)
113 def grad_f(*args, **kwargs):
--> 114 ans, g = value_and_grad_f(*args, **kwargs)
115 return g
116
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in value_and_grad_f(*args, **kwargs)
147 f = lu.wrap_init(fun, kwargs)
148 f_partial, dyn_args = argnums_partial(f, argnums, args)
--> 149 ans, vjp_py = vjp(f_partial, *dyn_args)
150 check_scalar(ans)
151 g = vjp_py(onp.ones((), onp.result_type(ans)))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in vjp(fun, *primals)
358 check_args(primals_flat)
359 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 360 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
361 out_tree = out_tree()
362 out_primal_py = build_tree(out_tree, out_primal)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in vjp(traceable, primals)
72
73 def vjp(traceable, primals):
---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals)
75 def vjp_(ct):
76 ct = ignore_consts(ct, pval)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in linearize(traceable, *primals)
65 in_pvals = (pe.PartialVal((None, pack(primals))),
66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit)))
---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals)
68 pval_primal, pval_tangent = unpair_pval(out_pval)
69 aval_primal, const_primal = pval_primal
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in trace_to_jaxpr(fun, pvals, **kwargs)
254 with new_master(JaxprTrace) as master:
255 fun = trace_to_subjaxpr(fun, master)
--> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)
257 assert not env
258 del master
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in batched_fun(*args, **kwargs)
253 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
254 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 255 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
256 return build_tree(out_tree(), out_flat)
257
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in batch(fun, in_vals, in_dims, out_dim_target)
41 return fun.call_wrapped(*in_vals), None # no mapped dimensions
42 elif len(sizes) == 1:
---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val)
45 else:
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
<ipython-input-3-73aa1b00356c> in f(scale)
5 def f(scale):
6 scaled_mat = scale * psd_mat
----> 7 chol = np.linalg.cholesky(scaled_mat)
8 return -0.5 * np.sum((np.dot(chol, vec))**2)
9
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.pyc in cholesky(a)
53 warnings.warn(_EXPERIMENTAL_WARNING)
54 a = _promote_arg_dtypes(np.asarray(a))
---> 55 return lax_linalg.cholesky(a)
56
57
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in process_primitive(self, primitive, tracers, params)
120 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
121 batched_primitive = get_primitive_batcher(primitive)
--> 122 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
123 return BatchTracer(self, val_out, dim_out)
124
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_batching_rule(batched_args, batch_dims)
89 bd, = batch_dims
90 x = batching.bdim_at_front(x, bd)
---> 91 return cholesky(x), 0
92
93 cholesky_p = standard_unop(_float | _complex, 'cholesky')
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in process_primitive(self, primitive, tracers, params)
178 "Forward-mode differentiation rule for '{}' not implemented"
179 .format(primitive))
--> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
181 return JVPTracer(self, primal_out, tangent_out)
182
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_jvp_rule(primals, tangents)
82 left_side=False, transpose_a=True, lower=True)
83 L_dot = lax.dot(L, phi(triangular_solve(
---> 84 L, tmp, left_side=True, transpose_a=False, lower=True)))
85 return L, L_dot
86
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in dot(lhs, rhs)
190 rhs_shape=rhs.shape)
191
--> 192 def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
193
194 def dot_general(lhs, rhs, dimension_numbers):
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in process_primitive(self, primitive, tracers, params)
67 tracers = map(self.instantiate_const, tracers)
68 avals = [t.aval for t in tracers]
---> 69 out_aval = primitive.abstract_eval(*avals, **params)
70 eqn = JaxprEqn(tracers, None, primitive, (), False, params)
71 return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs)
753 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
754 elif least_specialized is ShapedArray:
--> 755 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
756 elif least_specialized is UnshapedArray:
757 return UnshapedArray(dtype_rule(*args, **kwargs))
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in _dot_shape_rule(lhs, rhs)
1227 if lhs.ndim > 2 or rhs.ndim > 2:
1228 msg = "Dot only supports rank 2 or less, got shapes {} and {}."
-> 1229 raise TypeError(msg.format(lhs.shape, rhs.shape))
1230
1231 def require(shape_cond):
TypeError: Dot only supports rank 2 or less, got shapes (5, 10, 10) and (5, 10, 10).
|
TypeError
|
def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
warnings.warn(_EXPERIMENTAL_WARNING)
del overwrite_a, check_finite
a = np_linalg._promote_arg_dtypes(np.asarray(a))
l = lax_linalg.cholesky(a if lower else np.conj(_T(a)), symmetrize_input=False)
return l if lower else np.conj(_T(l))
|
def cholesky(a, lower=False, overwrite_a=False, check_finite=True):
warnings.warn(_EXPERIMENTAL_WARNING)
del overwrite_a, check_finite
a = np_linalg._promote_arg_dtypes(np.asarray(a))
l = lax_linalg.cholesky(a if lower else np.conj(_T(a)))
return l if lower else np.conj(_T(l))
|
https://github.com/google/jax/issues/354
|
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.py:53: UserWarning: numpy.linalg support is experimental and may cause silent failures or wrong outputs
warnings.warn(_EXPERIMENTAL_WARNING)
/usr/local/lib/python2.7/dist-packages/jax/lib/xla_bridge.py:146: UserWarning: No GPU found, falling back to CPU.
warnings.warn('No GPU found, falling back to CPU.')
-8.891344
[-8.891344 -8.891344 -8.891344 -8.891344 -8.891344]
TypeErrorTraceback (most recent call last)
<ipython-input-3-73aa1b00356c> in <module>()
14
15 vmapped_f_grad = jax.grad(vmapped_f)
---> 16 print(vmapped_f_grad(0.1 + onp.zeros((5, 1))))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in grad_f(*args, **kwargs)
112 @wraps(fun, docstr=docstr, argnums=argnums)
113 def grad_f(*args, **kwargs):
--> 114 ans, g = value_and_grad_f(*args, **kwargs)
115 return g
116
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in value_and_grad_f(*args, **kwargs)
147 f = lu.wrap_init(fun, kwargs)
148 f_partial, dyn_args = argnums_partial(f, argnums, args)
--> 149 ans, vjp_py = vjp(f_partial, *dyn_args)
150 check_scalar(ans)
151 g = vjp_py(onp.ones((), onp.result_type(ans)))
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in vjp(fun, *primals)
358 check_args(primals_flat)
359 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)
--> 360 out_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)
361 out_tree = out_tree()
362 out_primal_py = build_tree(out_tree, out_primal)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in vjp(traceable, primals)
72
73 def vjp(traceable, primals):
---> 74 out_primal, pval, jaxpr, consts = linearize(traceable, *primals)
75 def vjp_(ct):
76 ct = ignore_consts(ct, pval)
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in linearize(traceable, *primals)
65 in_pvals = (pe.PartialVal((None, pack(primals))),
66 pe.PartialVal((core.AbstractTuple(tangent_avals), core.unit)))
---> 67 jaxpr, out_pval, consts = pe.trace_to_jaxpr(jvpfun, in_pvals)
68 pval_primal, pval_tangent = unpair_pval(out_pval)
69 aval_primal, const_primal = pval_primal
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in trace_to_jaxpr(fun, pvals, **kwargs)
254 with new_master(JaxprTrace) as master:
255 fun = trace_to_subjaxpr(fun, master)
--> 256 jaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)
257 assert not env
258 del master
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
/usr/local/lib/python2.7/dist-packages/jax/api.pyc in batched_fun(*args, **kwargs)
253 in_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, args))
254 jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)
--> 255 out_flat = batching.batch(jaxtree_fun, in_flat, in_axes_, out_axes)
256 return build_tree(out_tree(), out_flat)
257
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in batch(fun, in_vals, in_dims, out_dim_target)
41 return fun.call_wrapped(*in_vals), None # no mapped dimensions
42 elif len(sizes) == 1:
---> 43 out_val, out_dim = batch_transform(fun).call_wrapped(in_vals, in_dims)
44 return moveaxis(sizes.pop(), out_dim_target, out_dim, out_val)
45 else:
/usr/local/lib/python2.7/dist-packages/jax/linear_util.pyc in call_wrapped(self, *args)
84
85 del gen
---> 86 ans = self.f(*args, **self.kwargs)
87 del args
88 while stack:
<ipython-input-3-73aa1b00356c> in f(scale)
5 def f(scale):
6 scaled_mat = scale * psd_mat
----> 7 chol = np.linalg.cholesky(scaled_mat)
8 return -0.5 * np.sum((np.dot(chol, vec))**2)
9
/usr/local/lib/python2.7/dist-packages/jax/numpy/linalg.pyc in cholesky(a)
53 warnings.warn(_EXPERIMENTAL_WARNING)
54 a = _promote_arg_dtypes(np.asarray(a))
---> 55 return lax_linalg.cholesky(a)
56
57
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/batching.pyc in process_primitive(self, primitive, tracers, params)
120 # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here
121 batched_primitive = get_primitive_batcher(primitive)
--> 122 val_out, dim_out = batched_primitive(vals_in, dims_in, **params)
123 return BatchTracer(self, val_out, dim_out)
124
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_batching_rule(batched_args, batch_dims)
89 bd, = batch_dims
90 x = batching.bdim_at_front(x, bd)
---> 91 return cholesky(x), 0
92
93 cholesky_p = standard_unop(_float | _complex, 'cholesky')
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky(x)
35 # traceables
36
---> 37 def cholesky(x): return cholesky_p.bind(x)
38
39 def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/ad.pyc in process_primitive(self, primitive, tracers, params)
178 "Forward-mode differentiation rule for '{}' not implemented"
179 .format(primitive))
--> 180 primal_out, tangent_out = jvp(primals_in, tangents_in, **params)
181 return JVPTracer(self, primal_out, tangent_out)
182
/usr/local/lib/python2.7/dist-packages/jax/lax_linalg.pyc in cholesky_jvp_rule(primals, tangents)
82 left_side=False, transpose_a=True, lower=True)
83 L_dot = lax.dot(L, phi(triangular_solve(
---> 84 L, tmp, left_side=True, transpose_a=False, lower=True)))
85 return L, L_dot
86
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in dot(lhs, rhs)
190 rhs_shape=rhs.shape)
191
--> 192 def dot(lhs, rhs): return dot_p.bind(lhs, rhs)
193
194 def dot_general(lhs, rhs, dimension_numbers):
/usr/local/lib/python2.7/dist-packages/jax/core.pyc in bind(self, *args, **kwargs)
72
73 tracers = map(top_trace.full_raise, args)
---> 74 out_tracer = top_trace.process_primitive(self, tracers, kwargs)
75 return full_lower(out_tracer)
76
/usr/local/lib/python2.7/dist-packages/jax/interpreters/partial_eval.pyc in process_primitive(self, primitive, tracers, params)
67 tracers = map(self.instantiate_const, tracers)
68 avals = [t.aval for t in tracers]
---> 69 out_aval = primitive.abstract_eval(*avals, **params)
70 eqn = JaxprEqn(tracers, None, primitive, (), False, params)
71 return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in standard_abstract_eval(shape_rule, dtype_rule, *args, **kwargs)
753 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
754 elif least_specialized is ShapedArray:
--> 755 return ShapedArray(shape_rule(*args, **kwargs), dtype_rule(*args, **kwargs))
756 elif least_specialized is UnshapedArray:
757 return UnshapedArray(dtype_rule(*args, **kwargs))
/usr/local/lib/python2.7/dist-packages/jax/lax.pyc in _dot_shape_rule(lhs, rhs)
1227 if lhs.ndim > 2 or rhs.ndim > 2:
1228 msg = "Dot only supports rank 2 or less, got shapes {} and {}."
-> 1229 raise TypeError(msg.format(lhs.shape, rhs.shape))
1230
1231 def require(shape_cond):
TypeError: Dot only supports rank 2 or less, got shapes (5, 10, 10) and (5, 10, 10).
|
TypeError
|
def _normalize_by_window_size(dims, strides, padding):
def rescale(outputs, inputs):
one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype)
window_sizes = lax.reduce_window(one, 0.0, lax.add, dims, strides, padding)
return outputs / window_sizes[..., np.newaxis]
return rescale
|
def _normalize_by_window_size(dims, strides, padding):
def rescale(outputs, inputs):
one = np.ones(inputs.shape[1:3], dtype=inputs.dtype)
window_sizes = lax.reduce_window(one, 0.0, lax.add, dims, strides, padding)
return outputs / window_sizes
return rescale
|
https://github.com/google/jax/issues/273
|
(-1, 16, 16, 3)
()
Traceback (most recent call last):
File "minimal_example.py", line 9, in <module>
apply_fun(params, np.zeros((100, 32, 32, 3)))
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 172, in apply_fun
return rescale(out, inputs) if rescale else out
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 183, in rescale
return outputs / window_sizes
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 350, in true_divide
x1, x2 = _promote_shapes(x1, x2)
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 134, in _promote_shapes
nd = len(_broadcast_shapes(*shapes))
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/util.py", line 161, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 151, in _broadcast_shapes
.format(tuple(map(tuple, shapes))))
ValueError: Incompatible shapes for broadcasting: ((100, 16, 16, 3), (1, 1, 16, 16))
|
ValueError
|
def rescale(outputs, inputs):
one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype)
window_sizes = lax.reduce_window(one, 0.0, lax.add, dims, strides, padding)
return outputs / window_sizes[..., np.newaxis]
|
def rescale(outputs, inputs):
one = np.ones(inputs.shape[1:3], dtype=inputs.dtype)
window_sizes = lax.reduce_window(one, 0.0, lax.add, dims, strides, padding)
return outputs / window_sizes
|
https://github.com/google/jax/issues/273
|
(-1, 16, 16, 3)
()
Traceback (most recent call last):
File "minimal_example.py", line 9, in <module>
apply_fun(params, np.zeros((100, 32, 32, 3)))
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 172, in apply_fun
return rescale(out, inputs) if rescale else out
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/experimental/stax.py", line 183, in rescale
return outputs / window_sizes
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 350, in true_divide
x1, x2 = _promote_shapes(x1, x2)
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 134, in _promote_shapes
nd = len(_broadcast_shapes(*shapes))
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/util.py", line 161, in memoized_fun
ans = cache[key] = fun(*args, **kwargs)
File "/gpfs01/bethge/home/jrauber/PhD/063_jax/jax/jax/numpy/lax_numpy.py", line 151, in _broadcast_shapes
.format(tuple(map(tuple, shapes))))
ValueError: Incompatible shapes for broadcasting: ((100, 16, 16, 3), (1, 1, 16, 16))
|
ValueError
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.