field               type            range
author              int64           658 to 755k
date                stringlengths   19 to 19
timezone            int64           -46,800 to 43.2k
hash                stringlengths   40 to 40
message             stringlengths   5 to 490
mods                list
language            stringclasses   20 values
license             stringclasses   3 values
repo                stringlengths   5 to 68
original_message    stringlengths   12 to 491
260,335
01.02.2019 14:46:58
28,800
75849f2b48fbbf1edcd102de700754e69c1feb07
pjit tests working again
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -37,7 +37,7 @@ from ..lib import xla_bridge as xb\nfrom .xla import (xla_shape, xla_destructure, translation_rule, abstractify,\nxla_shape_to_result_shape, jaxpr_computation)\nfrom .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal\n-from .batching import moveaxis\n+from .batching import moveaxis, dimsize\nfrom . import parallel\nfrom . import xla\nfrom . import partial_eval as pe\n@@ -47,6 +47,7 @@ map = safe_map\n### util\n+\ndef chunk_transform(fun, chunksize, name, in_axes, out_axes_dst):\n\"\"\"Rewrite SPMD operations to act first on local chunks then cross-replica.\"\"\"\ntemp_name = TempAxisName()\n@@ -74,15 +75,6 @@ def chunk_aval(chunksize, aval, axis):\nshape[axis] = chunksize\nreturn ShapedArray(tuple(shape), aval.dtype)\n-# # TODO do we need these next functions?\n-# def canonicalize_in_axis_spec(in_trees, spec_tree_prefix):\n-# \"\"\"Given argument list in_trees, canonicalize and flatten an in_axes spec.\"\"\"\n-# in_tree = tree_util.PyTreeDef(tree_util.node_types[tuple], None, in_trees)\n-# return build_axis_spec_tree(spec_tree_prefix, in_tree)\n-\n-# def canonicalize_out_axis_spec(out_tree, spec_tree_prefix):\n-# \"\"\"Given output out_tree, canonicalize and flatten an out_axes spec.\"\"\"\n-# return build_axis_spec_tree(spec_tree_prefix, out_tree)\ndef build_axis_spec_tree(spec, treedef):\n\"\"\"Given a JTupleTreeDef, canonicalize an axis spec for that treedef.\"\"\"\n@@ -102,25 +94,18 @@ def build_axis_spec_tree(spec, treedef):\nelse:\nraise TypeError(spec_type)\n-def flatten_axis_spec_tree(spec_tree):\n- \"\"\"Flatten an axis spec tree and replace no_mapped_axis with None.\"\"\"\n- spec_flat, _ = tree_util.tree_flatten(spec_tree)\n- return tuple(None if i is no_mapped_axis else i for i in spec_flat)\n-\n-def tree_flatten_axes(maybe_tree, axes):\n- if type(maybe_tree) is core.JaxTuple:\n- if maybe_tree:\n- flat_children = map(tree_flatten_axes, maybe_tree, axes)\n- return it.chain.from_iterable(flat_children)\n+def flatten(x):\n+ if type(x) is tuple:\n+ return tuple(_flatten(x))\nelse:\n- return []\n+ return x\n+\n+def _flatten(x):\n+ if type(x) is tuple:\n+ return it.chain.from_iterable((_flatten(elt) for elt in x))\nelse:\n- return [axes]\n+ return [x]\n-# We use a special symbol for 'no mapped axis' instead of using None because\n-# tree_util.py treats None as a tree node.\n-class NoMappedAxis(object): pass\n-no_mapped_axis = NoMappedAxis()\ndef shard_arg(mesh_spec, mesh_axis, axis, arg):\n\"\"\"Shard and device_put an input array argument along a logical axis.\"\"\"\n@@ -165,6 +150,7 @@ def split_array(x, num_splits, axis):\nreturn x[tuple(idx)]\nreturn map(get_nth_subarray, range(num_splits))\n+\ndef chunk_size(axis_name, mesh_axis, in_axes, args):\n\"\"\"Compute the chunk size for mapped axes, checking for errors.\"\"\"\nglobal mesh_spec\n@@ -185,21 +171,6 @@ def chunk_size(axis_name, mesh_axis, in_axes, args):\nreturn axis_size // mesh_spec()[mesh_axis]\n-def dimsize(axis, x):\n- if type(axis) is int:\n- aval = core.get_aval(x)\n- if type(aval) is core.AbstractTuple:\n- assert not aval # must be an empty tuple\n- return set()\n- elif isinstance(aval, ShapedArray):\n- return {x.shape[axis]}\n- else:\n- raise TypeError(type(aval))\n- elif type(axis) is tuple:\n- return reduce(set.union, map(dimsize, axis, x))\n- else:\n- raise TypeError(type(axis))\n-\ndef mesh_spec():\nglobal _mesh_spec\n@@ -286,7 +257,7 @@ def 
replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\ndef xla_pcall_impl(fun, *args, **params):\naxis_name = params.pop('axis_name') # e.g. 'i'\n- in_axes = params.pop('in_axes') # e.g. 0 or (0, None)\n+ in_axes = params.pop('in_axes') # e.g. (0, None) or (0, 1)\nout_axes = params.pop('out_axes') # e.g. 0 or (None, 1)\nmesh_axis = params.pop('mesh_axis') # e.g. 0 or 1\nassert not params\n@@ -295,19 +266,10 @@ def xla_pcall_impl(fun, *args, **params):\nflat_args = concatenate(flat_args)\nfun, out_tree = xla.flatten_fun(fun, in_trees)\n- in_axes_trees = tuple(map(build_axis_spec_tree, in_axes, in_trees))\n- flat_in_axes, _ = unzip2(map(xla.tree_flatten, in_axes_trees))\n- flat_in_axes = tuple(concatenate(flat_in_axes))\n-\n+ flat_in_axes = flatten(tuple(map(build_axis_spec_tree, in_axes, in_trees)))\ncompiled_fun = xla_parallel_callable(fun, axis_name, flat_in_axes, mesh_axis,\nmesh_spec(), *map(abstractify, flat_args))\n-\n- out_axes_tree = build_axis_spec_tree(out_axes, out_tree())\n- if out_tree() is xla.leaf:\n- flat_out_axes = out_axes_tree\n- else:\n- flat_out_axes, _ = xla.tree_flatten(out_axes_tree)\n- flat_out_axes = concatenate(flat_out_axes)\n+ flat_out_axes = flatten(build_axis_spec_tree(out_axes, out_tree()))\nflat_ans = compiled_fun(out_tree(), flat_out_axes, *flat_args)\nif out_tree() is xla.leaf:\n@@ -341,7 +303,6 @@ def execute_replicated(in_axes, mesh_axis, mesh_spec, compiled, pval,\nif out_tree is xla.leaf:\nreturn unshard_output(mesh_spec, mesh_axis, out_axes, out_shards)\nelse:\n- raise NotImplementedError\nreturn map(partial(unshard_output, mesh_spec, mesh_axis), out_axes,\nzip(*out_shards))\n" }, { "change_type": "MODIFY", "old_path": "tests/pjit_test.py", "new_path": "tests/pjit_test.py", "diff": "@@ -49,26 +49,26 @@ class PmapTest(jtu.JaxTestCase):\nexpected = (x - x.sum(0),)\nself.assertAllClose(ans, expected, check_dtypes=False)\n-# @jtu.skip_on_devices(\"gpu\")\n-# def testTupleInput(self):\n-# f = lambda x: x[0] - psum(x[0], 'i')\n-# x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n-# f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n-# ans = f((x,))\n-# expected = x - x.sum(0)\n-# self.assertAllClose(ans, expected, check_dtypes=False)\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testTupleInput(self):\n+ f = lambda x: x[0] - psum(x[0], 'i')\n+ x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n+ f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n+ ans = f((x,))\n+ expected = x - x.sum(0)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n-# @jtu.skip_on_devices(\"gpu\")\n-# def testNested(self):\n-# def f(x, y):\n-# return psum(psum(x, 'i'), 'j')\n-# f = pjit(f, 'i')\n-# f = pjit(f, 'j', out_axes=1)\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testNested(self):\n+ def f(x, y):\n+ return psum(psum(x, 'i'), 'j')\n+ f = pjit(f, 'i')\n+ f = pjit(f, 'j', out_axes=1)\n-# x = onp.ones((3, 4), onp.float32)\n-# ans = f(x, x)\n-# expected = 12 * onp.ones((4, 3), onp.float32)\n-# self.assertAllClose(ans, expected, check_dtypes=True)\n+ x = onp.ones((3, 4), onp.float32)\n+ ans = f(x, x)\n+ expected = 12 * onp.ones((4, 3), onp.float32)\n+ self.assertAllClose(ans, expected, check_dtypes=True)\nif __name__ == '__main__':\n" } ]
Python
Apache License 2.0
google/jax
pjit tests working again
260,335
01.02.2019 16:30:17
28,800
f2f23c5d8c7703b71d2e42db0af37f167e06c59c
did somebody say grad of pjit?
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -186,12 +186,10 @@ class JVPTrace(Trace):\nnonzero_tangents, in_tree_def = tree_to_jaxtuples(tangents)\nf, out_tree_def = traceable(jvp_subtrace(f, self.master), in_tree_def)\nif call_primitive is pxla.xla_pcall_p:\n- in_axes, out_axes = params['in_axes'], params['out_axes']\n- jvp_in_axes = (in_axes, prune(in_tree_def, in_axes))\n- def jvp_out_axes():\n- _, tangent_out_tree = out_tree_def().children\n- return (out_axes(), prune(tangent_out_tree, out_axes()))\n- params = dict(params, in_axes=jvp_in_axes, out_axes=jvp_out_axes)\n+ in_ax, out_ax = params['in_axes'], params['out_axes']\n+ new_params = dict(params, in_axes=(in_ax, in_ax), out_axes=(out_ax, out_ax))\n+ result = call_primitive.bind(f, pack(primals), nonzero_tangents, **new_params)\n+ else:\nresult = call_primitive.bind(f, pack(primals), nonzero_tangents, **params)\nprimal_out, tangent_out = build_tree(out_tree_def(), result)\nreturn JVPTracer(self, primal_out, tangent_out)\n@@ -394,18 +392,14 @@ def call_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct):\nfun = wrap_init(backward_pass)\nfun, out_tree_def = transposed_fun(fun, jaxpr, in_tree_def)\nall_args = pack((pack(consts), pack(freevar_vals), ct))\n+ # TODO(dougalm): consider signalling to bind that no traces in fun closure\nif primitive is pxla.xla_pcall_p:\n- in_axes, out_axes = params['in_axes'], params['out_axes']()\n- ct_axes = prune(ct_tree, out_axes)\n- transpose_in_axes = ((None,) * len(consts), (None,) * len(freevar_vals), ct_axes),\n- def transpose_out_axes():\n- return prune(out_tree_def(), (in_axes, (None,) * len(freevar_vals)))\n- new_params = dict(params, in_axes=transpose_in_axes, out_axes=transpose_out_axes)\n- ans = primitive.bind(fun, all_args, **params)\n-\n- import ipdb; ipdb.set_trace()\n+ in_axes, out_axes = params['in_axes'], params['out_axes']\n+ trans_in_axes = (None, None, out_axes),\n+ trans_out_axes = (in_axes, None)\n+ new_params = dict(params, in_axes=trans_in_axes, out_axes=trans_out_axes)\n+ ans = primitive.bind(fun, all_args, **new_params)\nelse:\n- # TODO(dougalm): consider signalling to bind that there are no traces in the closure\nans = primitive.bind(fun, all_args, **params)\nreturn build_tree(out_tree_def(), ans)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -78,21 +78,15 @@ def chunk_aval(chunksize, aval, axis):\ndef build_axis_spec_tree(spec, treedef):\n\"\"\"Given a JTupleTreeDef, canonicalize an axis spec for that treedef.\"\"\"\n- spec_type = type(spec)\nif treedef is xla.leaf:\n- if spec_type is int:\nreturn spec\n- elif spec_type is type(None):\n- return no_mapped_axis\n+ elif type(spec) is tuple:\n+ if treedef.child_specs:\n+ return tuple(map(build_axis_spec_tree, spec, treedef.child_specs))\nelse:\n- raise TypeError(spec_type)\n+ return ()\nelse:\n- if spec_type is int:\nreturn tuple(map(partial(build_axis_spec_tree, spec), treedef.child_specs))\n- elif spec_type is tuple:\n- return tuple(map(build_axis_spec_tree, spec, treedef.child_specs))\n- else:\n- raise TypeError(spec_type)\ndef flatten(x):\nif type(x) is tuple:\n" }, { "change_type": "MODIFY", "old_path": "pjit_nesting.py", "new_path": "pjit_nesting.py", "diff": "@@ -5,13 +5,13 @@ from jax import jvp, grad, pjit, pmap, make_jaxpr\nfrom jax.lax import psum\n-# def f(x, y):\n-# return psum(psum(x, 'i'), 'j')\n-# f = pjit(f, 'i')\n-# f = pjit(f, 'j', 
out_axes=1)\n-# x = onp.ones((3, 4), onp.float32)\n-# print make_jaxpr(f)(x, x)\n-# print f(x, x)\n+def f(x, y):\n+ return psum(psum(x, 'i'), 'j')\n+f = pjit(f, 'i')\n+f = pjit(f, 'j', out_axes=1)\n+x = onp.ones((3, 4), onp.float32)\n+print make_jaxpr(f)(x, x)\n+print f(x, x)\ndef f(x):\n@@ -24,12 +24,12 @@ g = pjit(f, axis_name='i')\nprint jvp(g, (x,), (x,))\n-# def f(x):\n-# return x - psum(x, 'i')\n+def f(x):\n+ return x - psum(x, 'i')\n-# x = np.ones(4)\n-# print grad(lambda x: np.sum(pmap(f, 'i')(x)))(x)\n-# print grad(lambda x: np.sum(x - np.sum(x)))(x)\n+x = np.ones(4)\n+print grad(lambda x: np.sum(pmap(f, 'i')(x)))(x)\n+print grad(lambda x: np.sum(x - np.sum(x)))(x)\n-# g = pjit(f, axis_name='i')\n-# print grad(lambda x: np.sum(g(x)))(x)\n+g = pjit(f, axis_name='i')\n+print grad(lambda x: np.sum(g(x)))(x)\n" } ]
Python
Apache License 2.0
google/jax
did somebody say grad of pjit?
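The commit above threads forward- and reverse-mode autodiff through the pjit prototype. A minimal sketch of the same pattern against today's public API is below; jax.pmap stands in for that prototype, and the function, shapes, and device count are illustrative assumptions rather than values from the diff.

# Hedged sketch: grad composed with an SPMD-mapped function, using jax.pmap in
# place of the pjit prototype exercised by this commit.
import jax
import jax.numpy as jnp

def f(x):
    # per-device computation that includes a cross-device collective
    return x - jax.lax.psum(x, 'i')

n = jax.local_device_count()
x = jnp.ones(n)

# reverse-mode through the mapped function
g = jax.grad(lambda x: jnp.sum(jax.pmap(f, axis_name='i')(x)))(x)

# single-machine reference: the psum over all devices is just a full sum here
ref = jax.grad(lambda x: jnp.sum(x - jnp.sum(x)))(x)
assert jnp.allclose(g, ref)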
260,335
01.02.2019 16:59:28
28,800
092712fdb0d6ee1997de92e9b8893ccdd51a8eed
move basic pjit+autodiff tests
[ { "change_type": "DELETE", "old_path": "pjit_nesting.py", "new_path": null, "diff": "-import numpy as onp\n-\n-import jax.numpy as np\n-from jax import jvp, grad, pjit, pmap, make_jaxpr\n-from jax.lax import psum\n-\n-\n-def f(x, y):\n- return psum(psum(x, 'i'), 'j')\n-f = pjit(f, 'i')\n-f = pjit(f, 'j', out_axes=1)\n-x = onp.ones((3, 4), onp.float32)\n-print make_jaxpr(f)(x, x)\n-print f(x, x)\n-\n-\n-def f(x):\n- return np.cos(x - psum(np.sin(x), 'i'))\n-\n-x = np.ones(4)\n-print jvp(pmap(f, 'i'), (x,), (x,))\n-\n-g = pjit(f, axis_name='i')\n-print jvp(g, (x,), (x,))\n-\n-\n-def f(x):\n- return x - psum(x, 'i')\n-\n-x = np.ones(4)\n-print grad(lambda x: np.sum(pmap(f, 'i')(x)))(x)\n-print grad(lambda x: np.sum(x - np.sum(x)))(x)\n-\n-g = pjit(f, axis_name='i')\n-print grad(lambda x: np.sum(g(x)))(x)\n" }, { "change_type": "MODIFY", "old_path": "tests/pjit_test.py", "new_path": "tests/pjit_test.py", "diff": "@@ -22,7 +22,7 @@ from absl.testing import parameterized\nimport jax.numpy as np\nfrom jax import test_util as jtu\n-from jax.api import pjit\n+from jax.api import pjit, pmap, jvp, grad\nfrom jax.lax import psum\nfrom jax.config import config\n@@ -70,6 +70,34 @@ class PmapTest(jtu.JaxTestCase):\nexpected = 12 * onp.ones((4, 3), onp.float32)\nself.assertAllClose(ans, expected, check_dtypes=True)\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testForwardModeAutodiff(self):\n+ def f(x):\n+ return np.cos(x - psum(np.sin(x), 'i'))\n+\n+ x = np.ones(4)\n+ expected = jvp(pmap(f, 'i'), (x,), (x,))\n+\n+ g = pjit(f, axis_name='i')\n+ ans = jvp(g, (x,), (x,))\n+\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testReverseModeAutodiff(self):\n+ def f(x):\n+ return x - psum(x, 'i')\n+\n+ x = np.ones(4)\n+ expected1 = grad(lambda x: np.sum(pmap(f, 'i')(x)))(x)\n+ expected2 = grad(lambda x: np.sum(x - np.sum(x)))(x)\n+\n+ g = pjit(f, axis_name='i')\n+ ans = grad(lambda x: np.sum(g(x)))(x)\n+\n+ self.assertAllClose(ans, expected1, check_dtypes=False)\n+ self.assertAllClose(ans, expected2, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
move basic pjit+autodiff tests
260,335
01.02.2019 17:06:26
28,800
9a251efdc11959ebc3ae343bf2a8afcfd05c9110
remove stale OutAxesThunk class
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -277,20 +277,6 @@ def pjit(fun, axis_name, in_axes=0, out_axes=0, mesh_axis=0):\nf_jitted.__name__ = \"pjit({})\".format(f_jitted.__name__)\nreturn f_jitted\n-class OutAxesThunk(object):\n- # This class is just used for its __repr__ method in pretty-printing jaxprs\n-\n- def __init__(self, out_tree_thunk, out_axes):\n- self.out_tree_thunk = out_tree_thunk\n- self.out_axes = out_axes\n-\n- def __call__(self):\n- out_tree = self.out_tree_thunk()\n- return pxla.canonicalize_out_axis_spec(out_tree, self.out_axes)\n-\n- def __repr__(self):\n- return repr(self())\n-\ndef pmap(fun, axis_name, in_axes=0, out_axes=0):\n\"\"\"Vectorizing pseudo-map for single-program multiple-data (SPMD) functions.\"\"\"\n" } ]
Python
Apache License 2.0
google/jax
remove stale OutAxesThunk class
260,335
02.02.2019 09:22:37
28,800
9f3060a0e6286e60dc87057e3526d827b5fc972a
index_take in terms of gather, delete index_untake (c.f.
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -255,54 +255,19 @@ def scatter_add(operand, scatter_indices, updates, dimension_numbers=None):\nupdate_consts=consts, dimension_numbers=dimension_numbers,\nupdates_shape=updates.shape)\n-\ndef index_take(src, idxs, axes):\n- pvals = [_abstractify(arg) for arg in (src,) + idxs]\n- jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(partial(_index_take, axes), pvals)\n- return index_take_p.bind(src, *idxs, axes=tuple(axes),\n- input_shape=src.shape, jaxpr=jaxpr, consts=consts)\n-\n-def _index_take(axes, src, *idxs):\n- n = idxs[0].shape[0]\n- slice_sizes = subvals(src.shape, zip(axes, [1] * len(axes)))\n-\n- def body_fun(i, state):\n- src, idxs, out = state\n- src_ind = (dynamic_index_in_dim(x, i, 0, False) for x in idxs)\n- start_indices = subvals([0] * src.ndim, zip(axes, src_ind))\n- update = dynamic_slice(src, start_indices, slice_sizes)\n- update = reshape(update, (1,) + out.shape[1:])\n- out = dynamic_update_slice(out, update, [i] + [0] * (out.ndim - 1))\n- return src, idxs, out\n-\n- out = full_like(src, 0, shape=(n,) + tuple(onp.delete(src.shape, axes)))\n- init_val = src, idxs, out\n- _, _, out = fori_loop(0, n, body_fun, init_val)\n- return out\n-\n-def index_untake(src, dst, idxs, axes):\n- pvals = [_abstractify(arg) for arg in (src, dst) + idxs]\n- jaxpr, _, consts = pe.trace_unwrapped_to_jaxpr(partial(_index_untake, axes), pvals)\n- return index_untake_p.bind(src, dst, *idxs, axes=tuple(axes),\n- jaxpr=jaxpr, consts=consts)\n-\n-def _index_untake(axes, src, dst, *idxs):\n- n = idxs[0].shape[0]\n- slice_sizes = subvals(dst.shape, zip(axes, [1] * len(axes)))\n-\n- def body_fun(i, state):\n- src, dst, idxs = state\n- vals = dynamic_slice(src, [i] + [0] * (src.ndim - 1), (1,) + src.shape[1:])\n- vals = reshape(vals, subvals(dst.shape, zip(axes, [1] * len(axes))))\n- dst_ind = (dynamic_index_in_dim(x, i, 0, False) for x in idxs)\n- start_indices = subvals([0] * dst.ndim, zip(axes, dst_ind))\n- update = add(vals, dynamic_slice(dst, start_indices, slice_sizes))\n- dst = dynamic_update_slice(dst, update, start_indices)\n- return src, dst, idxs\n-\n- init_val = src, dst, idxs\n- _, dst, _ = fori_loop(0, n, body_fun, init_val)\n- return dst\n+ indices = concatenate([reshape(i, [i.shape[0], 1]) for i in idxs], 1)\n+ slice_sizes = list(src.shape)\n+ for ax in axes:\n+ slice_sizes[ax] = 1\n+ slice_sizes = tuple(slice_sizes)\n+ offset_dims = tuple(range(1, src.ndim - indices.shape[1] + 1))\n+ dnums = GatherDimensionNumbers(\n+ offset_dims=offset_dims,\n+ collapsed_slice_dims=axes,\n+ start_index_map=axes,\n+ index_vector_dim=1)\n+ return gather(src, indices, dimension_numbers=dnums, slice_sizes=slice_sizes)\ndef transpose(operand, permutation):\npermutation = tuple(permutation)\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1665,6 +1665,8 @@ def take_along_axis(arr, indices, axis):\nelif ndim(arr) == 1:\nreturn lax.index_take(arr, (indices,), (0,))\nelse:\n+ # TODO(mattjj): if we lower directly to lax.gather here, we might be able to\n+ # avoid the reshape on the output.\nall_indices = [lax.broadcasted_iota(_dtype(indices), shape(indices), i)\nfor i in range(ndim(arr))]\nall_indices[axis] = indices\n@@ -1737,6 +1739,8 @@ def _rewriting_take(arr, idx, axis=0):\nelif isinstance(idx, tuple) and _all(onp.ndim(elt) == 0 for elt in idx):\ncanonical_idx = _canonicalize_tuple_index(arr, idx)\nresult, axis = arr, 0\n+ # TODO(mattjj): could 
generate a single HLO here, rather than one for each\n+ # elt in canonical idx. For example, x[0, :, 0] generates three HLOs now.\nfor elt in (elt for elt in canonical_idx if elt is not None):\nresult = _rewriting_take(result, elt, axis=axis)\naxis += isinstance(elt, slice) # advance axis index if not eliminated\n@@ -1765,6 +1769,8 @@ def _rewriting_take(arr, idx, axis=0):\nidx = [idx]\nflat_idx = tuple([mod(ravel(x), arr.shape[i]) for i, x in enumerate(idx)])\n+ # TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n+ # eliminate the reshape here.\nout = lax.index_take(arr, flat_idx, tuple(range(len(idx))))\nreturn lax.reshape(out, idx[0].shape + _shape(arr)[len(idx):])\n@@ -1782,6 +1788,8 @@ def _rewriting_take(arr, idx, axis=0):\nflat_idx = tuple(mod(ravel(x), arr_sliced.shape[i])\nfor i, x in zip(axes, idx_advanced))\n+ # TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n+ # eliminate the reshape here.\nout = lax.index_take(arr_sliced, flat_idx, axes)\nshape_suffix = tuple(onp.delete(_shape(arr_sliced), axes))\nout = lax.reshape(out, idx_advanced[0].shape + shape_suffix)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1344,28 +1344,6 @@ class LaxTest(jtu.JaxTestCase):\nfun = lambda src, idxs: lax.index_take(src, idxs, axes)\nself._CompileAndCheck(fun, args_maker, check_dtypes=True)\n- @parameterized.named_parameters(jtu.cases_from_list(\n- {\"testcase_name\": \"_dst_shape={}_idxs={}_axes={}\".format(\n- jtu.format_shape_dtype_string(dst_shape, dtype), idxs, axes),\n- \"dst_shape\": dst_shape, \"dtype\": dtype, \"idxs\": idxs, \"axes\": axes,\n- \"rng\": rng}\n- for dtype in default_dtypes\n- for dst_shape, idxs, axes in [\n- [(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],\n- [(3, 4, 5), (onp.array([-1, -2]),), (0,)],\n- [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],\n- [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],\n- ]\n- for rng in [jtu.rand_default()]))\n- def testIndexUntake(self, dst_shape, dtype, idxs, axes, rng):\n- # We call lax.index_take to get the shapes right\n- src_shape = lax.index_take(rng(dst_shape, dtype), idxs, axes).shape\n- ridxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)\n- args_maker = lambda: [rng(src_shape, dtype), rng(dst_shape, dtype), ridxs()]\n- fun = lambda src, dst, idxs: lax.index_untake(src, dst, idxs, axes)\n- self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n-\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_idxs={}_dnums={}_slice_sizes={}\".format(\njtu.format_shape_dtype_string(shape, dtype), idxs, dnums,\n@@ -2107,29 +2085,6 @@ class LaxAutodiffTest(jtu.JaxTestCase):\nindex_take = lambda src: lax.index_take(src, idxs, axes)\ncheck_grads(index_take, (src,), 2, 1e-2, 1e-2, 1e-2)\n- @parameterized.named_parameters(jtu.cases_from_list(\n- {\"testcase_name\": \"_dst_shape={}_idxs={}_axes={}\".format(\n- jtu.format_shape_dtype_string(dst_shape, dtype), idxs, axes),\n- \"dst_shape\": dst_shape, \"dtype\": dtype, \"idxs\": idxs, \"axes\": axes,\n- \"rng\": rng}\n- for dtype in float_dtypes\n- for dst_shape, idxs, axes in [\n- [(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],\n- [(3, 4, 5), (onp.array([-1, -2]),), (0,)],\n- [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],\n- [(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],\n- ]\n- for rng in [jtu.rand_default()]))\n- def testIndexUntakeGrad(self, dst_shape, dtype, idxs, axes, rng):\n- # We call 
lax.index_take to get the shapes right\n- src_shape = lax.index_take(rng(dst_shape, dtype), idxs, axes).shape\n-\n- idxs = tuple(rng(e.shape, e.dtype) for e in idxs)\n- src = rng(src_shape, dtype)\n- dst = rng(dst_shape, dtype)\n- index_untake = lambda src, dst: lax.index_untake(src, dst, idxs, axes)\n- check_grads(index_untake, (src, dst), 2, 1e-2, 1e-2, 1e-2)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_idxs={}_dnums={}_slice_sizes={}\".format(\njtu.format_shape_dtype_string(shape, dtype), idxs, dnums,\n" } ]
Python
Apache License 2.0
google/jax
index_take in terms of gather, delete index_untake (c.f. #304)
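The diff above lowers lax.index_take to a single lax.gather call. The sketch below reproduces that gather construction with the present-day GatherDimensionNumbers fields (the index_vector_dim argument used in the diff is assumed obsolete; the last axis of the index array now plays that role), and the array values are invented for illustration.

# Hedged sketch: expressing "take rows by index" as one lax.gather, mirroring the
# index_take-via-gather construction in the diff. Values are illustrative.
import numpy as np
import jax.numpy as jnp
from jax import lax

src = jnp.arange(15.0).reshape(5, 3)
idxs = jnp.array([[0], [2], [4]])        # (num_indices, 1); last axis is the index vector

dnums = lax.GatherDimensionNumbers(
    offset_dims=(1,),            # the surviving slice axis lands at output dim 1
    collapsed_slice_dims=(0,),   # the indexed axis is sliced to length 1 and dropped
    start_index_map=(0,),        # each index addresses axis 0 of src
)
out = lax.gather(src, idxs, dimension_numbers=dnums, slice_sizes=(1, 3))

assert np.allclose(out, src[jnp.array([0, 2, 4])])   # same as fancy indexing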
260,335
02.02.2019 12:17:11
28,800
f5cffd722a618bb556aecf8dd63d3875e2b8851e
delete more dead index_take code
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2144,74 +2144,6 @@ ad.primitive_jvps[scatter_p] = _scatter_jvp\nad.primitive_transposes[scatter_p] = _scatter_transpose_rule\n-\n-def _index_take_shape_rule(src, *idxs, **kwargs):\n- axes = kwargs['axes']\n- return (idxs[0].shape[0],) + tuple(onp.delete(src.shape, axes))\n-\n-def _index_take_translation_rule(c, src, *idxs, **kwargs):\n- jaxpr = kwargs['jaxpr']\n- consts = kwargs['consts']\n- shapes = map(c.GetShape, (src,) + idxs)\n- xla_computation = xla.jaxpr_computation(jaxpr, consts, (), *shapes)\n- return c.Call(xla_computation, (src,) + idxs)\n-\n-def _index_take_jvp(primals, tangents, axes, input_shape, jaxpr, consts):\n- src = primals[0]\n- idxs = tuple(primals[1:])\n- g = ad.instantiate_zeros(src, tangents[0])\n- return index_take(src, idxs, axes), index_take(g, idxs, axes)\n-\n-def _index_take_transpose_rule(t, src, *idxs, **kwargs):\n- assert src is None\n- axes = kwargs['axes']\n- input_shape = kwargs['input_shape']\n- t_src = index_untake(t, _zeros(t, shape=input_shape), idxs, axes)\n- return [t_src] + [None] * len(idxs)\n-\n-index_take_p = standard_primitive(_index_take_shape_rule, _input_dtype,\n- 'index_take', _index_take_translation_rule)\n-ad.primitive_jvps[index_take_p] = _index_take_jvp\n-ad.primitive_transposes[index_take_p] = _index_take_transpose_rule\n-\n-\n-def _index_untake_shape_rule(src, dst, *idxs, **kwargs):\n- return dst.shape\n-\n-def _index_untake_translation_rule(c, src, dst, *idxs, **kwargs):\n- jaxpr = kwargs['jaxpr']\n- consts = kwargs['consts']\n- shapes = map(c.GetShape, (src, dst) + idxs)\n- xla_computation = xla.jaxpr_computation(jaxpr, consts, (), *shapes)\n- return c.Call(xla_computation, (src, dst) + idxs)\n-\n-def _index_untake_jvp(primals, tangents, axes, jaxpr, consts):\n- src, dst = primals[0], primals[1]\n- idxs = tuple(primals[2:])\n- g_src, g_dst = tangents[0], tangents[1]\n- g_src = ad.instantiate_zeros(src, g_src)\n- g_dst = ad.instantiate_zeros(dst, g_dst)\n- val_out = index_untake(src, dst, idxs, axes)\n- tangent_out = index_untake(g_src, g_dst, idxs, axes)\n- return val_out, tangent_out\n-\n-def _index_untake_transpose_rule(t, src, dst, *idxs, **kwargs):\n- axes = kwargs['axes']\n- t_src = t_dst = None\n- if src is None:\n- t_src = index_take(t, idxs, axes)\n- if dst is None:\n- t_dst = t\n-\n- return [t_src, t_dst] + [None] * len(idxs)\n-\n-index_untake_p = standard_primitive(\n- _index_untake_shape_rule, _input_dtype, 'index_untake',\n- _index_untake_translation_rule)\n-ad.primitive_jvps[index_untake_p] = _index_untake_jvp\n-ad.primitive_transposes[index_untake_p] = _index_untake_transpose_rule\n-\n-\ndef _reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):\nreturn tuple(onp.delete(operand.shape, dimensions))\n" } ]
Python
Apache License 2.0
google/jax
delete more dead index_take code
260,335
02.02.2019 15:37:04
28,800
a8c2768176e270fd343f5d18f88ca760cbe464f4
make pjit out_axes=None work
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -114,8 +114,13 @@ def shard_arg(mesh_spec, mesh_axis, axis, arg):\ndef unshard_output(mesh_spec, mesh_axis, out_axis, out_shards):\n\"\"\"Collect and concatenate sharded device results.\"\"\"\n_, ids = onp.unique(shard_assignments(mesh_spec, mesh_axis), return_index=True)\n+ if out_axis is None:\n+ return out_shards[0]\n+ elif type(out_axis) is int:\nshards = [out_shards[i] for i in ids]\nreturn onp.concatenate(shards, out_axis)\n+ else:\n+ raise TypeError(type(out_axis))\ndef shard_assignments(mesh_spec, mesh_axis):\n\"\"\"Given a mesh axis long which to shard data, compute replica assignments.\"\"\"\n@@ -274,10 +279,10 @@ def xla_pcall_impl(fun, *args, **params):\n@lu.memoize\ndef xla_parallel_callable(fun, axis_name, in_axes, mesh_axis, mesh_spec,\n*abstract_args):\n- if abstract_args:\n- chunksize = next(x.shape[ax] // mesh_spec[mesh_axis]\n+ chunksize = next((x.shape[ax] // mesh_spec[mesh_axis]\nfor x, ax in zip(abstract_args, in_axes)\n- if ax is not None and type(x) is ShapedArray)\n+ if ax is not None and type(x) is ShapedArray), None)\n+ if chunksize is not None:\nabstract_args = map(partial(chunk_aval, chunksize), abstract_args, in_axes)\naxis_env = new_axis_env({axis_name: replica_groups(mesh_spec, mesh_axis)})\npvals = [PartialVal((aval, core.unit)) for aval in abstract_args]\n" }, { "change_type": "ADD", "old_path": null, "new_path": "pjit_model.py", "diff": "+from functools import partial\n+\n+import numpy.random as npr\n+\n+import jax.numpy as np\n+from jax import lax\n+from jax import pjit, grad\n+\n+\n+### set up some synthetic data\n+\n+rng = npr.RandomState(0)\n+R = lambda *shape: rng.randn(*shape).astype(\"float32\")\n+layer_sizes = [3, 2]\n+params = [(R(m, n), R(n)) for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\n+\n+input_batch = R(5, 3)\n+target_batch = R(5, 2)\n+batch = (input_batch, target_batch)\n+\n+\n+### standard definition\n+\n+def predict(params, inputs):\n+ for W, b in params:\n+ outputs = np.dot(inputs, W) + b\n+ inputs = np.tanh(outputs)\n+ return outputs\n+\n+def loss(params, batch):\n+ inputs, targets = batch\n+ preds = predict(params, inputs)\n+ perex_loss = -np.mean(preds * targets, axis=1)\n+ return np.sum(perex_loss)\n+\n+print 'single-machine'\n+print loss(params, batch)\n+print grad(loss)(params, batch)\n+print\n+\n+\n+### writing an spmd program manually\n+\n+def predict(params, inputs):\n+ for W, b in params:\n+ outputs = np.dot(inputs, W) + b\n+ inputs = np.tanh(outputs)\n+ return outputs\n+\n+@partial(pjit, axis_name='i', in_axes=(None, 0), out_axes=None)\n+def spmd_loss(params, batch):\n+ inputs, targets = batch\n+ preds = predict(params, inputs)\n+ perex_loss = -np.mean(preds * targets)\n+ return lax.psum(perex_loss, axis_name='i') # 'allreduce-sum' on hidden axis\n+\n+print 'manual spmd program'\n+print spmd_loss(params, batch)\n+print grad(spmd_loss)(params, batch)\n+print\n+\n+\n+### getting an spmd program automagically with papply\n+\n+# TODO\n" } ]
Python
Apache License 2.0
google/jax
make pjit out_axes=None work
260,335
02.02.2019 16:18:43
28,800
b16b4ddf4bf775cb0aeff4469f1496ec6c2b6d5e
the many ways to pjit a cat
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -37,7 +37,7 @@ from ..lib import xla_bridge as xb\nfrom .xla import (xla_shape, xla_destructure, translation_rule, abstractify,\nxla_shape_to_result_shape, jaxpr_computation)\nfrom .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal\n-from .batching import moveaxis, dimsize\n+from .batching import dimsize\nfrom . import parallel\nfrom . import xla\nfrom . import partial_eval as pe\n@@ -53,7 +53,7 @@ def chunk_transform(fun, chunksize, name, in_axes, out_axes_dst):\ntemp_name = TempAxisName()\nfun = parallel.axisvar_split(fun, name, (temp_name, name))\nfun, out_axes_src = parallel.pmap_transform(fun, temp_name, in_axes)\n- fun = move_output_axis_transform(fun, chunksize, out_axes_src, out_axes_dst)\n+ fun = move_output_axis_transform(fun, name, chunksize, out_axes_src, out_axes_dst)\nreturn fun\nclass TempAxisName(object):\n@@ -61,10 +61,42 @@ class TempAxisName(object):\nreturn '<temp axis {}>'.format(hex(id(self)))\n@lu.transformation\n-def move_output_axis_transform(chunksize, src, dst, *args):\n+def move_output_axis_transform(name, chunksize, src, dst, *args):\n\"\"\"Function transformation that moves output axes from src to dst.\"\"\"\nans = yield args\n- yield moveaxis(chunksize, dst, src(), ans)\n+ yield moveaxis(name, chunksize, dst, src(), ans)\n+\n+def moveaxis(name, sz, dst, src, x):\n+ aval = core.get_aval(x)\n+ if type(aval) is core.AbstractTuple:\n+ if type(src) is tuple and type(dst) is tuple:\n+ return core.pack(map(partial(moveaxis, name, sz), dst, src, x))\n+ elif type(src) is tuple:\n+ return core.pack(map(partial(moveaxis, name, sz, dst), src, x))\n+ elif type(dst) is tuple:\n+ srcs = (src,) * len(dst)\n+ return core.pack(map(partial(moveaxis, name, sz), dst, srcs, x))\n+ else:\n+ return core.pack(map(partial(moveaxis, name, sz, dst, src), x))\n+ elif isinstance(aval, ShapedArray):\n+ dst_ = (dst % aval.ndim) if dst is not None and aval.ndim else dst\n+ if src == dst_:\n+ return x\n+ else:\n+ if src is None:\n+ x = broadcast(x, sz, force_broadcast=True)\n+ src = 0\n+ dst_ = dst % (aval.ndim + 1)\n+ elif dst is None:\n+ return x.sum(src).psum('i')\n+ if src == dst_:\n+ return x\n+ else:\n+ perm = [i for i in range(onp.ndim(x)) if i != src]\n+ perm.insert(dst_, src)\n+ return x.transpose(perm)\n+ else:\n+ raise TypeError(type(aval))\ndef chunk_aval(chunksize, aval, axis):\n\"\"\"Transform an abstract value's shape to have chunksize extent along axis.\"\"\"\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1961,6 +1961,7 @@ for method_name in _nondiff_methods + _diff_methods:\nsetattr(ShapedArray, \"flatten\", core.aval_method(ravel))\nsetattr(ShapedArray, \"T\", core.aval_property(transpose))\nsetattr(ShapedArray, \"astype\", core.aval_method(lax.convert_element_type))\n+setattr(ShapedArray, \"psum\", core.aval_method(lax.psum))\n# Forward operators, methods, and properies on DeviceArray to lax_numpy\n" }, { "change_type": "MODIFY", "old_path": "pjit_model.py", "new_path": "pjit_model.py", "diff": "@@ -47,16 +47,30 @@ def predict(params, inputs):\ninputs = np.tanh(outputs)\nreturn outputs\n-@partial(pjit, axis_name='i', in_axes=(None, 0), out_axes=None)\ndef spmd_loss(params, batch):\ninputs, targets = batch\npreds = predict(params, inputs)\nperex_loss = -np.mean(preds * targets)\nreturn lax.psum(perex_loss, axis_name='i') # 'allreduce-sum' on hidden 
axis\n-print 'manual spmd program'\n-print spmd_loss(params, batch)\n-print grad(spmd_loss)(params, batch)\n+# compiling the grad function for parallel execution\n+gradfun = pjit(grad(spmd_loss), axis_name='i', in_axes=(None, 0), out_axes=None)\n+print 'manual spmd program, compile-of-grad version'\n+print gradfun(params, batch) # parallel execution, fwd and bwd fused together\n+print\n+\n+# or, grad-of-compile version\n+spmd_loss = pjit(spmd_loss, axis_name='i', in_axes=(None, 0), out_axes=None)\n+print 'manual spmd program, grad-of-compile version'\n+print spmd_loss(params, batch) # parallel execution\n+print grad(spmd_loss)(params, batch) # parallel execution, fwd and bwd separate\n+print\n+\n+# or get both with compile-of-grad-of-compile\n+gradfun = pjit(grad(spmd_loss), axis_name='i', in_axes=(None, 0), out_axes=None)\n+print 'manual spmd program, compile-of-grad-of-compile version'\n+print spmd_loss(params, batch) # parallel execution\n+print grad(spmd_loss)(params, batch) # parallel execution, fwd and bwd fused\nprint\n" } ]
Python
Apache License 2.0
google/jax
the many ways to pjit a cat
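The pjit_model.py changes above walk through grad-of-compile versus compile-of-grad for an SPMD loss. A compact rendering of the compile-of-grad variant with today's jax.pmap is sketched below; the model, shapes, and data are made up, and pmap is only a stand-in for the pjit prototype used in the commit.

# Hedged sketch: "compile-of-grad" with jax.pmap. Shapes and data are invented.
import jax
import jax.numpy as jnp

def spmd_loss(params, inputs, targets):
    W, b = params
    preds = jnp.tanh(inputs @ W + b)
    per_shard = jnp.mean((preds - targets) ** 2)
    return jax.lax.psum(per_shard, axis_name='i')   # allreduce over the device axis

n_dev = jax.local_device_count()
params = (jnp.ones((3, 2)), jnp.zeros(2))
inputs = jnp.ones((n_dev, 4, 3))      # leading axis sharded across devices
targets = jnp.ones((n_dev, 4, 2))

# parallelize the gradient computation itself; params are broadcast (in_axes=None)
grad_fn = jax.pmap(jax.grad(spmd_loss), axis_name='i', in_axes=(None, 0, 0))
grads = grad_fn(params, inputs, targets)   # one replicated gradient copy per device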
260,335
02.02.2019 17:00:52
28,800
7d1527b4d6e874ce06ed2bc329c3c0f5555cd2a4
revise parallel model file
[ { "change_type": "MODIFY", "old_path": "pjit_model.py", "new_path": "pjit_model.py", "diff": "@@ -4,7 +4,7 @@ import numpy.random as npr\nimport jax.numpy as np\nfrom jax import lax\n-from jax import pjit, grad\n+from jax import grad, pjit, papply\n### set up some synthetic data\n@@ -41,12 +41,6 @@ print\n### writing an spmd program manually\n-def predict(params, inputs):\n- for W, b in params:\n- outputs = np.dot(inputs, W) + b\n- inputs = np.tanh(outputs)\n- return outputs\n-\ndef spmd_loss(params, batch):\ninputs, targets = batch\npreds = predict(params, inputs)\n@@ -74,6 +68,11 @@ print grad(spmd_loss)(params, batch) # parallel execution, fwd and bwd fused\nprint\n-### getting an spmd program automagically with papply\n+### getting an spmd program from the standard definition with papply\n+\n+# TODO papply!\n+# spmd_loss, axis_name = papply(loss, axis_size=5, in_axes=(None, 0))\n+# spmd_loss = pjit(spmd_loss, axis_name=axis_name, in_axes=(None, 0), out_axes=None)\n-# TODO\n+# print spmd_loss(params, batch) # parallel execution\n+# print\n" } ]
Python
Apache License 2.0
google/jax
revise parallel model file
260,335
02.02.2019 17:03:07
28,800
729991725b5cfb2d19ca64839c097969f027ac68
remove pjit_model.py for merge
[ { "change_type": "DELETE", "old_path": "pjit_model.py", "new_path": null, "diff": "-from functools import partial\n-\n-import numpy.random as npr\n-\n-import jax.numpy as np\n-from jax import lax\n-from jax import grad, pjit, papply\n-\n-\n-### set up some synthetic data\n-\n-rng = npr.RandomState(0)\n-R = lambda *shape: rng.randn(*shape).astype(\"float32\")\n-layer_sizes = [3, 2]\n-params = [(R(m, n), R(n)) for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\n-\n-input_batch = R(5, 3)\n-target_batch = R(5, 2)\n-batch = (input_batch, target_batch)\n-\n-\n-### standard definition\n-\n-def predict(params, inputs):\n- for W, b in params:\n- outputs = np.dot(inputs, W) + b\n- inputs = np.tanh(outputs)\n- return outputs\n-\n-def loss(params, batch):\n- inputs, targets = batch\n- preds = predict(params, inputs)\n- perex_loss = -np.mean(preds * targets, axis=1)\n- return np.sum(perex_loss)\n-\n-print 'single-machine'\n-print loss(params, batch)\n-print grad(loss)(params, batch)\n-print\n-\n-\n-### writing an spmd program manually\n-\n-def spmd_loss(params, batch):\n- inputs, targets = batch\n- preds = predict(params, inputs)\n- perex_loss = -np.mean(preds * targets)\n- return lax.psum(perex_loss, axis_name='i') # 'allreduce-sum' on hidden axis\n-\n-# compiling the grad function for parallel execution\n-gradfun = pjit(grad(spmd_loss), axis_name='i', in_axes=(None, 0), out_axes=None)\n-print 'manual spmd program, compile-of-grad version'\n-print gradfun(params, batch) # parallel execution, fwd and bwd fused together\n-print\n-\n-# or, grad-of-compile version\n-spmd_loss = pjit(spmd_loss, axis_name='i', in_axes=(None, 0), out_axes=None)\n-print 'manual spmd program, grad-of-compile version'\n-print spmd_loss(params, batch) # parallel execution\n-print grad(spmd_loss)(params, batch) # parallel execution, fwd and bwd separate\n-print\n-\n-# or get both with compile-of-grad-of-compile\n-gradfun = pjit(grad(spmd_loss), axis_name='i', in_axes=(None, 0), out_axes=None)\n-print 'manual spmd program, compile-of-grad-of-compile version'\n-print spmd_loss(params, batch) # parallel execution\n-print grad(spmd_loss)(params, batch) # parallel execution, fwd and bwd fused\n-print\n-\n-\n-### getting an spmd program from the standard definition with papply\n-\n-# TODO papply!\n-# spmd_loss, axis_name = papply(loss, axis_size=5, in_axes=(None, 0))\n-# spmd_loss = pjit(spmd_loss, axis_name=axis_name, in_axes=(None, 0), out_axes=None)\n-\n-# print spmd_loss(params, batch) # parallel execution\n-# print\n" } ]
Python
Apache License 2.0
google/jax
remove pjit_model.py for merge
260,335
02.02.2019 21:41:06
28,800
0afb6202c90043ba0cf90fc3adcdd97c94708340
improve error messages for lax.slice/index funs c.f.
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -540,15 +540,17 @@ def slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):\nlimit_indices = list(operand.shape)\nstrides = [1] * operand.ndim\n- start_indices[axis] = start_index\n- limit_indices[axis] = limit_index\n- strides[axis] = stride\n+ axis = int(axis)\n+ start_indices[axis] = int(start_index)\n+ limit_indices[axis] = int(limit_index)\n+ strides[axis] = int(stride)\nreturn slice(operand, start_indices, limit_indices, strides)\ndef index_in_dim(operand, index, axis=0, keepdims=True):\n\"\"\"Convenience wrapper around slice to perform int indexing.\"\"\"\n+ index, axis = int(index), int(axis)\naxis_size = operand.shape[axis]\nwrapped_index = index + axis_size if index < 0 else index\nif not 0 <= wrapped_index < axis_size:\n@@ -566,8 +568,9 @@ def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):\nstart_indices = [onp.array([0])] * operand.ndim\nslice_sizes = list(operand.shape)\n+ axis = int(axis)\nstart_indices[axis] = reshape(rem(start_index, operand.shape[axis]), [1])\n- slice_sizes[axis] = slice_size\n+ slice_sizes[axis] = int(slice_size)\nstart_indices = concatenate(start_indices, 0)\nreturn dynamic_slice(operand, start_indices, slice_sizes)\n@@ -583,12 +586,14 @@ def dynamic_index_in_dim(operand, index, axis=0, keepdims=True):\ndef dynamic_update_slice_in_dim(operand, update, start_index, axis):\n+ axis = int(axis)\nstart_indices = [0] * _ndim(operand)\nstart_indices[axis] = start_index % operand.shape[axis]\nreturn dynamic_update_slice(operand, update, start_indices)\ndef dynamic_update_index_in_dim(operand, update, index, axis):\n+ axis = int(axis)\nif _ndim(update) != _ndim(operand):\nassert _ndim(update) + 1 == _ndim(operand)\nax = axis % _ndim(operand)\n" } ]
Python
Apache License 2.0
google/jax
improve error messages for lax.slice/index funs c.f. #292
260,335
03.02.2019 09:27:03
28,800
fe96c15d495907686dcc46160c9d9caf999a5ca7
generalize select batch rule (fixes
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1755,17 +1755,15 @@ def _select_transpose_rule(t, pred, on_true, on_false):\nselect(pred, zeros, t) if on_false is None else None]\ndef _select_batch_rule(batched_args, batch_dims, **unused_kwargs):\n- oprand, on_true, on_false, = batched_args\n+ pred, on_true, on_false, = batched_args\npred_bdim, ot_bdim, of_bdim = batch_dims\n- if (ot_bdim not in {None, pred_bdim}) or (of_bdim not in {None, pred_bdim}):\n- raise NotImplementedError # TODO(schsam, mattjj): Handle more cases.\n-\n- # TODO(schsam, mattjj): Switch to using broadcast_in_dim.\n- ot = _ones(oprand) * on_true\n- of = _ones(oprand) * on_false\n-\n- return select(oprand, ot, of), pred_bdim\n+ size = next(x.shape[i] for x, i in zip(batched_args, batch_dims)\n+ if i is not None)\n+ pred = batching.bdim_at_front(pred, pred_bdim, size)\n+ on_true = batching.bdim_at_front(on_true, ot_bdim, size)\n+ on_false = batching.bdim_at_front(on_false, of_bdim, size)\n+ return select(pred, on_true, on_false), 0\nselect_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')\nad.defjvp(select_p,\n" } ]
Python
Apache License 2.0
google/jax
generalize select batch rule (fixes #311)
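The generalized batching rule above lets vmap handle lax.select when only some arguments carry a mapped axis. A tiny usage sketch follows; the values are invented, and the expected output is simply the per-element selection.

# Hedged sketch: batching lax.select with only the predicate mapped, which the
# generalized rule above enables. Values are invented for illustration.
import numpy as np
import jax
import jax.numpy as jnp

pred = jnp.array([False, True])
on_true = jnp.array([0.0, 1.0])
on_false = jnp.array([2.0, 3.0])

out = jax.vmap(jax.lax.select, in_axes=(0, None, None))(pred, on_true, on_false)
np.testing.assert_array_equal(np.asarray(out), np.array([[2.0, 3.0], [0.0, 1.0]]))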
260,335
03.02.2019 09:52:33
28,800
5344e7aea0bbb4e0902b0f178b84c8439616f9a5
add lax.select broadcasting tests, improve rule
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/batching.py", "new_path": "jax/interpreters/batching.py", "diff": "@@ -265,9 +265,9 @@ primitive_batchers[zeros_like_p] = zeros_like_batched\n# method. To handle that case, the `broadcast` function uses a try/except.\n-def bdim_at_front(x, bdim, broadcast_size=1):\n+def bdim_at_front(x, bdim, broadcast_size=1, force_broadcast=False):\nif bdim is None:\n- return broadcast(x, broadcast_size)\n+ return broadcast(x, broadcast_size, force_broadcast=force_broadcast)\nelse:\nreturn move_dim_to_front(x, bdim)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1757,12 +1757,17 @@ def _select_transpose_rule(t, pred, on_true, on_false):\ndef _select_batch_rule(batched_args, batch_dims, **unused_kwargs):\npred, on_true, on_false, = batched_args\npred_bdim, ot_bdim, of_bdim = batch_dims\n-\nsize = next(x.shape[i] for x, i in zip(batched_args, batch_dims)\nif i is not None)\n- pred = batching.bdim_at_front(pred, pred_bdim, size)\n- on_true = batching.bdim_at_front(on_true, ot_bdim, size)\n- on_false = batching.bdim_at_front(on_false, of_bdim, size)\n+\n+ # TODO(mattjj): could avoid broadcasts/transposes in some special cases\n+ pred = batching.bdim_at_front(pred, pred_bdim, size, force_broadcast=True)\n+ on_true = batching.bdim_at_front(on_true, ot_bdim, size, force_broadcast=True)\n+ on_false = batching.bdim_at_front(on_false, of_bdim, size, force_broadcast=True)\n+ assert onp.shape(on_true) == onp.shape(on_false)\n+ if 0 < onp.ndim(pred) < onp.ndim(on_true):\n+ # vmapped function had a scalar pred with nonscalar args\n+ pred = broadcast_in_dim(pred, on_true.shape, [0])\nreturn select(pred, on_true, on_false), 0\nselect_p = standard_primitive(_select_shape_rule, _select_dtype_rule, 'select')\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -485,5 +485,43 @@ class BatchingTest(jtu.JaxTestCase):\nper_example_direct = np.concatenate(per_example_direct, axis=0)\nself.assertAllClose(per_example, per_example_direct, check_dtypes=True)\n+ def testSelect(self):\n+ pred = onp.array([True, False])\n+ on_true = onp.array([0, 1])\n+ on_false = onp.array([2, 3])\n+ ans = vmap(lax.select)(pred, on_true, on_false)\n+ expected = onp.array([0, 3])\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n+\n+ pred = onp.array([False, True])\n+ on_true = onp.array([0, 1])\n+ on_false = onp.array([2, 3])\n+ ans = vmap(lax.select, (0, None, None))(pred, on_true, on_false)\n+ expected = onp.array([[2, 3],\n+ [0, 1]])\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n+\n+ pred = True\n+ on_true = onp.array([0, 1], onp.float32)\n+ on_false = onp.array(3, onp.float32)\n+ ans = vmap(lax.select, (None, 0, None))(pred, on_true, on_false)\n+ expected = onp.array([0, 1], onp.float32)\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n+\n+ pred = onp.array([False, True])\n+ on_true = onp.array([0, 1], onp.float32)\n+ on_false = onp.array(3, onp.float32)\n+ ans = vmap(lax.select, (0, 0, None))(pred, on_true, on_false)\n+ expected = onp.array([3, 1], onp.float32)\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n+\n+ pred = onp.array([False, True])\n+ on_true = onp.array([2], onp.float32)\n+ on_false = onp.array([[3, 4]], onp.float32)\n+ ans = vmap(lax.select, (0, None, 1), 1)(pred, on_true, on_false)\n+ expected = onp.array([[3, 2]], onp.float32)\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n+\n+\nif __name__ == 
'__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add lax.select broadcasting tests, improve rule
260,335
03.02.2019 10:01:06
28,800
583b6547692cb6b790bbec71f5e068baeb06fdb2
add an efficient special case to select batch rule
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1760,13 +1760,23 @@ def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):\nsize = next(x.shape[i] for x, i in zip(batched_args, batch_dims)\nif i is not None)\n- # TODO(mattjj): could avoid broadcasts/transposes in some special cases\n+ # avoid transposes and some broadcasts in special cases\n+ if pred_bdim == ot_bdim == of_bdim:\n+ if onp.shape(pred) == onp.shape(on_true):\n+ return select(pred, on_true, on_false), pred_bdim\n+ else:\n+ # vmapped function had a scalar pred with nonscalar args\n+ assert onp.ndim(pred) == 1\n+ pred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])\n+ return select(pred, on_true, on_false), pred_bdim\n+\npred = batching.bdim_at_front(pred, pred_bdim, size, force_broadcast=True)\non_true = batching.bdim_at_front(on_true, ot_bdim, size, force_broadcast=True)\non_false = batching.bdim_at_front(on_false, of_bdim, size, force_broadcast=True)\nassert onp.shape(on_true) == onp.shape(on_false)\nif 0 < onp.ndim(pred) < onp.ndim(on_true):\n# vmapped function had a scalar pred with nonscalar args\n+ assert onp.ndim(pred) == 1\npred = broadcast_in_dim(pred, on_true.shape, [0])\nreturn select(pred, on_true, on_false), 0\n" } ]
Python
Apache License 2.0
google/jax
add an efficient special case to select batch rule
260,335
03.02.2019 14:00:51
28,800
bf7a438c94bd46e4fffa743a41af041e13dbc57f
add more special cases of select batching rule
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1774,6 +1774,13 @@ def _select_batch_rule(batched_args, batch_dims, **unused_kwargs):\nassert onp.ndim(pred) == 1\npred = broadcast_in_dim(pred, on_true.shape, [pred_bdim])\nreturn select(pred, on_true, on_false), pred_bdim\n+ elif onp.ndim(pred) == 0 and ot_bdim is not None and of_bdim is not None:\n+ if ot_bdim == of_bdim:\n+ return select(pred, on_true, on_false), ot_bdim\n+ else:\n+ assert onp.shape(on_true) == onp.shape(on_false)\n+ on_false = batching.moveaxis(size, ot_bdim, of_bdim, on_false)\n+ return select(pred, on_true, on_false), ot_bdim\npred = batching.bdim_at_front(pred, pred_bdim, size, force_broadcast=True)\non_true = batching.bdim_at_front(on_true, ot_bdim, size, force_broadcast=True)\n" } ]
Python
Apache License 2.0
google/jax
add more special cases of select batching rule
260,335
03.02.2019 14:37:55
28,800
9846c9c7e177ce0ec85c733667642d854ac83263
skip some cases to satisfy internal tests
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -101,8 +101,8 @@ JAX_ONE_TO_ONE_OP_RECORDS = [\nop_record(\"arccos\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\nop_record(\"arctan\", 1, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\nop_record(\"arctan2\", 2, float_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n- op_record(\"arcsinh\", 1, number_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n- op_record(\"arccosh\", 1, number_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n+ op_record(\"arcsinh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n+ op_record(\"arccosh\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\nop_record(\"arctanh\", 1, number_dtypes, all_shapes, jtu.rand_small(), [\"rev\"]),\n]\n@@ -1079,6 +1079,10 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nfor rng_indices in [jtu.rand_int(-5, 5)]))\ndef testTake(self, shape, dtype, index_shape, index_dtype, axis, mode, rng,\nrng_indices):\n+ if (FLAGS.jax_test_dut.startswith(\"tpu\")\n+ and onp.issubdtype(dtype, onp.complexfloating)):\n+ self.skipTest(\"skipping complex dtype on TPU\") # TODO(mattjj): investigate failures\n+\ndef args_maker():\nx = rng(shape, dtype)\ni = rng_indices(index_shape, index_dtype)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1364,6 +1364,7 @@ class LaxTest(jtu.JaxTestCase):\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()]))\n+ @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate failures\ndef testGather(self, shape, dtype, idxs, dnums, slice_sizes, rng, rng_idx):\nrand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\nargs_maker = lambda: [rng(shape, dtype), rand_idxs()]\n@@ -1391,6 +1392,7 @@ class LaxTest(jtu.JaxTestCase):\n]\nfor rng_idx in [jtu.rand_int(max(arg_shape))]\nfor rng in [jtu.rand_default()]))\n+ @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate failures\ndef testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\nrng_idx):\nrand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)\n@@ -2105,6 +2107,7 @@ class LaxAutodiffTest(jtu.JaxTestCase):\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()]))\n+ @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate_failures\ndef testGatherGrad(self, shape, dtype, idxs, dnums, slice_sizes, rng, rng_idx):\nidxs = rng_idx(idxs.shape, idxs.dtype)\ngather = lambda x: lax.gather(x, idxs, dimension_numbers=dnums,\n@@ -2133,6 +2136,7 @@ class LaxAutodiffTest(jtu.JaxTestCase):\n]\nfor rng_idx in [jtu.rand_int(max(arg_shape))]\nfor rng in [jtu.rand_default()]))\n+ @jtu.skip_on_devices(\"tpu\") # TODO(mattjj): investigate failures\ndef testScatterAddGrad(self, arg_shape, dtype, idxs, update_shape, dnums, rng,\nrng_idx):\nidxs = rng_idx(idxs.shape, idxs.dtype)\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -71,7 +71,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\n{\"testcase_name\":\n\"_n={}\".format(jtu.format_shape_dtype_string((n,n), dtype)),\n\"n\": n, \"dtype\": dtype, \"rng\": rng}\n- for n in [0, 4, 5, 50]\n+ for n in [0, 4, 5, 25] # TODO(mattjj): complex64 unstable on large sizes?\nfor dtype in float_types() | complex_types()\nfor rng in [jtu.rand_default()]))\n# TODO(phawkins): enable when there is an LU implementation for GPU/TPU.\n" } ]
Python
Apache License 2.0
google/jax
skip some cases to satisfy internal tests
260,335
03.02.2019 15:03:22
28,800
76decd2ade4665dae5e8eb5e527d571bc4b0e320
version bump for pypi
[ { "change_type": "MODIFY", "old_path": "setup.py", "new_path": "setup.py", "diff": "@@ -16,7 +16,7 @@ from setuptools import setup, find_packages\nsetup(\nname='jax',\n- version='0.1.16',\n+ version='0.1.17',\ndescription='Differentiate, compile, and transform Numpy code.',\nauthor='JAX team',\nauthor_email='jax-dev@google.com',\n" } ]
Python
Apache License 2.0
google/jax
version bump for pypi
260,403
04.02.2019 03:52:23
28,800
723e3c46e466aae927fffb81b95d3f97534c43cb
make einsum deterministic, correct. Fixes a nondeterministic batch-dimension reordering error that was caused by using a python set collection ordering to fix the final output permutations
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1399,8 +1399,8 @@ def _einsum(operands, contractions):\ncontracted_names = contracted_names & (set(lhs_names) | set(rhs_names))\nbatch_names = (set(lhs_names) & set(rhs_names)) - contracted_names\n- lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))\n- for n in batch_names)\n+ lhs_batch = [i for i,l in enumerate(lhs_names) if l in batch_names]\n+ rhs_batch = [i for i,r in enumerate(rhs_names) if r in batch_names]\n# NOTE(mattjj): this can fail non-deterministically in python3, maybe\n# due to opt_einsum\n" } ]
Python
Apache License 2.0
google/jax
make einsum deterministic, correct. Fixes a nondeterministic batch-dimension reordering error that was caused by using a python set collection ordering to fix the final output permutations
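The commit message above traces the bug to Python set iteration order. The snippet below is a generic illustration of that hazard and of the order-by-axis fix, not the exact einsum code path; the subscript strings are made up.

# Hedged illustration: iterating a Python set has no guaranteed order (string
# hashing is randomized across interpreter runs), so deriving batch-dimension
# positions from it can reorder them run to run. Scanning the subscript string
# instead keeps the operand's own axis order, which is deterministic.
lhs_names, rhs_names = "ijkl", "ijml"
batch_names = set(lhs_names) & set(rhs_names)      # e.g. {'i', 'j', 'l'}, in set order

# order depends on set iteration -> pairing may differ between runs
unstable = [(lhs_names.find(n), rhs_names.find(n)) for n in batch_names]

# order follows the lhs axis order -> deterministic
stable_lhs = [i for i, n in enumerate(lhs_names) if n in batch_names]
stable_rhs = [i for i, n in enumerate(rhs_names) if n in batch_names]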
260,403
04.02.2019 16:22:39
28,800
893cf82898cf61bb67d806b2e983b22fdaf94f8c
actually fix the nondeterminism error in einsum batchdims In the case where front batch_dims are already ordered correctly, fix the batch_names ordering to be correct.
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1399,8 +1399,8 @@ def _einsum(operands, contractions):\ncontracted_names = contracted_names & (set(lhs_names) | set(rhs_names))\nbatch_names = (set(lhs_names) & set(rhs_names)) - contracted_names\n- lhs_batch = [i for i,l in enumerate(lhs_names) if l in batch_names]\n- rhs_batch = [i for i,r in enumerate(rhs_names) if r in batch_names]\n+ lhs_batch, rhs_batch = unzip2((lhs_names.find(n), rhs_names.find(n))\n+ for n in batch_names)\n# NOTE(mattjj): this can fail non-deterministically in python3, maybe\n# due to opt_einsum\n@@ -1418,7 +1418,8 @@ def _einsum(operands, contractions):\nbatch_names = ''.join(batch_names)\nelse:\nbatch_dims = tuple(lhs_batch)\n- batch_names = ''.join(lhs_names[i] for i in batch_dims)\n+ batch_names = ''.join(lhs_names[i] for i in range(len(lhs_names))\n+ if i in batch_dims)\nif contracted_names:\n# contract using lax.dot_general\n" } ]
Python
Apache License 2.0
google/jax
actually fix the nondeterminism error in einsum batchdims In the case where front batch_dims are already ordered correctly, fix the batch_names ordering to be correct.
260,403
04.02.2019 16:30:22
28,800
1d26c6bfa1b2d0227f92c9c33e2344393802d23a
add test-case for nondeterminism in front batch dim equal case in einsum This is a test for the observed (and hopefully fixed) nondeterminism in the case of already-front, already-ordered batch_dims.
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_einsum_test.py", "new_path": "tests/lax_numpy_einsum_test.py", "diff": "@@ -276,6 +276,11 @@ class EinsumTest(jtu.JaxTestCase):\ncheck(einstr, *operands)\n+ def test_ordered_front_batch_dim_case(self):\n+ x = onp.ones((1,8,20,4))\n+ y = onp.ones((1,8,20,4))\n+ s = 'ijkl,ijml->ijkm'\n+ check(s, x, y)\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add test-case for nondeterminism in front batch dim equal case in einsum This is a test for the observed (and hopefully fixed) nondeterminism in the case of already-front, already-ordered batch_dims.
260,335
05.02.2019 07:52:56
28,800
52254d760280d94142c1f55fa1c15026f1591cc7
tweaks so einsum and spstats tests run internally
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_einsum_test.py", "new_path": "tests/lax_numpy_einsum_test.py", "diff": "@@ -34,21 +34,21 @@ config.parse_flags_with_absl()\ndef rng():\nreturn onp.random.RandomState(0)\n-def check(s, *ops):\n- a = onp.einsum(s, *ops)\n- b = np.einsum(s, *ops)\n- assert onp.allclose(a, b, atol=1e-4, rtol=1e-4)\n-\nclass EinsumTest(jtu.JaxTestCase):\n+ def _check(self, s, *ops):\n+ a = onp.einsum(s, *ops)\n+ b = np.einsum(s, *ops)\n+ self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True)\n+\ndef test_three_operands_1(self):\nr = rng()\nx = r.randn(3)\ny = r.randn(4)\nz = r.randn(5)\ns = 'i,j,k->ijk'\n- check(s, x, y, z)\n+ self._check(s, x, y, z)\ndef test_three_operands_2(self):\nr = rng()\n@@ -56,42 +56,42 @@ class EinsumTest(jtu.JaxTestCase):\ny = r.randn(4)\nz = r.randn(5)\ns = 'i,j,k->ijk'\n- check(s, x, y, z)\n+ self._check(s, x, y, z)\ndef test_two_operands_1(self):\nr = rng()\nx = r.randn(3, 4)\ny = r.randn(4)\ns = 'ij,j->i'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_two_operands_2(self):\nr = rng()\nx = r.randn(3, 4, 5)\ny = r.randn(4)\ns = 'ijk,j->i'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_two_operands_3(self):\nr = rng()\nx = r.randn(3, 4, 3)\ny = r.randn(3)\ns = 'iji,i->j'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_two_operands_4(self):\nr = rng()\nx = r.randn(3, 4)\ny = r.randn(3, 4)\ns = 'ij,ij->'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_two_operands_5(self):\nr = rng()\nx = r.randn(10, 2, 3)\ny = r.randn(3, 4)\ns = 'nij,jk->nik'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_two_operands_6(self):\n# based on https://github.com/google/jax/issues/37#issuecomment-448572187\n@@ -99,103 +99,103 @@ class EinsumTest(jtu.JaxTestCase):\nx = r.randn(2, 1)\ny = r.randn(2, 3, 4)\ns = 'sa,shb->shab'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_one_operand_1(self):\nr = rng()\nx = r.randn(3, 4, 5)\ns = 'ijk->j'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_2(self):\nr = rng()\nx = r.randn(3, 4, 5)\ns = 'ijk->kij'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_3(self):\nr = rng()\nx = r.randn(3, 4, 5)\ns = 'ijk->ki'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_4(self):\nr = rng()\nx = r.randn(3, 4, 5)\ns = 'ijk->ki'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_5(self):\nr = rng()\nx = r.randn(2, 3, 4, 5)\ns = '...ijk->...ki'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_6(self):\nr = rng()\nx = r.randn(3, 4, 5)\ns = '...ijk->ki'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_7(self):\nr = rng()\nx = r.randn(3, 3)\ns = 'ii->'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_8(self):\nr = rng()\nx = r.randn(3, 3)\ns = 'ij->'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_9(self):\nr = rng()\nx = r.randn(3, 3, 3)\ns = 'iii->'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_10(self):\nr = rng()\nx = r.randn(3, 3)\ns = 'ii->i'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_11(self):\nr = rng()\nx = r.randn(3, 3, 4)\ns = 'iij->i'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_12(self):\nr = rng()\nx = r.randn(3, 3, 3)\ns = 'iii->i'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_13(self):\nr = rng()\nx = r.randn(3, 3, 5, 4, 4)\ns = 'iijkk->i'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_14(self):\nr = rng()\nx = r.randn(3, 3, 5, 4, 4)\ns = 'iijkk->ik'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_15(self):\nr = rng()\nx = r.randn(3, 3, 
5, 4, 4)\ns = 'iijkl->il'\n- check(s, x)\n+ self._check(s, x)\ndef test_one_operand_16(self):\nr = rng()\nx = r.randn(3, 3)\ns = 'ij->ij'\n- check(s, x)\n+ self._check(s, x)\ndef test_tf_unsupported_1(self):\n# from https://www.tensorflow.org/api_docs/python/tf/einsum\n@@ -203,7 +203,7 @@ class EinsumTest(jtu.JaxTestCase):\nx = r.randn(2, 3, 5, 1)\ny = r.randn(3, 4, 5, 1)\ns = 'ij...,jk...->ik...'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_tf_unsupported_2(self):\n# from https://www.tensorflow.org/api_docs/python/tf/einsum\n@@ -211,7 +211,7 @@ class EinsumTest(jtu.JaxTestCase):\nx = r.randn(2, 3, 3)\ny = r.randn(4)\ns = 'ijj,k->ik'\n- check(s, x, y)\n+ self._check(s, x, y)\ndef test_tf_unsupported_3(self):\n# TODO(mattjj): heisenbug! fails sometimes in python3. opt_einsum bug?\n@@ -224,7 +224,7 @@ class EinsumTest(jtu.JaxTestCase):\ny = r.randn(2, 3)\nz = r.randn(3, 4)\ns = 'ij,ij,jk->ik'\n- check(s, x, y, z)\n+ self._check(s, x, y, z)\n# these tests are based on https://github.com/dask/dask/pull/3412/files\n@parameterized.named_parameters(\n@@ -274,13 +274,14 @@ class EinsumTest(jtu.JaxTestCase):\nfor names in input_names]\noperands = [r.randn(*shape) for shape in input_shapes]\n- check(einstr, *operands)\n+ self._check(einstr, *operands)\ndef test_ordered_front_batch_dim_case(self):\nx = onp.ones((1,8,20,4))\ny = onp.ones((1,8,20,4))\ns = 'ijkl,ijml->ijkm'\n- check(s, x, y)\n+ self._check(s, x, y)\n+\nif __name__ == '__main__':\nabsltest.main()\n" }, { "change_type": "MODIFY", "old_path": "tests/scipy_stats_test.py", "new_path": "tests/scipy_stats_test.py", "diff": "@@ -26,10 +26,13 @@ import scipy.stats as osp_stats\nfrom jax import test_util as jtu\nfrom jax.scipy import stats as lsp_stats\n-from lax_scipy_test import CombosWithReplacement, float_dtypes\nall_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]\n+float_dtypes = [onp.float32, onp.float64]\n+\n+CombosWithReplacement = itertools.combinations_with_replacement\n+\ndef genNamedParametersNArgs(n, rng):\nreturn parameterized.named_parameters(\njtu.cases_from_list(\n@@ -38,6 +41,7 @@ def genNamedParametersNArgs(n, rng):\nfor shapes in CombosWithReplacement(all_shapes, n)\nfor dtypes in CombosWithReplacement(float_dtypes, n)))\n+\nclass LaxBackedScipyStatsTests(jtu.JaxTestCase):\n\"\"\"Tests for LAX-backed scipy.stats implementations\"\"\"\n" } ]
Python
Apache License 2.0
google/jax
tweaks so einsum and spstats tests run internally
260,335
05.02.2019 08:39:03
28,800
ceda6026b818c8a11bb6708c20bf11cdc2967849
add batching rule for cholesky PAIR=phawkins
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/batching.py", "new_path": "jax/interpreters/batching.py", "diff": "@@ -117,6 +117,7 @@ class BatchTrace(Trace):\nif all(bdim is None for bdim in dims_in):\nreturn primitive.bind(*vals_in, **params)\nelse:\n+ # TODO(mattjj,phawkins): if no rule implemented, could vmap-via-map here\nbatched_primitive = get_primitive_batcher(primitive)\nval_out, dim_out = batched_primitive(vals_in, dims_in, **params)\nreturn BatchTracer(self, val_out, dim_out)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -24,6 +24,7 @@ from jax import lax\nfrom jax import ad_util\nfrom jax.interpreters import xla\nfrom jax.interpreters import ad\n+from jax.interpreters import batching\nfrom jax.util import partial\nfrom jax.abstract_arrays import ShapedArray\nfrom jax.core import Primitive\n@@ -83,8 +84,15 @@ def cholesky_jvp_rule(primals, tangents):\nL, tmp, left_side=True, transpose_a=False, lower=True)))\nreturn L, L_dot\n+def cholesky_batching_rule(batched_args, batch_dims):\n+ x, = batched_args\n+ bd, = batch_dims\n+ x = batching.bdim_at_front(x, bd)\n+ return cholesky(x), 0\n+\ncholesky_p = standard_unop(_float | _complex, 'cholesky')\nad.primitive_jvps[cholesky_p] = cholesky_jvp_rule\n+batching.primitive_batchers[cholesky_p] = cholesky_batching_rule\ndef cholesky_cpu_translation_rule(c, operand):\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -24,6 +24,7 @@ import jax.numpy as np\nfrom jax import test_util as jtu\nfrom jax.abstract_arrays import ShapedArray\nfrom jax import lax\n+from jax import lax_linalg\nfrom jax import random\nfrom jax.api import jit, grad, jvp, vjp, trace_to_jaxpr, jacfwd, jacrev, hessian\nfrom jax.api import vmap\n@@ -522,6 +523,22 @@ class BatchingTest(jtu.JaxTestCase):\nexpected = onp.array([[3, 2]], onp.float32)\nself.assertAllClose(ans, expected, check_dtypes=True)\n+ def testLaxLinalgCholesky(self):\n+ a = onp.random.RandomState(0).randn(10, 5, 5).astype(onp.float32)\n+ a = onp.matmul(a, onp.conj(onp.swapaxes(a, -1, -2)))\n+\n+ ans = vmap(lax_linalg.cholesky)(a)\n+ expected = onp.linalg.cholesky(a)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ b = onp.random.RandomState(0).randn(10, 5, 5).astype(onp.float32)\n+ b = onp.matmul(b, onp.conj(onp.swapaxes(b, -1, -2)))\n+ b_trans = onp.swapaxes(b, 0, 1) # shape is (5, 10, 5)\n+\n+ ans = vmap(lax_linalg.cholesky, in_axes=1, out_axes=0)(b_trans)\n+ expected = onp.linalg.cholesky(b)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add batching rule for cholesky PAIR=phawkins
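The record above registers the rule that lets vmap map lax_linalg.cholesky over a leading batch axis. A minimal usage sketch, assuming the jax.lax_linalg module path used in the new test (everything else here is illustrative, not part of the record):

    import numpy as onp
    from jax import vmap
    from jax import lax_linalg  # module path as used in the new test above

    # A stack of 10 symmetric positive-definite 5x5 matrices.
    a = onp.random.RandomState(0).randn(10, 5, 5).astype(onp.float32)
    spd = onp.matmul(a, onp.swapaxes(a, -1, -2)) + 5.0 * onp.eye(5, dtype=onp.float32)

    # With the batching rule registered, this maps cholesky over axis 0.
    chols = vmap(lax_linalg.cholesky)(spd)
    print(chols.shape)  # (10, 5, 5): one lower-triangular factor per matrix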
260,335
05.02.2019 10:53:45
28,800
2f5d809456b0d296f4b7f1fa6abccaa2c495f04c
parse options with absl in scipy_stats_test
[ { "change_type": "MODIFY", "old_path": "tests/scipy_stats_test.py", "new_path": "tests/scipy_stats_test.py", "diff": "@@ -27,6 +27,9 @@ import scipy.stats as osp_stats\nfrom jax import test_util as jtu\nfrom jax.scipy import stats as lsp_stats\n+from jax.config import config\n+config.parse_flags_with_absl()\n+\nall_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]\nfloat_dtypes = [onp.float32, onp.float64]\n" } ]
Python
Apache License 2.0
google/jax
parse options with absl in scipy_stats_test
260,335
05.02.2019 13:47:09
28,800
b0c9fb803a1e91f7b95a1e02745d7d8f18e8db9d
gate np.ptp tests by numpy version check >= 1.15
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -168,9 +168,15 @@ JAX_REDUCER_NO_DTYPE_RECORDS = [\nop_record(\"any\", 1, all_dtypes, all_shapes, jtu.rand_some_zero(), []),\nop_record(\"max\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\nop_record(\"min\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n+]\n+\n+numpy_version = tuple(map(int, onp.version.version.split('.')))\n+if numpy_version >= (1, 15):\n+ JAX_REDUCER_NO_DTYPE_RECORDS += [\nop_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n+\nJAX_ARGMINMAX_RECORDS = [\nop_record(\"argmin\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\nop_record(\"argmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n" } ]
Python
Apache License 2.0
google/jax
gate np.ptp tests by numpy version check >= 1.15
260,335
06.02.2019 09:23:34
28,800
1636d058dfd80d5212e6f1d6650bb202341955b4
fix lax.full handling of DeviceConstant scalars fixes
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -462,7 +462,8 @@ def full(shape, fill_value, dtype):\nif onp.isscalar(fill_value) or type(fill_value) is onp.ndarray:\nreturn FilledConstant(onp.asarray(fill_value, dtype), shape)\nelif isinstance(fill_value, xla.DeviceValue):\n- return FilledConstant(convert_element_type(fill_value, dtype), shape)\n+ val = onp.asarray(fill_value, dtype)\n+ return FilledConstant(val, shape)\nelse:\nreturn broadcast(convert_element_type(fill_value, dtype), shape)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1187,7 +1187,6 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\nself._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_n={}_increasing={}\".format(\njtu.format_shape_dtype_string([shape], dtype),\n@@ -1207,6 +1206,10 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)\nself._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)\n+ def testIssue330(self):\n+ x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash\n+ self.assertEqual(x[0, 0], 1)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
fix lax.full handling of DeviceConstant scalars fixes #330
260,474
06.02.2019 11:49:21
18,000
ce74bc55ce057c8d91b446ecfe7b199b46cd1a49
Handle closed-over tracers in while loop cond and body functions
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -52,6 +52,10 @@ class Jaxpr(object):\ndef __repr__(self):\nreturn self.__str__()\n+ def copy(self):\n+ return Jaxpr(self.constvars[:], self.freevars[:], self.invars[:],\n+ self.outvar, self.eqns[:])\n+\nJaxprEqn = namedtuple('JaxprEqn', ['invars', 'outvars', 'primitive',\n'bound_subjaxprs', 'destructure', 'params'])\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -71,6 +71,9 @@ def primitive_computation(prim, *shapes, **kwargs):\nraise e\ndef aval_from_xla_shape(shape):\n+ if shape.is_tuple():\n+ return AbstractTuple(map(aval_from_xla_shape, shape.tuple_shapes()))\n+ else:\nreturn ShapedArray(shape.dimensions(), shape.element_type())\ndef execute_compiled_primitive(compiled, result_handler, *args):\n@@ -156,10 +159,9 @@ def xla_destructure(c, ans):\nnum_elements = len(c.GetShape(ans).tuple_shapes())\nreturn [c.GetTupleElement(ans, i) for i in range(num_elements)]\n-def unit_constant(c, val, canonicalize_types=True):\n- assert not val # must be unit\n- return c.Tuple()\n-xb.register_constant_handler(JaxTuple, unit_constant)\n+def tuple_constant(c, val, canonicalize_types=True):\n+ return c.Tuple(*map(c.Constant, val))\n+xb.register_constant_handler(JaxTuple, tuple_constant)\ndef translation_rule(p):\nbackend_specific_rule = backend_specific_translations[xb._platform_name].get(p)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -425,8 +425,9 @@ def _while_loop(cond_fun, body_fun, init_val):\nbody_jaxpr, pvout, body_consts = pe.trace_to_jaxpr(flat_body_fun, (pval_flat,))\nabs_out, _ = pvout\n- params = _OpaqueParam((abs_out, cond_jaxpr, cond_consts, body_jaxpr, body_consts))\n- out_flat = while_p.bind(init_val_flat, opaque_params=params)\n+ params = _OpaqueParam((abs_out, cond_jaxpr, body_jaxpr))\n+ out_flat = while_p.bind(init_val_flat, core.pack(cond_consts), core.pack(body_consts),\n+ opaque_params=params)\nif out_tree() != in_tree:\nraise TypeError(\"body_fun input and output must have identical structure\")\nreturn build_tree(out_tree(), out_flat)\n@@ -2694,22 +2695,56 @@ ad.primitive_transposes[sort_key_val_p] = _sort_key_val_transpose_rule\nbatching.primitive_batchers[sort_key_val_p] = _sort_key_val_batch_rule\n-def _while_loop_abstract_eval(init_val, opaque_params):\n+def _while_loop_abstract_eval(init_val, cond_consts, body_consts, opaque_params):\nabs_out = opaque_params.val[0]\nreturn maybe_tracer_tuple_to_abstract_tuple(abs_out)\n-def _while_loop_translation_rule(c, init_val, opaque_params):\n- shape = c.GetShape(init_val)\n- abs_out, cond_jaxpr, cond_consts, body_jaxpr, body_consts = opaque_params.val\n- cond_computation = xla.jaxpr_computation(cond_jaxpr, cond_consts, (), shape)\n- body_computation = xla.jaxpr_computation(body_jaxpr, body_consts, (), shape)\n- return c.While(cond_computation, body_computation, init_val)\n+def _while_loop_translation_rule(c, init_val, cond_consts, body_consts, opaque_params):\n+ loop_carry = c.Tuple(init_val, cond_consts, body_consts)\n+ shape = c.GetShape(loop_carry)\n+ abs_out, cond_jaxpr, body_jaxpr = opaque_params.val\n+\n+ loop_carry_var = pe.Var(0, \"loop_carry\")\n+ outvar = pe.Var(0, \"loop_carry_out\")\n+ cond_var = pe.Var(0, \"cond_consts\")\n+ body_var = pe.Var(0, \"body_consts\")\n+\n+ assert len(cond_jaxpr.invars) == 1\n+ cond_jaxpr_converted = cond_jaxpr.copy()\n+ cond_jaxpr_converted.constvars = []\n+ 
cond_jaxpr_converted.invars = [loop_carry_var]\n+ cond_jaxpr_converted.eqns = (\n+ [_unpack_eqn(loop_carry_var, [cond_jaxpr.invars[0], cond_var, body_var]),\n+ _unpack_eqn(cond_var, cond_jaxpr.constvars)]\n+ + list(cond_jaxpr.eqns))\n+\n+\n+ assert len(body_jaxpr.invars) == 1\n+ body_jaxpr_converted = body_jaxpr.copy()\n+ body_jaxpr_converted.constvars = []\n+ body_jaxpr_converted.invars = [loop_carry_var]\n+ body_jaxpr_converted.outvar = outvar\n+ body_jaxpr_converted.eqns = (\n+ [_unpack_eqn(loop_carry_var, [body_jaxpr.invars[0], cond_var, body_var]),\n+ _unpack_eqn(body_var, body_jaxpr.constvars)]\n+ + list(body_jaxpr.eqns) +\n+ [_pack_eqn([body_jaxpr.outvar, cond_var, body_var], outvar)])\n+\n+ cond_computation = xla.jaxpr_computation(cond_jaxpr_converted, (), (), shape)\n+ body_computation = xla.jaxpr_computation(body_jaxpr_converted, (), (), shape)\n+ full_ans = c.While(cond_computation, body_computation, loop_carry)\n+ return c.GetTupleElement(full_ans, 0)\nwhile_p = Primitive('while')\nwhile_p.def_impl(partial(xla.apply_primitive, while_p))\nwhile_p.def_abstract_eval(_while_loop_abstract_eval)\nxla.translations[while_p] = _while_loop_translation_rule\n+def _unpack_eqn(invar, outvars):\n+ return core.JaxprEqn([invar], outvars, core.identity_p, (), True, {})\n+\n+def _pack_eqn(invars, outvar):\n+ return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\ndef _tie_in_transpose_rule(t):\nreturn [ad_util.zero, t]\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1111,6 +1111,37 @@ class LaxTest(jtu.JaxTestCase):\nself.assertEqual(cloop(2), (2, 3))\nself.assertEqual(cloop(4), (4, 10))\n+ def testWhileWithClosure(self):\n+\n+ def loop(init, local_limit, inc):\n+\n+ def loop_cond(state):\n+ pos, _ = state\n+ return lax.lt(pos, local_limit)\n+\n+ def loop_body(state):\n+ effect[0] = True\n+ pos, count = state\n+ return (lax.add(pos, 1), lax.add(count, inc))\n+\n+ result = lax._while_loop(loop_cond, loop_body, (init, 0))\n+ _, count = result\n+ return count\n+\n+ cloop = api.jit(loop)\n+\n+ limit = 10\n+ effect = [False]\n+ self.assertEqual(loop(2, limit, 1), limit - 2)\n+ assert effect[0]\n+ effect[0] = False\n+ self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ assert effect[0]\n+ effect[0] = False\n+ self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ self.assertEqual(cloop(3, limit, 1), limit - 3)\n+ assert not effect[0]\n+\ndef testNestedWhileWithDynamicUpdateSlice(self):\nnum = 5\n@@ -1195,6 +1226,21 @@ class LaxTest(jtu.JaxTestCase):\nself.assertEqual(count(4), 6)\nself.assertEqual(count(4), cfun(4))\n+ def testForiLoopClosure(self):\n+ def count(num):\n+ def body_fun(i, tot):\n+ return lax.add(num, lax.add(tot, i))\n+ return lax.fori_loop(0, num, body_fun, 0)\n+\n+ cfun = api.jit(count)\n+\n+ self.assertEqual(count(2), 1 + 2**2)\n+ self.assertEqual(count(2), cfun(2))\n+ self.assertEqual(count(3), 3 + 3**2)\n+ self.assertEqual(count(3), cfun(3))\n+ self.assertEqual(count(4), 6 + 4**2)\n+ self.assertEqual(count(4), cfun(4))\n+\ndef testForiLoopTupleState(self):\ndef sum_first_n(arr, num):\ndef body_fun(i, state):\n" } ]
Python
Apache License 2.0
google/jax
Handle closed-over tracers in while loop cond and body functions
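The change above makes while_loop (and hence fori_loop) accept cond/body functions that close over traced values, which is exactly what the new testForiLoopClosure exercises. A compact sketch of the now-supported pattern, mirroring that test (the concrete numbers are just for illustration):

    from jax import api, lax

    def count(num):
      # body_fun closes over `num`; under jit, `num` is a tracer
      def body_fun(i, tot):
        return lax.add(num, lax.add(tot, i))
      return lax.fori_loop(0, num, body_fun, 0)

    cfun = api.jit(count)
    print(count(4), cfun(4))  # both 22: sum(0..3) plus num added on each of 4 iterations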
260,335
06.02.2019 11:02:16
28,800
5847d9616810c20a38d8290131bc0d6a0839f014
rename "minmax" -> "optimizers"
[ { "change_type": "MODIFY", "old_path": "docs/jax.experimental.minmax.rst", "new_path": "docs/jax.experimental.minmax.rst", "diff": "-jax.experimental.minmax module\n+jax.experimental.optimizers module\n==============================\n-.. automodule:: jax.experimental.minmax\n+.. automodule:: jax.experimental.optimizers\n:members:\n:undoc-members:\n:show-inheritance:\n" }, { "change_type": "MODIFY", "old_path": "docs/jax.experimental.rst", "new_path": "docs/jax.experimental.rst", "diff": "@@ -4,7 +4,7 @@ jax.experimental package\n.. toctree::\n:maxdepth: 1\n- jax.experimental.minmax\n+ jax.experimental.optimizers\njax.experimental.stax\n.. automodule:: jax.experimental\n" }, { "change_type": "MODIFY", "old_path": "examples/advi.py", "new_path": "examples/advi.py", "diff": "@@ -26,7 +26,7 @@ import matplotlib.pyplot as plt\nfrom jax.api import jit, grad, vmap\nfrom jax import random\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nimport jax.numpy as np\nimport jax.scipy.stats.norm as norm\n@@ -120,12 +120,12 @@ if __name__ == \"__main__\":\ninit_mean = np.zeros(D)\ninit_std = np.zeros(D)\ninit_params = (init_mean, init_std)\n- opt_init, opt_update = minmax.momentum(step_size=0.1, mass=0.9)\n+ opt_init, opt_update = optimizers.momentum(step_size=0.1, mass=0.9)\nopt_state = opt_init(init_params)\n@jit\ndef update(i, opt_state):\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\ngradient = grad(objective)(params, i)\nreturn opt_update(i, gradient, opt_state)\n@@ -134,6 +134,6 @@ if __name__ == \"__main__\":\nprint(\"Optimizing variational parameters...\")\nfor t in range(100):\nopt_state = update(t, opt_state)\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\ncallback(params, t)\nplt.show(block=True)\n" }, { "change_type": "MODIFY", "old_path": "examples/kernel_lsq.py", "new_path": "examples/kernel_lsq.py", "diff": "@@ -24,7 +24,7 @@ import numpy.random as npr\nimport jax.numpy as np\nfrom jax.config import config\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nfrom jax import grad, jit, make_jaxpr, vmap\n@@ -42,17 +42,17 @@ def gram(kernel, xs):\ndef minimize(f, x, num_steps=10000, step_size=0.000001, mass=0.9):\n- opt_init, opt_update = minmax.momentum(step_size, mass)\n+ opt_init, opt_update = optimizers.momentum(step_size, mass)\n@jit\ndef update(i, opt_state):\n- x = minmax.get_params(opt_state)\n+ x = optimizers.get_params(opt_state)\nreturn opt_update(i, grad(f)(x), opt_state)\nopt_state = opt_init(x)\nfor i in xrange(num_steps):\nopt_state = update(i, opt_state)\n- return minmax.get_params(opt_state)\n+ return optimizers.get_params(opt_state)\ndef train(kernel, xs, ys, regularization=0.01):\n" }, { "change_type": "MODIFY", "old_path": "examples/mnist_classifier.py", "new_path": "examples/mnist_classifier.py", "diff": "# limitations under the License.\n\"\"\"A basic MNIST example using JAX together with the mini-libraries stax, for\n-neural network building, and minmax, for first-order stochastic optimization.\n+neural network building, and optimizers, for first-order stochastic optimization.\n\"\"\"\nfrom __future__ import absolute_import\n@@ -28,7 +28,7 @@ import numpy.random as npr\nimport jax.numpy as np\nfrom jax.config import config\nfrom jax import jit, grad\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nfrom jax.experimental import stax\nfrom jax.experimental.stax import Dense, Relu, LogSoftmax\nfrom 
examples import datasets\n@@ -70,11 +70,11 @@ if __name__ == \"__main__\":\nyield train_images[batch_idx], train_labels[batch_idx]\nbatches = data_stream()\n- opt_init, opt_update = minmax.momentum(step_size, mass=momentum_mass)\n+ opt_init, opt_update = optimizers.momentum(step_size, mass=momentum_mass)\n@jit\ndef update(i, opt_state, batch):\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\nreturn opt_update(i, grad(loss)(params, batch), opt_state)\n_, init_params = init_random_params((-1, 28 * 28))\n@@ -88,7 +88,7 @@ if __name__ == \"__main__\":\nopt_state = update(next(itercount), opt_state, next(batches))\nepoch_time = time.time() - start_time\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\ntrain_acc = accuracy(params, (train_images, train_labels))\ntest_acc = accuracy(params, (test_images, test_labels))\nprint(\"Epoch {} in {:0.2f} sec\".format(epoch, epoch_time))\n" }, { "change_type": "MODIFY", "old_path": "examples/mnist_vae.py", "new_path": "examples/mnist_vae.py", "diff": "\"\"\"A basic variational autoencoder (VAE) on binarized MNIST using Numpy and JAX.\n-This file uses the stax network definition library and the minmax optimization\n-library.\n+This file uses the stax network definition library and the optimizers\n+optimization library.\n\"\"\"\nfrom __future__ import absolute_import\n@@ -30,7 +30,7 @@ import matplotlib.pyplot as plt\nimport jax.numpy as np\nfrom jax.config import config\nfrom jax import jit, grad, lax, random\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nfrom jax.experimental import stax\nfrom jax.experimental.stax import Dense, FanOut, Relu, Softplus\nfrom examples import datasets\n@@ -102,7 +102,7 @@ if __name__ == \"__main__\":\n_, init_decoder_params = decoder_init((batch_size, 10))\ninit_params = init_encoder_params, init_decoder_params\n- opt_init, opt_update = minmax.momentum(step_size, mass=0.9)\n+ opt_init, opt_update = optimizers.momentum(step_size, mass=0.9)\ndef binarize_batch(rng, i, images):\ni = i % num_batches\n@@ -116,7 +116,7 @@ if __name__ == \"__main__\":\nrng, elbo_rng, data_rng = random.split(rng, 3)\nbatch = binarize_batch(data_rng, i, images)\nloss = lambda params: -elbo(elbo_rng, params, batch) / batch_size\n- g = grad(loss)(minmax.get_params(opt_state))\n+ g = grad(loss)(optimizers.get_params(opt_state))\nloop_carry = rng, opt_update(i, g, opt_state), images\nreturn loop_carry\ninit_val = rng, opt_state, train_images\n@@ -125,7 +125,7 @@ if __name__ == \"__main__\":\n@jit\ndef evaluate(opt_state, images):\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\nelbo_rng, data_rng, image_rng = random.split(test_rng, 3)\nbinarized_test = random.bernoulli(data_rng, images)\ntest_elbo = elbo(elbo_rng, params, binarized_test) / images.shape[0]\n" }, { "change_type": "MODIFY", "old_path": "examples/resnet50.py", "new_path": "examples/resnet50.py", "diff": "\"\"\"A mock-up showing a ResNet50 network with training on synthetic data.\n-This file uses the stax neural network definition library and the minmax\n+This file uses the stax neural network definition library and the optimizers\noptimization library.\n\"\"\"\nfrom __future__ import absolute_import\n@@ -28,7 +28,7 @@ from six.moves import xrange\nimport jax.numpy as np\nfrom jax.config import config\nfrom jax import jit, grad\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nfrom jax.experimental import stax\nfrom 
jax.experimental.stax import (AvgPool, BatchNorm, Conv, Dense, FanInSum,\nFanOut, Flatten, GeneralConv, Identity,\n@@ -115,16 +115,16 @@ if __name__ == \"__main__\":\nonehot_labels = labels == np.arange(num_classes)\nyield images, onehot_labels\n- opt_init, opt_update = minmax.momentum(step_size, mass=0.9)\n+ opt_init, opt_update = optimizers.momentum(step_size, mass=0.9)\nbatches = synth_batches()\n@jit\ndef update(i, opt_state, batch):\n- params = minmax.get_params(opt_state)\n+ params = optimizers.get_params(opt_state)\nreturn opt_update(i, grad(loss)(params, batch), opt_state)\nopt_state = opt_init(init_params)\nfor i in xrange(num_steps):\nopt_state = update(i, opt_state, next(batches))\n- trained_params = minmax.get_params(opt_state)\n+ trained_params = optimizers.get_params(opt_state)\n" }, { "change_type": "MODIFY", "old_path": "jax/BUILD", "new_path": "jax/BUILD", "diff": "@@ -46,8 +46,8 @@ py_library(\n)\npy_library(\n- name = \"minmax\",\n- srcs = [\"experimental/minmax.py\"],\n+ name = \"optimizers\",\n+ srcs = [\"experimental/optimizers.py\"],\ndeps = [\":libjax\"],\n)\n" }, { "change_type": "RENAME", "old_path": "jax/experimental/minmax.py", "new_path": "jax/experimental/optimizers.py", "diff": "" }, { "change_type": "MODIFY", "old_path": "tests/minmax_test.py", "new_path": "tests/minmax_test.py", "diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-\"\"\"Tests for the minmax optimizer module.\"\"\"\n+\"\"\"Tests for the optimizers module.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n@@ -23,7 +23,7 @@ from absl.testing import absltest\nimport jax.numpy as np\nimport jax.test_util as jtu\nfrom jax import jit, grad\n-from jax.experimental import minmax\n+from jax.experimental import optimizers\nfrom jax.lib import xla_bridge as xla\nfrom jax.config import config\n@@ -50,7 +50,7 @@ class OptimizerTests(jtu.JaxTestCase):\n# def op(infeed, x0):\n# opt_init, opt_update = optimizer(*args, **kwargs)\n- # return minmax.run_optimizer(loss, infeed, opt_update, opt_init(x0))\n+ # return optimizers.run_optimizer(loss, infeed, opt_update, opt_init(x0))\n# cop = jit(op)\n# a1, _ = op(infeeder(), x0)\n@@ -65,14 +65,14 @@ class OptimizerTests(jtu.JaxTestCase):\nx0 = 1.\nnum_iters = 100\nstep_size = 0.1\n- self._CheckOptimizer(minmax.sgd, loss, x0, num_iters, step_size)\n+ self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testSgdVector(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\n- self._CheckOptimizer(minmax.sgd, loss, x0, num_iters, step_size)\n+ self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testSgdNestedTuple(self):\ndef loss(xyz, _):\n@@ -81,7 +81,7 @@ class OptimizerTests(jtu.JaxTestCase):\nx0 = (np.ones(2), (np.ones(2), np.ones(2)))\nnum_iters = 100\nstep_size = 0.1\n- self._CheckOptimizer(minmax.sgd, loss, x0, num_iters, step_size)\n+ self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testMomentumVector(self):\ndef loss(x, _): return np.dot(x, x)\n@@ -89,14 +89,14 @@ class OptimizerTests(jtu.JaxTestCase):\nnum_iters = 100\nstep_size = 0.1\nmass = 0.\n- self._CheckOptimizer(minmax.momentum, loss, x0, num_iters, step_size, mass)\n+ self._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_size, mass)\ndef testRmspropVector(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\n- 
self._CheckOptimizer(minmax.rmsprop, loss, x0, num_iters, step_size)\n+ self._CheckOptimizer(optimizers.rmsprop, loss, x0, num_iters, step_size)\n@jtu.skip_on_devices('cpu') # TODO(mattjj): investigate numerical failure\ndef testAdamVector(self):\n@@ -104,7 +104,7 @@ class OptimizerTests(jtu.JaxTestCase):\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\n- self._CheckOptimizer(minmax.adam, loss, x0, num_iters, step_size)\n+ self._CheckOptimizer(optimizers.adam, loss, x0, num_iters, step_size)\ndef testSgdClosure(self):\ndef loss(y, x, _): return y**2 * x**2\n@@ -113,43 +113,43 @@ class OptimizerTests(jtu.JaxTestCase):\nnum_iters = 20\nstep_size = 0.1\npartial_loss = functools.partial(loss, y)\n- self._CheckRun(minmax.sgd, partial_loss, x0, num_iters, step_size)\n+ self._CheckRun(optimizers.sgd, partial_loss, x0, num_iters, step_size)\ndef testSgdVectorExponentialDecaySchedule(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\n- step_schedule = minmax.exponential_decay(0.1, 3, 2.)\n- self._CheckOptimizer(minmax.sgd, loss, x0, num_iters, step_schedule)\n+ step_schedule = optimizers.exponential_decay(0.1, 3, 2.)\n+ self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_schedule)\ndef testSgdVectorInverseTimeDecaySchedule(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\n- step_schedule = minmax.inverse_time_decay(0.1, 3, 2.)\n- self._CheckOptimizer(minmax.sgd, loss, x0, num_iters, step_schedule)\n+ step_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)\n+ self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_schedule)\ndef testAdamVectorInverseTimeDecaySchedule(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\n- step_schedule = minmax.inverse_time_decay(0.1, 3, 2.)\n- self._CheckOptimizer(minmax.adam, loss, x0, num_iters, step_schedule)\n+ step_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)\n+ self._CheckOptimizer(optimizers.adam, loss, x0, num_iters, step_schedule)\ndef testMomentumVectorInverseTimeDecayStaircaseSchedule(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\n- step_sched = minmax.inverse_time_decay(0.1, 3, 2., staircase=True)\n+ step_sched = optimizers.inverse_time_decay(0.1, 3, 2., staircase=True)\nmass = 0.9\n- self._CheckOptimizer(minmax.momentum, loss, x0, num_iters, step_sched, mass)\n+ self._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_sched, mass)\ndef testRmspropVectorPiecewiseConstantSchedule(self):\ndef loss(x, _): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\n- step_schedule = minmax.piecewise_constant([25, 75], [1.0, 0.5, 0.1])\n- self._CheckOptimizer(minmax.rmsprop, loss, x0, num_iters, step_schedule)\n+ step_schedule = optimizers.piecewise_constant([25, 75], [1.0, 0.5, 0.1])\n+ self._CheckOptimizer(optimizers.rmsprop, loss, x0, num_iters, step_schedule)\ndef testTracedStepSize(self):\ndef loss(x, _): return np.dot(x, x)\n@@ -157,13 +157,13 @@ class OptimizerTests(jtu.JaxTestCase):\nnum_iters = 100\nstep_size = 0.1\n- init_fun, _ = minmax.sgd(step_size)\n+ init_fun, _ = optimizers.sgd(step_size)\nopt_state = init_fun(x0)\n@jit\ndef update(opt_state, step_size):\n- _, update_fun = minmax.sgd(step_size)\n- x = minmax.get_params(opt_state)\n+ _, update_fun = optimizers.sgd(step_size)\n+ x = optimizers.get_params(opt_state)\ng = grad(loss)(x, None)\nreturn update_fun(0, g, opt_state)\n" } ]
Python
Apache License 2.0
google/jax
rename "minmax" -> "optimizers"
260,335
06.02.2019 19:20:39
28,800
75a2745da019aa9dd05a74746eb1c1eedf0c953b
add flag to disable jit globally (fixes
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -26,8 +26,10 @@ from __future__ import print_function\nimport itertools\nimport operator as op\n+import os\nimport numpy as onp\n+from distutils.util import strtobool\nfrom . import core\nfrom . import linear_util as lu\n@@ -47,10 +49,16 @@ from .interpreters import pxla\nfrom .interpreters import ad\nfrom .interpreters import batching\nfrom .interpreters import parallel\n+from .config import flags\nmap = safe_map\nzip = safe_zip\n+FLAGS = flags.FLAGS\n+flags.DEFINE_bool(\"jax_disable_jit\",\n+ strtobool(os.getenv(\"JAX_DISABLE_JIT\", \"False\")),\n+ \"Make @jit into a no-op.\")\n+\ndef jit(fun, static_argnums=()):\n\"\"\"Sets up `fun` for just-in-time compilation with XLA.\n@@ -69,6 +77,9 @@ def jit(fun, static_argnums=()):\nReturns:\nA wrapped version of `fun`, set up for just-in-time compilation.\n\"\"\"\n+ if FLAGS.jax_disable_jit:\n+ return fun\n+\n@wraps(fun)\ndef f_jitted(*args, **kwargs):\nf = lu.wrap_init(fun, kwargs)\n" } ]
Python
Apache License 2.0
google/jax
add flag to disable jit globally (fixes #252)
260,335
06.02.2019 19:44:12
28,800
a36812da301bd01d39f82f75f4294a31c9ec5319
add context manager for disabling jit
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -29,6 +29,7 @@ import operator as op\nimport os\nimport numpy as onp\n+from contextlib import contextmanager\nfrom distutils.util import strtobool\nfrom . import core\n@@ -49,7 +50,7 @@ from .interpreters import pxla\nfrom .interpreters import ad\nfrom .interpreters import batching\nfrom .interpreters import parallel\n-from .config import flags\n+from .config import flags, config\nmap = safe_map\nzip = safe_zip\n@@ -57,7 +58,7 @@ zip = safe_zip\nFLAGS = flags.FLAGS\nflags.DEFINE_bool(\"jax_disable_jit\",\nstrtobool(os.getenv(\"JAX_DISABLE_JIT\", \"False\")),\n- \"Make @jit into a no-op.\")\n+ \"Disable JIT compilation and just call original Python.\")\ndef jit(fun, static_argnums=()):\n@@ -77,11 +78,10 @@ def jit(fun, static_argnums=()):\nReturns:\nA wrapped version of `fun`, set up for just-in-time compilation.\n\"\"\"\n- if FLAGS.jax_disable_jit:\n- return fun\n-\n@wraps(fun)\ndef f_jitted(*args, **kwargs):\n+ if _jit_is_disabled or config.read('jax_disable_jit'):\n+ return fun(*args, **kwargs)\nf = lu.wrap_init(fun, kwargs)\ndyn_argnums = [i for i in range(len(args)) if i not in static_argnums]\nf, dyn_args = argnums_partial(f, dyn_argnums, args)\n@@ -95,6 +95,15 @@ def jit(fun, static_argnums=()):\nreturn f_jitted\n+@contextmanager\n+def disable_jit():\n+ global _jit_is_disabled\n+ _jit_is_disabled, prev_val = True, _jit_is_disabled\n+ yield\n+ _jit_is_disabled = prev_val\n+_jit_is_disabled = False\n+\n+\ndef grad(fun, argnums=0):\n\"\"\"Creates a function which evaluates the gradient of `fun`.\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -321,6 +321,23 @@ class APITest(jtu.JaxTestCase):\n(onp.array([0., 0.]), onp.array([0., 2.])))\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def test_disable_jit(self):\n+ effects = []\n+\n+ @api.jit\n+ def f(x):\n+ effects.append(1)\n+ return x\n+\n+ with api.disable_jit():\n+ f(2)\n+ f(2)\n+ assert len(effects) == 2\n+\n+ f(2)\n+ f(2)\n+ assert len(effects) == 3\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add context manager for disabling jit
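Combined with the environment flag from the previous record, this context manager gives a local escape hatch for debugging jitted code with ordinary Python side effects. A sketch of the intended usage, following the new test (the function body is made up):

    import jax.numpy as np
    from jax import api

    @api.jit
    def f(x):
      print("running f")   # visible on every call only when jit is disabled
      return np.sin(x) + x

    with api.disable_jit():
      f(2.0)               # plain Python execution, so the print fires
      f(2.0)               # ... and fires again

    f(2.0)                 # jitted path: the print fires only while tracing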
260,291
07.02.2019 11:09:10
10,800
d319f5805b555dc7dfc0eac66fb50a56e62f26e9
Fix toy example negative log-likelihood loss
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -243,8 +243,8 @@ def logistic_predictions(weights, inputs):\n# Training loss is the negative log-likelihood of the training labels.\ndef loss(weights, inputs, targets):\npreds = logistic_predictions(weights, inputs)\n- label_probs = preds * targets + (1 - preds) * (1 - targets)\n- return -np.sum(np.log(label_probs))\n+ label_probs = np.log(preds) * targets + np.log(1 - preds) * (1 - targets)\n+ return -np.sum(label_probs)\n# Build a toy dataset.\ninputs = np.array([[0.52, 1.12, 0.77],\n" } ]
Python
Apache License 2.0
google/jax
Fix toy example negative log-likelihood loss
260,291
08.02.2019 15:28:19
10,800
80bbdbbb75a87f721df89cb1efe7f1ae4ebb602f
Rename `label_probs` -> `label_logprobs` in README
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -243,7 +243,7 @@ def logistic_predictions(weights, inputs):\n# Training loss is the negative log-likelihood of the training labels.\ndef loss(weights, inputs, targets):\npreds = logistic_predictions(weights, inputs)\n- label_probs = np.log(preds) * targets + np.log(1 - preds) * (1 - targets)\n+ label_logprobs = np.log(preds) * targets + np.log(1 - preds) * (1 - targets)\nreturn -np.sum(label_probs)\n# Build a toy dataset.\n" } ]
Python
Apache License 2.0
google/jax
Rename `label_probs` -> `label_logprobs` in README
260,291
08.02.2019 16:24:43
10,800
197755b066dda15adcc2fde0e23691c4c4aa8e62
Fix README toy example incomplete var renaming Rename of `label_probs` into `label_logprobs` in the toy example was incomplete and, as a consequence, the toy example no longer worked. This renames `label_probs` completely and fixes the toy example.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -244,7 +244,7 @@ def logistic_predictions(weights, inputs):\ndef loss(weights, inputs, targets):\npreds = logistic_predictions(weights, inputs)\nlabel_logprobs = np.log(preds) * targets + np.log(1 - preds) * (1 - targets)\n- return -np.sum(label_probs)\n+ return -np.sum(label_logprobs)\n# Build a toy dataset.\ninputs = np.array([[0.52, 1.12, 0.77],\n" } ]
Python
Apache License 2.0
google/jax
Fix README toy example incomplete var renaming Rename of `label_probs` into `label_logprobs` in the toy example was incomplete and, as a consequence, the toy example no longer worked. This renames `label_probs` completely and fixes the toy example.
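The three README fixes above settle on the usual binary cross-entropy form of the negative log-likelihood. As a quick sanity check of the corrected expression, it can be evaluated and differentiated directly; the tiny dataset here is invented for illustration:

    import jax.numpy as np
    from jax import grad

    def predictions(weights, inputs):
      return 1.0 / (1.0 + np.exp(-np.dot(inputs, weights)))  # sigmoid of a linear model

    def loss(weights, inputs, targets):
      preds = predictions(weights, inputs)
      label_logprobs = np.log(preds) * targets + np.log(1 - preds) * (1 - targets)
      return -np.sum(label_logprobs)  # the corrected negative log-likelihood

    inputs = np.array([[0.52, 1.12], [0.88, -1.08], [0.52, 0.06]])
    targets = np.array([1.0, 0.0, 1.0])
    weights = np.zeros(2)

    print(loss(weights, inputs, targets))        # 3 * log(2) ~= 2.079 at zero weights
    print(grad(loss)(weights, inputs, targets))  # gradient for a training step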
260,335
03.02.2019 09:00:16
28,800
cde5c925fd66ea5d8d883c7abc6ba2fefd3fc30f
start to sketch out gather batching rule (WIP)
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2079,12 +2079,40 @@ def _gather_transpose_rule(t, operand, start_indices, dimension_numbers,\nindex_vector_dim=dimension_numbers.index_vector_dim)\nreturn [scatter_add(zeros, start_indices, t, scatter_dnums), ad_util.zero]\n+def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,\n+ slice_sizes, operand_shape):\n+ operand, start_indices = batched_args\n+ operand_bdim, start_indices_bdim = batch_dims\n+\n+ if operand_bdim is not None and start_indices_bdim is None:\n+ slice_sizes = list(slice_sizes)\n+ slice_sizes.insert(operand_bdim, operand.shape[operand_bdim])\n+\n+ offset_dims = tuple(dimension_numbers.offset_dims) + (operand_bdim,)\n+\n+ collapsed_slice_dims = tuple(i+1 if i >= operand_bdim else i\n+ for i in dimension_numbers.collapsed_slice_dims)\n+\n+ dnums = GatherDimensionNumbers(\n+ offset_dims=offset_dims,\n+ collapsed_slice_dims=collapsed_slice_dims,\n+ start_index_map=dimension_numbers.start_index_map,\n+ index_vector_dim=dimension_numbers.index_vector_dim)\n+\n+ out_bdim = 0 # TODO\n+\n+ return gather(operand, start_indices, dimension_numbers=dnums,\n+ slice_sizes=slice_sizes), out_bdim\n+ else:\n+ raise NotImplementedError # TODO(mattjj):\n+\ngather_p = standard_primitive(\n_gather_shape_rule, _gather_dtype_rule, 'gather',\n_gather_translation_rule)\nad.defjvp(gather_p, _gather_jvp_rule, None)\nad.primitive_transposes[gather_p] = _gather_transpose_rule\n+batching.primitive_batchers[gather_p] = _gather_batching_rule\nScatterDimensionNumbers = collections.namedtuple(\n" } ]
Python
Apache License 2.0
google/jax
start to sketch out gather batching rule (WIP)
260,335
05.02.2019 09:29:47
28,800
b6cb3509cd0047e159e4ee269014c7dbacf0d473
progress on a gather vmap rule, PAIR=hawkinsp
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2085,24 +2085,38 @@ def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,\noperand_bdim, start_indices_bdim = batch_dims\nif operand_bdim is not None and start_indices_bdim is None:\n+ collapsed_slice_dims = set(dimension_numbers.collapsed_slice_dims)\n+ num_preceding_window_dims = sum( # 2\n+ 1 for i in range(len(slice_sizes))\n+ if i < operand_bdim and i not in collapsed_slice_dims)\n+ offset_dims = list(dimension_numbers.offset_dims) # [2, 3, 7]\n+ if num_preceding_window_dims == 0:\n+ bdim_offset_dim = 0\n+ else:\n+ bdim_offset_dim = offset_dims[num_preceding_window_dims - 1] + 1\n+ new_offset_dims = (offset_dims[:num_preceding_window_dims]\n+ + [bdim_offset_dim]\n+ + list(onp.add(1, offset_dims[num_preceding_window_dims:])))\n+ new_offset_dims = tuple(new_offset_dims)\n+\nslice_sizes = list(slice_sizes)\nslice_sizes.insert(operand_bdim, operand.shape[operand_bdim])\n-\n- offset_dims = tuple(dimension_numbers.offset_dims) + (operand_bdim,)\n+ slice_sizes = tuple(slice_sizes)\ncollapsed_slice_dims = tuple(i + 1 if i >= operand_bdim else i\nfor i in dimension_numbers.collapsed_slice_dims)\n+ start_index_map = tuple(i + 1 if i > operand_bdim else i\n+ for i in dimension_numbers.start_index_map)\n+\ndnums = GatherDimensionNumbers(\n- offset_dims=offset_dims,\n+ offset_dims=new_offset_dims,\ncollapsed_slice_dims=collapsed_slice_dims,\n- start_index_map=dimension_numbers.start_index_map,\n+ start_index_map=start_index_map,\nindex_vector_dim=dimension_numbers.index_vector_dim)\n- out_bdim = 0 # TODO\n-\nreturn gather(operand, start_indices, dimension_numbers=dnums,\n- slice_sizes=slice_sizes), out_bdim\n+ slice_sizes=slice_sizes), bdim_offset_dim\nelse:\nraise NotImplementedError # TODO(mattjj):\n" } ]
Python
Apache License 2.0
google/jax
progress on a gather vmap rule, PAIR=hawkinsp
260,335
11.02.2019 09:28:21
28,800
cccc0304fd2f72b99425c541058f40c0304abb7e
finish gather batching rule, pair w/
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2111,19 +2111,34 @@ def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,\nslice_sizes=slice_sizes), 0\nelse:\n- # TODO(mattjj,phawkins): this code is wrong\n+ # get rid of scalar index case (noticing our start_indices.ndim is\n+ # incremented by one compared to the original user code)\n+ if dimension_numbers.index_vector_dim == start_indices.ndim - 1:\n+ start_indices = reshape(start_indices, start_indices.shape + (1,))\n+\n+ # move our batch dimensions to the front to preserve sanity\noperand = batching.move_dim_to_front(operand, operand_bdim)\nstart_indices = batching.move_dim_to_front(start_indices, start_indices_bdim)\n+ # Example: user code had start_indices shape (3, 4, 5) and index_vector_dim\n+ # of 2, and we have to deal with start_indices shape (7, 3, 4, 5). We\n+ # transform that to an index_vector_dim of 3, and a start_indices of shape\n+ # (7, 3, 4, 6) where we concatenated an iota that counts along our batch\n+ # dimension to the front of the ndindex.\n+ index_vector_dim = dimension_numbers.index_vector_dim + 1\n+ counts = broadcasted_iota(start_indices.dtype, start_indices.shape, 0)\n+ start_indices = concatenate([counts, start_indices], index_vector_dim)\n+\nslice_sizes = (1,) + slice_sizes\ncollapsed_slice_dims = (0,) + tuple(onp.add(1, dimension_numbers.collapsed_slice_dims))\noffset_dims = tuple(onp.add(1, dimension_numbers.offset_dims))\n- start_index_map = tuple(onp.add(1, dimension_numbers.start_index_map))\n+ start_index_map = (0,) + tuple(onp.add(1, dimension_numbers.start_index_map))\n+\ndnums = GatherDimensionNumbers(\noffset_dims=offset_dims,\ncollapsed_slice_dims=collapsed_slice_dims,\nstart_index_map=start_index_map,\n- index_vector_dim=dimension_numbers.index_vector_dim + 1)\n+ index_vector_dim=index_vector_dim)\nreturn gather(operand, start_indices, dimension_numbers=dnums,\nslice_sizes=slice_sizes), 0\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -601,41 +601,40 @@ class BatchingTest(jtu.JaxTestCase):\nfor i in range(idxs.shape[axis])])\nself.assertAllClose(ans, expected, check_dtypes=False)\n- # TODO(mattjj,phawkins): finish this batching rule once and for all...\n- # @parameterized.named_parameters(\n- # {\"testcase_name\": \"_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\n- # jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,\n- # dnums, slice_sizes),\n- # \"op_axis\": op_axis, \"idxs_axis\": idxs_axis, \"shape\": shape, \"dtype\":\n- # dtype, \"idxs\": idxs, \"dnums\": dnums, \"slice_sizes\": slice_sizes,\n- # \"rng\": rng, \"rng_idx\": rng_idx}\n- # for dtype in [onp.float32, onp.int32]\n- # for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [\n- # (0, 0, (2, 5), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers(\n- # offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,),\n- # index_vector_dim=1), (1,)),\n- # (1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T,\n- # lax.GatherDimensionNumbers(\n- # offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\n- # index_vector_dim=1), (2,)),\n- # (0, 1, (2, 10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T,\n- # lax.GatherDimensionNumbers(\n- # offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\n- # index_vector_dim=1), (1, 3)),\n- # ]\n- # for rng_idx in [jtu.rand_int(max(shape))]\n- # for rng in [jtu.rand_default()])\n- # 
@jtu.skip_on_devices(\"tpu\") # TODO(b/123834001): re-enable when fixed\n- # def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,\n- # slice_sizes, rng, rng_idx):\n- # fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n- # operand = rng(shape, dtype)\n- # assert operand.shape[op_axis] == idxs.shape[idxs_axis]\n- # ans = vmap(fun, (op_axis, idxs_axis))(operand, idxs)\n- # expected = onp.stack([fun(operand[(slice(None),) * op_axis + (i,)],\n- # idxs[(slice(None),) * idxs_axis + (i,)])\n- # for i in range(idxs.shape[idxs_axis])])\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,\n+ dnums, slice_sizes),\n+ \"op_axis\": op_axis, \"idxs_axis\": idxs_axis, \"shape\": shape, \"dtype\":\n+ dtype, \"idxs\": idxs, \"dnums\": dnums, \"slice_sizes\": slice_sizes,\n+ \"rng\": rng, \"rng_idx\": rng_idx}\n+ for dtype in [onp.float32, onp.int32]\n+ for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [\n+ (0, 0, (2, 5), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers(\n+ offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1,)),\n+ (1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\n+ index_vector_dim=1), (2,)),\n+ (0, 1, (2, 10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1, 3)),\n+ ]\n+ for rng_idx in [jtu.rand_int(max(shape))]\n+ for rng in [jtu.rand_default()])\n+ @jtu.skip_on_devices(\"tpu\") # TODO(b/123834001): re-enable when fixed\n+ def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,\n+ slice_sizes, rng, rng_idx):\n+ fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n+ operand = rng(shape, dtype)\n+ assert operand.shape[op_axis] == idxs.shape[idxs_axis]\n+ ans = vmap(fun, (op_axis, idxs_axis))(operand, idxs)\n+ expected = onp.stack([fun(operand[(slice(None),) * op_axis + (i,)],\n+ idxs[(slice(None),) * idxs_axis + (i,)])\n+ for i in range(idxs.shape[idxs_axis])])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\ndef testNumpyIndexing1(self):\na = np.arange(2 * 3 * 4).reshape((2, 3, 4))\n" } ]
Python
Apache License 2.0
google/jax
finish gather batching rule, pair w/ @hawkinsp
260,335
11.02.2019 11:21:29
28,800
d5ee720aeac945e1517f8985a0d34ee44084b4e3
more testing of gather batching rule
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2142,7 +2142,6 @@ def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,\nreturn gather(operand, start_indices, dimension_numbers=dnums,\nslice_sizes=slice_sizes), 0\n-\ngather_p = standard_primitive(\n_gather_shape_rule, _gather_dtype_rule, 'gather',\n_gather_translation_rule)\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -553,13 +553,15 @@ class BatchingTest(jtu.JaxTestCase):\n(1, (10, 3), onp.array([0, 0, 0]), lax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\nindex_vector_dim=1), (2,)),\n- (1, (10, 3, 5,), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\n+ (1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n- @jtu.skip_on_devices(\"tpu\") # TODO(b/123834001): re-enable when fixed\ndef testGatherBatchedOperand(self, axis, shape, dtype, idxs, dnums,\nslice_sizes, rng, rng_idx):\nfun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n@@ -584,14 +586,17 @@ class BatchingTest(jtu.JaxTestCase):\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\nindex_vector_dim=1), (2,)),\n- (1, (10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T,\n+ (1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T,\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (0, (10, 5), onp.array([[[0, 2], [1, 0]],\n+ [[1, 2], [0, 3]]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n- @jtu.skip_on_devices(\"tpu\") # TODO(b/123834001): re-enable when fixed\ndef testGatherBatchedIndices(self, axis, shape, dtype, idxs, dnums,\nslice_sizes, rng, rng_idx):\nfun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n@@ -621,10 +626,13 @@ class BatchingTest(jtu.JaxTestCase):\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]],\n+ [[1, 0], [2, 0]]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n- @jtu.skip_on_devices(\"tpu\") # TODO(b/123834001): re-enable when fixed\ndef testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,\nslice_sizes, rng, rng_idx):\nfun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1381,6 +1381,9 @@ class LaxTest(jtu.JaxTestCase):\n((10, 5,), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ ((10, 5), onp.array([[0, 2], [1, 0]]), 
lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()]))\n" } ]
Python
Apache License 2.0
google/jax
more testing of gather batching rule
260,335
11.02.2019 11:30:44
28,800
90d92a5a5cd1ad59b2123358a01126b5e3c28a7d
fix gather batching rule bug
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2126,7 +2126,9 @@ def _gather_batching_rule(batched_args, batch_dims, dimension_numbers,\n# (7, 3, 4, 6) where we concatenated an iota that counts along our batch\n# dimension to the front of the ndindex.\nindex_vector_dim = dimension_numbers.index_vector_dim + 1\n- counts = broadcasted_iota(start_indices.dtype, start_indices.shape, 0)\n+ count_shape = list(start_indices.shape)\n+ count_shape[index_vector_dim] = 1\n+ counts = broadcasted_iota(start_indices.dtype, tuple(count_shape), 0)\nstart_indices = concatenate([counts, start_indices], index_vector_dim)\nslice_sizes = (1,) + slice_sizes\n" } ]
Python
Apache License 2.0
google/jax
fix gather batching rule bug
260,335
11.02.2019 10:24:21
28,800
65c023d2316a075fadfa39ac7ed7752961e8f32d
start adding scatter batching rule
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2206,7 +2206,6 @@ def _scatter_jvp(primals, tangents, update_jaxpr, update_consts,\nupdates_shape=updates_shape)\nreturn val_out, tangent_out\n-\ndef _scatter_transpose_rule(t, operand, scatter_indices, updates,\nupdate_jaxpr, update_consts, dimension_numbers,\nupdates_shape):\n@@ -2233,12 +2232,61 @@ def _scatter_transpose_rule(t, operand, scatter_indices, updates,\nslice_sizes=slice_sizes)\nreturn [operand_t, None, update_t]\n+def _scatter_batching_rule(batched_args, batch_dims, update_jaxpr,\n+ update_consts, dimension_numbers, updates_shape):\n+ operand, scatter_indices, updates = batched_args\n+ operand_bdim, scatter_indices_bdim, updates_bdim = batch_dims\n+ del update_jaxpr, update_consts, updates_shape # Unused.\n+\n+ size = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)\n+ if ax is not None)\n+ operand = batching.bdim_at_front(operand, operand_bdim, broadcast_size=size,\n+ force_broadcast=True)\n+ operand_bdim = 0\n+\n+ if scatter_indices_bdim is not None and updates_bdim is None:\n+ raise NotImplementedError # TODO(mattjj,phawkins)\n+ elif scatter_indices_bdim is None and updates_bdim is not None:\n+ updates = batching.move_dim_to_front(updates, updates_bdim)\n+ inserted_window_dims = tuple(onp.add(1, dimension_numbers.inserted_window_dims))\n+ update_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.update_window_dims))\n+ scatter_dims_to_operand_dims = tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))\n+ dnums = ScatterDimensionNumbers(\n+ update_window_dims=update_window_dims,\n+ inserted_window_dims=inserted_window_dims,\n+ scatter_dims_to_operand_dims=scatter_dims_to_operand_dims,\n+ index_vector_dim=dimension_numbers.index_vector_dim)\n+ return scatter_add(operand, scatter_indices, updates, dnums), 0\n+ else:\n+ # see the third case in _gather_batching_rule for comparison and comments\n+ if dimension_numbers.index_vector_dim == scatter_indices.ndim - 1:\n+ scatter_indices = reshape(scatter_indices, scatter_indices.shape + (1,))\n+\n+ scatter_indices = batching.move_dim_to_front(scatter_indices,\n+ scatter_indices_bdim)\n+ updates = batching.move_dim_to_front(updates, updates_bdim)\n+\n+ index_vector_dim = dimension_numbers.index_vector_dim + 1\n+ counts = broadcasted_iota(scatter_indices.dtype, scatter_indices.shape, 0)\n+ scatter_indices = concatenate([counts, scatter_indices], index_vector_dim)\n+\n+ update_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))\n+ inserted_window_dims = (0,) + tuple(onp.add(1, dimension_numbers.inserted_window_dims))\n+ scatter_dims_to_operand_dims = (0,) + tuple(onp.add(1, dimension_numbers.scatter_dims_to_operand_dims))\n+\n+ dnums = ScatterDimensionNumbers(\n+ update_window_dims=update_window_dims,\n+ inserted_window_dims=inserted_window_dims,\n+ scatter_dims_to_operand_dims=scatter_dims_to_operand_dims,\n+ index_vector_dim=index_vector_dim)\n+ return scatter_add(operand, scatter_indices, updates, dnums), 0\nscatter_p = standard_primitive(\n_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',\n_scatter_translation_rule)\nad.primitive_jvps[scatter_p] = _scatter_jvp\nad.primitive_transposes[scatter_p] = _scatter_transpose_rule\n+batching.primitive_batchers[scatter_p] = _scatter_batching_rule\ndef _reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": 
"@@ -556,9 +556,6 @@ class BatchingTest(jtu.JaxTestCase):\n(1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n- (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n- offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n- index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n@@ -571,6 +568,39 @@ class BatchingTest(jtu.JaxTestCase):\nfor i in range(operand.shape[axis])])\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,\n+ slice_sizes),\n+ \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"dnums\": dnums,\n+ \"slice_sizes\": slice_sizes, \"rng\": rng, \"rng_idx\": rng_idx}\n+ for dtype in [onp.float32, onp.float64]\n+ for axis, shape, idxs, dnums, slice_sizes in [\n+ (0, (3, 5), onp.array([0, 2]), lax.GatherDimensionNumbers(\n+ offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1,)),\n+ (1, (10, 3), onp.array([0, 0, 0]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\n+ index_vector_dim=1), (2,)),\n+ (1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1, 3)),\n+ (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n+ ]\n+ for rng_idx in [jtu.rand_int(max(shape))]\n+ for rng in [jtu.rand_default()])\n+ def testGatherGradBatchedOperand(self, axis, shape, dtype, idxs, dnums,\n+ slice_sizes, rng, rng_idx):\n+ fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n+ gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))\n+ operand = rng(shape, dtype)\n+ ans = vmap(gfun, (axis, None))(operand, idxs)\n+ expected = onp.stack([gfun(operand[(slice(None),) * axis + (i,)], idxs)\n+ for i in range(operand.shape[axis])])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n@parameterized.named_parameters(\n{\"testcase_name\": \"_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\njtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,\n@@ -606,6 +636,38 @@ class BatchingTest(jtu.JaxTestCase):\nfor i in range(idxs.shape[axis])])\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,\n+ slice_sizes),\n+ \"axis\": axis, \"shape\": shape, \"dtype\": dtype, \"idxs\": idxs, \"dnums\": dnums,\n+ \"slice_sizes\": slice_sizes, \"rng\": rng, \"rng_idx\": rng_idx}\n+ for dtype in [onp.float32, onp.float64]\n+ for axis, shape, idxs, dnums, slice_sizes in [\n+ (0, (5,), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers(\n+ offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1,)),\n+ (1, (10,), onp.array([[0, 0, 0], [0, 2, 1]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\n+ index_vector_dim=1), (2,)),\n+ (1, (10, 5,), onp.array([[0, 2, 1], [0, 3, 
3]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1, 3)),\n+ ]\n+ for rng_idx in [jtu.rand_int(max(shape))]\n+ for rng in [jtu.rand_default()])\n+ def testGatherGradBatchedIndices(self, axis, shape, dtype, idxs, dnums,\n+ slice_sizes, rng, rng_idx):\n+ fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n+ gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))\n+ operand = rng(shape, dtype)\n+ ans = vmap(gfun, (None, axis))(operand, idxs)\n+ expected = onp.stack([gfun(operand, idxs[(slice(None),) * axis + (i,)])\n+ for i in range(idxs.shape[axis])])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n@parameterized.named_parameters(\n{\"testcase_name\": \"_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\njtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,\n@@ -644,6 +706,41 @@ class BatchingTest(jtu.JaxTestCase):\nfor i in range(idxs.shape[idxs_axis])])\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,\n+ dnums, slice_sizes),\n+ \"op_axis\": op_axis, \"idxs_axis\": idxs_axis, \"shape\": shape, \"dtype\":\n+ dtype, \"idxs\": idxs, \"dnums\": dnums, \"slice_sizes\": slice_sizes,\n+ \"rng\": rng, \"rng_idx\": rng_idx}\n+ for dtype in [onp.float32, onp.int32]\n+ for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [\n+ (0, 0, (2, 5), onp.array([[0, 2], [1, 3]]), lax.GatherDimensionNumbers(\n+ offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1,)),\n+ (1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\n+ index_vector_dim=1), (2,)),\n+ (0, 1, (2, 10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T,\n+ lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\n+ index_vector_dim=1), (1, 3)),\n+ ]\n+ for rng_idx in [jtu.rand_int(max(shape))]\n+ for rng in [jtu.rand_default()])\n+ def testGatherGradBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,\n+ slice_sizes, rng, rng_idx):\n+ fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)\n+ gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))\n+ operand = rng(shape, dtype)\n+ assert operand.shape[op_axis] == idxs.shape[idxs_axis]\n+ ans = vmap(gfun, (op_axis, idxs_axis))(operand, idxs)\n+ expected = onp.stack([gfun(operand[(slice(None),) * op_axis + (i,)],\n+ idxs[(slice(None),) * idxs_axis + (i,)])\n+ for i in range(idxs.shape[idxs_axis])])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\ndef testNumpyIndexing1(self):\na = np.arange(2 * 3 * 4).reshape((2, 3, 4))\nind = onp.array([[0, 1],\n" } ]
Python
Apache License 2.0
google/jax
start adding scatter batching rule
260,335
11.02.2019 11:40:08
28,800
e3b9df14a8ce47482ecc3e1dcb7492c11c017eb4
complete scatter batching rule
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2267,7 +2267,9 @@ def _scatter_batching_rule(batched_args, batch_dims, update_jaxpr,\nupdates = batching.move_dim_to_front(updates, updates_bdim)\nindex_vector_dim = dimension_numbers.index_vector_dim + 1\n- counts = broadcasted_iota(scatter_indices.dtype, scatter_indices.shape, 0)\n+ count_shape = list(scatter_indices.shape)\n+ count_shape[index_vector_dim] = 1\n+ counts = broadcasted_iota(scatter_indices.dtype, tuple(count_shape), 0)\nscatter_indices = concatenate([counts, scatter_indices], index_vector_dim)\nupdate_window_dims = tuple(onp.add(1, dimension_numbers.update_window_dims))\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -556,6 +556,9 @@ class BatchingTest(jtu.JaxTestCase):\n(1, (10, 3, 5), onp.array([0, 2, 1]), lax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (2, (10, 5, 3), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n@@ -651,10 +654,14 @@ class BatchingTest(jtu.JaxTestCase):\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,),\nindex_vector_dim=1), (2,)),\n- (1, (10, 5,), onp.array([[0, 2, 1], [0, 3, 3]]).T,\n+ (1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T,\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (0, (10, 5), onp.array([[[0, 2], [1, 0]],\n+ [[1, 2], [0, 3]]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n@@ -726,6 +733,10 @@ class BatchingTest(jtu.JaxTestCase):\nlax.GatherDimensionNumbers(\noffset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,),\nindex_vector_dim=1), (1, 3)),\n+ (2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]],\n+ [[1, 0], [2, 0]]]), lax.GatherDimensionNumbers(\n+ offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1),\n+ index_vector_dim=1), (1, 3)),\n]\nfor rng_idx in [jtu.rand_int(max(shape))]\nfor rng in [jtu.rand_default()])\n" } ]
Python
Apache License 2.0
google/jax
complete scatter batching rule
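The two scatter commits above batch a vmapped scatter by moving the batch axis to the front of the operand and prepending a batch-index iota to the scatter indices, so one larger scatter-add updates every batch element. As a rough analogue (plain numpy, not the jax implementation; all names here are illustrative), the same "prepend the batch index" idea looks like this:

```python
import numpy as onp

# Toy numpy analogue of the trick in the batching rule above: prepend an iota
# over the batch dimension to the indices so a single scatter-add into an
# operand with a leading batch axis updates every batch element at once.
operand = onp.zeros((2, 5))                      # axis 0 plays the batch role
idx = onp.array([[0, 2], [1, 3]])                # per-example positions, shape (batch, n)
updates = onp.ones((2, 2))
counts = onp.broadcast_to(onp.arange(2)[:, None], idx.shape)  # batch index per update
onp.add.at(operand, (counts, idx), updates)      # one vectorized scatter-add covers both rows
```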
260,335
11.02.2019 12:46:17
28,800
f8b48cb1c4d26f63c6ac7123377907e84879764b
add comment explaining scatter batching rule logic
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -2238,6 +2238,8 @@ def _scatter_batching_rule(batched_args, batch_dims, update_jaxpr,\noperand_bdim, scatter_indices_bdim, updates_bdim = batch_dims\ndel update_jaxpr, update_consts, updates_shape # Unused.\n+ # move the operand batch dim to the front if it is not None, otherwise create\n+ # it at the front (so that we can scatter into it)\nsize = next(x.shape[ax] for x, ax in zip(batched_args, batch_dims)\nif ax is not None)\noperand = batching.bdim_at_front(operand, operand_bdim, broadcast_size=size,\n" } ]
Python
Apache License 2.0
google/jax
add comment explaining scatter batching rule logic
260,335
11.02.2019 13:32:22
28,800
0c2cd5f8589f4c3fec79efa52b1fa5ed399a56c8
tweaks to make internal tests pass
[ { "change_type": "MODIFY", "old_path": "examples/examples_test.py", "new_path": "examples/examples_test.py", "diff": "@@ -82,7 +82,7 @@ class ExamplesTest(jtu.JaxTestCase):\nxs = rng.randn(n, d)\nys = np.dot(xs, truth)\nkernel = lambda x, y: np.dot(x, y)\n- assert np.all(kernel_lsq.gram(kernel, xs) == np.dot(xs, xs.T))\n+ self.assertAllClose(kernel_lsq.gram(kernel, xs), np.dot(xs, xs.T))\ndef testKernelRegressionTrainAndPredict(self):\n# TODO(frostig): reenable this test.\n@@ -94,7 +94,7 @@ class ExamplesTest(jtu.JaxTestCase):\nys = np.dot(xs, truth)\nkernel = lambda x, y: np.dot(x, y)\npredict = kernel_lsq.train(kernel, xs, ys)\n- assert np.allclose(predict(xs), ys, atol=1e-3)\n+ self.assertAllClose(predict(xs), ys, atol=1e-3, rtol=1e-3)\nif __name__ == \"__main__\":\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -136,7 +136,6 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nself._CompileAndCheck(partial(np.linalg.eigh, UPLO=uplo), args_maker,\ncheck_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_ord={}_axis={}_keepdims={}\".format(\njtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),\n@@ -156,6 +155,9 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nfor dtype in float_types() | complex_types()\nfor rng in [jtu.rand_default()]))\ndef testNorm(self, shape, dtype, ord, axis, keepdims, rng):\n+ # TODO(mattjj,phawkins): re-enable after checking internal tests\n+ self.skipTest(\"internal test failures\")\n+\nif (ord in ('nuc', 2, -2) and isinstance(axis, tuple) and len(axis) == 2 and\n(not FLAGS.jax_test_dut or not FLAGS.jax_test_dut.startswith(\"cpu\") or\nlen(shape) != 2)):\n@@ -168,7 +170,6 @@ class NumpyLinalgTest(jtu.JaxTestCase):\ncheck_dtypes=True, tol=1e-3)\nself._CompileAndCheck(np_fn, args_maker, check_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_n={}_full_matrices={}_compute_uv={}\".format(\njtu.format_shape_dtype_string((m, n), dtype), full_matrices, compute_uv),\n" } ]
Python
Apache License 2.0
google/jax
tweaks to make internal tests pass
260,335
11.02.2019 13:38:04
28,800
a26fa984609a47b4bfb39acd56e48f45e6d13459
fix examples_test
[ { "change_type": "MODIFY", "old_path": "examples/examples_test.py", "new_path": "examples/examples_test.py", "diff": "@@ -82,7 +82,8 @@ class ExamplesTest(jtu.JaxTestCase):\nxs = rng.randn(n, d)\nys = np.dot(xs, truth)\nkernel = lambda x, y: np.dot(x, y)\n- self.assertAllClose(kernel_lsq.gram(kernel, xs), np.dot(xs, xs.T))\n+ self.assertAllClose(kernel_lsq.gram(kernel, xs), np.dot(xs, xs.T),\n+ check_dtypes=False)\ndef testKernelRegressionTrainAndPredict(self):\n# TODO(frostig): reenable this test.\n@@ -94,7 +95,8 @@ class ExamplesTest(jtu.JaxTestCase):\nys = np.dot(xs, truth)\nkernel = lambda x, y: np.dot(x, y)\npredict = kernel_lsq.train(kernel, xs, ys)\n- self.assertAllClose(predict(xs), ys, atol=1e-3, rtol=1e-3)\n+ self.assertAllClose(predict(xs), ys, atol=1e-3, rtol=1e-3,\n+ check_dtypes=False)\nif __name__ == \"__main__\":\n" } ]
Python
Apache License 2.0
google/jax
fix examples_test
260,409
11.02.2019 16:41:51
18,000
130694affa787675b2d291b5dc5fa87c6c508766
Adding a Gaussian process example
[ { "change_type": "ADD", "old_path": null, "new_path": "examples/gaussian_process_regression.py", "diff": "+# Copyright 2018 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# https://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+\"\"\"A basic example demonstrating using JAX to do Gaussian process regression.\n+\"\"\"\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+from absl import app\n+from absl import flags\n+from jax import grad\n+from jax import jit\n+from jax.config import config\n+import jax.numpy as np\n+import jax.random as random\n+import jax.scipy as scipy\n+import matplotlib.pyplot as plt\n+\n+\n+FLAGS = flags.FLAGS\n+\n+\n+def main(unused_argv):\n+\n+ numpts = 25\n+ key = random.PRNGKey(0)\n+ eye = np.eye(numpts)\n+\n+ def sqdist(x1, x2):\n+ return (-2. * np.dot(x1, x2.T) + np.sum(x2**2, axis=1) +\n+ np.sum(x1**2, axis=1)[:, None])\n+\n+ def cov(params, x1, x2):\n+ x1 = x1/np.exp(params[2])\n+ x2 = x2/np.exp(params[2])\n+ return np.exp(params[0]) * np.exp(-sqdist(x1, x2)/(2. * np.exp(params[1])))\n+\n+ def marginal_likelihood(params, x, y):\n+ train_cov = cov(params, x, x) + eye * 1e-6\n+ chol = np.linalg.cholesky(train_cov + eye * 1e-4).T\n+ inv_chol = scipy.linalg.solve_triangular(chol, eye, lower=True)\n+ inv_train_cov = np.dot(inv_chol.T, inv_chol)\n+ ml = np.sum(\n+ -0.5 * np.dot(y.T, np.dot(inv_train_cov, y)) -\n+ 0.5 * np.sum(2.0 * np.log(np.dot(inv_chol * eye, np.ones(\n+ (numpts, 1))))) - (numpts / 2.) * np.log(2. * 3.1415))\n+ return ml\n+ grad_fun = jit(grad(marginal_likelihood))\n+\n+ def predict(params, x, y, xtest):\n+ train_cov = cov(params, x, x) + eye * 1e-6\n+ chol = np.linalg.cholesky(train_cov + eye * 1e-4)\n+ inv_chol = scipy.linalg.solve_triangular(chol, eye, lower=True)\n+ inv_train_cov = np.dot(inv_chol.T, inv_chol)\n+ cross_cov = cov(params, x, xtest)\n+ mu = np.dot(cross_cov.T, np.dot(inv_train_cov, y))\n+ var = (cov(params, xtest, xtest) -\n+ np.dot(cross_cov.T, np.dot(inv_train_cov, cross_cov)))\n+ return mu, var\n+\n+ # Covariance hyperparameters to be learned\n+ params = [np.zeros((1, 1)), # Amplitude\n+ np.zeros((1, 1)), # Bandwidth\n+ np.zeros((1, 1))] # Length-scale\n+ momentums = [p * 0. for p in params]\n+ scales = [p * 0. + 1. 
for p in params]\n+\n+ lr = 0.01 # Learning rate\n+ def train_step(params, momentums, scales, x, y):\n+ grads = grad_fun(params, x, y)\n+ for i in range(len(params)):\n+ momentums[i] = 0.9 * momentums[i] + 0.1 * grads[i][0]\n+ scales[i] = 0.9 * scales[i] + 0.1 * grads[i][0]**2\n+ params[i] -= lr * momentums[i]/np.sqrt(scales[i] + 1e-5)\n+ return params, momentums, scales\n+\n+ # Create a really simple toy 1D function\n+ y_fun = lambda x: np.sin(x) + 0.01 * random.normal(key, shape=(x.shape[0], 1))\n+ x = np.linspace(1., 4., numpts)[:, None]\n+ y = y_fun(x)\n+ xtest = np.linspace(0, 5., 200)[:, None]\n+ ytest = y_fun(xtest)\n+\n+ for i in range(1000):\n+ params, momentums, scales = train_step(params, momentums, scales, x, y)\n+ if i % 50 == 0:\n+ ml = marginal_likelihood(params, x, y)\n+ print(\"Step: %d, neg marginal likelihood: %f\" % (i, ml))\n+\n+ print([i.copy() for i in params])\n+ mu, var = predict(params, x, y, xtest)\n+ std = np.sqrt(np.diag(var))\n+ plt.plot(x, y, \"k.\")\n+ plt.plot(xtest, mu)\n+ plt.fill_between(xtest.flatten(),\n+ mu.flatten() - std * 2, mu.flatten() + std * 2)\n+\n+\n+if __name__ == \"__main__\":\n+ config.config_with_absl()\n+ app.run(main)\n" } ]
Python
Apache License 2.0
google/jax
Adding a Gaussian process example
260,335
11.02.2019 14:19:22
28,800
0c15bdbcffe37b2d7d2a46a3d04afba224c0cfa9
todo in multivariate_normal.logpdf dtype promotion
[ { "change_type": "MODIFY", "old_path": "jax/scipy/stats/multivariate_normal.py", "new_path": "jax/scipy/stats/multivariate_normal.py", "diff": "@@ -27,6 +27,9 @@ from ...numpy.linalg import det, inv\n@_wraps(osp_stats.multivariate_normal.logpdf)\ndef logpdf(x, mean, cov):\n+ # TODO(mattjj): osp_stats.multivariate_normal.logpdf doesn't like being fed\n+ # empty-shape arrays, so we can't use _promote_args_like as written; consider\n+ # revising the dtype promotion logic here if it's an issue.\n# x, mean, cov = _promote_args_like(osp_stats.multivariate_normal.logpdf, x, mean, cov)\nx = x.astype(cov.dtype)\nmean = mean.astype(cov.dtype)\n" } ]
Python
Apache License 2.0
google/jax
todo in multivariate_normal.logpdf dtype promotion
260,335
12.02.2019 07:26:32
28,800
adaea811fc8ea32e7992cdf9d4483b3bc95c0768
fix transpose batching rule bug, add tests
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1722,7 +1722,7 @@ def _transpose_shape_rule(operand, permutation):\ndef _transpose_batch_rule(batched_args, batch_dims, permutation):\noperand, = batched_args\nbdim, = batch_dims\n- perm = tuple(onp.insert(onp.add(permutation, 1), bdim, 0))\n+ perm = (bdim,) + tuple(i if i < bdim else i+1 for i in permutation)\nreturn transpose(operand, perm), 0\ntranspose_p = standard_primitive(_transpose_shape_rule, _input_dtype,\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -771,6 +771,28 @@ class BatchingTest(jtu.JaxTestCase):\nexpected = onp.stack([f(a[:, i, :]) for i in range(a.shape[1])], axis=1)\nassert onp.all(ans == expected)\n+ def testTranspose(self):\n+ x = onp.arange(4 * 3 * 3).reshape((4, 3, 3))\n+ ans = vmap(lambda x: x + x.T)(x)\n+ expected = x + onp.swapaxes(x, -1, -2)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testTransposePermutation(self):\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 0, 2)))(x)\n+ expected = onp.transpose(x, (0, 2, 1, 3))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 2, 0)))(x)\n+ expected = onp.transpose(x, (0, 2, 3, 1))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((3, 4, 6, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 2, 0)), in_axes=2)(x)\n+ expected = onp.transpose(x, (2, 1, 3, 0))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
fix transpose batching rule bug, add tests
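The one-line fix above rebuilds the batched permutation by shifting every user-supplied index at or past the batch dimension up by one (the batch axis was inserted in front of them) and placing the batch axis first. A standalone sketch of that index arithmetic, checked against the permutations exercised in the new tests:

```python
# Standalone sketch of the permutation adjustment in the rule above; bdim is the
# position of the vmapped axis in the batched operand.
def batched_perm(permutation, bdim):
    return (bdim,) + tuple(i if i < bdim else i + 1 for i in permutation)

assert batched_perm((1, 0, 2), bdim=0) == (0, 2, 1, 3)  # consistent with the in_axes=0 case
assert batched_perm((1, 2, 0), bdim=2) == (2, 1, 3, 0)  # consistent with the in_axes=2 case
```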
260,335
12.02.2019 11:32:25
28,800
34b07d0ea4e17033dbb2ca7dd65a97121929372d
handle jit-in-while (const handling in xla.py)
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -135,19 +135,22 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nenv = {}\nconsts_env = dict(zip(jaxpr.constvars, const_vals))\nwrite(core.unitvar, c.Tuple())\n+ if const_vals:\nmap(write, jaxpr.constvars, map(c.Constant, const_vals))\nmap(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n+ else:\n+ map(write, tuple(jaxpr.constvars) + tuple(jaxpr.freevars), map(c.ParameterWithShape, freevar_shapes))\nmap(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\nin_nodes = map(read, eqn.invars)\nin_shapes = map(c.GetShape, in_nodes)\nsubcs = [jaxpr_computation(subjaxpr,\n- [consts_env[b] for b in const_bindings],\n- map(c.GetShape, map(read, freevar_bindings)),\n+ [],\n+ map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n*in_shapes)\nfor subjaxpr, const_bindings, freevar_bindings\nin eqn.bound_subjaxprs]\n- subfuns = [(subc, tuple(map(read, freevar_bindings)))\n+ subfuns = [(subc, tuple(map(read, const_bindings + freevar_bindings)))\nfor subc, (_, _, freevar_bindings)\nin zip(subcs, eqn.bound_subjaxprs)]\nans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1142,6 +1142,38 @@ class LaxTest(jtu.JaxTestCase):\nself.assertEqual(cloop(3, limit, 1), limit - 3)\nassert not effect[0]\n+ def testWhileWithClosureJit(self):\n+\n+ def loop(init, local_limit, inc):\n+\n+ def loop_cond(state):\n+ pos, _ = state\n+ return lax.lt(pos, local_limit)\n+\n+ def loop_body(state):\n+ effect[0] = True\n+ pos, count = state\n+ f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))\n+ return api.jit(f)(pos, inc)\n+\n+ result = lax._while_loop(loop_cond, loop_body, (init, 0))\n+ _, count = result\n+ return count\n+\n+ cloop = api.jit(loop)\n+\n+ limit = 10\n+ effect = [False]\n+ self.assertEqual(loop(2, limit, 1), limit - 2)\n+ assert effect[0]\n+ effect[0] = False\n+ self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ assert effect[0]\n+ effect[0] = False\n+ self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ self.assertEqual(cloop(3, limit, 1), limit - 3)\n+ assert not effect[0]\n+\ndef testNestedWhileWithDynamicUpdateSlice(self):\nnum = 5\n" } ]
Python
Apache License 2.0
google/jax
handle jit-in-while (const handling in xla.py)
260,335
12.02.2019 12:03:58
28,800
2d6b0fca706dbd0e387fcf7585b5c420fc36d604
fix list comprehension bug (py3 caught it!)
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -151,7 +151,7 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nfor subjaxpr, const_bindings, freevar_bindings\nin eqn.bound_subjaxprs]\nsubfuns = [(subc, tuple(map(read, const_bindings + freevar_bindings)))\n- for subc, (_, _, freevar_bindings)\n+ for subc, (_, const_bindings, freevar_bindings)\nin zip(subcs, eqn.bound_subjaxprs)]\nans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\nout_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]\n" } ]
Python
Apache License 2.0
google/jax
fix list comprehension bug (py3 caught it!)
260,335
12.02.2019 12:47:14
28,800
63f757c9692eb9f5844216ca0bdaf0bc8f731ae3
skip scipy.stats.multivariate_normal test gpu/tpu
[ { "change_type": "MODIFY", "old_path": "tests/scipy_stats_test.py", "new_path": "tests/scipy_stats_test.py", "diff": "@@ -102,6 +102,8 @@ class LaxBackedScipyStatsTests(jtu.JaxTestCase):\n# TODO: currently it ignores the argument \"shapes\" and only tests dim=4\n@genNamedParametersNArgs(3, jtu.rand_default())\n+ # TODO(phawkins): enable when there is an LU implementation for GPU/TPU.\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\ndef testMultivariateNormalLogPdf(self, rng, shapes, dtypes):\nscipy_fun = osp_stats.multivariate_normal.logpdf\nlax_fun = lsp_stats.multivariate_normal.logpdf\n" } ]
Python
Apache License 2.0
google/jax
skip scipy.stats.multivariate_normal test gpu/tpu
260,335
12.02.2019 15:42:06
28,800
573f04752574890dc8a5f5e79a907bddaddf6191
tweak linearize user-level api: *args not args
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -362,7 +362,7 @@ def linearize(traceable, *primals):\nlifted_jvp = partial(lift_linearized, jaxpr, consts, (in_trees, out_tree), out_pval)\nreturn out_primal_py, lifted_jvp\n-def lift_linearized(jaxpr, consts, io_tree, out_pval, py_args):\n+def lift_linearized(jaxpr, consts, io_tree, out_pval, *py_args):\ndef fun(*args):\nprimals = pack(args) # doesn't matter what these are-they'll be ignored\ntangents = pack(args)\n" }, { "change_type": "MODIFY", "old_path": "tests/core_test.py", "new_path": "tests/core_test.py", "diff": "@@ -123,7 +123,7 @@ test_specs_base = [\ndef jvp_unlinearized(f, primals, tangents):\nout, jvp = linearize(f, *primals)\n- return out, jvp(tangents)\n+ return out, jvp(*tangents)\ntest_specs = []\nfor ts in test_specs_base:\n" } ]
Python
Apache License 2.0
google/jax
tweak linearize user-level api: *args not args
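After the change above, the jvp function returned by `linearize` takes its tangents unpacked rather than as a single tuple, matching the updated call in `core_test`. A hypothetical call illustrating the new calling convention:

```python
from jax import linearize
import jax.numpy as np

# Hedged usage sketch: the returned f_jvp is now called with *tangents.
f = lambda x, z: np.sin(x) * z
y, f_jvp = linearize(f, 3.0, 2.0)
y_dot = f_jvp(1.0, 0.0)   # tangents splatted as positional args, one per primal
```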
260,335
12.02.2019 19:56:00
28,800
0aae62c7c45cbafbe8a6547530eeb20bb97d4532
add test for dtype promotion against scalars
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1210,6 +1210,11 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nx = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash\nself.assertEqual(x[0, 0], 1)\n+ def testScalarDtypePromotion(self):\n+ orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype\n+ jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype\n+ self.assertEqual(orig_numpy_result, jax_numpy_result)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add test for dtype promotion against scalars
260,335
13.02.2019 08:06:37
28,800
cad7db762ba0aeb87ad42a115b9fde7cfcfd5bed
improve numpy dtype promo logic on Python scalars
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -571,7 +571,8 @@ def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):\nslice_sizes = list(operand.shape)\naxis = int(axis)\n- start_indices[axis] = reshape(rem(start_index, operand.shape[axis]), [1])\n+ axis_size = onp.array(operand.shape[axis], start_index.dtype)\n+ start_indices[axis] = reshape(rem(start_index, axis_size), [1])\nslice_sizes[axis] = int(slice_size)\nstart_indices = concatenate(start_indices, 0)\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -146,12 +146,11 @@ def _promote_dtypes(*args):\nif len(args) < 2:\nreturn args\nelse:\n- from_dtypes = (_dtype(x) for x in args)\n+ from_dtypes = (x if type(x) in (int, float) else _dtype(x) for x in args)\nto_dtype = xla_bridge.canonicalize_dtype(result_type(*from_dtypes))\nreturn [lax.convert_element_type(x, to_dtype)\nif _dtype(x) != to_dtype else x for x in args]\n-\ndef _promote_to_result_dtype(op, *args):\n\"\"\"Convenience function to promote args directly to the op's result dtype.\"\"\"\nto_dtype = _result_dtype(op, *args)\n@@ -1775,7 +1774,7 @@ def take(a, indices, axis=None, out=None, mode=None):\n# TODO(phawkins): we have no way to report out of bounds errors yet.\nraise NotImplementedError(\"The 'raise' mode to np.take is not supported.\")\nelif mode == \"wrap\":\n- indices = mod(indices, a.shape[axis])\n+ indices = mod(indices, onp.array(a.shape[axis], _dtype(indices)))\nelif mode != \"clip\" and mode is not None:\nraise ValueError(\"Invalid mode '{}' for np.take\".format(mode))\n@@ -1834,7 +1833,7 @@ def _rewriting_take(arr, idx, axis=0):\nif isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx):\nreturn lax.index_in_dim(arr, idx, axis, False)\nelif isinstance(abstract_idx, ShapedArray) and _int(abstract_idx):\n- idx = mod(idx, arr.shape[axis])\n+ idx = mod(idx, onp.array(arr.shape[axis], _dtype(idx)))\nreturn lax.dynamic_index_in_dim(arr, idx, axis, False)\n# Handle slice index (only static, otherwise an error is raised)\n@@ -1903,7 +1902,8 @@ def _rewriting_take(arr, idx, axis=0):\n# The indexer is just a single integer array.\nidx = [idx]\n- flat_idx = tuple([mod(ravel(x), arr.shape[i]) for i, x in enumerate(idx)])\n+ flat_idx = tuple([mod(ravel(x), onp.array(arr.shape[i], _dtype(x)))\n+ for i, x in enumerate(idx)])\n# TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n# eliminate the reshape here.\nout = lax.index_take(arr, flat_idx, tuple(range(len(idx))))\n@@ -1921,7 +1921,7 @@ def _rewriting_take(arr, idx, axis=0):\nidx_advanced, axes = zip(*advanced_pairs)\nidx_advanced = broadcast_arrays(*idx_advanced)\n- flat_idx = tuple(mod(ravel(x), arr_sliced.shape[i])\n+ flat_idx = tuple(mod(ravel(x), onp.array(arr_sliced.shape[i], _dtype(x)))\nfor i, x in zip(axes, idx_advanced))\n# TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n# eliminate the reshape here.\n" } ]
Python
Apache License 2.0
google/jax
improve numpy dtype promo logic on Python scalars
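The fix above feeds bare Python scalars straight into `result_type` so numpy's weak-scalar promotion applies, instead of first materializing them at a 64-bit dtype. A plain-numpy illustration of the difference it makes:

```python
import numpy as onp

# Plain numpy sketch of the behavior the change above relies on: a Python scalar
# participates weakly in promotion, while a concrete 64-bit dtype would not.
print(onp.result_type(onp.float32, 1.0))          # float32 -- scalar passed as a value
print(onp.result_type(onp.float32, onp.float64))  # float64 -- promoting via the scalar's own 64-bit dtype
```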
260,335
13.02.2019 08:14:32
28,800
9425fa812aa6a8e7f927855396299868c74c5f56
add 'long' and 'complex' to pyval promotion logic
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -146,7 +146,8 @@ def _promote_dtypes(*args):\nif len(args) < 2:\nreturn args\nelse:\n- from_dtypes = (x if type(x) in (int, float) else _dtype(x) for x in args)\n+ from_dtypes = (x if type(x) in (int, float, long, complex) else _dtype(x)\n+ for x in args)\nto_dtype = xla_bridge.canonicalize_dtype(result_type(*from_dtypes))\nreturn [lax.convert_element_type(x, to_dtype)\nif _dtype(x) != to_dtype else x for x in args]\n" } ]
Python
Apache License 2.0
google/jax
add 'long' and 'complex' to pyval promotion logic
260,335
13.02.2019 08:15:48
28,800
ea9c31134988fc05a7fe289ef587c4ffa9a6172c
remove 'long' because it's not in py3
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -146,7 +146,7 @@ def _promote_dtypes(*args):\nif len(args) < 2:\nreturn args\nelse:\n- from_dtypes = (x if type(x) in (int, float, long, complex) else _dtype(x)\n+ from_dtypes = (x if type(x) in (int, float, complex) else _dtype(x)\nfor x in args)\nto_dtype = xla_bridge.canonicalize_dtype(result_type(*from_dtypes))\nreturn [lax.convert_element_type(x, to_dtype)\n" } ]
Python
Apache License 2.0
google/jax
remove 'long' because it's not in py3
260,335
13.02.2019 08:25:11
28,800
8df660e9ea29ec3a23abdd80362af17962326a7d
use more _const and _constant_like helpers
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -571,7 +571,7 @@ def dynamic_slice_in_dim(operand, start_index, slice_size, axis=0):\nslice_sizes = list(operand.shape)\naxis = int(axis)\n- axis_size = onp.array(operand.shape[axis], start_index.dtype)\n+ axis_size = _const(start_index, operand.shape[axis])\nstart_indices[axis] = reshape(rem(start_index, axis_size), [1])\nslice_sizes[axis] = int(slice_size)\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1775,7 +1775,7 @@ def take(a, indices, axis=None, out=None, mode=None):\n# TODO(phawkins): we have no way to report out of bounds errors yet.\nraise NotImplementedError(\"The 'raise' mode to np.take is not supported.\")\nelif mode == \"wrap\":\n- indices = mod(indices, onp.array(a.shape[axis], _dtype(indices)))\n+ indices = mod(indices, _constant_like(indices, a.shape[axis]))\nelif mode != \"clip\" and mode is not None:\nraise ValueError(\"Invalid mode '{}' for np.take\".format(mode))\n@@ -1834,7 +1834,7 @@ def _rewriting_take(arr, idx, axis=0):\nif isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx):\nreturn lax.index_in_dim(arr, idx, axis, False)\nelif isinstance(abstract_idx, ShapedArray) and _int(abstract_idx):\n- idx = mod(idx, onp.array(arr.shape[axis], _dtype(idx)))\n+ idx = mod(idx, _constant_like(idx, arr.shape[axis]))\nreturn lax.dynamic_index_in_dim(arr, idx, axis, False)\n# Handle slice index (only static, otherwise an error is raised)\n@@ -1903,7 +1903,7 @@ def _rewriting_take(arr, idx, axis=0):\n# The indexer is just a single integer array.\nidx = [idx]\n- flat_idx = tuple([mod(ravel(x), onp.array(arr.shape[i], _dtype(x)))\n+ flat_idx = tuple([mod(ravel(x), _constant_like(x, arr.shape[i]))\nfor i, x in enumerate(idx)])\n# TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n# eliminate the reshape here.\n@@ -1922,7 +1922,7 @@ def _rewriting_take(arr, idx, axis=0):\nidx_advanced, axes = zip(*advanced_pairs)\nidx_advanced = broadcast_arrays(*idx_advanced)\n- flat_idx = tuple(mod(ravel(x), onp.array(arr_sliced.shape[i], _dtype(x)))\n+ flat_idx = tuple(mod(ravel(x), _constant_like(x, arr_sliced.shape[i]))\nfor i, x in zip(axes, idx_advanced))\n# TODO(mattjj): if we instead lower directly to lax.gather, we can probably\n# eliminate the reshape here.\n" } ]
Python
Apache License 2.0
google/jax
use more _const and _constant_like helpers
260,335
13.02.2019 08:31:48
28,800
bbf33709a6f431747c888878917b315c15c83c99
switch builtin numeric types on six.PY3
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -146,12 +146,17 @@ def _promote_dtypes(*args):\nif len(args) < 2:\nreturn args\nelse:\n- from_dtypes = (x if type(x) in (int, float, complex) else _dtype(x)\n+ from_dtypes = (x if type(x) in _builtin_numeric_types else _dtype(x)\nfor x in args)\nto_dtype = xla_bridge.canonicalize_dtype(result_type(*from_dtypes))\nreturn [lax.convert_element_type(x, to_dtype)\nif _dtype(x) != to_dtype else x for x in args]\n+if six.PY3:\n+ _builtin_numeric_types = (int, float, complex)\n+else:\n+ _builtin_numeric_types = (int, float, long, complex)\n+\ndef _promote_to_result_dtype(op, *args):\n\"\"\"Convenience function to promote args directly to the op's result dtype.\"\"\"\nto_dtype = _result_dtype(op, *args)\n" } ]
Python
Apache License 2.0
google/jax
switch builtin numeric types on six.PY3
260,335
13.02.2019 08:52:42
28,800
fcdf225183f651f9cdacdeb4c4efc330975ea200
another dtype promotion issue
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1215,6 +1215,14 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\njax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype\nself.assertEqual(orig_numpy_result, jax_numpy_result)\n+ def testSymmetrizeDtypePromotion(self):\n+ x = onp.eye(3, dtype=onp.float32)\n+ orig_numpy_result = ((x + x.T) / 2).dtype\n+\n+ x = lnp.eye(3, dtype=lnp.float32)\n+ jax_numpy_result = ((x + x.T) / 2).dtype\n+ self.assertEqual(orig_numpy_result, jax_numpy_result)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
another dtype promotion issue
260,335
13.02.2019 08:59:21
28,800
2865cfac075aebf162b7ae20d8d19a05f5c1aff7
fix shape/dtype promotion order in some numpy funs
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -326,8 +326,8 @@ logical_xor = _logical_op(onp.logical_xor, lax.bitwise_xor)\n@_wraps(onp.true_divide)\ndef true_divide(x1, x2):\n- x1, x2 = _promote_shapes(x1, x2)\nresult_dtype = _result_dtype(onp.true_divide, x1, x2)\n+ x1, x2 = _promote_shapes(x1, x2)\nreturn lax.div(lax.convert_element_type(x1, result_dtype),\nlax.convert_element_type(x2, result_dtype))\n@@ -390,7 +390,7 @@ def _float_divmod(x1, x2):\n@_wraps(onp.logaddexp)\ndef logaddexp(x1, x2):\n- x1, x2 = _promote_to_result_dtype(onp.logaddexp, *_promote_shapes(x1, x2))\n+ x1, x2 = _promote_shapes(*_promote_to_result_dtype(onp.logaddexp, x1, x2))\namax = lax.max(x1, x2)\nreturn lax.add(amax, lax.log(lax.add(lax.exp(lax.sub(x1, amax)),\nlax.exp(lax.sub(x2, amax)))))\n@@ -398,7 +398,7 @@ def logaddexp(x1, x2):\n@_wraps(onp.logaddexp2)\ndef logaddexp2(x1, x2):\n- x1, x2 = _promote_to_result_dtype(onp.logaddexp2, *_promote_shapes(x1, x2))\n+ x1, x2 = _promote_shapes(*_promote_to_result_dtype(onp.logaddexp2, x1, x2))\namax = lax.max(x1, x2)\nreturn lax.add(amax, log2(lax.add(exp2(lax.sub(x1, amax)),\nexp2(lax.sub(x2, amax)))))\n" } ]
Python
Apache License 2.0
google/jax
fix shape/dtype promotion order in some numpy funs
260,335
11.02.2019 16:18:13
28,800
78fd9e1a10b14be6e55801f791b79cb4360199d7
debug cholesky grad, remove stale dot_general check
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -831,10 +831,12 @@ standard_binop = partial(binop, _input_dtype)\n# a broadcast). but saving the shape info with the primitives isn't great either\n# because then we can't trace these ops without shape data.\ndef _brcast(x, *others):\n- # used in jvprules to make binop broadcasting explicit for transposability.\n- # requires shape info during jvp tracing, which isn't strictly necessary.\n- shapes = list(filter(None, map(onp.shape, (x,) + others)))\n- shape = tuple(shapes and onp.max(shapes, axis=0))\n+ # Used in jvprules to make binop broadcasting explicit for transposability.\n+ # Requires shape info during jvp tracing, which isn't strictly necessary.\n+ # We don't need full numpy broadcasting, but otherwise the logic is the same\n+ # so we reuse the broadcast_shapes function after filtering out scalars.\n+ shapes = tuple(filter(None, map(onp.shape, (x,) + others)))\n+ shape = shapes and broadcast_shapes(*shapes)\nif onp.shape(x) != shape:\nreturn _brcast_to(x, shape)\nelse:\n@@ -1355,10 +1357,6 @@ def _dot_general_shape_rule(lhs, rhs, dimension_numbers):\nmsg = (\"dot_general requires rhs batch dimensions to precede contracting \"\n\"and non-contracting dimensions, got rhs_batch {}.\")\nraise TypeError(msg.format(rhs_batch))\n- if not len(lhs_contracting) == len(rhs_contracting) == 1:\n- msg = (\"dot_general accepts exactly one lhs_contracting and \"\n- \"rhs_contracting dimension, got {} and {}.\")\n- raise TypeError(msg.format(lhs_contracting, rhs_contracting))\nlhs_contracting_shape = onp.take(lhs.shape, lhs_contracting)\nrhs_contracting_shape = onp.take(rhs.shape, rhs_contracting)\nif not onp.all(onp.equal(lhs_contracting_shape, rhs_contracting_shape)):\n" }, { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -34,7 +34,10 @@ from jaxlib import lapack\n# traceables\n-def cholesky(x): return cholesky_p.bind(x)\n+def cholesky(x, symmetrize_input=True):\n+ if symmetrize_input:\n+ x = symmetrize(x)\n+ return cholesky_p.bind(x)\ndef eigh(x, lower=True): return eigh_p.bind(x, lower=lower)\n@@ -60,8 +63,9 @@ def triangular_solve(a, b, left_side=False, lower=False, transpose_a=False,\n# utilities\n-def _T(x):\n- return np.swapaxes(x, -1, -2)\n+def _T(x): return np.swapaxes(x, -1, -2)\n+def _H(x): return np.conj(_T(x))\n+def symmetrize(x): return (x + _H(x)) / 2\n# primitives\n@@ -76,11 +80,10 @@ def cholesky_jvp_rule(primals, tangents):\nL = cholesky_p.bind(x)\n# Forward-mode rule from https://arxiv.org/pdf/1602.07527.pdf\n- sigma_dot = (sigma_dot + _T(sigma_dot)) / 2\n- phi = lambda X: np.tril(X) / (1 + np.eye(x.shape[-1]))\n+ phi = lambda X: np.tril(X) / (1 + np.eye(X.shape[-1], dtype=X.dtype))\ntmp = triangular_solve(L, sigma_dot,\nleft_side=False, transpose_a=True, lower=True)\n- L_dot = np.matmul(L, phi(triangular_solve(\n+ L_dot = lax.batch_matmul(L, phi(triangular_solve(\nL, tmp, left_side=True, transpose_a=False, lower=True)))\nreturn L, L_dot\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/linalg.py", "new_path": "jax/numpy/linalg.py", "diff": "@@ -33,7 +33,6 @@ _EXPERIMENTAL_WARNING = \"numpy.linalg support is experimental and may cause sile\n_T = lambda x: np.swapaxes(x, -1, -2)\n-\ndef _promote_arg_dtypes(*args):\n\"\"\"Promotes `args` to a common inexact type.\"\"\"\ndef _to_inexact_type(type):\n@@ -47,7 +46,6 @@ def _promote_arg_dtypes(*args):\nreturn args\n-\n@_wraps(onp.linalg.cholesky)\ndef 
cholesky(a):\nwarnings.warn(_EXPERIMENTAL_WARNING)\n" }, { "change_type": "MODIFY", "old_path": "jax/scipy/linalg.py", "new_path": "jax/scipy/linalg.py", "diff": "@@ -36,7 +36,7 @@ def cholesky(a, lower=False, overwrite_a=False, check_finite=True):\nwarnings.warn(_EXPERIMENTAL_WARNING)\ndel overwrite_a, check_finite\na = np_linalg._promote_arg_dtypes(np.asarray(a))\n- l = lax_linalg.cholesky(a if lower else np.conj(_T(a)))\n+ l = lax_linalg.cholesky(a if lower else np.conj(_T(a)), symmetrize_input=False)\nreturn l if lower else np.conj(_T(l))\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -793,6 +793,45 @@ class BatchingTest(jtu.JaxTestCase):\nexpected = onp.transpose(x, (2, 1, 3, 0))\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def testIssue354(self):\n+ psd_mat = onp.random.randn(20, 10)\n+ psd_mat = psd_mat.T.dot(psd_mat)\n+ vec = onp.random.randn(10)\n+\n+ def f(scale):\n+ scaled_mat = scale * psd_mat\n+ chol = np.linalg.cholesky(scaled_mat)\n+ return -0.5 * np.sum((np.einsum('ij,j->i', chol, vec))**2)\n+ vmapped_f = vmap(f)\n+ vmapped_f_grad = grad(lambda x: np.sum(vmapped_f(x)))\n+\n+ scales = onp.array([[0.1], [0.2], [0.3], [0.4], [0.5]])\n+ ans = vmapped_f_grad(scales) # don't crash!\n+ expected = onp.stack([grad(f)(scale) for scale in scales])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testTranspose(self):\n+ x = onp.arange(4 * 3 * 3).reshape((4, 3, 3))\n+ ans = vmap(lambda x: x + x.T)(x)\n+ expected = x + onp.swapaxes(x, -1, -2)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testTransposePermutation(self):\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 0, 2)))(x)\n+ expected = onp.transpose(x, (0, 2, 1, 3))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 2, 0)))(x)\n+ expected = onp.transpose(x, (0, 2, 3, 1))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ x = onp.arange(6 * 3 * 4 * 5).reshape((3, 4, 6, 5))\n+ ans = vmap(lambda x: np.transpose(x, (1, 2, 0)), in_axes=2)(x)\n+ expected = onp.transpose(x, (2, 1, 3, 0))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -61,14 +61,16 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nfor rng in [jtu.rand_default()]))\ndef testCholesky(self, shape, dtype, rng):\ndef args_maker():\n- a = rng(shape, dtype)\n+ factor_shape = shape[:-1] + (2 * shape[-1],)\n+ a = rng(factor_shape, dtype)\nreturn [onp.matmul(a, np.conj(T(a)))]\nself._CheckAgainstNumpy(onp.linalg.cholesky, np.linalg.cholesky, args_maker,\ncheck_dtypes=True, tol=1e-3)\nself._CompileAndCheck(np.linalg.cholesky, args_maker, check_dtypes=True)\n- jtu.check_grads(np.linalg.cholesky, args_maker(), 1, rtol=1e-1)\n+ if onp.finfo(dtype).bits == 64:\n+ jtu.check_grads(np.linalg.cholesky, args_maker(), order=2)\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n@@ -302,7 +304,6 @@ class NumpyLinalgTest(jtu.JaxTestCase):\ncheck_dtypes=True, tol=1e-3)\nself._CompileAndCheck(np.linalg.solve, args_maker, check_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n\"_shape={}\".format(jtu.format_shape_dtype_string(shape, dtype)),\n@@ -376,7 +377,6 @@ class 
ScipyLinalgTest(jtu.JaxTestCase):\nargs_maker, check_dtypes=True, tol=1e-3)\nself._CompileAndCheck(jsp.linalg.lu_factor, args_maker, check_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n\"_lhs={}_rhs={}_sym_pos={}_lower={}\".format(\n@@ -416,7 +416,6 @@ class ScipyLinalgTest(jtu.JaxTestCase):\ncheck_dtypes=True, tol=1e-3)\nself._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True)\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n\"_lhs={}_rhs={}_lower={}_transposea={}\".format(\n@@ -484,5 +483,6 @@ class ScipyLinalgTest(jtu.JaxTestCase):\ntrans=1 if transpose_a else 0)\njtu.check_grads(f, (A, B), 2, rtol=1e-3)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
debug cholesky grad, remove stale dot_general check
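The `phi` helper used in the Cholesky JVP above keeps only the lower triangle and halves the diagonal, following the forward-mode rule in the arXiv note cited in the diff. A quick numpy check of that construction:

```python
import numpy as onp

# Standalone check of the phi() construction above: tril keeps the lower
# triangle and dividing by (1 + I) halves the diagonal entries only.
def phi(X):
    return onp.tril(X) / (1 + onp.eye(X.shape[-1], dtype=X.dtype))

X = onp.arange(1.0, 10.0).reshape(3, 3)
assert onp.allclose(onp.diag(phi(X)), onp.diag(X) / 2)
assert onp.allclose(onp.triu(phi(X), k=1), 0)
```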
260,335
13.02.2019 09:23:42
28,800
25169ef7fc2a4605f1fc508ad1a2184193c1d4f0
remove duplicate tests from a bad merge
[ { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -810,28 +810,6 @@ class BatchingTest(jtu.JaxTestCase):\nexpected = onp.stack([grad(f)(scale) for scale in scales])\nself.assertAllClose(ans, expected, check_dtypes=False)\n- def testTranspose(self):\n- x = onp.arange(4 * 3 * 3).reshape((4, 3, 3))\n- ans = vmap(lambda x: x + x.T)(x)\n- expected = x + onp.swapaxes(x, -1, -2)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- def testTransposePermutation(self):\n- x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n- ans = vmap(lambda x: np.transpose(x, (1, 0, 2)))(x)\n- expected = onp.transpose(x, (0, 2, 1, 3))\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))\n- ans = vmap(lambda x: np.transpose(x, (1, 2, 0)))(x)\n- expected = onp.transpose(x, (0, 2, 3, 1))\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- x = onp.arange(6 * 3 * 4 * 5).reshape((3, 4, 6, 5))\n- ans = vmap(lambda x: np.transpose(x, (1, 2, 0)), in_axes=2)(x)\n- expected = onp.transpose(x, (2, 1, 3, 0))\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
remove duplicate tests from a bad merge
260,335
13.02.2019 09:55:36
28,800
0ff98a74eb6ccfb6f567f9639cd515fadab2aa5b
add random.fold_in, update mnist_vae.py loops
[ { "change_type": "MODIFY", "old_path": "examples/mnist_vae.py", "new_path": "examples/mnist_vae.py", "diff": "@@ -89,7 +89,6 @@ if __name__ == \"__main__\":\nnum_epochs = 100\nbatch_size = 32\nnrow, ncol = 10, 10 # sampled image grid size\n- rng = random.PRNGKey(0)\ntest_rng = random.PRNGKey(1) # fixed prng key for evaluation\nimfile = os.path.join(os.getenv(\"TMPDIR\", \"/tmp/\"), \"mnist_vae_{:03d}.png\")\n@@ -111,17 +110,13 @@ if __name__ == \"__main__\":\n@jit\ndef run_epoch(rng, opt_state):\n- def body_fun(i, loop_carry):\n- (rng, opt_state, images) = loop_carry\n- rng, elbo_rng, data_rng = random.split(rng, 3)\n- batch = binarize_batch(data_rng, i, images)\n+ def body_fun(i, opt_state):\n+ elbo_rng, data_rng = random.split(random.fold_in(rng, i))\n+ batch = binarize_batch(data_rng, i, train_images)\nloss = lambda params: -elbo(elbo_rng, params, batch) / batch_size\ng = grad(loss)(optimizers.get_params(opt_state))\n- loop_carry = rng, opt_update(i, g, opt_state), images\n- return loop_carry\n- init_val = rng, opt_state, train_images\n- _, opt_state, _ = lax.fori_loop(0, num_batches, body_fun, init_val)\n- return opt_state\n+ return opt_update(i, g, opt_state)\n+ return lax.fori_loop(0, num_batches, body_fun, opt_state)\n@jit\ndef evaluate(opt_state, images):\n@@ -135,8 +130,7 @@ if __name__ == \"__main__\":\nopt_state = opt_init(init_params)\nfor epoch in range(num_epochs):\ntic = time.time()\n- rng, epoch_rng = random.split(rng)\n- opt_state = run_epoch(epoch_rng, opt_state)\n+ opt_state = run_epoch(random.PRNGKey(epoch), opt_state)\ntest_elbo, sampled_images = evaluate(opt_state, test_images)\nprint(\"{: 3d} {} ({:.3f} sec)\".format(epoch, test_elbo, time.time() - tic))\nplt.imsave(imfile.format(epoch), sampled_images, cmap=plt.cm.gray)\n" }, { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -143,20 +143,36 @@ def threefry_2x32(keypair, count):\n@partial(jit, static_argnums=(1,))\ndef split(key, num=2):\n- \"\"\"Splits a PRNG key pair of 32bit unsigned integers into `num` new key pairs.\n+ \"\"\"Splits a PRNG key into `num` new keys by adding a leading axis.\nArgs:\n- key: a PRNGKey used as the random key.\n+ key: a PRNGKey (an array with shape (2,) and dtype uint32).\nnum: optional, a positive integer indicating the number of keys to produce\n(default 2).\nReturns:\n- A tuple of length `num` of new PRNGKey instances.\n+ An array with shape (num, 2) and dtype uint32 representing `num` new keys.\n\"\"\"\ncounts = lax.tie_in(key, lax.iota(onp.uint32, num * 2))\nreturn lax.reshape(threefry_2x32(key, counts), (num, 2))\n+@partial(jit, static_argnums=(1,))\n+def fold_in(key, data):\n+ \"\"\"Folds in data to a PRNG key to form a new PRNG key.\n+\n+ Args:\n+ key: a PRNGKey (an array with shape (2,) and dtype uint32).\n+ data: an integer representing data to be folded in to the key.\n+\n+ Returns:\n+ A new PRNGKey that is a deterministic function of the inputs and is\n+ statistically safe for producing a stream of new pseudo-random values.\n+ \"\"\"\n+ key2 = lax.tie_in(key, PRNGKey(data))\n+ return threefry_2x32(key, key2)\n+\n+\ndef _random_bits(key, bit_width, shape):\n\"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\nif not is_prng_key(key):\n" }, { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -153,6 +153,11 @@ class LaxRandomTest(jtu.JaxTestCase):\nx = random.randint(random.PRNGKey(10003), (), 0, 0)\nassert x == 0\n+ def testFoldIn(self):\n+ 
key = random.PRNGKey(0)\n+ keys = [random.fold_in(key, i) for i in range(10)]\n+ assert onp.unique(onp.ravel(keys)).shape == (20,)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add random.fold_in, update mnist_vae.py loops
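The new `random.fold_in` lets the training loop above derive a deterministic per-iteration key from a fixed base key instead of threading split keys through the loop carry. A compressed sketch of the pattern from the `mnist_vae` diff (step count here is arbitrary):

```python
import jax.random as random

# Per-step keys derived from one base key, as in the updated body_fun above.
key = random.PRNGKey(0)
for i in range(3):
    elbo_rng, data_rng = random.split(random.fold_in(key, i))
    # ... use elbo_rng / data_rng for this step's randomness ...
```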
260,335
13.02.2019 09:56:53
28,800
89dc3eb88eeb7dfe92d68ae4ee0a7c986e7945e5
rename lax._while_loop -> lax.while_loop
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -415,7 +415,7 @@ def sort_key_val(keys, values, dimension=-1):\nsorted_keys, sorted_values = result\nreturn sorted_keys, sorted_values\n-def _while_loop(cond_fun, body_fun, init_val):\n+def while_loop(cond_fun, body_fun, init_val):\ninit_val_flat, in_tree = pytree_to_jaxtupletree(init_val)\nflat_body_fun, out_tree = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(body_fun), (in_tree,))\nflat_cond_fun, _ = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(cond_fun), (in_tree,))\n@@ -618,7 +618,7 @@ def fori_loop(lower, upper, body_fun, init_val):\n\"\"\"\n# state: (upper limit, index, loop value)\n# The `lt` and `add` functions are added to the namespace programmatically.\n- _, _, result = _while_loop(\n+ _, _, result = while_loop(\nlambda upper_i_x: lt(upper_i_x[1], upper_i_x[0]),\nlambda upper_i_x: (upper_i_x[0],\nadd(upper_i_x[1], onp.array(1, _dtype(upper_i_x[1]))),\n@@ -3341,7 +3341,7 @@ def subvals(lst, replace):\ndef _abstractify(x):\n- # abstractify wrapper used internally for primitives like _while_loop\n+ # abstractify wrapper used internally for primitives like while_loop\nif isinstance(x, core.Tracer):\nreturn pe.PartialVal((xla.abstractify(x.aval), core.unit))\nelse:\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1064,7 +1064,7 @@ class LaxTest(jtu.JaxTestCase):\nreturn (lax.add(pos, 1), lax.add(count, 1))\ndef loop(init):\n- result = lax._while_loop(loop_cond, loop_body, (init, 0))\n+ result = lax.while_loop(loop_cond, loop_body, (init, 0))\n_, count = result\nreturn count\n@@ -1087,7 +1087,7 @@ class LaxTest(jtu.JaxTestCase):\nreturn (num, lax.add(i, 1), inner_loop(i, count))\ninit_val = (num, 0, 0)\n- _, i, count = lax._while_loop(cond_fun, body_fun, init_val)\n+ _, i, count = lax.while_loop(cond_fun, body_fun, init_val)\nreturn (i, count)\ndef inner_loop(i, count): # pylint: disable=missing-docstring\n@@ -1100,7 +1100,7 @@ class LaxTest(jtu.JaxTestCase):\nreturn (i, lax.add(j, 1), lax.add(count, 1))\ninit_val = (i, 0, count)\n- _, _, count = lax._while_loop(cond_fun, body_fun, init_val)\n+ _, _, count = lax.while_loop(cond_fun, body_fun, init_val)\nreturn count\ncloop = api.jit(outer_loop)\n@@ -1124,7 +1124,7 @@ class LaxTest(jtu.JaxTestCase):\npos, count = state\nreturn (lax.add(pos, 1), lax.add(count, inc))\n- result = lax._while_loop(loop_cond, loop_body, (init, 0))\n+ result = lax.while_loop(loop_cond, loop_body, (init, 0))\n_, count = result\nreturn count\n@@ -1156,7 +1156,7 @@ class LaxTest(jtu.JaxTestCase):\nf = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))\nreturn api.jit(f)(pos, inc)\n- result = lax._while_loop(loop_cond, loop_body, (init, 0))\n+ result = lax.while_loop(loop_cond, loop_body, (init, 0))\n_, count = result\nreturn count\n@@ -1193,7 +1193,7 @@ class LaxTest(jtu.JaxTestCase):\nout = onp.zeros(arr.shape, dtype=arr.dtype)\ninit_val = (0, num, arr, out)\n- _, _, _, out = lax._while_loop(cond_fun, body_fun, init_val)\n+ _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\nreturn out\ndef inner_loop(i, arr, out): # pylint: disable=missing-docstring\n@@ -1210,7 +1210,7 @@ class LaxTest(jtu.JaxTestCase):\nreturn (i, lax.add(j, 1), arr, out)\ninit_val = (i, 0, arr, out)\n- _, _, _, out = lax._while_loop(cond_fun, body_fun, init_val)\n+ _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\nreturn out\ncloop = api.jit(outer_loop)\n@@ -1231,7 +1231,7 @@ class LaxTest(jtu.JaxTestCase):\nreturn 
(arr, num, lax.add(i, 1), lax.add(total, arr_i))\ninit_val = (arr, num, 0, 0.)\n- _, _, _, total = lax._while_loop(cond_fun, body_fun, init_val)\n+ _, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)\nreturn total\ncfun = api.jit(sum_first_n)\n" } ]
Python
Apache License 2.0
google/jax
rename lax._while_loop -> lax.while_loop
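With the underscore dropped, `lax.while_loop` is the public entry point. A minimal counting loop in the style of the updated tests above:

```python
import jax.lax as lax
from jax import jit

# Minimal use of the now-public lax.while_loop, mirroring the test loops above.
def count_up_to(init, limit):
    cond = lambda state: lax.lt(state[0], limit)
    body = lambda state: (lax.add(state[0], 1), lax.add(state[1], 1))
    _, count = lax.while_loop(cond, body, (init, 0))
    return count

count_up_to_jit = jit(count_up_to)   # also works under jit, as in the tests
```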
260,335
13.02.2019 19:42:47
28,800
11122bc8e3562e4964e4c090e4bcd7cfb17ba8be
improve jax.random docs
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-\"\"\"LAX-based pseudo-random number generators (PRNGs).\"\"\"\n+\"\"\"JAX pseudo-random number generators (PRNGs).\n+\n+The JAX PRNG system is based on \"Parallel random numbers: as easy as 1, 2, 3\"\n+(Salmon et al. 2011). For details on the design and its motivation, see:\n+\n+https://github.com/google/jax/blob/master/design_notes/prng.md\n+\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\n@@ -31,6 +37,16 @@ from jax import core\ndef PRNGKey(seed):\n+ \"\"\"Create a psuedo-random number generator (PRNG) key given an integer seed.\n+\n+ Args:\n+ seed: a 64- or 32-bit integer type used as the value of the key.\n+\n+ Returns:\n+ A PRNG key, which is modeled as an array of shape (2,) and dtype uint32. The\n+ key is constructed from a 64-bit seed by effectively bit-casting to a pair\n+ of uint32 values (or from a 32-bit seed by first padding out with zeros).\n+ \"\"\"\nif onp.shape(seed):\nraise TypeError(\"PRNGKey seed must be a scalar.\")\nconvert = lambda k: lax.reshape(lax.convert_element_type(k, onp.uint32), [1])\n@@ -43,7 +59,7 @@ def PRNGKey(seed):\nk2 = convert(lax.bitwise_and(seed, 0xFFFFFFFF))\nreturn lax.concatenate([k1, k2], 0)\n-def is_prng_key(key):\n+def _is_prng_key(key):\ntry:\nreturn key.shape == (2,) and key.dtype == onp.uint32\nexcept AttributeError:\n@@ -175,7 +191,7 @@ def fold_in(key, data):\ndef _random_bits(key, bit_width, shape):\n\"\"\"Sample uniform random bits of given width and shape using PRNG key.\"\"\"\n- if not is_prng_key(key):\n+ if not _is_prng_key(key):\nraise TypeError(\"_random_bits got invalid prng key.\")\nif bit_width not in (32, 64):\nraise TypeError(\"requires 32- or 64-bit field width.\")\n" } ]
Python
Apache License 2.0
google/jax
improve jax.random docs
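As the expanded docstrings above describe, a key is just a shape-(2,) uint32 array and `split` stacks new keys on a leading axis. A quick usage sketch:

```python
import jax.random as random

key = random.PRNGKey(42)        # shape (2,), dtype uint32
subkeys = random.split(key, 3)  # shape (3, 2): three new keys on a leading axis
k0, k1, k2 = subkeys
```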
260,335
13.02.2019 20:02:14
28,800
9a9c304644e0bb2a8e3a7bde04a3b6f81d8cf3b9
add version attribute following idea 3 here:
[ { "change_type": "MODIFY", "old_path": "jax/__init__.py", "new_path": "jax/__init__.py", "diff": "import os\nos.environ.setdefault('TF_CPP_MIN_LOG_LEVEL', '1')\n+version_file = os.path.join(os.path.abspath(os.path.dirname(__file__)),\n+ \"version.py\")\n+with open(version_file) as f:\n+ exec(f.read(), globals())\n+\nfrom jax.api import *\nimport jax.numpy as np # side-effecting import sets up operator overloads\n" }, { "change_type": "ADD", "old_path": null, "new_path": "jax/version.py", "diff": "+# Copyright 2018 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# https://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+__version__ = \"0.1.19\"\n" }, { "change_type": "MODIFY", "old_path": "setup.py", "new_path": "setup.py", "diff": "# limitations under the License.\nfrom setuptools import setup, find_packages\n+from os.path import join, dirname, abspath\n+\n+version_file = join(abspath(dirname(__file__)), \"jax\", \"version.py\")\n+with open('jax/version.py') as f:\n+ exec(f.read(), globals())\nsetup(\nname='jax',\n- version='0.1.19',\n+ version=__version__,\ndescription='Differentiate, compile, and transform Numpy code.',\nauthor='JAX team',\nauthor_email='jax-dev@google.com',\n" } ]
Python
Apache License 2.0
google/jax
add version attribute following idea 3 here: https://packaging.python.org/guides/single-sourcing-package-version/
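With `__version__` single-sourced in `jax/version.py` and exec'd both by `setup.py` and by the package at import time, the installed package and the sdist metadata report the same string:

```python
# Sketch of reading the single-sourced version back after import.
import jax
print(jax.__version__)   # e.g. "0.1.19", exec'd from jax/version.py at import time
```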
260,335
13.02.2019 20:05:15
28,800
910848afe1db366bbb8bdafe0047cc8dd3d6f69d
fix typo in random.py docstring
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -40,7 +40,7 @@ def PRNGKey(seed):\n\"\"\"Create a psuedo-random number generator (PRNG) key given an integer seed.\nArgs:\n- seed: a 64- or 32-bit integer type used as the value of the key.\n+ seed: a 64- or 32-bit integer used as the value of the key.\nReturns:\nA PRNG key, which is modeled as an array of shape (2,) and dtype uint32. The\n" } ]
Python
Apache License 2.0
google/jax
fix typo in random.py docstring
260,403
11.02.2019 23:26:26
28,800
8a84ae8d2a535630883648fb1596c65dd6729495
added jvp rule for eigh, tests
[ { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -39,7 +39,10 @@ def cholesky(x, symmetrize_input=True):\nx = symmetrize(x)\nreturn cholesky_p.bind(x)\n-def eigh(x, lower=True): return eigh_p.bind(x, lower=lower)\n+def eigh(x, lower=True, symmetrize=True):\n+ if symmetrize:\n+ x = (x + _H(x)) / 2 # orthogonal projection onto self-adjoint matrices\n+ return eigh_p.bind(x, lower=lower)\ndef lu(x): return lu_p.bind(x)\n@@ -146,10 +149,34 @@ def eigh_cpu_translation_rule(c, operand, lower):\nraise NotImplementedError(\n\"Only unbatched eigendecomposition is implemented on CPU\")\n+def eigh_jvp_rule(primals, tangents, lower):\n+ # Derivative for eigh in the simplest case of distinct eigenvalues.\n+ # Simple case from https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n+ # The general solution treating the case of degenerate eigenvalues is\n+ # considerably more complicated. Ambitious readers may refer to the general\n+ # methods at:\n+ # https://www.win.tue.nl/analysis/reports/rana06-33.pdf and\n+ # https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf\n+ a, = primals\n+ a_dot, = tangents\n+ v, w = eigh_p.bind((a + _H(a)) / 2.0, lower=lower)\n+ # for complex numbers we need eigenvalues to be full dtype of v, a:\n+ w = w.astype(a.dtype)\n+ eye_n = np.eye(a.shape[-1], dtype=a.dtype)\n+ # carefully build reciprocal delta-eigenvalue matrix, avoiding NaNs.\n+ Fmat = np.reciprocal(eye_n + w - w[..., np.newaxis]) - eye_n\n+ # eigh impl doesn't support batch dims, but future-proof the grad.\n+ dot = lax.dot if a.ndim == 2 else lax.batch_matmul\n+ vdag_adot_v = dot(dot(_H(v), a_dot), v)\n+ dv = dot(v, np.multiply(Fmat, vdag_adot_v))\n+ dw = np.diagonal(np.multiply(eye_n, vdag_adot_v))\n+ return core.pack((v, w)), core.pack((dv, dw))\n+\neigh_p = Primitive('eigh')\neigh_p.def_impl(eigh_impl)\neigh_p.def_abstract_eval(eigh_abstract_eval)\nxla.translations[eigh_p] = eigh_translation_rule\n+ad.primitive_jvps[eigh_p] = eigh_jvp_rule\nxla.backend_specific_translations['Host'][eigh_p] = eigh_cpu_translation_rule\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/linalg.py", "new_path": "jax/numpy/linalg.py", "diff": "@@ -93,7 +93,7 @@ def det(a):\n@_wraps(onp.linalg.eigh)\n-def eigh(a, UPLO=None):\n+def eigh(a, UPLO=None, symmetrize=True):\nif UPLO is None or UPLO == \"L\":\nlower = True\nelif UPLO == \"U\":\n@@ -103,7 +103,7 @@ def eigh(a, UPLO=None):\nraise ValueError(msg)\na = _promote_arg_dtypes(np.asarray(a))\n- v, w = lax_linalg.eigh(a, lower=lower)\n+ v, w = lax_linalg.eigh(a, lower=lower, symmetrize=symmetrize)\nreturn w, v\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -132,14 +132,39 @@ class NumpyLinalgTest(jtu.JaxTestCase):\na, = args_maker()\na = (a + onp.conj(a.T)) / 2\n- w, v = np.linalg.eigh(onp.tril(a) if lower else onp.triu(a), UPLO=uplo)\n-\n+ w, v = np.linalg.eigh(onp.tril(a) if lower else onp.triu(a),\n+ UPLO=uplo, symmetrize=False)\nself.assertTrue(norm(onp.eye(n) - onp.matmul(onp.conj(T(v)), v)) < 5)\nself.assertTrue(norm(onp.matmul(a, v) - w * v) < 30)\nself._CompileAndCheck(partial(np.linalg.eigh, UPLO=uplo), args_maker,\ncheck_dtypes=True)\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\":\n+ \"_shape={}_lower={}\".format(jtu.format_shape_dtype_string(shape, dtype),\n+ lower),\n+ \"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"lower\":lower}\n+ for shape in [(1, 1), (4, 4), (5, 5), (50, 50)]\n+ for dtype 
in float_types() | complex_types()\n+ for rng in [jtu.rand_default()]\n+ for lower in [True, False]))\n+ # TODO(phawkins): enable when there is an eigendecomposition implementation\n+ # for GPU/TPU.\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\n+ def testEighGrad(self, shape, dtype, rng, lower):\n+ if not hasattr(lapack, \"jax_syevd\"):\n+ self.skipTest(\"No symmetric eigendecomposition implementation available\")\n+ uplo = \"L\" if lower else \"U\"\n+ a = rng(shape, dtype)\n+ a = (a + onp.conj(a.T)) / 2\n+ a = onp.tril(a) if lower else onp.triu(a)\n+ # Gradient checks will fail without symmetrization as the eigh jvp rule\n+ # is only correct for tangents in the symmetric subspace, whereas the\n+ # checker checks against unconstrained (co)tangents.\n+ f = partial(np.linalg.eigh, UPLO=uplo, symmetrize=True)\n+ jtu.check_grads(f, (a,), 2, rtol=1e-1)\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_ord={}_axis={}_keepdims={}\".format(\njtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),\n" } ]
Python
Apache License 2.0
google/jax
added jvp rule for eigh, tests
260,403
13.02.2019 23:23:39
28,800
ed437b409d28cdb5bbfd62233899d956b41b0da0
fix testing of eigh jvp rule
[ { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -39,9 +39,9 @@ def cholesky(x, symmetrize_input=True):\nx = symmetrize(x)\nreturn cholesky_p.bind(x)\n-def eigh(x, lower=True, symmetrize=True):\n- if symmetrize:\n- x = (x + _H(x)) / 2 # orthogonal projection onto self-adjoint matrices\n+def eigh(x, lower=True, symmetrize_input=True):\n+ if symmetrize_input:\n+ x = symmetrize(x)\nreturn eigh_p.bind(x, lower=lower)\ndef lu(x): return lu_p.bind(x)\n@@ -151,10 +151,11 @@ def eigh_cpu_translation_rule(c, operand, lower):\ndef eigh_jvp_rule(primals, tangents, lower):\n# Derivative for eigh in the simplest case of distinct eigenvalues.\n- # Simple case from https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n+ # This is classic nondegenerate perurbation theory, but also see\n+ # https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\n# The general solution treating the case of degenerate eigenvalues is\n# considerably more complicated. Ambitious readers may refer to the general\n- # methods at:\n+ # methods below or refer to degenerate perturbation theory in physics.\n# https://www.win.tue.nl/analysis/reports/rana06-33.pdf and\n# https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf\na, = primals\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -133,7 +133,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\na, = args_maker()\na = (a + onp.conj(a.T)) / 2\nw, v = np.linalg.eigh(onp.tril(a) if lower else onp.triu(a),\n- UPLO=uplo, symmetrize=False)\n+ UPLO=uplo, symmetrize_input=False)\nself.assertTrue(norm(onp.eye(n) - onp.matmul(onp.conj(T(v)), v)) < 5)\nself.assertTrue(norm(onp.matmul(a, v) - w * v) < 30)\n@@ -162,9 +162,52 @@ class NumpyLinalgTest(jtu.JaxTestCase):\n# Gradient checks will fail without symmetrization as the eigh jvp rule\n# is only correct for tangents in the symmetric subspace, whereas the\n# checker checks against unconstrained (co)tangents.\n- f = partial(np.linalg.eigh, UPLO=uplo, symmetrize=True)\n+ if dtype not in complex_types():\n+ f = partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)\n+ else: # only check eigenvalue grads for complex matrices\n+ f = lambda a: partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)(a)[0]\njtu.check_grads(f, (a,), 2, rtol=1e-1)\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\":\n+ \"_shape={}_lower={}\".format(jtu.format_shape_dtype_string(shape, dtype),\n+ lower),\n+ \"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"lower\":lower}\n+ for shape in [(1, 1), (4, 4), (5, 5), (50, 50)]\n+ for dtype in complex_types()\n+ for rng in [jtu.rand_default()]\n+ for lower in [True, False])\n+ for eps in [1e-4])\n+ # TODO(phawkins): enable when there is an eigendecomposition implementation\n+ # for GPU/TPU.\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\n+ def testEighGradVectorComplex(self, shape, dtype, rng, lower, eps):\n+ # Special case to test for complex eigenvector grad correctness.\n+ # Exact eigenvector coordinate gradients are hard to test numerically for complex\n+ # eigensystem solvers given the extra degrees of per-eigenvector phase freedom.\n+ # Instead, we numerically verify the eigensystem properties on the perturbed\n+ # eigenvectors. 
You only ever want to optimize eigenvector directions, not coordinates!\n+ if not hasattr(lapack, \"jax_syevd\"):\n+ self.skipTest(\"No symmetric eigendecomposition implementation available\")\n+ uplo = \"L\" if lower else \"U\"\n+ a = rng(shape, dtype)\n+ a = (a + onp.conj(a.T)) / 2\n+ a = onp.tril(a) if lower else onp.triu(a)\n+ a_dot = eps * rng(shape, dtype)\n+ a_dot = (a_dot + onp.conj(a_dot.T)) / 2\n+ a_dot = onp.tril(a_dot) if lower else onp.triu(a_dot)\n+ # evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix\n+ f = partial(np.linalg.eigh, UPLO=uplo)\n+ (w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(da,))\n+ new_a = a + a_dot\n+ new_w, new_v = f(new_a)\n+ # Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues.\n+ assert onp.max(\n+ onp.abs(onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w)/eps) < RTOL\n+ # Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues.\n+ assert onp.max(\n+ onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0)/eps) < RTOL\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_ord={}_axis={}_keepdims={}\".format(\njtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),\n" } ]
Python
Apache License 2.0
google/jax
fix testing of eigh jvp rule
260,403
13.02.2019 23:44:41
28,800
8cd3f448d5d2e723fb2c0d96cc0226102d8a8308
fix missing symmetrize_input arg
[ { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -160,7 +160,7 @@ def eigh_jvp_rule(primals, tangents, lower):\n# https://people.orie.cornell.edu/aslewis/publications/99-clarke.pdf\na, = primals\na_dot, = tangents\n- v, w = eigh_p.bind((a + _H(a)) / 2.0, lower=lower)\n+ v, w = eigh_p.bind(symmetrize(a), lower=lower)\n# for complex numbers we need eigenvalues to be full dtype of v, a:\nw = w.astype(a.dtype)\neye_n = np.eye(a.shape[-1], dtype=a.dtype)\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/linalg.py", "new_path": "jax/numpy/linalg.py", "diff": "@@ -93,7 +93,7 @@ def det(a):\n@_wraps(onp.linalg.eigh)\n-def eigh(a, UPLO=None, symmetrize=True):\n+def eigh(a, UPLO=None, symmetrize_input=True):\nif UPLO is None or UPLO == \"L\":\nlower = True\nelif UPLO == \"U\":\n@@ -103,7 +103,7 @@ def eigh(a, UPLO=None, symmetrize=True):\nraise ValueError(msg)\na = _promote_arg_dtypes(np.asarray(a))\n- v, w = lax_linalg.eigh(a, lower=lower, symmetrize=symmetrize)\n+ v, w = lax_linalg.eigh(a, lower=lower, symmetrize_input=symmetrize_input)\nreturn w, v\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -172,12 +172,12 @@ class NumpyLinalgTest(jtu.JaxTestCase):\n{\"testcase_name\":\n\"_shape={}_lower={}\".format(jtu.format_shape_dtype_string(shape, dtype),\nlower),\n- \"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"lower\":lower}\n+ \"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"lower\":lower, \"eps\":eps}\nfor shape in [(1, 1), (4, 4), (5, 5), (50, 50)]\nfor dtype in complex_types()\nfor rng in [jtu.rand_default()]\n- for lower in [True, False])\n- for eps in [1e-4])\n+ for lower in [True, False]\n+ for eps in [1e-4]))\n# TODO(phawkins): enable when there is an eigendecomposition implementation\n# for GPU/TPU.\n@jtu.skip_on_devices(\"gpu\", \"tpu\")\n" } ]
Python
Apache License 2.0
google/jax
fix missing symmetrize_input arg
260,403
14.02.2019 02:28:00
28,800
cd2205043541e87425d2f769ecf37c1c1c77e74a
actually test relative error
[ { "change_type": "MODIFY", "old_path": "jax/lax_linalg.py", "new_path": "jax/lax_linalg.py", "diff": "@@ -170,7 +170,7 @@ def eigh_jvp_rule(primals, tangents, lower):\ndot = lax.dot if a.ndim == 2 else lax.batch_matmul\nvdag_adot_v = dot(dot(_H(v), a_dot), v)\ndv = dot(v, np.multiply(Fmat, vdag_adot_v))\n- dw = np.diagonal(np.multiply(eye_n, vdag_adot_v))\n+ dw = np.diagonal(vdag_adot_v)\nreturn core.pack((v, w)), core.pack((dv, dw))\neigh_p = Primitive('eigh')\n" }, { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -198,15 +198,19 @@ class NumpyLinalgTest(jtu.JaxTestCase):\na_dot = onp.tril(a_dot) if lower else onp.triu(a_dot)\n# evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix\nf = partial(np.linalg.eigh, UPLO=uplo)\n- (w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(da,))\n+ (w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(a_dot,))\nnew_a = a + a_dot\nnew_w, new_v = f(new_a)\n+ new_a = (new_a + onp.conj(new_a.T)) / 2\n# Assert rtol eigenvalue delta between perturbed eigenvectors vs new true eigenvalues.\n+ RTOL=1e-2\nassert onp.max(\n- onp.abs(onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w)/eps) < RTOL\n+ onp.abs((onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL\n# Redundant to above, but also assert rtol for eigenvector property with new true eigenvalues.\nassert onp.max(\n- onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0)/eps) < RTOL\n+ onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0) /\n+ onp.linalg.norm(onp.abs(new_w*(v+dv)), axis=0)\n+ ) < RTOL\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_ord={}_axis={}_keepdims={}\".format(\n" } ]
Python
Apache License 2.0
google/jax
actually test relative error
260,335
15.02.2019 07:04:57
28,800
98dcf264e92428338d9b5025640c39b5cf723734
fix nan handling in pow jvp (fixes
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -957,11 +957,16 @@ _maybe_real = lambda x: real(x) if _iscomplex(x) else x\n# TODO handle broadcasting\npow_p = standard_binop([_float | _complex, _float | _complex], 'pow')\n-ad.defjvp(pow_p,\n- lambda g, x, y: mul(_brcast(g, y), mul(y, pow(x, select(\n- eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))),\n- lambda g, x, y: mul(_brcast(g, x),\n- mul(log(_replace_zero(x)), pow(x, y))))\n+\n+def pow_jvp_lhs(g, x, y):\n+ exponent = select(eq(y, _zero(y)), _ones(y), sub(y, _one(y)))\n+ x_pow_y = select(eq(x, _zero(x)), _zeros(x), pow(_replace_zero(x), exponent))\n+ return mul(_brcast(g, y), mul(y, x_pow_y))\n+\n+def pow_jvp_rhs(g, x, y):\n+ return mul(_brcast(g, x), mul(log(_replace_zero(x)), pow(x, y)))\n+\n+ad.defjvp(pow_p, pow_jvp_lhs, pow_jvp_rhs)\n_replace_zero = lambda x: select(eq(x, _const(x, 0)), _ones(x), x)\nnot_p = standard_unop(_int | _bool, 'not')\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1223,6 +1223,18 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\njax_numpy_result = ((x + x.T) / 2).dtype\nself.assertEqual(orig_numpy_result, jax_numpy_result)\n+ def testIssue347(self):\n+ # https://github.com/google/jax/issues/347\n+ def test_fail(x):\n+ x = lnp.sqrt(lnp.sum(x ** 2, axis=1))\n+ ones = lnp.ones_like(x)\n+ x = lnp.where(x > 0.5, x, ones)\n+ return lnp.sum(x)\n+\n+ x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)\n+ result = grad(test_fail)(x)\n+ assert not onp.any(onp.isnan(result))\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
fix nan handling in pow jvp (fixes #347)
260,268
15.02.2019 14:09:06
28,800
d8b3694bfb3b2b471fe42de8a04d748a780ab449
__invert__ doesn't take an argument.
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -236,7 +236,7 @@ class Tracer(object):\ndef __ror__(self, other): return self.aval._ror(self, other)\ndef __xor__(self, other): return self.aval._xor(self, other)\ndef __rxor__(self, other): return self.aval._rxor(self, other)\n- def __invert__(self, other): return self.aval._invert(self, other)\n+ def __invert__(self): return self.aval._invert(self)\ndef __lshift__(self, other): return self.aval._lshift(self, other)\ndef __rshift__(self, other): return self.aval._rshift(self, other)\ndef __getitem__(self, idx): return self.aval._getitem(self, idx)\n" } ]
Python
Apache License 2.0
google/jax
__invert__ doesn't take an argument.
260,335
15.02.2019 18:32:50
28,800
58749c0a13049a9113478bdbb5d83f5df72fda7e
add lax._safe_mul with 0*inf=0, used in pow jvp
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -493,6 +493,9 @@ def stop_gradient(x):\nreturn stop_gradient_p.bind(x)\n+def _safe_mul(x, y): return safe_mul_p.bind(x, y)\n+\n+\ndef psum(x, axis_name):\nreturn psum_p.bind(x, axis_name=axis_name)\n@@ -959,13 +962,8 @@ _maybe_real = lambda x: real(x) if _iscomplex(x) else x\npow_p = standard_binop([_float | _complex, _float | _complex], 'pow')\ndef pow_jvp_lhs(g, x, y):\n- exponent = select(eq(y, _zero(y)), _ones(y), sub(y, _one(y)))\n- x_pow_ym1 = pow(x, exponent) # x ** (y-1), except where x==0 or y==0\n- x_pow_ym1 = select(_brcast(eq(x, _zero(y)), x_pow_ym1), # pow(0, a) is 0\n- _zeros(x_pow_ym1), x_pow_ym1) # unless a == 0\n- x_pow_ym1 = select(_brcast(eq(y, _zero(y)), x_pow_ym1), # pow(a, 0) is 0\n- _ones(x_pow_ym1), x_pow_ym1)\n- return mul(_brcast(g, y), mul(y, x_pow_ym1))\n+ jac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))\n+ return _safe_mul(_brcast(g, y), jac)\ndef pow_jvp_rhs(g, x, y):\nreturn mul(_brcast(g, x), mul(log(_replace_zero(x)), pow(x, y)))\n@@ -1001,6 +999,20 @@ mul_p = standard_binop([_num, _num], 'mul')\nad.defbilinear_broadcasting(_brcast, mul_p, mul, mul) # TODO\n+def _safe_mul_translation_rule(c, x, y):\n+ dtype = c.GetShape(x).numpy_dtype()\n+ zero = c.Constant(onp.array(0, dtype=dtype))\n+ out_shape = tuple(onp.maximum(c.GetShape(x).dimensions(),\n+ c.GetShape(y).dimensions()))\n+ return c.Select(c.Or(c.Eq(x, zero), c.Eq(y, zero)),\n+ c.Broadcast(zero, out_shape),\n+ c.Mul(x, y))\n+\n+safe_mul_p = standard_binop([_num, _num], 'safe_mul',\n+ translation_rule=_safe_mul_translation_rule)\n+ad.defbilinear_broadcasting(_brcast, safe_mul_p, _safe_mul, _safe_mul)\n+\n+\ndef _div_transpose_rule(cotangent, x, y):\nassert x is None and y is not None\nres = ad_util.zero if cotangent is ad_util.zero else div(cotangent, y)\n" }, { "change_type": "MODIFY", "old_path": "jax/lib/xla_bridge.py", "new_path": "jax/lib/xla_bridge.py", "diff": "@@ -348,7 +348,7 @@ class _JaxComputationBuilderBase(object):\ndef ConstantLike(self, example_value, value, canonicalize_types=True):\nexample_value = onp.asarray(example_value)\n- return self.Constant(onp.array(value).astype(example_value.dtype))\n+ return self.Constant(onp.array(value, dtype=example_value.dtype))\ndef Constant(self, py_val, canonicalize_types=True):\n\"\"\"Translate constant `py_val` to a constant for this ComputationBuilder.\n" } ]
Python
Apache License 2.0
google/jax
add lax._safe_mul with 0*inf=0, used in pow jvp
260,335
15.02.2019 20:56:10
28,800
635d1ad1b99d02a3f6c9ae6d94d7c787c0746528
force broadcasting in add_jaxvals_p batching rule
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/batching.py", "new_path": "jax/interpreters/batching.py", "diff": "@@ -245,7 +245,8 @@ def add_batched(batched_args, batch_dims):\nxs, ys = batched_args\nreturn add_jaxvals_p.bind(xs, ys), bdx\nelse:\n- xs, ys = map(bdim_at_front, batched_args, batch_dims)\n+ move_bdim = partial(bdim_at_front, force_broadcast=True)\n+ xs, ys = map(move_bdim, batched_args, batch_dims)\nreturn add_jaxvals_p.bind(xs, ys), 0\nprimitive_batchers[add_jaxvals_p] = add_batched\n" } ]
Python
Apache License 2.0
google/jax
force broadcasting in add_jaxvals_p batching rule
260,335
15.02.2019 21:14:11
28,800
f113e1135267c87a48a933d8b4a0acadc8626d5d
add test for issue387
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/batching.py", "new_path": "jax/interpreters/batching.py", "diff": "@@ -241,11 +241,12 @@ def reducer_batcher(prim, batched_args, batch_dims, axes, **params):\ndef add_batched(batched_args, batch_dims):\nbdx, bdy = batch_dims\n- if bdx == bdy:\nxs, ys = batched_args\n+ if bdx == bdy:\nreturn add_jaxvals_p.bind(xs, ys), bdx\nelse:\n- move_bdim = partial(bdim_at_front, force_broadcast=True)\n+ sz = (dimsize(bdx, xs) | dimsize(bdy, ys)).pop()\n+ move_bdim = partial(bdim_at_front, broadcast_size=sz, force_broadcast=True)\nxs, ys = map(move_bdim, batched_args, batch_dims)\nreturn add_jaxvals_p.bind(xs, ys), 0\nprimitive_batchers[add_jaxvals_p] = add_batched\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -810,6 +810,23 @@ class BatchingTest(jtu.JaxTestCase):\nexpected = onp.stack([grad(f)(scale) for scale in scales])\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def testIssue387(self):\n+ # https://github.com/google/jax/issues/387\n+ R = onp.random.RandomState(0).rand(100, 2)\n+\n+ def dist_sq(R):\n+ dR = R[:, np.newaxis, :] - R[np.newaxis, :, :]\n+ zero = np.zeros_like(dR)\n+ dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR))\n+ return np.sum(dR ** 2, axis=2)\n+\n+ @jit\n+ def f(R):\n+ dr = dist_sq(R)\n+ return np.sum(R ** 2)\n+\n+ H = hessian(f)(R) # don't crash on UnshapedArray\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
add test for issue387
260,335
15.02.2019 22:21:30
28,800
639f61352c57ca5ee6878e727d0543d8fc27f9ee
tests for numpy operator overloading (some fail!)
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -240,7 +240,7 @@ class Tracer(object):\ndef __ror__(self, other): return self.aval._ror(self, other)\ndef __xor__(self, other): return self.aval._xor(self, other)\ndef __rxor__(self, other): return self.aval._rxor(self, other)\n- def __invert__(self, other): return self.aval._invert(self, other)\n+ def __invert__(self): return self.aval._invert(self)\ndef __lshift__(self, other): return self.aval._lshift(self, other)\ndef __rshift__(self, other): return self.aval._rshift(self, other)\ndef __getitem__(self, idx): return self.aval._getitem(self, idx)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -141,6 +141,7 @@ JAX_COMPOUND_OP_RECORDS = [\nop_record(\"ravel\", 1, all_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\nop_record(\"real\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\nop_record(\"remainder\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"mod\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\nop_record(\"sinc\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\nop_record(\"square\", 1, number_dtypes, all_shapes, jtu.rand_default(), [\"rev\"]),\nop_record(\"sqrt\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\n@@ -187,6 +188,44 @@ JAX_ARGMINMAX_RECORDS = [\nop_record(\"argmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n]\n+JAX_OPERATOR_OVERLOADS = [\n+ op_record(\"__add__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__radd__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__sub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__rsub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__mul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__rmul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__rdiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__eq__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__ne__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__lt__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__gt__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__ge__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__neg__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"__pow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n+ op_record(\"__rpow__\", 2, inexact_dtypes, all_shapes, jtu.rand_positive(), []),\n+ op_record(\"__mod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__rmod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__floordiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__rfloordiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__truediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__rtruediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__abs__\", 1, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ # TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2\n+ op_record(\"__invert__\", 1, 
int_dtypes, all_shapes, jtu.rand_default(), []),\n+ # TODO(mattjj): investigate these failures\n+ # op_record(\"__or__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__and__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ # op_record(\"__xor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__divmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ # TODO(mattjj): lshift, rshift\n+]\n+\nCombosWithReplacement = itertools.combinations_with_replacement\n@@ -237,6 +276,21 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)\nself._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n+ @parameterized.named_parameters(itertools.chain.from_iterable(\n+ jtu.cases_from_list(\n+ {\"testcase_name\": jtu.format_test_name_suffix(rec.test_name, shapes,\n+ dtypes),\n+ \"rng\": rec.rng, \"shapes\": shapes, \"dtypes\": dtypes, \"name\": rec.name}\n+ for shapes in filter(\n+ _shapes_are_broadcast_compatible,\n+ CombosWithReplacement(rec.shapes, rec.nargs))\n+ for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))\n+ for rec in JAX_OPERATOR_OVERLOADS))\n+ def testOperatorOverload(self, name, rng, shapes, dtypes):\n+ args_maker = self._GetArgsMaker(rng, shapes, dtypes)\n+ fun = lambda x, *xs: getattr(x, name)(*xs)\n+ self._CompileAndCheck(fun, args_maker, check_dtypes=True)\n+\n@parameterized.named_parameters(itertools.chain.from_iterable(\njtu.cases_from_list(\n{\"testcase_name\": jtu.format_test_name_suffix(\n" } ]
Python
Apache License 2.0
google/jax
tests for numpy operator overloading (some fail!)
260,648
16.02.2019 23:31:27
-32,400
d3acbc1c73f924ae68f8924bae0d8c5901fca5c4
Fix typo in PRNGKey docstring
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -37,7 +37,7 @@ from jax import core\ndef PRNGKey(seed):\n- \"\"\"Create a psuedo-random number generator (PRNG) key given an integer seed.\n+ \"\"\"Create a pseudo-random number generator (PRNG) key given an integer seed.\nArgs:\nseed: a 64- or 32-bit integer used as the value of the key.\n" } ]
Python
Apache License 2.0
google/jax
Fix typo in PRNGKey docstring
260,335
16.02.2019 08:04:19
28,800
9bf830e1c8cd2300e6ef9736925456b10d52bb3c
only test __div__ / __rdiv__ on python2
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -24,6 +24,7 @@ from unittest import skip\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n+import six\nimport numpy as onp\n@@ -195,8 +196,6 @@ JAX_OPERATOR_OVERLOADS = [\nop_record(\"__rsub__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"__mul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"__rmul__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n- op_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n- op_record(\"__rdiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\nop_record(\"__eq__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"__ne__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"__lt__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n@@ -226,6 +225,13 @@ JAX_OPERATOR_OVERLOADS = [\n# TODO(mattjj): lshift, rshift\n]\n+if six.PY2:\n+ JAX_OPERATOR_OVERLOADS += [\n+ op_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ op_record(\"__rdiv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ ]\n+\n+\nCombosWithReplacement = itertools.combinations_with_replacement\n" } ]
Python
Apache License 2.0
google/jax
only test __div__ / __rdiv__ on python2
260,335
16.02.2019 08:08:04
28,800
6a9b741ebcf6fa202dc38f7e5c338f9cf9b4df32
add comment in pow_jvp_lhs about calling _safe_mul
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -962,6 +962,9 @@ _maybe_real = lambda x: real(x) if _iscomplex(x) else x\npow_p = standard_binop([_float | _complex, _float | _complex], 'pow')\ndef pow_jvp_lhs(g, x, y):\n+ # we call _safe_mul here so that we get the behavior 0*inf = 0, since when a\n+ # coefficient in `g` is zero we want to keep it at zero, not produce a nan.\n+ # see https://github.com/google/jax/pull/383\njac = mul(y, pow(x, select(eq(y, _zeros(y)), _ones(y), sub(y, _ones(y)))))\nreturn _safe_mul(_brcast(g, y), jac)\n" } ]
Python
Apache License 2.0
google/jax
add comment in pow_jvp_lhs about calling _safe_mul
260,331
17.02.2019 09:23:46
18,000
606351fc70a7d263b0128c4f10484d74e739fae7
Fix bad merge/edit in gufuncs.ipynb.
[ { "change_type": "MODIFY", "old_path": "notebooks/gufuncs.ipynb", "new_path": "notebooks/gufuncs.ipynb", "diff": "\"\\n\",\n\"## What is a gufunc?\\n\",\n\"\\n\",\n- \"[Generalized universal functions](https://docs.scipy.org/doc/numpy-1.15.0/reference/c-api.generalized-ufuncs.html) (\\\"gufuncs\\\") are one of my favorite abstractions from NumPy. They generalize NumPy's [\n- ing rules](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html) to handle non-scalar operations. When a gufuncs is applied to arrays, there are:\\n\",\n+ \"[Generalized universal functions](https://docs.scipy.org/doc/numpy-1.15.0/reference/c-api.generalized-ufuncs.html) (\\\"gufuncs\\\") are one of my favorite abstractions from NumPy. They generalize NumPy's\n+ [broadcasting rules](https://docs.scipy.org/doc/numpy-1.15.0/user/basics.broadcasting.html) to handle non-scalar operations. When a gufuncs is applied to arrays, there are:\\n\",\n\"- \\\"core dimensions\\\" over which an operation is defined.\\n\",\n\"- \\\"broadcast dimensions\\\" over which operations can be automatically vectorized.\\n\",\n\"\\n\",\n" } ]
Python
Apache License 2.0
google/jax
Fix bad merge/edit in gufuncs.ipynb.
260,331
17.02.2019 09:25:18
18,000
e6f6810b67f21d431f4d5813a8582f0772422f23
Remove my name from the XLA colab Team effort!
[ { "change_type": "MODIFY", "old_path": "notebooks/XLA_in_Python.ipynb", "new_path": "notebooks/XLA_in_Python.ipynb", "diff": "\"id\": \"AiyR1e2NubKa\"\n},\n\"source\": [\n- \"Let's exploit Peter Hawkin's XLA QR implementation to solve some eigenvalues for symmetric matrices. \\n\",\n+ \"Let's exploit the XLA QR implementation to solve some eigenvalues for symmetric matrices. \\n\",\n\"\\n\",\n\"This is the naive QR algorithm, without acceleration for closely-spaced eigenvalue convergence, nor any permutation to sort eigenvalues by magnitude.\"\n]\n" } ]
Python
Apache License 2.0
google/jax
Remove my name from the XLA colab Team effort!
260,335
17.02.2019 09:34:49
28,800
60865a5bb566e7677c0bd1ffa56b2a97ac1f77d9
fix broken dot batch rule case
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -1326,11 +1326,9 @@ def _dot_batch_rule(batched_args, batch_dims):\nassert lbd is not None and rbd is not None\nassert lhs.ndim == rhs.ndim == 2 # dot only supports rank 1 and above\n- if lbd != 0:\n- batching.move_dim_to_front(lhs, lbd)\n- if rbd != 0:\n- batching.move_dim_to_front(rhs, rbd)\n- return dot_general(lhs, rhs, [((1,), (1,)), ((0,), (0,))])\n+ lhs = batching.move_dim_to_front(lhs, lbd)\n+ rhs = batching.move_dim_to_front(rhs, rbd)\n+ return dot_general(lhs, rhs, [((1,), (1,)), ((0,), (0,))]), 0\nif lbd is None:\nassert rbd is not None\n" }, { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -274,6 +274,14 @@ class BatchingTest(jtu.JaxTestCase):\n# TODO(mattjj): this fails due to an xla error in dot_general\n# assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2)\n+ def testDot2(self):\n+ R = onp.random.RandomState(0).randn\n+ xs = R(10, 3)\n+ ys = R(10, 3)\n+ ans = vmap(np.dot)(xs, ys)\n+ expected = onp.einsum('ni,ni->n', xs, ys)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\ndef testPad(self):\nR = onp.random.RandomState(0).randn\n" } ]
Python
Apache License 2.0
google/jax
fix broken dot batch rule case
260,335
17.02.2019 09:36:18
28,800
793e055f75db207030614c12d2ce28eacde0f28d
enable an old test case (xla bug fixed)
[ { "change_type": "MODIFY", "old_path": "tests/batching_test.py", "new_path": "tests/batching_test.py", "diff": "@@ -271,8 +271,7 @@ class BatchingTest(jtu.JaxTestCase):\nassert vecvec(np.zeros((3,)), np.zeros((3,))).shape == ()\nassert vecvec(np.zeros((2, 3)), np.zeros((3,))).shape == (2,)\n- # TODO(mattjj): this fails due to an xla error in dot_general\n- # assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2)\n+ assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2)\ndef testDot2(self):\nR = onp.random.RandomState(0).randn\n" } ]
Python
Apache License 2.0
google/jax
enable an old test case (xla bug fixed)
260,335
18.02.2019 12:41:07
28,800
042a20d2da36bffc2d9665422a9fc6c0ae814acd
improve loop construct docs, remove foreach_loop
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -416,6 +416,35 @@ def sort_key_val(keys, values, dimension=-1):\nreturn sorted_keys, sorted_values\ndef while_loop(cond_fun, body_fun, init_val):\n+ \"\"\"Call `body_fun` repeatedly in a loop while `cond_fun` is True.\n+\n+ Arguments:\n+ cond_fun: pure function of type `T -> Bool`.\n+ body_fun: pure function of type `T -> T`.\n+ init_val: value of type `T`, a type that can be a scalar, array, or any\n+ (nested) Python tuple/list/dict thereof.\n+\n+ Returns:\n+ The output from the final iteration of body_fun, of type `T`.\n+\n+ The semantics of `while_loop` are given by this Python implementation:\n+\n+ def while_loop(cond_fun, body_fun, init_val):\n+ val = init_val\n+ while cond_fun(val):\n+ val = body_fun(val)\n+ return val\n+\n+ Unlike that pure Python version, `while_loop` is a JAX primitive and is\n+ lowered to a single XLA While HLO. That makes it useful for reducing\n+ compilation times for jit-compiled functions, since native Python loop\n+ constructs in an `@jit` function are unrolled, leading to large XLA\n+ computations.\n+\n+ Another difference from using Python-native loop constructs is that\n+ `while_loop` is not reverse-mode differentiable because XLA computations\n+ require static bounds on memory requirements.\n+ \"\"\"\ninit_val_flat, in_tree = pytree_to_jaxtupletree(init_val)\nflat_body_fun, out_tree = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(body_fun), (in_tree,))\nflat_cond_fun, _ = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(cond_fun), (in_tree,))\n@@ -618,33 +647,27 @@ def fori_loop(lower, upper, body_fun, init_val):\nReturns:\nLoop value from the final iteration, of type T.\n- \"\"\"\n- # state: (upper limit, index, loop value)\n- # The `lt` and `add` functions are added to the namespace programmatically.\n- _, _, result = while_loop(\n- lambda upper_i_x: lt(upper_i_x[1], upper_i_x[0]),\n- lambda upper_i_x: (upper_i_x[0],\n- add(upper_i_x[1], onp.array(1, _dtype(upper_i_x[1]))),\n- body_fun(upper_i_x[1], upper_i_x[2])),\n- (upper, lower, init_val))\n- return result\n-\n-def foreach_loop(sequence, body_fun, init_val):\n- \"\"\"Loop over `sequence` by reduction to `while_loop`.\n+ The semantics of `fori_loop` are given by this Python implementation:\n- Arguments:\n- sequence: tuple of loop items, each of type U\n- body_fun: function of type (U, T) -> T, where T is the type of `init_val`\n- init_val: initial loop value, of type T\n+ def fori_loop(lower, upper, body_fun, init_val):\n+ val = init_val\n+ for i in range(lower, upper):\n+ val = body_fun(i, val)\n+ return val\n- Returns:\n- Loop value from the final iteration, of type T.\n+ Unlike that pure Python version, `fori_loop` is implemented in terms of a call\n+ to `while_loop`. 
See the docstring for `while` for more information.\n\"\"\"\n- _, result = fori_loop(\n- 0, len(sequence),\n- lambda i, seq_val: (seq_val[0], body_fun(seq_val[0][i], seq_val[1])),\n- (sequence, init_val))\n+ def while_cond_fun(loop_carry):\n+ i, _ = loop_carry\n+ return lt(i, upper)\n+\n+ def while_body_fun(loop_carry):\n+ i, x = loop_carry\n+ return add(i, _const(i, 1)), body_fun(i, x)\n+\n+ _, result = while_loop(while_cond_fun, while_body_fun, (lower, init_val))\nreturn result\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -1335,24 +1335,6 @@ class LaxTest(jtu.JaxTestCase):\nself.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\nself.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n- def testForeachLoopBasic(self):\n- def sum_squares(xs):\n- def body_fun(x, y):\n- return y + x * x\n- return lax.foreach_loop(xs, body_fun, 0)\n-\n- sum_squares_jit = api.jit(sum_squares)\n-\n- xs = onp.array([1, 2, 3, 4])\n- self.assertEqual(sum_squares(xs[:1]), 1)\n- self.assertEqual(sum_squares(xs[:1]), sum_squares_jit(xs[:1]))\n- self.assertEqual(sum_squares(xs[:2]), 5)\n- self.assertEqual(sum_squares(xs[:2]), sum_squares_jit(xs[:2]))\n- self.assertEqual(sum_squares(xs[:3]), 14)\n- self.assertEqual(sum_squares(xs[:3]), sum_squares_jit(xs[:3]))\n- self.assertEqual(sum_squares(xs[:4]), 30)\n- self.assertEqual(sum_squares(xs[:4]), sum_squares_jit(xs[:4]))\n-\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_lhs_shape={}_rhs_shape={}\"\n.format(jtu.format_shape_dtype_string(lhs_shape, dtype),\n" } ]
Python
Apache License 2.0
google/jax
improve loop construct docs, remove foreach_loop
260,335
18.02.2019 12:58:35
28,800
13834ee4f579febcc4fc8bde2e6f8892bc6eef99
add "yet" to while_loop rev-autodiff statement
[ { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -442,7 +442,7 @@ def while_loop(cond_fun, body_fun, init_val):\ncomputations.\nAnother difference from using Python-native loop constructs is that\n- `while_loop` is not reverse-mode differentiable because XLA computations\n+ `while_loop` is not (yet) reverse-mode differentiable because XLA computations\nrequire static bounds on memory requirements.\n\"\"\"\ninit_val_flat, in_tree = pytree_to_jaxtupletree(init_val)\n" } ]
Python
Apache License 2.0
google/jax
add "yet" to while_loop rev-autodiff statement
260,335
19.02.2019 17:28:43
28,800
bf4ea4c099a845a8f97d1de8d4f86bf7de003bf8
guard against onp.lcm and onp.gcd not existing
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -2058,7 +2058,7 @@ hanning = onp.hanning\nkaiser = onp.kaiser # TODO: lower via lax to allow non-constant beta.\n-@_wraps(onp.gcd)\n+@_wraps(getattr(onp, \"gcd\", None))\ndef gcd(x1, x2):\nif (not issubdtype(lax._dtype(x1), integer) or\nnot issubdtype(lax._dtype(x2), integer)):\n@@ -2077,7 +2077,7 @@ def gcd(x1, x2):\nreturn gcd\n-@_wraps(onp.lcm)\n+@_wraps(getattr(onp, \"lcm\", None))\ndef lcm(x1, x2):\nd = gcd(x1, x2)\nreturn where(d == 0, lax._const(d, 0),\n" } ]
Python
Apache License 2.0
google/jax
guard against onp.lcm and onp.gcd not existing
260,335
19.02.2019 17:32:09
28,800
34d3d8138736f24bc2a4df988fbe7ee1ca09ef08
guard lcm and gcd tests against numpy version
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -126,7 +126,6 @@ JAX_COMPOUND_OP_RECORDS = [\ntest_name=\"expm1_large\"),\nop_record(\"expm1\", 1, number_dtypes, all_shapes, jtu.rand_small_positive(), []),\nop_record(\"floor_divide\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), [\"rev\"]),\n- op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"heaviside\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"hypot\", 2, default_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"kron\", 2, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n@@ -135,7 +134,6 @@ JAX_COMPOUND_OP_RECORDS = [\nop_record(\"isclose\", 2, all_dtypes, all_shapes, jtu.rand_small_positive(), []),\nop_record(\"iscomplex\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\nop_record(\"isreal\", 1, number_dtypes, all_shapes, jtu.rand_some_inf(), []),\n- op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"log2\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\nop_record(\"log10\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [\"rev\"]),\nop_record(\"log1p\", 1, number_dtypes, all_shapes, jtu.rand_positive(), [],\n@@ -187,6 +185,8 @@ JAX_REDUCER_NO_DTYPE_RECORDS = [\nnumpy_version = tuple(map(int, onp.version.version.split('.')))\nif numpy_version >= (1, 15):\nJAX_REDUCER_NO_DTYPE_RECORDS += [\n+ op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\nop_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n" } ]
Python
Apache License 2.0
google/jax
guard lcm and gcd tests against numpy version
260,335
19.02.2019 19:35:02
28,800
509e034dd7e65b49afa95e7ce6b735335749e574
put gcd and lcm tests back in correct list
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -182,15 +182,6 @@ JAX_REDUCER_NO_DTYPE_RECORDS = [\nop_record(\"min\", 1, all_dtypes, nonempty_shapes, jtu.rand_default(), []),\n]\n-numpy_version = tuple(map(int, onp.version.version.split('.')))\n-if numpy_version >= (1, 15):\n- JAX_REDUCER_NO_DTYPE_RECORDS += [\n- op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n- op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n- op_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n- ]\n-\n-\nJAX_ARGMINMAX_RECORDS = [\nop_record(\"argmin\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\nop_record(\"argmax\", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal(), []),\n@@ -232,6 +223,16 @@ JAX_OPERATOR_OVERLOADS = [\n# TODO(mattjj): lshift, rshift\n]\n+numpy_version = tuple(map(int, onp.version.version.split('.')))\n+if numpy_version >= (1, 15):\n+ JAX_COMPOUND_OP_RECORDS += [\n+ op_record(\"gcd\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n+ op_record(\"lcm\", 2, int_dtypes, all_shapes, jtu.rand_default(), []),\n+ ]\n+ JAX_REDUCER_NO_DTYPE_RECORDS += [\n+ op_record(\"ptp\", 1, number_dtypes, nonempty_shapes, jtu.rand_default(), []),\n+ ]\n+\nif six.PY2:\nJAX_OPERATOR_OVERLOADS += [\nop_record(\"__div__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n" } ]
Python
Apache License 2.0
google/jax
put gcd and lcm tests back in correct list
260,335
13.02.2019 14:28:30
28,800
4a863b92cd4e5e3c1e93b7906fe02ec3f0e7e5a5
sketch out custom primitive wrapper Most of this code is copied from an old branch that and I triple-programmed together.
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -664,3 +664,23 @@ def check_scalar(x):\nraise TypeError(msg(x))\nexcept TypeError:\nraise TypeError(msg(x))\n+\n+\n+def primitive(fun):\n+ name = getattr(fun, '__name__', '<unnamed user primitive>')\n+ fun_p = core.Primitive(name)\n+ fun_p.def_impl(fun)\n+\n+ # generic transformation implementations that rely on traceability of `fun`\n+ fun_p.def_abstract_eval(partial(pe.abstract_eval_fun, fun))\n+ xla.translations[fun_p] = partial(xla.lower_fun, fun)\n+ ad.primitive_jvps[fun_p] = partial(jvp, fun)\n+ # TODO(mattjj): batching\n+\n+ @wraps(fun)\n+ def traceable(*args, **kwargs):\n+ # TODO(mattjj): pytrees to jaxtupletrees\n+ return fun_p.bind(*args, **kwargs)\n+ traceable.primitive = fun_p\n+\n+ return traceable\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -116,6 +116,13 @@ def partial_eval_wrapper(avals, *consts, **kwargs):\nyield out, (out_pv, jaxpr, env)\n+def abstract_eval_fun(fun, *avals, **params):\n+ pvs_in = [PartialVal((a, unit)) for a in avals]\n+ _, pvout, _ = trace_unwrapped_to_jaxpr(fun, pvs_in, **params)\n+ aval_out, _ = pvout\n+ return aval_out\n+\n+\nclass JaxprTracer(Tracer):\n__slots__ = ['pval', 'recipe']\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -35,7 +35,8 @@ from ..abstract_arrays import ConcreteArray, ShapedArray, make_shaped_array, arr\nfrom ..core import AbstractTuple, JaxTuple, pack, valid_jaxtype\nfrom ..util import partial, partialmethod, memoize, unzip2, concatenate, safe_map, prod\nfrom ..lib import xla_bridge as xb\n-from .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal\n+from .partial_eval import (trace_to_subjaxpr, trace_unwrapped_to_jaxpr,\n+ merge_pvals, JaxprTrace, PartialVal)\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('jax_device_values',\n@@ -175,6 +176,15 @@ def translation_rule(p):\n\"XLA translation rule for '{}' not implemented\".format(p))\n+def lower_fun(fun, c, *xla_args, **params):\n+ xla_shapes = map(c.GetShape, xla_args)\n+ avals = map(aval_from_xla_shape, xla_shapes)\n+ pvals = [PartialVal((a, core.unit)) for a in avals]\n+ jaxpr, pvout, consts = trace_unwrapped_to_jaxpr(fun, pvals, **params)\n+ built_c = jaxpr_computation(jaxpr, consts, (), *xla_shapes)\n+ return c.Call(built_c, xla_args)\n+\n+\ntranslations = {}\nbackend_specific_translations = defaultdict(dict)\n" } ]
Python
Apache License 2.0
google/jax
sketch out custom primitive wrapper Most of this code is copied from an old branch that @dougalm, @duvenaud, and I triple-programmed together.
260,335
20.02.2019 08:04:48
28,800
1ca1e5c468cf65d73676b496f76e88f640779c87
add some underscores to names
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -99,9 +99,9 @@ def jit(fun, static_argnums=()):\nreturn fun(*args, **kwargs)\nf = lu.wrap_init(fun, kwargs)\ndyn_argnums = [i for i in range(len(args)) if i not in static_argnums]\n- f, dyn_args = argnums_partial(f, dyn_argnums, args)\n+ f, dyn_args = _argnums_partial(f, dyn_argnums, args)\njaxtupletree_args, in_trees = unzip2(map(pytree_to_jaxtupletree, dyn_args))\n- check_args(jaxtupletree_args)\n+ _check_args(jaxtupletree_args)\njaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)\njaxtupletree_out = xla.xla_call(jaxtree_fun, *jaxtupletree_args)\nreturn build_tree(out_tree(), jaxtupletree_out)\n@@ -219,9 +219,9 @@ def value_and_grad(fun, argnums=0):\n@wraps(fun, docstr=docstr, argnums=argnums)\ndef value_and_grad_f(*args, **kwargs):\nf = lu.wrap_init(fun, kwargs)\n- f_partial, dyn_args = argnums_partial(f, argnums, args)\n+ f_partial, dyn_args = _argnums_partial(f, argnums, args)\nans, vjp_py = vjp(f_partial, *dyn_args)\n- check_scalar(ans)\n+ _check_scalar(ans)\ng = vjp_py(onp.ones((), onp.result_type(ans)))\ng = g[0] if isinstance(argnums, int) else g\nreturn (ans, g)\n@@ -253,7 +253,7 @@ def jacfwd(fun, argnums=0):\ndef jacfun(*args, **kwargs):\nf = lu.wrap_init(fun, kwargs)\n- f_partial, dyn_args = argnums_partial(f, argnums, args)\n+ f_partial, dyn_args = _argnums_partial(f, argnums, args)\npushfwd = partial(jvp, f_partial, dyn_args)\ny, jac = vmap(pushfwd, out_axes=(None, -1))(_std_basis(dyn_args))\nexample_args = dyn_args[0] if isinstance(argnums, int) else dyn_args\n@@ -284,7 +284,7 @@ def jacrev(fun, argnums=0):\n\"\"\"\ndef jacfun(*args, **kwargs):\nf = lu.wrap_init(fun, kwargs)\n- f_partial, dyn_args = argnums_partial(f, argnums, args)\n+ f_partial, dyn_args = _argnums_partial(f, argnums, args)\ny, pullback = vjp(f_partial, *dyn_args)\njac = vmap(pullback)(_std_basis(y))\njac = jac[0] if isinstance(argnums, int) else jac\n@@ -394,7 +394,7 @@ def pjit(fun, axis_name, in_axes=0, out_axes=0, mesh_axis=0):\ndef f_jitted(*args, **kwargs):\nf = lu.wrap_init(fun, kwargs)\njaxtupletree_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))\n- check_args(jaxtupletree_args)\n+ _check_args(jaxtupletree_args)\nf, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)\nin_axes_ = in_axes if isinstance(in_axes, (list, tuple)) else (in_axes,) * len(args)\nchunksize = pxla.chunk_size(axis_name, mesh_axis, in_axes_, jaxtupletree_args)\n@@ -537,7 +537,7 @@ def vjp(fun, *primals):\nif not isinstance(fun, lu.WrappedFun):\nfun = lu.wrap_init(fun)\nprimals_flat, in_trees = unzip2(map(pytree_to_jaxtupletree, primals))\n- check_args(primals_flat)\n+ _check_args(primals_flat)\njaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(fun, in_trees)\nout_primal, out_vjp = ad.vjp(jaxtree_fun, primals_flat)\nout_tree = out_tree()\n@@ -632,7 +632,7 @@ device_get_array = lambda x: x.copy() if type(x) is xla.DeviceArray else x\ndevice_get = partial(tree_map, device_get_array)\n-def argnums_partial(f, dyn_argnums, args):\n+def _argnums_partial(f, dyn_argnums, args):\nif isinstance(dyn_argnums, int):\ndyn_argnums = (dyn_argnums,)\nelse:\n@@ -640,23 +640,23 @@ def argnums_partial(f, dyn_argnums, args):\nfixed_args = tuple([None if i in dyn_argnums else WrapHashably(arg)\nfor i, arg in enumerate(args)])\ndyn_args = tuple(args[i] for i in dyn_argnums)\n- return argnums_partial_(f, dyn_argnums, fixed_args), dyn_args\n+ return _argnums_partial_(f, dyn_argnums, fixed_args), 
dyn_args\n@lu.transformation\n-def argnums_partial_(dyn_argnums, fixed_args, *dyn_args):\n+def _argnums_partial_(dyn_argnums, fixed_args, *dyn_args):\nargs = [None if arg is None else arg.val for arg in fixed_args]\nfor i, arg in zip(dyn_argnums, dyn_args):\nargs[i] = arg\nans = yield args\nyield ans\n-def check_args(args):\n+def _check_args(args):\nfor arg in args:\nif not (isinstance(arg, core.Tracer) or core.valid_jaxtype(arg)):\nraise TypeError(\"Argument '{}' of type {} is not a valid JAX type\"\n.format(arg, type(arg)))\n-def check_scalar(x):\n+def _check_scalar(x):\nmsg = \"Gradient only defined for scalar-output functions. Output was: {}\".format\ntry:\naval = core.get_aval(x)\n@@ -666,7 +666,7 @@ def check_scalar(x):\nraise TypeError(msg(x))\n-def primitive(fun):\n+def _primitive(fun):\nname = getattr(fun, '__name__', '<unnamed user primitive>')\nfun_p = core.Primitive(name)\nfun_p.def_impl(fun)\n" } ]
Python
Apache License 2.0
google/jax
add some underscores to names
260,335
20.02.2019 09:03:30
28,800
dc7eab94c12c2117ca6b0c4a45eadf2f71dfcd78
add jax.xla_computation to retrieve Computation pair w/
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -151,6 +151,23 @@ def disable_jit():\n_jit_is_disabled = False\n+def xla_computation(fun, static_argnums=()):\n+ def pv_like(x):\n+ aval = xla.abstractify(x)\n+ return pe.PartialVal((aval, core.unit))\n+\n+ wrapped = lu.wrap_init(fun)\n+\n+ @wraps(fun)\n+ def computation_maker(*args, **kwargs):\n+ jax_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))\n+ jaxtree_fun, out_tree = pytree_fun_to_jaxtupletree_fun(wrapped, in_trees)\n+ pvals = map(pv_like, jax_args)\n+ jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals, **kwargs)\n+ return xla.build_jaxpr(jaxpr, consts, *map(xla.abstractify, args))\n+\n+ return computation_maker\n+\ndef grad(fun, argnums=0):\n\"\"\"Creates a function which evaluates the gradient of `fun`.\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -123,6 +123,11 @@ def compile_jaxpr(jaxpr, const_vals, *abstract_args):\nresult_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())\nreturn built_c.Compile(arg_shapes, xb.get_compile_options()), result_shape\n+def build_jaxpr(jaxpr, const_vals, *abstract_args):\n+ arg_shapes = list(map(xla_shape, abstract_args))\n+ built_c = jaxpr_computation(jaxpr, const_vals, (), *arg_shapes)\n+ return built_c\n+\ndef jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nc = xb.make_computation_builder(\"jaxpr_computation\")\n" } ]
Python
Apache License 2.0
google/jax
add jax.xla_computation to retrieve Computation pair w/ @hawkinsp
260,335
20.02.2019 17:03:31
28,800
4ba7d517df80bdc4f31f30f96475d302f7c54f54
bump jaxlib version number for building new wheels
[ { "change_type": "MODIFY", "old_path": "build/setup.py", "new_path": "build/setup.py", "diff": "@@ -20,7 +20,7 @@ binary_libs = [os.path.basename(f) for f in glob('jaxlib/*.so*')]\nsetup(\nname='jaxlib',\n- version='0.1.8',\n+ version='0.1.9',\ndescription='XLA library for JAX',\nauthor='JAX team',\nauthor_email='jax-dev@google.com',\n" } ]
Python
Apache License 2.0
google/jax
bump jaxlib version number for building new wheels
260,335
21.02.2019 07:34:27
28,800
9815914d74908c663b93ebaff8c25a6c899ecd7b
remove unused jax.numpy.array case (was typo)
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1148,24 +1148,22 @@ def atleast_3d(*arys):\nreturn [atleast_3d(arr) for arr in arys]\n-# TODO(mattjj): can this be simplified?\n@_wraps(onp.array)\ndef array(object, dtype=None, copy=True, order=\"K\", ndmin=0):\ndel copy # Unused.\nif ndmin != 0 or order != \"K\":\nraise NotImplementedError(\"Only implemented for order='K', ndmin=0.\")\n- if hasattr(object, '__asarray__'):\n- return object.__asarray__(dtype)\n- elif isinstance(object, ndarray):\n+ if isinstance(object, ndarray):\nif dtype and _dtype(object) != dtype:\nreturn lax.convert_element_type(object, dtype)\nelse:\nreturn object\n+ elif hasattr(object, '__array__'):\n+ return array(object.__array__(), dtype)\nelif isinstance(object, (list, tuple)):\nif object:\n- subarrays = [expand_dims(array(elt, dtype=dtype), 0) for elt in object]\n- return concatenate(subarrays)\n+ return stack([array(elt, dtype=dtype) for elt in object])\nelse:\nreturn onp.array([], dtype)\nelif isscalar(object):\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -942,9 +942,10 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)\nself._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)\n- def testArrayAsarrayMethod(self):\n+ def testArrayMethod(self):\nclass arraylike(object):\n- def __asarray__(self, dtype=None):\n+ dtype = onp.float32\n+ def __array__(self, dtype=None):\nreturn 3.\na = arraylike()\nans = lnp.array(a)\n" } ]
Python
Apache License 2.0
google/jax
remove unused jax.numpy.array case (was typo)
260,335
18.02.2019 11:37:30
28,800
ae0b56800ecd044f1bb11e11a91c555c1afbc964
conceptual progress! need to rewrite jaxpr
[ { "change_type": "ADD", "old_path": null, "new_path": "peval.py", "diff": "+from functools import partial\n+import numpy as onp\n+import jax.numpy as np\n+from jax import jit, pjit, grad, linearize, jvp, make_jaxpr\n+from jax.lax import psum\n+\n+@partial(pjit, axis_name='i')\n+def f(x):\n+ return np.sin(np.sin(x))\n+\n+x = onp.arange(2).reshape(1, 2).astype(onp.float32)\n+print f(x)\n+\n+def splitjvp(x):\n+ _, jvp = linearize(f, x)\n+ return jvp(np.ones_like(x))\n+\n+print splitjvp(x)\n+print make_jaxpr(splitjvp)(x)\n+print grad(lambda x: np.sum(np.sin(x)))(x)\n+print grad(lambda x: np.sum(f(x)))(x)\n+\n+print grad(lambda x: np.sum(splitjvp(x)))(x)\n+print grad(lambda x: np.sum(jvp(np.sin, (x,), (np.ones_like(x),))[1]))(x)\n+\n+\n+###\n+\n+@partial(pjit, axis_name='i')\n+@partial(pjit, axis_name='j')\n+def f(x):\n+ return psum(psum(x, 'i'), 'j')\n+\n+print f(x.reshape((1, 1, -1)))\n" } ]
Python
Apache License 2.0
google/jax
conceptual progress! need to rewrite jaxpr
260,335
20.02.2019 12:36:18
28,800
12c5bdff9e0698daa5f6cf09b9ac9bc09ccb1003
sketched out new stuff
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -405,20 +405,15 @@ def vmap(fun, in_axes=0, out_axes=0):\nreturn batched_fun\n-def pjit(fun, axis_name, in_axes=0, out_axes=0, mesh_axis=0):\n+def pjit(fun, axis_name):\n\"\"\"Set up SPMD function for JIT compilation and parallel execution with XLA.\"\"\"\n@wraps(fun)\ndef f_jitted(*args, **kwargs):\n- f = lu.wrap_init(fun, kwargs)\njaxtupletree_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))\n- _check_args(jaxtupletree_args)\n+ check_args(jaxtupletree_args)\n+ f = lu.wrap_init(fun, kwargs)\nf, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)\n- in_axes_ = in_axes if isinstance(in_axes, (list, tuple)) else (in_axes,) * len(args)\n- chunksize = pxla.chunk_size(axis_name, mesh_axis, in_axes_, jaxtupletree_args)\n- f = pxla.chunk_transform(f, chunksize, axis_name, in_axes_, out_axes)\n- jaxtupletree_out = pxla.xla_pcall(f, *jaxtupletree_args,\n- axis_name=axis_name, in_axes=in_axes_,\n- out_axes=out_axes, mesh_axis=mesh_axis)\n+ jaxtupletree_out = pxla.xla_pcall(f, *jaxtupletree_args, axis_name=axis_name)\nreturn build_tree(out_tree(), jaxtupletree_out)\nf_jitted.__name__ = \"pjit({})\".format(f_jitted.__name__)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -185,8 +185,7 @@ class JVPTrace(Trace):\ntangents = [t.tangent for t in tracers]\nnonzero_tangents, in_tree_def = tree_to_jaxtuples(tangents)\nf, out_tree_def = traceable(jvp_subtrace(f, self.master), in_tree_def)\n- new_params = call_primitive_jvp_params.get(call_primitive, identity)(params)\n- result = call_primitive.bind(f, pack(primals), nonzero_tangents, **new_params)\n+ result = call_primitive.bind(f, pack(primals), nonzero_tangents, **params)\nprimal_out, tangent_out = build_tree(out_tree_def(), result)\nreturn JVPTracer(self, primal_out, tangent_out)\n@@ -295,8 +294,8 @@ def defjvp(primitive, *jvprules):\ndef standard_jvp(jvprules, primitive, primals, tangents, **params):\nval_out = primitive.bind(*primals, **params)\n- tangents_out = (rule(t, *primals, **params) for rule, t in zip(jvprules, tangents)\n- if rule is not None and t is not zero)\n+ tangents_out = [rule(t, *primals, **params) for rule, t in zip(jvprules, tangents)\n+ if rule is not None and t is not zero]\nreturn val_out, reduce(add_tangents, tangents_out, zero)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/parallel.py", "new_path": "jax/interpreters/parallel.py", "diff": "@@ -164,129 +164,6 @@ class PmapTrace(Trace):\npmap_primitive_rules = {}\n-### axis variable splitting and computation chunking\n-\n-\n-@lu.transformation\n-def axisvar_split(name, new_names, *args):\n- with new_master(SplitTrace) as master:\n- trace = SplitTrace(master, core.cur_sublevel())\n- in_tracers = map(partial(SplitTracer, trace, name, new_names), args)\n- ans = yield in_tracers\n- out_tracer = trace.full_raise(ans)\n- out_val = out_tracer.val\n- del master, out_tracer\n- yield out_val\n-\n-@lu.transformation\n-def axisvar_split_subtrace(master, name, new_names, *vals):\n- trace = SplitTrace(master, core.cur_sublevel())\n- ans = yield map(partial(SplitTracer, trace, name, new_names), vals)\n- out_tracer = trace.full_raise(ans)\n- out_val = out_tracer.val\n- yield out_val\n-\n-class SplitTracer(Tracer):\n- def __init__(self, trace, name, new_names, val):\n- self.trace = trace\n- self.name = name\n- self.new_names = new_names\n- self.val = val\n-\n- @property\n- def aval(self):\n- return 
core.get_aval(self.val)\n-\n- def unpack(self):\n- if self.name is None:\n- return self.full_lower()\n- else:\n- elt_tracer = partial(SplitTracer, self.trace, self.name, self.new_names)\n- return map(elt_tracer, self.val)\n-\n- def full_lower(self):\n- if self.name is None:\n- return core.full_lower(self.val)\n- else:\n- return self\n-\n-class SplitTrace(Trace):\n- def pure(self, val):\n- return SplitTracer(self, None, (), val)\n-\n- def lift(self, val):\n- return SplitTracer(self, None, (), val)\n-\n- def sublift(self, val):\n- return SplitTracer(self, val.name, val.new_names, val.val)\n-\n- def process_primitive(self, primitive, tracers, params):\n- names_in, vals_in = unzip2((t.name, t.val) for t in tracers)\n- if all(name is None for name in names_in):\n- return primitive.bind(*vals_in, **params)\n- else:\n- name = next(name for name in names_in if name is not None)\n- new_names = next(t.new_names for t in tracers if t.name is not None)\n- if primitive in pmap_primitive_rules:\n- val_in, = vals_in\n- if name == params['axis_name']:\n- new_params = {k: params[k] for k in params if k != 'axis_name'}\n- val = val_in\n- for new_name in new_names:\n- val = primitive.bind(val, axis_name=new_name, **new_params)\n- val_out = val\n- return SplitTracer(self, name, new_names, val_out)\n- else:\n- val_out = primitive.bind(val_in, **params)\n- return SplitTracer(self, name, new_names, val_out)\n- else:\n- val_out = primitive.bind(*vals_in, **params)\n- return SplitTracer(self, name, new_names, val_out)\n-\n- def process_call(self, call_primitive, f, tracers, params):\n- names_in, vals_in = unzip2((t.name, t.val) for t in tracers)\n- if all(name is None for name in names_in):\n- return call_primitive.bind(f, *vals, **params)\n- else:\n- name = next(name for name in names_in if name is not None)\n- new_names = next(t.new_names for t in tracers if t.name is not None)\n- f = axisvar_split_subtrace(f, self.master, name, new_names)\n- val_out = call_primitive.bind(f, *vals_in, **params)\n- return SplitTracer(self, name, new_names, val_out)\n-\n- def post_process_call(self, _, out_tracer):\n- name, new_names, val = out_tracer.name, out_tracer.new_names, out_tracer.val\n- master = self.master\n- def todo(x):\n- trace = SplitTrace(master, core.cur_sublevel())\n- return SplitTracer(trace, name, new_names, x)\n-\n- return val, todo\n-\n- def pack(self, tracers):\n- vals = core.pack([t.val for t in tracers])\n- name = next(t.name for t in tracers if t.name is not None)\n- new_names = next(t.new_names for t in tracers if t.name is not None)\n- return SplitTracer(self, name, new_names, vals)\n-\n-def reshape_axis(chunksize, in_axis, arg):\n- aval = core.get_aval(arg)\n- if type(aval) is core.AbstractTuple:\n- if type(in_axis) is int:\n- return core.pack(map(partial(reshape_axis, chunksize, in_axis), arg))\n- elif isinstance(in_axis, (list, tuple)):\n- return core.pack(map(partial(reshape_axis, chunksize), in_axis, arg))\n- else:\n- raise TypeError(\"unexpected in_axis type: {}\".format(type(in_axis)))\n- elif isinstance(aval, ShapedArray):\n- in_axis = in_axis % arg.ndim\n- split_shape = (arg.shape[in_axis] // chunksize, chunksize)\n- new_shape = arg.shape[:in_axis] + split_shape + arg.shape[in_axis+1:]\n- return arg.reshape(new_shape)\n- else:\n- raise TypeError(type(arg))\n-\n-\n### papply\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -79,17 +79,33 @@ class JaxprTrace(Trace):\ndef process_call(self, call_primitive, 
f, tracers, params):\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfun, aux = partial_eval(f, self, in_pvs)\n- params1, params2 = call_primitive_peval_params.get(call_primitive, (identity, identity))\n- out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params1(params))\n+ out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params)\nout_pv, jaxpr, env = aux()\nconst_tracers = map(self.new_instantiated_const, consts)\nenv_tracers = map(self.full_raise, env)\nbound_subjaxpr = (jaxpr, const_tracers, env_tracers)\n- eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False,\n- params2(params))\n+ eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False, params)\nreturn JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\n+ def process_map(self, call_primitive, f, tracers, params):\n+ in_pvs, in_consts = unzip2([t.pval for t in tracers])\n+ fun, aux = partial_eval(f, self, map(remove_axis_from_pv, in_pvs))\n+ out_pv_const, reduced_consts = call_primitive.bind(fun, *in_consts, **params)\n+ consts = map(partial(add_axis_to_pv, 0), in_pvs)\n+ out_pv, jaxpr, env = aux()\n+ const_tracers = map(self.new_instantiated_const, consts)\n+ env_tracers = map(self.full_raise, env)\n+ jaxpr_converted = jaxpr.copy()\n+ jaxpr_converted.constvars = []\n+ jaxpr_converted.invars = list(it.chain(jaxpr.constvars, jaxpr.invars))\n+ invars = tuple(it.chain(const_tracers, eqn.invars))\n+ bound_subjaxpr = (jaxpr_converted, (), env)\n+ eqn = JaxprEqn(invars, None, call_primitive, (bound_subjaxpr,), False, params)\n+ return JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\n+\n+\ndef post_process_call(self, call_primitive, out_tracer):\n+ # TODO(mattjj): post_process_map\njaxpr, consts, env = tracers_to_jaxpr([], out_tracer)\nout_pv, out_pv_const = out_tracer.pval\nout = pack((out_pv_const, pack(consts)))\n@@ -402,4 +418,4 @@ compiled_call_p.def_custom_bind(compiled_call)\ncompiled_call_p.def_impl(compiled_call_impl)\n-call_primitive_peval_params = {}\n+call_peval_rewrites = {}\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -16,12 +16,8 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-from collections import namedtuple, defaultdict\n-from distutils.util import strtobool\n-from contextlib import contextmanager\n+from collections import namedtuple\nimport itertools as it\n-import operator as op\n-import os\nimport numpy as onp\nimport six\n@@ -34,7 +30,7 @@ from .. 
import linear_util as lu\nfrom ..abstract_arrays import ShapedArray\nfrom ..util import partial, unzip2, concatenate, safe_map, prod\nfrom ..lib import xla_bridge as xb\n-from .xla import (xla_shape, xla_destructure, translation_rule, abstractify,\n+from .xla import (xla_shape, xla_destructure, translation_rule,\nxla_shape_to_result_shape, jaxpr_computation)\nfrom .partial_eval import trace_to_subjaxpr, merge_pvals, JaxprTrace, PartialVal\nfrom .batching import dimsize, broadcast\n@@ -49,189 +45,45 @@ map = safe_map\n### util\n-def chunk_transform(fun, chunksize, name, in_axes, out_axes_dst):\n- \"\"\"Rewrite SPMD operations to act first on local chunks then cross-replica.\"\"\"\n- temp_name = TempAxisName()\n- fun = parallel.axisvar_split(fun, name, (temp_name, name))\n- fun, out_axes_src = parallel.pmap_transform(fun, temp_name, in_axes)\n- fun = move_output_axis_transform(fun, name, chunksize, out_axes_src, out_axes_dst)\n- return fun\n-\n-class TempAxisName(object):\n- def __repr__(self):\n- return '<temp axis {}>'.format(hex(id(self)))\n-\n-@lu.transformation\n-def move_output_axis_transform(name, chunksize, src, dst, *args):\n- \"\"\"Function transformation that moves output axes from src to dst.\"\"\"\n- ans = yield args\n- yield moveaxis(name, chunksize, dst, src(), ans)\n-\n-def moveaxis(name, sz, dst, src, x):\n- aval = core.get_aval(x)\n- if type(aval) is core.AbstractTuple:\n- if type(src) is tuple and type(dst) is tuple:\n- return core.pack(map(partial(moveaxis, name, sz), dst, src, x))\n- elif type(src) is tuple:\n- return core.pack(map(partial(moveaxis, name, sz, dst), src, x))\n- elif type(dst) is tuple:\n- srcs = (src,) * len(dst)\n- return core.pack(map(partial(moveaxis, name, sz), dst, srcs, x))\n- else:\n- return core.pack(map(partial(moveaxis, name, sz, dst, src), x))\n- elif isinstance(aval, ShapedArray):\n- dst_ = (dst % aval.ndim) if dst is not None and aval.ndim else dst\n- if src == dst_:\n- return x\n- else:\n- if src is None:\n- x = broadcast(x, sz, force_broadcast=True)\n- src = 0\n- dst_ = dst % (aval.ndim + 1)\n- elif dst is None:\n- return x.sum(src).psum(name)\n- if src == dst_:\n- return x\n- else:\n- perm = [i for i in range(onp.ndim(x)) if i != src]\n- perm.insert(dst_, src)\n- return x.transpose(perm)\n- else:\n- raise TypeError(type(aval))\n-\n-def chunk_aval(chunksize, aval, axis):\n- \"\"\"Transform an abstract value's shape to have chunksize extent along axis.\"\"\"\n- if axis is None:\n- return aval\n- else:\n- shape = list(aval.shape)\n- shape[axis] = chunksize\n- return ShapedArray(tuple(shape), aval.dtype)\n-\n-\n-def build_axis_spec_tree(spec, treedef):\n- \"\"\"Given a JTupleTreeDef, canonicalize an axis spec for that treedef.\"\"\"\n- if treedef is xla.leaf:\n- return spec\n- elif type(spec) is tuple:\n- if treedef.child_specs:\n- return tuple(map(build_axis_spec_tree, spec, treedef.child_specs))\n- else:\n- return ()\n- else:\n- return tuple(map(partial(build_axis_spec_tree, spec), treedef.child_specs))\n+def shard_arg(arg):\n+ sz = arg.shape[0]\n+ shards = [arg[i] for i in range(sz)]\n+ return [xb.device_put(shards[i], n) for n, i in enumerate(assign_shards(sz))]\n-def flatten(x):\n- if type(x) is tuple:\n- return tuple(_flatten(x))\n- else:\n- return x\n-\n-def _flatten(x):\n- if type(x) is tuple:\n- return it.chain.from_iterable((_flatten(elt) for elt in x))\n- else:\n- return [x]\n-\n-\n-def shard_arg(mesh_spec, mesh_axis, axis, arg):\n- \"\"\"Shard and device_put an input array argument along a logical axis.\"\"\"\n- num_replicas = 
xb.get_replica_count()\n- if prod(mesh_spec) != num_replicas:\n- msg = \"mesh spec {} total size of {} doesn't match number of replicas {}.\"\n- raise ValueError(msg.format(mesh_spec, prod(mesh_spec), num_replicas))\n- shards = split_array(arg, mesh_spec[mesh_axis], axis)\n- replica_shards = [shards[i] for i in shard_assignments(mesh_spec, mesh_axis)]\n- return map(xb.device_put, replica_shards, range(num_replicas))\n-\n-def unshard_output(mesh_spec, mesh_axis, out_axis, out_shards):\n- \"\"\"Collect and concatenate sharded device results.\"\"\"\n- _, ids = onp.unique(shard_assignments(mesh_spec, mesh_axis), return_index=True)\n- if out_axis is None:\n- return out_shards[0]\n- elif type(out_axis) is int:\n- shards = [out_shards[i] for i in ids]\n- return onp.concatenate(shards, out_axis)\n- else:\n- raise TypeError(type(out_axis))\n+def unshard_output(axis_size, out_shards):\n+ _, ids = onp.unique(shard_assignments((axis_size,)), return_index=True)\n+ return onp.stack([out_shards[i] for i in ids])\n-def shard_assignments(mesh_spec, mesh_axis):\n- \"\"\"Given a mesh axis long which to shard data, compute replica assignments.\"\"\"\n- indices_shape = [1] * len(mesh_spec)\n- indices_shape[mesh_axis] = mesh_spec[mesh_axis]\n- indices = onp.arange(mesh_spec[mesh_axis]).reshape(indices_shape)\n- return tuple(onp.broadcast_to(indices, mesh_spec).ravel())\n+def assign_shards(size):\n+ groupsize, ragged = divmod(xb.get_replica_count(), size)\n+ assert not ragged\n+ indices = onp.tile(onp.arange(size)[:, None], (1, groupsize))\n+ return tuple(indices.ravel())\ndef replica_groups(mesh_spec, mesh_axis):\n- \"\"\"Given a mesh axis along which to operate, compute XLA replica_groups.\"\"\"\n+ mesh_spec = mesh_spec + [xb.get_replica_count() // prod(mesh_spec)]\ngroups = onp.split(onp.arange(prod(mesh_spec)).reshape(mesh_spec),\nmesh_spec[mesh_axis], axis=mesh_axis)\ngroups = map(onp.ravel, groups)\nreturn tuple(tuple(group) for group in zip(*groups))\n-def split_array(x, num_splits, axis):\n- \"\"\"A special-case of numpy.split implemented in terms of indexing.\"\"\"\n- if axis is None:\n- return [x] * num_splits\n- else:\n- assert x.shape[axis] % num_splits == 0\n- split_size = x.shape[axis] // num_splits\n- def get_nth_subarray(n):\n- idx = [slice(None)] * x.ndim\n- idx[axis] = slice(n * split_size, (n+1) * split_size)\n- return x[tuple(idx)]\n- return map(get_nth_subarray, range(num_splits))\n-\n-\n-def chunk_size(axis_name, mesh_axis, in_axes, args):\n- \"\"\"Compute the chunk size for mapped axes, checking for errors.\"\"\"\n- global mesh_spec\n- axis_sizes = reduce(set.union, map(dimsize, in_axes, args))\n- if len(axis_sizes) == 0:\n- msg = \"axis name '{}' not bound to any input axes.\"\n- raise ValueError(msg.format(axis_name))\n- elif len(axis_sizes) > 1:\n- msg = \"axis name '{}' bound to multiple axes with different sizes: {}.\"\n- raise ValueError(msg.format(axis_name, axis_sizes))\n- else:\n- axis_size = axis_sizes.pop()\n- if axis_size % mesh_spec()[mesh_axis]:\n- msg = (\"axis name '{}' bound to input axis of size {} mapped to mesh \"\n- \"axis index {} with size {}, which does not evenly divide {}.\")\n- raise ValueError(msg.format(axis_name, axis_size, mesh_axis,\n- mesh_spec()[mesh_axis], axis_size))\n-\n- return axis_size // mesh_spec()[mesh_axis]\n-\n-def mesh_spec():\n- global _mesh_spec\n- return _mesh_spec or (xb.get_replica_count(),)\n-_mesh_spec = None\n-\n-@contextmanager\n-def device_mesh(spec):\n- global _mesh_spec\n- _mesh_spec, prev_spec = spec, _mesh_spec\n- yield\n- 
_mesh_spec = prev_spec\n-\n-# axis environments are tiny, so we don't worry about the cost of copying keys\n-def new_axis_env(d): return d\n-def extend_axis_env(d1, d2): return dict(d1, **d2)\n+### xla_pcall\n-### xla_pcall\n+AxisEnv = collections.namedtuple(\"AxisEnv\", [\"names\", \"sizes\"])\n+def extend_env(axis_env, name, size):\n+ return AxisEnv(axis_env.names + [name], axis_env.sizes + [size])\n-def compile_replicated(jaxpr, axis_env, consts, *abstract_args):\n+def compile_replicated(jaxpr, axis_name, axis_size, consts, *abstract_args):\n+ axis_env = AxisEnv([axis_name], [axis_size])\narg_shapes = list(map(xla_shape, abstract_args))\n- built_c = replicated_computation(jaxpr, axis_env, consts, (), *arg_shapes)\n+ built_c = replicated_comp(jaxpr, axis_env, consts, (), *arg_shapes)\nresult_shape = xla_shape_to_result_shape(built_c.GetReturnValueShape())\nreturn built_c.Compile(arg_shapes, xb.get_compile_options()), result_shape\n-def replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\n- *arg_shapes):\n+def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):\nc = xb.make_computation_builder(\"replicated_computation\")\ndef read(v):\n@@ -242,7 +94,6 @@ def replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\nenv[v] = node\nenv = {}\n- consts_env = dict(zip(jaxpr.constvars, const_vals))\nwrite(core.unitvar, c.Tuple())\nif const_vals:\nmap(write, jaxpr.constvars, map(c.Constant, const_vals))\n@@ -256,9 +107,10 @@ def replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\nif eqn.primitive in parallel_translation_rules:\n# if we see an spmd primitive (one with a parallel translation rule), then\n# call that parallel translation rule using axis_env for device_groups\n- rule = parallel_translation_rules[eqn.primitive]\n- device_groups = axis_env[eqn.params['axis_name']]\n+ name = eqn.params['axis_name']\n+ device_groups = replica_groups(axis_env.sizes, axis_env.names.index(name))\nparams = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\n+ rule = parallel_translation_rules[eqn.primitive]\nans = rule(c, *in_nodes, device_groups=device_groups, **params)\nelif eqn.bound_subjaxprs:\n# if there are bound subjaxprs, we either recursively call\n@@ -267,16 +119,15 @@ def replicated_computation(jaxpr, axis_env, const_vals, freevar_shapes,\nif eqn.primitive is xla_pcall_p:\n# if we're processing an xla_pcall, extend the axis environment and\n# recursively call replicated_computation\n- device_groups = replica_groups(mesh_spec(), eqn.params['mesh_axis'])\n- new_axis_binding = {eqn.params['axis_name'] : device_groups}\n(subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\n- subc = replicated_computation(\n- subjaxpr, extend_axis_env(new_axis_binding, axis_env), (),\n- map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n+ subc = replicated_comp(\n+ subjaxpr,\n+ extend_env(axis_env, eqn.params['axis_name'], eqn.params['axis_size']),\n+ (), map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n*in_shapes)\nsubfun = (subc, tuple(map(read, const_bindings + freevar_bindings)))\n- # we've already lowered the subfun to an spmd computation as needed, so\n- # we just generate a regular Call into it\n+ # select the correct subarray for this replica, call subfun\n+ in_nodes = map(partial(xla_split, c), in_nodes)\nans = translation_rule(eqn.primitive)(c, subfun, *in_nodes)\nelse:\n# otherwise, act like xla.jaxpr_computation\n@@ -299,57 +150,53 @@ def replicated_computation(jaxpr, axis_env, const_vals, 
freevar_shapes,\nmap(write, eqn.outvars, out_nodes)\nreturn c.Build(read(jaxpr.outvar))\n+def xla_split(c, x):\n+ shape = list(c.GetShape(x).dimensions())\n+ start_indices = [c.ReplicaId()] + [c.Constant(0)] * len(shape)\n+ return c.Reshape(c.DynamicSlice(x, start_indices, [1] + shape[1:]), shape[1:])\n+\ndef xla_pcall_impl(fun, *args, **params):\n- axis_name = params.pop('axis_name') # e.g. 'i'\n- in_axes = params.pop('in_axes') # e.g. (0, None) or (0, 1)\n- out_axes = params.pop('out_axes') # e.g. 0 or (None, 1)\n- mesh_axis = params.pop('mesh_axis') # e.g. 0 or 1\n+ axis_name = params.pop('axis_name')\n+ axis_size = params.pop('axis_size')\nassert not params\nflat_args, in_trees = unzip2(map(xla.tree_flatten, args))\nflat_args = concatenate(flat_args)\nfun, out_tree = xla.flatten_fun(fun, in_trees)\n- flat_in_axes = flatten(tuple(map(build_axis_spec_tree, in_axes, in_trees)))\n- compiled_fun = xla_parallel_callable(fun, axis_name, flat_in_axes, mesh_axis,\n- mesh_spec(), *map(abstractify, flat_args))\n- flat_out_axes = flatten(build_axis_spec_tree(out_axes, out_tree()))\n- flat_ans = compiled_fun(out_tree(), flat_out_axes, *flat_args)\n+ abstract_args = map(partial(abstractify, axis_size), flat_args)\n+ compiled_fun = parallel_callable(fun, axis_name, axis_size, *abstract_args)\n+ flat_ans = compiled_fun(out_tree(), *flat_args)\nif out_tree() is xla.leaf:\nreturn flat_ans\nelse:\nreturn xla.build_tree(iter(flat_ans), out_tree())\n+def abstractify(axis_size, x):\n+ assert onp.shape(x)[0] == axis_size\n+ aval = xla.abstractify(x)\n+ return ShapedArray(aval.shape[1:], aval.dtype)\n+\n@lu.memoize\n-def xla_parallel_callable(fun, axis_name, in_axes, mesh_axis, mesh_spec,\n- *abstract_args):\n- chunksize = next((x.shape[ax] // mesh_spec[mesh_axis]\n- for x, ax in zip(abstract_args, in_axes)\n- if ax is not None and type(x) is ShapedArray), None)\n- if chunksize is not None:\n- abstract_args = map(partial(chunk_aval, chunksize), abstract_args, in_axes)\n- axis_env = new_axis_env({axis_name: replica_groups(mesh_spec, mesh_axis)})\n- pvals = [PartialVal((aval, core.unit)) for aval in abstract_args]\n+def parallel_callable(fun, axis_name, axis_size, *avals):\n+ pvals = [PartialVal((aval, core.unit)) for aval in avals]\nwith core.new_master(JaxprTrace, True) as master:\njaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)\nassert not env\n- compiled, _ = compile_replicated(jaxpr, axis_env, consts, *abstract_args)\n+ compiled, _ = compile_replicated(jaxpr, axis_name, axis_size, consts, *avals)\ndel master, consts, jaxpr, env\n- return partial(execute_replicated, in_axes, mesh_axis, mesh_spec, compiled, pval)\n+ return partial(execute_replicated, compiled, pval, axis_size)\n-def execute_replicated(in_axes, mesh_axis, mesh_spec, compiled, pval,\n- out_tree, out_axes, *args):\n- input_bufs = map(partial(shard_arg, mesh_spec, mesh_axis), in_axes, args)\n- input_bufs = zip(*input_bufs) if input_bufs else [[]] * xb.get_replica_count()\n+def execute_replicated(compiled, pval, out_tree, *args):\n+ input_bufs = zip(*map(shard_arg, args)) if args else [[]] * xb.get_replica_count()\nout_bufs = compiled.ExecutePerReplica(input_bufs)\nout_shards = [merge_pvals(buf.to_py(), pval) for buf in out_bufs]\nif out_tree is xla.leaf:\n- return unshard_output(mesh_spec, mesh_axis, out_axes, out_shards)\n+ return unshard_output(axis_size, out_shards)\nelse:\n- return map(partial(unshard_output, mesh_spec, mesh_axis), out_axes,\n- zip(*out_shards))\n+ return map(partial(unshard_output, 
axis_size), zip(*out_shards))\nxla_pcall_p = core.Primitive('xla_pcall')\n@@ -361,32 +208,4 @@ xla_pcall_p.def_impl(xla_pcall_impl)\nxla.translations[xla_pcall_p] = xla.xla_call_translation_rule\n-def xla_pcall_transpose_params(params):\n- in_axes, out_axes = params['in_axes'], params['out_axes']\n- trans_in_axes = (None, None, out_axes),\n- trans_out_axes = (in_axes, None)\n- return dict(params, in_axes=trans_in_axes, out_axes=trans_out_axes)\n-ad.primitive_transposes[xla_pcall_p] = partial(ad.call_transpose, xla_pcall_p,\n- params_fun=xla_pcall_transpose_params)\n-\n-def xla_pcall_jvp_params(params):\n- in_ax, out_ax = params['in_axes'], params['out_axes']\n- return dict(params, in_axes=(in_ax, in_ax), out_axes=(out_ax, out_ax))\n-ad.call_primitive_jvp_params[xla_pcall_p] = xla_pcall_jvp_params\n-\n-\n-def xla_pcall_peval_params1(params):\n- out_axes = params['out_axes']\n- return dict(params, out_axes=(out_axes, 0))\n-\n-def xla_pcall_peval_params2(params):\n- # TODO this is wrong...\n- # in_axes = params['in_axes']\n- # return dict(params, in_axes=(None, 0, in_axes))\n- return params\n-\n-pe.call_primitive_peval_params[xla_pcall_p] = (xla_pcall_peval_params1,\n- xla_pcall_peval_params2)\n-\n-\nparallel_translation_rules = {}\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -386,7 +386,6 @@ def xla_shape(x):\n# instead, for values returned to the user, always destructure tuples.\n# The code here is similar to that in tree_util, but is meant to flatten\n# JaxTuple trees only.\n-# TODO(mattjj): since pjit does flattening in api.py, can move/de-duplicate this\n@lu.transformation_with_aux\ndef flatten_fun(in_trees, *flat_args):\n" }, { "change_type": "MODIFY", "old_path": "jax/lax.py", "new_path": "jax/lax.py", "diff": "@@ -3268,7 +3268,6 @@ def _while_loop_translation_rule(c, init_val, cond_consts, body_consts, opaque_p\n_unpack_eqn(cond_var, cond_jaxpr.constvars)]\n+ list(cond_jaxpr.eqns))\n-\nassert len(body_jaxpr.invars) == 1\nbody_jaxpr_converted = body_jaxpr.copy()\nbody_jaxpr_converted.constvars = []\n" }, { "change_type": "MODIFY", "old_path": "peval.py", "new_path": "peval.py", "diff": "@@ -6,7 +6,7 @@ from jax.lax import psum\n@partial(pjit, axis_name='i')\ndef f(x):\n- return np.sin(np.sin(x))\n+ return np.sin(x)\nx = onp.arange(2).reshape(1, 2).astype(onp.float32)\nprint f(x)\n@@ -15,7 +15,7 @@ def splitjvp(x):\n_, jvp = linearize(f, x)\nreturn jvp(np.ones_like(x))\n-print splitjvp(x)\n+# print splitjvp(x)\nprint make_jaxpr(splitjvp)(x)\nprint grad(lambda x: np.sum(np.sin(x)))(x)\nprint grad(lambda x: np.sum(f(x)))(x)\n" }, { "change_type": "MODIFY", "old_path": "tests/parallel_test.py", "new_path": "tests/parallel_test.py", "diff": "@@ -123,16 +123,5 @@ class PapplyTest(jtu.JaxTestCase):\nself.assertAllClose(ans, expected, check_dtypes=True)\n-class SplitTest(jtu.JaxTestCase):\n-\n- def testSplitBasic(self):\n- f = lambda x: lax.psum(np.sin(x), 'i')\n- x = onp.ones((2, 2))\n- fsplit = axisvar_split(f, 'i', ('j', 'k'))\n- ans = pmap(pmap(fsplit, 'j'), 'k')(x)\n- expected = onp.sum(onp.sin(x))\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n-\nif __name__ == '__main__':\nabsltest.main()\n" }, { "change_type": "MODIFY", "old_path": "tests/pjit_test.py", "new_path": "tests/pjit_test.py", "diff": "@@ -31,14 +31,14 @@ config.parse_flags_with_absl()\nclass PmapTest(jtu.JaxTestCase):\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testBasic(self):\n- # f = lambda x: x - psum(x, 'i')\n- # x = 
onp.arange(8., dtype=onp.float32).reshape(4, 2)\n- # f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n- # ans = f(x)\n- # expected = x - x.sum(0)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testBasic(self):\n+ f = lambda x: x - psum(x, 'i')\n+ x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n+ f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n+ ans = f(x)\n+ expected = x - x.sum(0)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n# @jtu.skip_on_devices(\"gpu\")\n# def testTupleOutput(self):\n@@ -58,17 +58,17 @@ class PmapTest(jtu.JaxTestCase):\n# expected = x - x.sum(0)\n# self.assertAllClose(ans, expected, check_dtypes=False)\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testNested(self):\n- # def f(x, y):\n- # return psum(psum(x, 'i'), 'j')\n- # f = pjit(f, 'i')\n- # f = pjit(f, 'j', out_axes=1)\n-\n- # x = onp.ones((3, 4), onp.float32)\n- # ans = f(x, x)\n- # expected = 12 * onp.ones((4, 3), onp.float32)\n- # self.assertAllClose(ans, expected, check_dtypes=True)\n+ @jtu.skip_on_devices(\"gpu\")\n+ def testNested(self):\n+ def f(x, y):\n+ return psum(psum(x, 'i'), 'j')\n+ f = pjit(f, 'i')\n+ f = pjit(f, 'j', out_axes=1)\n+\n+ x = onp.ones((3, 4), onp.float32)\n+ ans = f(x, x)\n+ expected = 12 * onp.ones((4, 3), onp.float32)\n+ self.assertAllClose(ans, expected, check_dtypes=True)\n# @jtu.skip_on_devices(\"gpu\")\n# def testForwardModeAutodiff(self):\n" } ]
Python
Apache License 2.0
google/jax
sketched out new stuff
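The pxla.py rewrite in this record replaces the mesh-spec sharding machinery with a simpler scheme: each slice along the leading axis of an argument is assigned to a contiguous group of replicas. A minimal standalone NumPy sketch of that assignment logic follows; the replica count is passed in explicitly here instead of being read from xla_bridge.get_replica_count(), and the helper names mirror, but are not, the actual JAX internals.

```python
import numpy as np

def assign_shards(size, replica_count):
    # One entry per replica, giving the index of the leading-axis shard it
    # receives; with 8 replicas and 4 shards this is (0, 0, 1, 1, 2, 2, 3, 3).
    groupsize, ragged = divmod(replica_count, size)
    assert not ragged, "axis size must evenly divide the replica count"
    return tuple(np.tile(np.arange(size)[:, None], (1, groupsize)).ravel().tolist())

def unshard_output(axis_size, out_shards, replica_count):
    # Keep the first replica's result from each group and restack the
    # results along the mapped axis.
    _, ids = np.unique(assign_shards(axis_size, replica_count), return_index=True)
    return np.stack([out_shards[i] for i in ids])

print(assign_shards(4, 8))   # (0, 0, 1, 1, 2, 2, 3, 3)
print(assign_shards(2, 2))   # (0, 1)

shards = [np.full((2,), i) for i in assign_shards(4, 8)]   # what 8 replicas would return
print(unshard_output(4, shards, 8).shape)                  # (4, 2)
```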
260,335
21.02.2019 11:47:26
28,800
4c1fc9cfbd0df112ddaec8b4663f025fda7350f9
peval.py works again (some paired w/
[ { "change_type": "MODIFY", "old_path": "jax/ad_util.py", "new_path": "jax/ad_util.py", "diff": "@@ -16,8 +16,7 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-from .core import JaxTuple, lattice_join\n-from .interpreters.partial_eval import Primitive\n+from .core import JaxTuple, lattice_join, Primitive\nfrom .tree_util import register_pytree_node\nfrom .util import safe_map\n" }, { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -82,6 +82,10 @@ class Primitive(object):\nself.impl = impl\nreturn impl\n+ def def_abstract_eval(self, abstract_eval):\n+ self.abstract_eval = abstract_eval\n+ return abstract_eval\n+\ndef def_custom_bind(self, bind):\nself.bind = bind\nreturn bind\n@@ -90,6 +94,10 @@ class Primitive(object):\nraise NotImplementedError(\"Evaluation rule for '{}' not implemented\"\n.format(self.name))\n+ def abstract_eval(self, *args, **kwargs):\n+ raise NotImplementedError(\"Abstract evaluation for '{}' not implemented\"\n+ .format(primitive.name))\n+\n# -------------------- lifting --------------------\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -21,6 +21,7 @@ from collections import namedtuple, Counter, defaultdict\nfrom .. import core\nfrom .. import linear_util as lu\n+from ..abstract_arrays import ShapedArray, ConcreteArray\nfrom ..linear_util import thunk, transformation, transformation_with_aux\nfrom ..util import unzip2, safe_zip, safe_map, toposort, partial\nfrom ..core import (Trace, Tracer, new_master, Jaxpr, JaxprEqn, get_aval, pack,\n@@ -77,6 +78,8 @@ class JaxprTrace(Trace):\nreturn JaxprTracer(self, pval, eqn)\ndef process_call(self, call_primitive, f, tracers, params):\n+ if call_primitive in map_primitives:\n+ return self.process_map(call_primitive, f, tracers, params)\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfun, aux = partial_eval(f, self, in_pvs)\nout_pv_const, consts = call_primitive.bind(fun, *in_consts, **params)\n@@ -89,20 +92,20 @@ class JaxprTrace(Trace):\ndef process_map(self, call_primitive, f, tracers, params):\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\n- fun, aux = partial_eval(f, self, map(remove_axis_from_pv, in_pvs))\n- out_pv_const, reduced_consts = call_primitive.bind(fun, *in_consts, **params)\n- consts = map(partial(add_axis_to_pv, 0), in_pvs)\n- out_pv, jaxpr, env = aux()\n+ reduced_pvs = map(remove_axis_from_pv, in_pvs) #\n+ fun, aux = partial_eval(f, self, reduced_pvs)\n+ out_const, consts = call_primitive.bind(fun, *in_consts, **params)\n+ out_pv_reduced, jaxpr, env = aux()\n+ out_pv = add_axis_to_pv(params['axis_size'], out_pv_reduced)\nconst_tracers = map(self.new_instantiated_const, consts)\nenv_tracers = map(self.full_raise, env)\njaxpr_converted = jaxpr.copy()\njaxpr_converted.constvars = []\njaxpr_converted.invars = list(it.chain(jaxpr.constvars, jaxpr.invars))\n- invars = tuple(it.chain(const_tracers, eqn.invars))\n+ invars = tuple(it.chain(const_tracers, tracers))\nbound_subjaxpr = (jaxpr_converted, (), env)\neqn = JaxprEqn(invars, None, call_primitive, (bound_subjaxpr,), False, params)\n- return JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\n-\n+ return JaxprTracer(self, PartialVal((out_pv, out_const)), eqn)\ndef post_process_call(self, call_primitive, out_tracer):\n# TODO(mattjj): post_process_map\n@@ -121,6 +124,45 @@ class JaxprTrace(Trace):\nreturn out, todo\n+map_primitives = 
set()\n+\n+def remove_axis_from_pv(pv):\n+ if pv is None:\n+ return pv\n+ elif isinstance(pv, AbstractValue):\n+ return remove_axis_from_aval(pv)\n+ elif type(pv) is JaxprTracerTuple:\n+ return JaxprTracerTuple(map(remove_axis_from_pv, pv))\n+ else:\n+ raise TypeError(type(pv))\n+\n+def remove_axis_from_aval(aval):\n+ if type(aval) is AbstractTuple:\n+ return AbstractTuple(map(remove_axis_from_aval, aval))\n+ elif isinstance(aval, ShapedArray):\n+ # might be raising abstraction level from Concrete here\n+ return ShapedArray(aval.shape[1:], aval.dtype)\n+ else:\n+ raise NotImplementedError # TODO(mattjj)\n+\n+def add_axis_to_pv(size, pv):\n+ if pv is None:\n+ return pv\n+ elif isinstance(pv, AbstractValue):\n+ return add_axis_to_aval(size, pv)\n+ elif type(pv) is JaxprTracerTuple:\n+ return JaxprTracerTuple(map(partial(add_axis_to_pv, size), pv))\n+ else:\n+ raise TypeError(type(pv))\n+\n+def add_axis_to_aval(size, aval):\n+ if type(aval) is AbstractTuple:\n+ return AbstractTuple(map(partial(add_axis_to_aval, size), aval))\n+ elif isinstance(aval, ShapedArray):\n+ return ShapedArray((size,) + aval.shape, aval.dtype)\n+ else:\n+ raise NotImplementedError # TODO(mattjj)\n+\ndef partial_eval(f, trace, pvs):\nf = trace_to_subjaxpr(f, trace.master)\n@@ -130,8 +172,8 @@ def partial_eval(f, trace, pvs):\n@transformation_with_aux\ndef partial_eval_wrapper(avals, *consts, **kwargs):\njaxpr, (out_pval, consts, env) = yield (map(PartialVal, zip(avals, consts)),)\n- out_pv, out_pv_const = out_pval\n- out = pack((out_pv_const, pack(consts)))\n+ out_pv, out_const = out_pval\n+ out = pack((out_const, pack(consts)))\nyield out, (out_pv, jaxpr, env)\n@@ -214,16 +256,6 @@ class PartialVal(tuple):\nvalid_pv_types = (AbstractValue, JaxprTracerTuple, type(None))\n-def def_abstract_eval(primitive, abstract_eval):\n- primitive.abstract_eval = abstract_eval\n-\n-def abstract_eval_unimplemented(primitive, *args, **kwargs):\n- raise NotImplementedError(\"Abstract evaluation for '{}' not implemented\"\n- .format(primitive.name))\n-\n-Primitive.def_abstract_eval = def_abstract_eval\n-Primitive.abstract_eval = abstract_eval_unimplemented\n-\nabstract_unit = core.AbstractTuple()\ndef merge_pvals(val, pval):\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -203,9 +203,8 @@ xla_pcall_p = core.Primitive('xla_pcall')\nxla_pcall = partial(core.call_bind, xla_pcall_p)\nxla_pcall_p.def_custom_bind(xla_pcall)\nxla_pcall_p.def_impl(xla_pcall_impl)\n-\n-\nxla.translations[xla_pcall_p] = xla.xla_call_translation_rule\n+pe.map_primitives.add(xla_pcall_p)\nparallel_translation_rules = {}\n" }, { "change_type": "MODIFY", "old_path": "peval.py", "new_path": "peval.py", "diff": "@@ -16,6 +16,4 @@ def splitjvp(x):\nreturn jvp(np.ones_like(x))\nprint splitjvp(x)\n-# print make_jaxpr(splitjvp)(x)\n-\n-# TODO TODO register process_map stuff\n+print make_jaxpr(splitjvp)(x)\n" } ]
Python
Apache License 2.0
google/jax
peval.py works again (some paired w/ @dougalm)
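The process_map method added in this record strips the mapped axis from each input's abstract value before tracing the inner function, then adds it back to the output. The same bookkeeping, restated on a stand-in for ShapedArray so it runs outside the tracer machinery (the names here are illustrative, not the JAX internals):

```python
from collections import namedtuple

# A stand-in for ShapedArray: just a shape tuple and a dtype string.
Aval = namedtuple("Aval", ["shape", "dtype"])

def remove_axis_from_aval(aval):
    # Inside the mapped function, each input loses its leading mapped axis.
    return Aval(aval.shape[1:], aval.dtype)

def add_axis_to_aval(size, aval):
    # The map's output gets the mapped axis back at position 0.
    return Aval((size,) + aval.shape, aval.dtype)

arg = Aval((4, 2), "float32")
inner = remove_axis_from_aval(arg)      # Aval(shape=(2,), dtype='float32')
outer = add_axis_to_aval(4, inner)      # Aval(shape=(4, 2), dtype='float32')
print(inner, outer)
```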
260,335
21.02.2019 13:42:51
28,800
8211e433a90e5333b70527abd07f50f91fa6a952
let's get this grad of pjit goin
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -203,6 +203,7 @@ xla_pcall_p = core.Primitive('xla_pcall')\nxla_pcall = partial(core.call_bind, xla_pcall_p)\nxla_pcall_p.def_custom_bind(xla_pcall)\nxla_pcall_p.def_impl(xla_pcall_impl)\n+ad.primitive_transposes[xla_pcall_p] = partial(ad.call_transpose, xla_pcall_p)\nxla.translations[xla_pcall_p] = xla.xla_call_translation_rule\npe.map_primitives.add(xla_pcall_p)\n" }, { "change_type": "MODIFY", "old_path": "peval.py", "new_path": "peval.py", "diff": "from functools import partial\nimport numpy as onp\nimport jax.numpy as np\n-from jax import jit, pjit, grad, linearize, jvp, make_jaxpr\n-from jax.lax import psum\n+from jax import jit, pjit, grad, linearize, make_jaxpr\n@partial(pjit, axis_name='i', axis_size=1)\ndef f(x):\n@@ -17,3 +16,4 @@ def splitjvp(x):\nprint splitjvp(x)\nprint make_jaxpr(splitjvp)(x)\n+print grad(lambda x: np.sum(splitjvp(x)))(x)\n" } ]
Python
Apache License 2.0
google/jax
let's get this grad of pjit goin
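Registering ad.call_transpose for xla_pcall_p in this record hooks the call primitive into JAX's transposition pass, which is what reverse-mode differentiation runs over linearized code. Roughly the same machinery is what the much later public jax.linear_transpose API exposes; the sketch below uses that later API on an ordinary linear function and is not code from this commit.

```python
import jax

f = lambda x: 3.0 * x                 # a structurally linear function
f_t = jax.linear_transpose(f, 1.0)    # the 1.0 only supplies an example shape/dtype
print(f_t(1.0))                       # (3.0,): the cotangent pulled back through f
```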
260,335
21.02.2019 18:37:51
28,800
2817c4ded81c80d773c88bcf6fcbdd89ab4ca854
grad of pjit!
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -75,7 +75,8 @@ def vjp(traceable, primals):\ndef vjp_(ct):\nct = ignore_consts(ct, pval)\ndummy_primal_and_ct = pack((core.unit, ct))\n- _, arg_cts = backward_pass(jaxpr, consts, (), dummy_primal_and_ct)\n+ dummy_args = (None,) * len(jaxpr.invars)\n+ _, arg_cts = backward_pass(jaxpr, consts, (), dummy_args, dummy_primal_and_ct)\nreturn instantiate_zeros(pack(primals), arg_cts[1])\nreturn out_primal, vjp_\n@@ -100,7 +101,7 @@ def unpair_pval(pval):\naval_1, aval_2 = aval\nreturn (aval_1, const_1), (aval_2, const_2)\n-def backward_pass(jaxpr, consts, freevar_vals, cotangent_in):\n+def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\ndef write_cotangent(v, ct):\n# assert v not in primal_env\nif ct is not None:\n@@ -109,10 +110,11 @@ def backward_pass(jaxpr, consts, freevar_vals, cotangent_in):\ndef read_cotangent(v):\nreturn ct_env.get(v, zero)\n- primal_env = {v: val\n- for v, val in zip(jaxpr.freevars, freevar_vals)\n+ primal_env = {v: val for v, val in zip(jaxpr.freevars, freevar_vals)\nif val is not None}\nprimal_env.update(zip(jaxpr.constvars, consts))\n+ primal_env.update((v, val) for v, val in zip(jaxpr.invars, args)\n+ if val is not None)\nct_env = {jaxpr.outvar: cotangent_in}\nfor eqn in jaxpr.eqns[::-1]:\n@@ -371,25 +373,23 @@ def traceable(in_tree_def, new_primals, new_tangents):\n@transformation_with_aux\ndef transposed_fun(jaxpr, in_tree_def, args):\n- consts, freevar_vals, ct = args\n- ct, freevar_vals = build_tree(in_tree_def, (ct, freevar_vals))\n- freevar_cts, cotangents_out = yield jaxpr, consts, freevar_vals, ct\n+ args, consts, freevar_vals, ct = args\n+ args, ct, freevar_vals = build_tree(in_tree_def, (args, ct, freevar_vals))\n+ freevar_cts, cotangents_out = yield jaxpr, consts, freevar_vals, args, ct\nout_jtuple, tree_def = tree_to_jaxtuples((cotangents_out, freevar_cts))\nyield out_jtuple, tree_def\n-def call_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct,\n- params_fun=identity):\n+def call_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct):\njaxpr, = jaxpr\nconsts, = consts\nfreevar_vals, = freevar_vals\nassert isinstance(jaxpr, core.Jaxpr)\n- assert all(a is None for a in args), \"TODO(dougalm): handle non-tangent primal args\"\n- (ct, freevar_vals), in_tree_def = tree_to_jaxtuples((ct, freevar_vals))\n+ (args, ct, freevar_vals), in_tree_def = tree_to_jaxtuples((args, ct, freevar_vals))\nfun = wrap_init(backward_pass)\nfun, out_tree_def = transposed_fun(fun, jaxpr, in_tree_def)\n- all_args = pack((pack(consts), pack(freevar_vals), ct))\n+ all_args = pack((pack(args), pack(consts), pack(freevar_vals), ct))\n# TODO(dougalm): consider signalling to bind that no traces in fun closure\n- ans = primitive.bind(fun, all_args, **params_fun(params))\n+ ans = primitive.bind(fun, all_args, **params)\nreturn build_tree(out_tree_def(), ans)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -105,20 +105,15 @@ def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):\nfor eqn in jaxpr.eqns:\nin_nodes = map(read, eqn.invars)\nif eqn.primitive in parallel_translation_rules:\n- # if we see an spmd primitive (one with a parallel translation rule), then\n- # call that parallel translation rule using axis_env for device_groups\nname = eqn.params['axis_name']\ndevice_groups = replica_groups(axis_env.sizes, 
axis_env.names.index(name))\nparams = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\nrule = parallel_translation_rules[eqn.primitive]\nans = rule(c, *in_nodes, device_groups=device_groups, **params)\nelif eqn.bound_subjaxprs:\n- # if there are bound subjaxprs, we either recursively call\n- # replicated_computation or call into xla.jaxpr_computation\n- in_shapes = map(c.GetShape, in_nodes)\nif eqn.primitive is xla_pcall_p:\n- # if we're processing an xla_pcall, extend the axis environment and\n- # recursively call replicated_computation\n+ in_nodes = map(partial(xla_split, c), in_nodes)\n+ in_shapes = map(c.GetShape, in_nodes)\n(subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\nsubc = replicated_comp(\nsubjaxpr,\n@@ -126,11 +121,9 @@ def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):\n(), map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n*in_shapes)\nsubfun = (subc, tuple(map(read, const_bindings + freevar_bindings)))\n- # select the correct subarray for this replica, call subfun\n- in_nodes = map(partial(xla_split, c), in_nodes)\n- ans = translation_rule(eqn.primitive)(c, subfun, *in_nodes)\n+ ans = xla.xla_call_translation_rule(c, subfun, *in_nodes)\nelse:\n- # otherwise, act like xla.jaxpr_computation\n+ in_shapes = map(c.GetShape, in_nodes)\nsubcs = [\njaxpr_computation(\nsubjaxpr, (),\n@@ -142,8 +135,6 @@ def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):\nin zip(subcs, eqn.bound_subjaxprs)]\nans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\nelse:\n- # if this is a standard translation rule (not an spmd primitive, not a\n- # call with bound subjaxprs) then we lower like xla.jaxpr_computation does\nans = translation_rule(eqn.primitive)(c, *in_nodes, **eqn.params)\nout_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]\n@@ -152,8 +143,10 @@ def replicated_comp(jaxpr, axis_env, const_vals, freevar_shapes, *arg_shapes):\ndef xla_split(c, x):\nshape = list(c.GetShape(x).dimensions())\n- start_indices = [c.ReplicaId()] + [c.Constant(0)] * len(shape)\n- return c.Reshape(c.DynamicSlice(x, start_indices, [1] + shape[1:]), shape[1:])\n+ start_indices = c.Constant(onp.array([0] * len(shape)))\n+ # start_indices = [c.ReplicaId()] + [c.Constant(0)] * len(shape) # TODO\n+ return c.Reshape(c.DynamicSlice(x, start_indices, [1] + shape[1:]),\n+ None, shape[1:])\ndef xla_pcall_impl(fun, *args, **params):\n@@ -204,7 +197,7 @@ xla_pcall = partial(core.call_bind, xla_pcall_p)\nxla_pcall_p.def_custom_bind(xla_pcall)\nxla_pcall_p.def_impl(xla_pcall_impl)\nad.primitive_transposes[xla_pcall_p] = partial(ad.call_transpose, xla_pcall_p)\n-xla.translations[xla_pcall_p] = xla.xla_call_translation_rule\n+# xla.translations[xla_pcall_p] = xla.xla_call_translation_rule # TODO\npe.map_primitives.add(xla_pcall_p)\n" }, { "change_type": "MODIFY", "old_path": "peval.py", "new_path": "peval.py", "diff": "from functools import partial\nimport numpy as onp\nimport jax.numpy as np\n-from jax import jit, pjit, grad, linearize, make_jaxpr\n+from jax import jit, pjit, grad, linearize, jvp, make_jaxpr\n+from jax.lax import psum\n@partial(pjit, axis_name='i', axis_size=1)\ndef f(x):\n@@ -16,4 +17,18 @@ def splitjvp(x):\nprint splitjvp(x)\nprint make_jaxpr(splitjvp)(x)\n+print grad(lambda x: np.sum(np.sin(x)))(x)\n+print grad(lambda x: np.sum(f(x)))(x)\n+\nprint grad(lambda x: np.sum(splitjvp(x)))(x)\n+print grad(lambda x: np.sum(jvp(np.sin, (x,), 
(np.ones_like(x),))[1]))(x)\n+\n+\n+###\n+\n+@partial(pjit, axis_name='i', axis_size=1)\n+@partial(pjit, axis_name='j', axis_size=1)\n+def f(x):\n+ return psum(psum(x, 'i'), 'j')\n+\n+print f(x.reshape((1, 1, -1)))\n" } ]
Python
Apache License 2.0
google/jax
grad of pjit!
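The peval.py script in this record checks that linearization composes with reverse mode through the pjit prototype. The same pattern on an ordinary function, written against the stable public jax.linearize and jax.grad APIs (no pjit or psum, so it runs on a single device):

```python
import jax
import jax.numpy as jnp

def splitjvp(x):
    # Linearize at x, then push a ones tangent through the resulting linear
    # map, mirroring the splitjvp in the diff above.
    _, jvp_fn = jax.linearize(jnp.sin, x)
    return jvp_fn(jnp.ones_like(x))

x = jnp.arange(2.0)
print(splitjvp(x))                                    # cos(x)
print(jax.grad(lambda x: jnp.sum(splitjvp(x)))(x))    # -sin(x): reverse mode through the linearized function
```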
260,335
21.02.2019 21:41:55
28,800
72e2e5da0661eea8313f8a8452523fb413c01c85
remove old pjit tests
[ { "change_type": "MODIFY", "old_path": "tests/pjit_test.py", "new_path": "tests/pjit_test.py", "diff": "@@ -29,84 +29,8 @@ from jax.config import config\nconfig.parse_flags_with_absl()\n-class PmapTest(jtu.JaxTestCase):\n-\n- @jtu.skip_on_devices(\"gpu\")\n- def testBasic(self):\n- f = lambda x: x - psum(x, 'i')\n- x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n- f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n- ans = f(x)\n- expected = x - x.sum(0)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testTupleOutput(self):\n- # f = lambda x: (x - psum(x, 'i'),)\n- # x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n- # f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n- # ans = f(x)\n- # expected = (x - x.sum(0),)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testTupleInput(self):\n- # f = lambda x: x[0] - psum(x[0], 'i')\n- # x = onp.arange(8., dtype=onp.float32).reshape(4, 2)\n- # f = pjit(f, axis_name='i', in_axes=0, out_axes=0, mesh_axis=0)\n- # ans = f((x,))\n- # expected = x - x.sum(0)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- @jtu.skip_on_devices(\"gpu\")\n- def testNested(self):\n- def f(x, y):\n- return psum(psum(x, 'i'), 'j')\n- f = pjit(f, 'i')\n- f = pjit(f, 'j', out_axes=1)\n-\n- x = onp.ones((3, 4), onp.float32)\n- ans = f(x, x)\n- expected = 12 * onp.ones((4, 3), onp.float32)\n- self.assertAllClose(ans, expected, check_dtypes=True)\n-\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testForwardModeAutodiff(self):\n- # def f(x):\n- # return np.cos(x - psum(np.sin(x), 'i'))\n-\n- # x = np.ones(4)\n- # expected = jvp(pmap(f, 'i'), (x,), (x,))\n-\n- # g = pjit(f, axis_name='i')\n- # ans = jvp(g, (x,), (x,))\n-\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- # @jtu.skip_on_devices(\"gpu\")\n- # def testReverseModeAutodiff(self):\n- # def f(x):\n- # return x - psum(x, 'i')\n-\n- # x = np.ones(4)\n- # expected1 = grad(lambda x: np.sum(pmap(f, 'i')(x)))(x)\n- # expected2 = grad(lambda x: np.sum(x - np.sum(x)))(x)\n-\n- # g = pjit(f, axis_name='i')\n- # ans = grad(lambda x: np.sum(g(x)))(x)\n-\n- # self.assertAllClose(ans, expected1, check_dtypes=False)\n- # self.assertAllClose(ans, expected2, check_dtypes=False)\n-\n- @jtu.skip_on_devices(\"gpu\")\n- def testStuff(self):\n- def f(x):\n- return np.sin(np.sin(np.sin(x)))\n-\n- x = np.ones(4)\n-\n- g = pjit(f, axis_name='i')\n- ans = grad(lambda x: np.sum(g(x)))(x)\n+class PjitTest(jtu.JaxTestCase):\n+ pass # TODO(mattjj)\nif __name__ == '__main__':\n" } ]
Python
Apache License 2.0
google/jax
remove old pjit tests
260,335
22.02.2019 07:56:13
28,800
121e74f5cc509ada4ccaafda8bd480e6416dcb78
fix import in api_test.py
[ { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -26,7 +26,6 @@ import jax.numpy as np\nfrom jax import jit, grad, device_get, device_put, jacfwd, jacrev, hessian\nfrom jax import api\nfrom jax.core import Primitive\n-from jax.interpreters.partial_eval import def_abstract_eval\nfrom jax.interpreters.ad import defjvp\nfrom jax.interpreters.xla import DeviceArray\nfrom jax.abstract_arrays import concretization_err_msg\n@@ -216,7 +215,7 @@ class APITest(jtu.JaxTestCase):\njtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,\n\"Forward-mode differentiation rule for 'foo' not implemented\")\n- def_abstract_eval(foo_p, lambda x: x)\n+ foo_p.def_abstract_eval(lambda x: x)\njtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,\n\"XLA translation rule for 'foo' not implemented\")\n" } ]
Python
Apache License 2.0
google/jax
fix import in api_test.py
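The api_test.py change in this record reflects that def_abstract_eval now lives on core.Primitive itself rather than in the partial_eval module. A minimal sketch of defining a custom primitive with the core.Primitive API as it appears in these diffs; the import path and behaviour are from this era of the codebase and may differ in later JAX releases.

```python
from jax.core import Primitive   # path as used in api_test.py above

foo_p = Primitive('foo')
foo = lambda x: foo_p.bind(x)

foo_p.def_impl(lambda x: x)              # concrete (eager) evaluation rule
foo_p.def_abstract_eval(lambda x: x)     # shape/dtype rule used under tracing

print(foo(1.0))   # 1.0 -- uses the impl rule; jit/grad would still need more rules
```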
260,335
22.02.2019 08:13:46
28,800
45c41d9e5876e6fc5e71d9b56e6edbeb46bb1323
fix typo in abstract_eval NotImplementedError
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -96,7 +96,7 @@ class Primitive(object):\ndef abstract_eval(self, *args, **kwargs):\nraise NotImplementedError(\"Abstract evaluation for '{}' not implemented\"\n- .format(primitive.name))\n+ .format(self.name))\n# -------------------- lifting --------------------\n" } ]
Python
Apache License 2.0
google/jax
fix typo in abstract_eval NotImplementedError
260,335
22.02.2019 09:18:48
28,800
1c648477a953025870c0f8fc8fbd35f5df5f2cf6
fix typo in pjit
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -418,7 +418,7 @@ def pjit(fun, axis_name):\naxis_size = axis_sizes.pop()\njaxtupletree_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))\n- check_args(jaxtupletree_args)\n+ _check_args(jaxtupletree_args)\nf = lu.wrap_init(fun, kwargs)\nf, out_tree = pytree_fun_to_jaxtupletree_fun(f, in_trees)\njaxtupletree_out = pxla.xla_pcall(f, *jaxtupletree_args,\n" } ]
Python
Apache License 2.0
google/jax
fix typo in pjit
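The api.py hunk in this last record infers axis_size from the arguments' leading axes before binding xla_pcall. A plain-NumPy sketch of that size check; the real code walks jaxtuple trees with batching.dimsize, and the function name and error wording here are illustrative only.

```python
import numpy as np

def infer_axis_size(axis_name, *args):
    # Every argument must agree on the size of its leading (mapped) axis.
    sizes = {np.shape(a)[0] for a in args}
    if len(sizes) != 1:
        raise ValueError("axis name '{}' bound to axes of different sizes: {}"
                         .format(axis_name, sizes))
    return sizes.pop()

print(infer_axis_size('i', np.ones((4, 2)), np.zeros((4, 3))))   # 4
```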