author int64 658 755k | date stringlengths 19 19 | timezone int64 -46,800 43.2k | hash stringlengths 40 40 | message stringlengths 5 490 | mods list | language stringclasses 20 values | license stringclasses 3 values | repo stringlengths 5 68 | original_message stringlengths 12 491 |
|---|---|---|---|---|---|---|---|---|---|
260,335 | 03.05.2019 14:43:10 | 25,200 | 8b3baf25c0cbbd2758a86e03af9bf215ad3dd61f | add sm3 (PAIR w/ and rohananil@) | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -51,13 +51,13 @@ functions, where the component functions have these signatures:\nArgs:\nstep: integer representing the step index.\n- grads: a pytree with the same structure as `get_params(opt_state)` representing\n- the gradients to be used in updating the optimizer state.\n+ grads: a pytree with the same structure as `get_params(opt_state)`\n+ representing the gradients to be used in updating the optimizer state.\nopt_state: a pytree representing the optimizer state to be updated.\nReturns:\n- A pytree with the same structure as the `opt_state` argument representing the\n- updated optimizer state.\n+ A pytree with the same structure as the `opt_state` argument representing\n+ the updated optimizer state.\nNotice that an optimizer implementation has a lot of flexibility in the form of\n@@ -74,6 +74,8 @@ import collections\nimport functools\nimport operator\n+from six.moves import reduce\n+\nimport jax.numpy as np\nfrom jax.core import pack\nfrom jax.util import partial, safe_zip, safe_map, unzip2\n@@ -275,6 +277,52 @@ def adam(step_size, b1=0.9, b2=0.999, eps=1e-8):\nreturn x\nreturn init, update, get_params\n+@optimizer\n+def sm3(step_size, momentum=0.9):\n+ \"\"\"Construct optimizer triple for SM3.\n+\n+ Memory-Efficient Adaptive Optimization for Large-Scale Learning.\n+ https://arxiv.org/abs/1901.11150\n+\n+ Args:\n+ step_size: positive scalar, or a callable representing a step size schedule\n+ that maps the iteration index to positive scalar.\n+ momentum: optional, a positive scalar value for momentum\n+\n+ Returns:\n+ An (init_fun, update_fun, get_params) triple.\n+ \"\"\"\n+ step_size = make_schedule(step_size)\n+\n+ def splice(seq, i, x):\n+ lst = list(seq)\n+ lst[i:i+1] = x\n+ return lst\n+\n+ def broadcast_into(ndim, x, axis):\n+ idx = splice([None] * ndim, axis, [slice(None)])\n+ return x[tuple(idx)]\n+\n+ def init(x0):\n+ vs = [np.zeros(sz, dtype=x0.dtype) for sz in x0.shape]\n+ return (x0, np.zeros_like(x0), vs)\n+\n+ def update(i, g, state):\n+ x, m, vs = state\n+ vs = [broadcast_into(g.ndim, v, i) for i, v in enumerate(vs)]\n+ accum = reduce(np.minimum, vs[1:], vs[0]) + g ** 2\n+ accum_inv_sqrt = np.where(accum > 0, 1. / np.sqrt(accum), 0)\n+ m = (1. - momentum) * (g * accum_inv_sqrt) + momentum * m\n+ x = x - step_size(i) * m\n+ vs = [accum.max(splice(range(x.ndim), j, [])) for j in range(x.ndim)]\n+ return x, m, vs\n+\n+ def get_params(state):\n+ x, _, _ = state\n+ return x\n+\n+ return init, update, get_params\n+\n### learning rate schedules\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -31,8 +31,6 @@ from jax.config import config\nconfig.parse_flags_with_absl()\n-dummy_data = None\n-\nclass OptimizerTests(jtu.JaxTestCase):\ndef _CheckOptimizer(self, optimizer, loss, x0, num_steps, *args, **kwargs):\n@@ -40,50 +38,50 @@ class OptimizerTests(jtu.JaxTestCase):\nself._CheckRun(optimizer, loss, x0, num_steps, *args, **kwargs)\ndef _CheckFuns(self, optimizer, loss, x0, *args):\n- init_fun, update_fun, get_params_fun = optimizer(*args)\n+ init_fun, update_fun, get_params = optimizer(*args)\nopt_state = init_fun(x0)\n- self.assertAllClose(x0, get_params_fun(opt_state), check_dtypes=True)\n- opt_state2 = update_fun(0, grad(loss)(x0, dummy_data), opt_state) # doesn't crash\n+ self.assertAllClose(x0, get_params(opt_state), check_dtypes=True)\n+ opt_state2 = update_fun(0, grad(loss)(x0), opt_state) # doesn't crash\nself.assertEqual(tree_util.tree_structure(opt_state),\ntree_util.tree_structure(opt_state2))\n@jtu.skip_on_devices('gpu')\ndef _CheckRun(self, optimizer, loss, x0, num_steps, *args, **kwargs):\n- init_fun, update_fun, get_params_fun = optimizer(*args)\n+ init_fun, update_fun, get_params = optimizer(*args)\nopt_state = init_fun(x0)\nfor i in range(num_steps):\n- x = get_params_fun(opt_state)\n- g = grad(loss)(x, dummy_data)\n+ x = get_params(opt_state)\n+ g = grad(loss)(x)\nopt_state = update_fun(i, g, opt_state)\n- xstar = get_params_fun(opt_state)\n- self.assertLess(loss(xstar, dummy_data), 1e-2)\n+ xstar = get_params(opt_state)\n+ self.assertLess(loss(xstar), 1e-2)\nupdate_fun_jitted = jit(update_fun)\nopt_state = init_fun(x0)\nfor i in range(num_steps):\n- x = get_params_fun(opt_state)\n- g = grad(loss)(x, dummy_data)\n+ x = get_params(opt_state)\n+ g = grad(loss)(x)\nopt_state = update_fun_jitted(i, g, opt_state)\n- xstar = get_params_fun(opt_state)\n- self.assertLess(loss(xstar, dummy_data), 1e-2)\n+ xstar = get_params(opt_state)\n+ self.assertLess(loss(xstar), 1e-2)\ndef testSgdScalar(self):\n- def loss(x, _): return x**2\n+ def loss(x): return x**2\nx0 = 1.\nnum_iters = 100\nstep_size = 0.1\nself._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testSgdVector(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\nself._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testSgdNestedTuple(self):\n- def loss(xyz, _):\n+ def loss(xyz):\nx, (y, z) = xyz\nreturn sum(np.dot(a, a) for a in [x, y, z])\nx0 = (np.ones(2), (np.ones(2), np.ones(2)))\n@@ -92,7 +90,7 @@ class OptimizerTests(jtu.JaxTestCase):\nself._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)\ndef testMomentumVector(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\n@@ -100,7 +98,7 @@ class OptimizerTests(jtu.JaxTestCase):\nself._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_size, mass)\ndef testMomentumDict(self):\n- def loss(dct, _): return np.dot(dct['x'], dct['x'])\n+ def loss(dct): return np.dot(dct['x'], dct['x'])\nx0 = {'x': np.ones(2)}\nnum_iters = 100\nstep_size = 0.1\n@@ -108,7 +106,7 @@ class OptimizerTests(jtu.JaxTestCase):\nself._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_size, mass)\ndef testRmspropVector(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\n@@ -116,14 +114,14 @@ class OptimizerTests(jtu.JaxTestCase):\n@jtu.skip_on_devices('cpu') # TODO(mattjj): investigate numerical failure\ndef testAdamVector(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nnum_iters = 100\nstep_size = 0.1\nself._CheckOptimizer(optimizers.adam, loss, x0, num_iters, step_size)\ndef testSgdClosure(self):\n- def loss(y, x, _): return y**2 * x**2\n+ def loss(y, x): return y**2 * x**2\nx0 = 1.\ny = 1.\nnum_iters = 20\n@@ -131,39 +129,49 @@ class OptimizerTests(jtu.JaxTestCase):\npartial_loss = functools.partial(loss, y)\nself._CheckRun(optimizers.sgd, partial_loss, x0, num_iters, step_size)\n+ def testSM3(self):\n+ def loss(xs):\n+ x1, x2 = xs\n+ return np.sum(x1 ** 2) + np.sum(x2 ** 2)\n+\n+ num_iters = 100\n+ step_size = 0.1\n+ x0 = (np.ones(2), np.ones((2, 2)))\n+ self._CheckOptimizer(optimizers.sm3, loss, x0, num_iters, step_size)\n+\ndef testSgdVectorExponentialDecaySchedule(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_schedule = optimizers.exponential_decay(0.1, 3, 2.)\nself._CheckFuns(optimizers.sgd, loss, x0, step_schedule)\ndef testSgdVectorInverseTimeDecaySchedule(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)\nself._CheckFuns(optimizers.sgd, loss, x0, step_schedule)\ndef testAdamVectorInverseTimeDecaySchedule(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)\nself._CheckFuns(optimizers.adam, loss, x0, step_schedule)\ndef testMomentumVectorInverseTimeDecayStaircaseSchedule(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_sched = optimizers.inverse_time_decay(0.1, 3, 2., staircase=True)\nmass = 0.9\nself._CheckFuns(optimizers.momentum, loss, x0, step_sched, mass)\ndef testRmspropVectorPiecewiseConstantSchedule(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_schedule = optimizers.piecewise_constant([25, 75], [1.0, 0.5, 0.1])\nself._CheckFuns(optimizers.rmsprop, loss, x0, step_schedule)\ndef testTracedStepSize(self):\n- def loss(x, _): return np.dot(x, x)\n+ def loss(x): return np.dot(x, x)\nx0 = np.ones(2)\nstep_size = 0.1\n@@ -174,7 +182,7 @@ class OptimizerTests(jtu.JaxTestCase):\ndef update(opt_state, step_size):\n_, update_fun, get_params = optimizers.sgd(step_size)\nx = get_params(opt_state)\n- g = grad(loss)(x, None)\n+ g = grad(loss)(x)\nreturn update_fun(0, g, opt_state)\nupdate(opt_state, 0.9) # doesn't crash\n"
}
] | Python | Apache License 2.0 | google/jax | add sm3 (PAIR w/ @lukaszkaiser and rohananil@) |
260,335 | 03.05.2019 17:57:17 | 25,200 | f3264f5705c643a3aae32eb71ae79b4aa7980362 | reviewer-suggested edits (thanks | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "\"\"\"Optimizers for use with JAX.\n-This short module contains some convenient optimizer definitions, specifically\n+This module contains some convenient optimizer definitions, specifically\ninitialization and update functions, which can be used with ndarrays or\narbitrarily-nested tuple/list/dicts of ndarrays.\n@@ -34,17 +34,6 @@ functions, where the component functions have these signatures:\nmomentum. The optimizer state pytree structure generally differs from that\nof `params`.\n-::\n-\n- get_params(opt_state)\n-\n- Args:\n- opt_state: pytree representing an optimizer state.\n-\n- Returns:\n- A pytree representing the parameters extracted from `opt_state`, such that\n- the invariant `params == get_params(init_params(params))` holds true.\n-\n::\nupdate_fun(step, grads, opt_state)\n@@ -59,6 +48,17 @@ functions, where the component functions have these signatures:\nA pytree with the same structure as the `opt_state` argument representing\nthe updated optimizer state.\n+::\n+\n+ get_params(opt_state)\n+\n+ Args:\n+ opt_state: pytree representing an optimizer state.\n+\n+ Returns:\n+ A pytree representing the parameters extracted from `opt_state`, such that\n+ the invariant `params == get_params(init_params(params))` holds true.\n+\nNotice that an optimizer implementation has a lot of flexibility in the form of\nopt_state: it just has to be a pytree of JaxTypes (so that it can be passed to\n@@ -70,7 +70,7 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-import collections\n+from collections import namedtuple\nimport functools\nimport operator\n@@ -97,15 +97,21 @@ zip = safe_zip\n# dispatch to a `jit`-compiled `update_fun`. That JaxTuple-of-JaxTuples is\n# stored together with the tree structure data in an OptimizerState instance.\n-OptimizerState = collections.namedtuple(\"OptimizerState\",\n- [\"packed_state\", \"tree\", \"subtrees\"])\n-register_pytree_node(OptimizerState,\n- lambda xs: ((xs.packed_state,), (xs.tree, xs.subtrees)),\n+OptimizerState = namedtuple(\"OptimizerState\",\n+ [\"packed_state\", \"tree_def\", \"subtree_defs\"])\n+register_pytree_node(\n+ OptimizerState,\n+ lambda xs: ((xs.packed_state,), (xs.tree_def, xs.subtree_defs)),\nlambda data, xs: OptimizerState(xs[0], data[0], data[1]))\ndef optimizer(opt_maker):\n\"\"\"Decorator to make an optimizer defined for arrays generalize to containers.\n+ With this decorator, you can write init, update, and get_params functions that\n+ each operate only on single arrays, and convert them to corresponding\n+ functions that operate on pytrees of parameters. See the optimizers defined in\n+ optimizers.py for examples.\n+\nArgs:\nopt_maker: a function that returns an ``(init_fun, update_fun, get_params)``\ntriple of functions that might only work with ndarrays, as per\n@@ -147,12 +153,16 @@ def optimizer(opt_maker):\ndef tree_update(i, grad_tree, opt_state):\npacked_state, tree, subtrees = opt_state\ngrad_flat, tree2 = tree_flatten(grad_tree)\n- assert tree == tree2\n+ if tree2 != tree:\n+ msg = (\"optimizer update function was passed a gradient tree that did \"\n+ \"not match the parameter tree structure with which it was \"\n+ \"initialized: parameter tree {} and grad tree {}.\")\n+ raise TypeError(msg.format(tree, tree2))\nstates = map(tree_unflatten, subtrees, packed_state)\nnew_states = map(partial(update, i), grad_flat, states)\nnew_states_flat, subtrees2 = unzip2(map(tree_flatten, new_states))\nfor subtree, subtree2 in zip(subtrees, subtrees2):\n- if subtree != subtree2:\n+ if subtree2 != subtree:\nmsg = (\"optimizer update function produced an output structure that \"\n\"did not match its input structure: input {} and output {}.\")\nraise TypeError(msg.format(subtree, subtree2))\n@@ -305,12 +315,12 @@ def sm3(step_size, momentum=0.9):\ndef init(x0):\nvs = [np.zeros(sz, dtype=x0.dtype) for sz in x0.shape]\n- return (x0, np.zeros_like(x0), vs)\n+ return x0, np.zeros_like(x0), vs\ndef update(i, g, state):\nx, m, vs = state\nvs = [broadcast_into(g.ndim, v, i) for i, v in enumerate(vs)]\n- accum = reduce(np.minimum, vs[1:], vs[0]) + g ** 2\n+ accum = reduce(np.minimum, vs) + g ** 2\naccum_inv_sqrt = np.where(accum > 0, 1. / np.sqrt(accum), 0)\nm = (1. - momentum) * (g * accum_inv_sqrt) + momentum * m\nx = x - step_size(i) * m\n"
}
] | Python | Apache License 2.0 | google/jax | reviewer-suggested edits (thanks @skye) |
260,335 | 04.05.2019 08:58:35 | 25,200 | dffc0b76d00a222adbee56d774e03109a44b6d02 | skip a linalg test on gpu/tpu | [
{
"change_type": "MODIFY",
"old_path": "tests/linalg_test.py",
"new_path": "tests/linalg_test.py",
"diff": "@@ -396,6 +396,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nself._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True)\n# Regression test for incorrect type for eigenvalues of a complex matrix.\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\ndef testIssue669(self):\ndef test(x):\nval, vec = np.linalg.eigh(x)\n"
}
] | Python | Apache License 2.0 | google/jax | skip a linalg test on gpu/tpu |
260,335 | 04.05.2019 10:45:56 | 25,200 | 21e020e89c38d310dbff1fce2f6b973b36cfa581 | fix pxla.py _slice to handle ShardedDeviceTuples | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -77,8 +77,7 @@ def shard_arg(device_ordinals, axis_size, arg):\ndef _slice(x, i):\n\"\"\"Return the ith slice of a JaxType (tuple or array).\"\"\"\n- t = type(x)\n- if t is core.JaxTuple or t is xla.DeviceTuple:\n+ if isinstance(x, core.JaxTuple):\nreturn core.pack(_slice(elt, i) for elt in x)\nelse:\nreturn x[i]\n"
}
] | Python | Apache License 2.0 | google/jax | fix pxla.py _slice to handle ShardedDeviceTuples |
260,510 | 02.05.2019 17:47:34 | 25,200 | e742a2605a08f310afafeb29ba6c1281d086f142 | add low-rank svd jvp; fixes one case for | [
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -468,6 +468,31 @@ def svd_abstract_eval(operand, full_matrices, compute_uv):\nvt = operand\nreturn core.AbstractTuple((s, u, vt))\n+def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\n+ if full_matrices:\n+ raise NotImplementedError(\"Singular value decomposition JVP not implemented for full matrices\")\n+ A, = primals\n+ dA, = tangents\n+ s, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True)\n+\n+ k = s.shape[-1]\n+ Ut, V = U.T, Vt.T\n+ s_dim = s[..., None, :]\n+ dS = Ut.dot(dA).dot(V)\n+ ds = np.diag(dS)\n+ F = 1 / (np.square(s_dim) - np.square(s_dim.T) + np.eye(k)) - np.eye(k)\n+ dSS = s_dim * dS\n+ SdS = s_dim.T * dS\n+ dU = U.dot(F * (dSS + dSS.T))\n+ dV = V.dot(F * (SdS + SdS.T))\n+\n+ m, n = A.shape[-2], A.shape[-1]\n+ if m > n:\n+ dU = dU + (np.eye(m) - U.dot(Ut)).dot(dA).dot(V) / s_dim\n+ if n > m:\n+ dV = dV + (np.eye(n) - V.dot(Vt)).dot(dA.T).dot(U) / s_dim\n+ return core.pack((s, U, Vt)), core.pack((ds, dU, dV.T))\n+\ndef svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):\nshape = c.GetShape(operand)\ndtype = shape.element_type().type\n@@ -491,4 +516,5 @@ svd_p.def_impl(svd_impl)\nsvd_p.def_abstract_eval(svd_abstract_eval)\nxla.translations[svd_p] = svd_translation_rule\nxla.backend_specific_translations['cpu'][svd_p] = svd_cpu_translation_rule\n+ad.primitive_jvps[svd_p] = svd_jvp_rule\nbatching.primitive_batchers[svd_p] = svd_batching_rule\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/linalg_test.py",
"new_path": "tests/linalg_test.py",
"diff": "@@ -284,6 +284,9 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nself._CompileAndCheck(partial(np.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv),\nargs_maker, check_dtypes=True)\n+ if not full_matrices:\n+ svd = partial(np.linalg.svd, full_matrices=False)\n+ jtu.check_jvp(svd, partial(jvp, svd), (a,))\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_fullmatrices={}\".format(\n"
}
] | Python | Apache License 2.0 | google/jax | add low-rank svd jvp; fixes one case for #508 |
260,510 | 02.05.2019 20:08:08 | 25,200 | f259e913b42f797018747fd049941c887eaeb006 | fixes tests for complex numbers | [
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -476,7 +476,7 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\ns, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True)\nk = s.shape[-1]\n- Ut, V = U.T, Vt.T\n+ Ut, V = np.conj(U).T, np.conj(Vt).T\ns_dim = s[..., None, :]\ndS = Ut.dot(dA).dot(V)\nds = np.diag(dS)\n@@ -490,7 +490,7 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nif m > n:\ndU = dU + (np.eye(m) - U.dot(Ut)).dot(dA).dot(V) / s_dim\nif n > m:\n- dV = dV + (np.eye(n) - V.dot(Vt)).dot(dA.T).dot(U) / s_dim\n+ dV = dV + (np.eye(n) - V.dot(Vt)).dot(np.conj(dA).T).dot(U) / s_dim\nreturn core.pack((s, U, Vt)), core.pack((ds, dU, dV.T))\ndef svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):\n"
}
] | Python | Apache License 2.0 | google/jax | fixes tests for complex numbers |
260,510 | 02.05.2019 20:24:19 | 25,200 | 92e07cceab255ba9f0436faac0c32be285fc2bf9 | adds TODO for full matrix case | [
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -470,6 +470,7 @@ def svd_abstract_eval(operand, full_matrices, compute_uv):\ndef svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nif full_matrices:\n+ #TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\nraise NotImplementedError(\"Singular value decomposition JVP not implemented for full matrices\")\nA, = primals\ndA, = tangents\n"
}
] | Python | Apache License 2.0 | google/jax | adds TODO for full matrix case |
260,510 | 04.05.2019 15:05:11 | 25,200 | 4c8355a54d3fcf0ecd0e8ddee58b1503d66a257b | remove unnecessary hermetian; change ATOL for svd jvp x64 tests | [
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -491,7 +491,7 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nif m > n:\ndU = dU + (np.eye(m) - U.dot(Ut)).dot(dA).dot(V) / s_dim\nif n > m:\n- dV = dV + (np.eye(n) - V.dot(Vt)).dot(np.conj(dA).T).dot(U) / s_dim\n+ dV = dV + (np.eye(n) - V.dot(Vt)).dot(dA.T).dot(U) / s_dim\nreturn core.pack((s, U, Vt)), core.pack((ds, dU, dV.T))\ndef svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/linalg_test.py",
"new_path": "tests/linalg_test.py",
"diff": "@@ -286,7 +286,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nargs_maker, check_dtypes=True)\nif not full_matrices:\nsvd = partial(np.linalg.svd, full_matrices=False)\n- jtu.check_jvp(svd, partial(jvp, svd), (a,))\n+ jtu.check_jvp(svd, partial(jvp, svd), (a,), atol=1e-2 if FLAGS.jax_enable_x64 else jtu.ATOL)\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_fullmatrices={}\".format(\n"
}
] | Python | Apache License 2.0 | google/jax | remove unnecessary hermetian; change ATOL for svd jvp x64 tests |
260,510 | 04.05.2019 15:43:28 | 25,200 | 2b35b1b524785d2dd787adf21c6dedf6b9e0e146 | replace .dot with np.dot in svd jvp | [
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -479,19 +479,19 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nk = s.shape[-1]\nUt, V = np.conj(U).T, np.conj(Vt).T\ns_dim = s[..., None, :]\n- dS = Ut.dot(dA).dot(V)\n+ dS = np.dot(np.dot(Ut, dA), V)\nds = np.diag(dS)\nF = 1 / (np.square(s_dim) - np.square(s_dim.T) + np.eye(k)) - np.eye(k)\ndSS = s_dim * dS\nSdS = s_dim.T * dS\n- dU = U.dot(F * (dSS + dSS.T))\n- dV = V.dot(F * (SdS + SdS.T))\n+ dU = np.dot(U, F * (dSS + dSS.T))\n+ dV = np.dot(V, F * (SdS + SdS.T))\nm, n = A.shape[-2], A.shape[-1]\nif m > n:\n- dU = dU + (np.eye(m) - U.dot(Ut)).dot(dA).dot(V) / s_dim\n+ dU = dU + np.dot(np.eye(m) - np.dot(U, Ut), np.dot(dA, V)) / s_dim\nif n > m:\n- dV = dV + (np.eye(n) - V.dot(Vt)).dot(dA.T).dot(U) / s_dim\n+ dV = dV + np.dot(np.eye(n) - np.dot(V, Vt), np.dot(np.conj(dA).T, U)) / s_dim\nreturn core.pack((s, U, Vt)), core.pack((ds, dU, dV.T))\ndef svd_cpu_translation_rule(c, operand, full_matrices, compute_uv):\n"
}
] | Python | Apache License 2.0 | google/jax | replace .dot with np.dot in svd jvp |
260,510 | 04.05.2019 15:48:48 | 25,200 | 10fa1f99407b62fc6729c893f97c0e4ecd490f0f | tests pass with ATOL=1e-1 | [
{
"change_type": "MODIFY",
"old_path": "tests/linalg_test.py",
"new_path": "tests/linalg_test.py",
"diff": "@@ -286,7 +286,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nargs_maker, check_dtypes=True)\nif not full_matrices:\nsvd = partial(np.linalg.svd, full_matrices=False)\n- jtu.check_jvp(svd, partial(jvp, svd), (a,), atol=1e-2 if FLAGS.jax_enable_x64 else jtu.ATOL)\n+ jtu.check_jvp(svd, partial(jvp, svd), (a,), atol=1e-1 if FLAGS.jax_enable_x64 else jtu.ATOL)\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_fullmatrices={}\".format(\n"
}
] | Python | Apache License 2.0 | google/jax | tests pass with ATOL=1e-1 |
260,335 | 06.05.2019 06:50:15 | 25,200 | bf6c15b59afceddf395127edc421244ecc1322c9 | update pmap to flatten correctly (was a perf bug)
also temporarily avoid DeviceTuples in optimizer states | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -492,13 +492,12 @@ def pmap(fun, axis_name=None):\ndef f_jitted(*args, **kwargs):\naxis_size = _pmap_axis_size(args)\nf = lu.wrap_init(fun)\n- jaxtuple_kwargs, kwargs_tree = pytree_to_jaxtupletree(kwargs)\n- jaxtuple_args, in_trees = unzip2(map(pytree_to_jaxtupletree, args))\n- _check_args(jaxtuple_args)\n- f, out_tree = pytree_fun_to_jaxtupletree_fun2(f, kwargs_tree, in_trees)\n- out = pxla.xla_pmap(f, jaxtuple_kwargs, *jaxtuple_args,\n+ args_flat, in_tree = tree_flatten((args, kwargs))\n+ _check_args(args_flat)\n+ flat_fun, out_tree = flatten_fun(f, in_tree)\n+ out = pxla.xla_pmap(flat_fun, *args_flat,\naxis_name=axis_name, axis_size=axis_size)\n- return build_tree(out_tree(), out)\n+ return tree_unflatten(out_tree(), out)\nnamestr = \"pmap({}, axis_name={})\".format\nf_jitted.__name__ = namestr(f_jitted.__name__, axis_name)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -450,7 +450,10 @@ pytype_aval_mappings = {}\n# can be true.\nclass _TupleMeta(type(tuple)):\ndef __instancecheck__(self, instance):\n+ try:\nreturn type(get_aval(instance)) is AbstractTuple\n+ except TypeError:\n+ return False\nclass JaxTuple(six.with_metaclass(_TupleMeta)):\n__slots__ = ['xs']\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -77,7 +77,6 @@ import operator\nfrom six.moves import reduce\nimport jax.numpy as np\n-from jax.core import pack\nfrom jax.util import partial, safe_zip, safe_map, unzip2\nfrom jax import tree_util\nfrom jax.tree_util import tree_flatten, tree_unflatten, register_pytree_node\n@@ -97,6 +96,9 @@ zip = safe_zip\n# dispatch to a `jit`-compiled `update_fun`. That JaxTuple-of-JaxTuples is\n# stored together with the tree structure data in an OptimizerState instance.\n+# TODO(mattjj): replace this with core.pack when DeviceTuples are ready\n+pack = tuple\n+\nOptimizerState = namedtuple(\"OptimizerState\",\n[\"packed_state\", \"tree_def\", \"subtree_defs\"])\nregister_pytree_node(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -187,14 +187,15 @@ class OptimizerTests(jtu.JaxTestCase):\nupdate(opt_state, 0.9) # doesn't crash\n- def testDeviceTupleState(self):\n- init_fun, update_fun, _ = optimizers.sgd(0.1)\n- opt_state = init_fun(np.zeros(3))\n- self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n- opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n- self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\n+ # TODO(mattjj): re-enable when DeviceTuples are ready\n+ # def testDeviceTupleState(self):\n+ # init_fun, update_fun, _ = optimizers.sgd(0.1)\n+ # opt_state = init_fun(np.zeros(3))\n+ # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ # self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n+ # opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n+ # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ # self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\ndef testUpdateFunStructureMismatchErrorMessage(self):\n@optimizers.optimizer\n"
}
] | Python | Apache License 2.0 | google/jax | update pmap to flatten correctly (was a perf bug)
also temporarily avoid DeviceTuples in optimizer states |
260,335 | 06.05.2019 12:30:22 | 25,200 | b5d95f8b84a435aba511665b6f2f02e561b71c7a | skip a test on tpu | [
{
"change_type": "MODIFY",
"old_path": "tests/lax_test.py",
"new_path": "tests/lax_test.py",
"diff": "@@ -389,7 +389,7 @@ class LaxTest(jtu.JaxTestCase):\nself, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,\nrhs_dilation, rng):\n# TODO(mattjj): make this test pass\n- return SkipTest(\"this test is incomplete\")\n+ raise SkipTest(\"this test is incomplete\")\nargs_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\ndef fun(lhs, rhs):\n@@ -1895,6 +1895,8 @@ class LaxAutodiffTest(jtu.JaxTestCase):\n]\nfor rng in [jtu.rand_small()]))\ndef testReduceGrad(self, op, init_val, shape, dtype, dims, rng):\n+ if \"tpu\" in FLAGS.jax_test_dut and op is lax.mul:\n+ raise SkipTest(\"unimplemented case\")\ntol = 1e-2 if onp.finfo(dtype).bits == 32 else None\noperand = rng(shape, dtype)\ninit_val = onp.asarray(init_val, dtype=dtype)\n"
}
] | Python | Apache License 2.0 | google/jax | skip a test on tpu |
260,335 | 03.05.2019 17:47:40 | 25,200 | 9adfb806253baeda9efcb1cac862c32b1c006ee9 | add advanced indexing support to jax index ops
fixes
This commit adds advanced indexing support to jax index operations,
namely index_update and index_add, but does *not* add support for mixed
advanced indexing and slicing. That's left as a NotImplementedError.
This commit also added a segment_sum convenience wrapper. | [
{
"change_type": "MODIFY",
"old_path": "jax/numpy/lax_numpy.py",
"new_path": "jax/numpy/lax_numpy.py",
"diff": "@@ -2153,9 +2153,7 @@ def _is_slice_none(idx):\ndef _is_advanced_int_indexer(idx):\n\"\"\"Returns True if idx should trigger int array indexing, False otherwise.\"\"\"\n# https://docs.scipy.org/doc/numpy/reference/arrays.indexing.html#advanced-indexing\n- if isinstance(idx, (tuple, list)):\n- # We assume this check comes *after* the check for non-advanced tuple index,\n- # and hence we already know at least one element is a sequence if it's a tuple\n+ if isinstance(idx, (tuple, list)) and _any(onp.ndim(elt) != 0 for elt in idx):\nreturn _all(e is None or e is Ellipsis or isinstance(e, slice)\nor _is_int_arraylike(e) for e in idx)\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/ops/__init__.py",
"new_path": "jax/ops/__init__.py",
"diff": "from __future__ import absolute_import\n-from .scatter import index, index_add, index_update\n\\ No newline at end of file\n+from .scatter import index, index_add, index_update, segment_sum\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/ops/scatter.py",
"new_path": "jax/ops/scatter.py",
"diff": "@@ -33,11 +33,19 @@ def _scatter_update(x, idx, y, scatter_op):\nx[idx] op= y\nexcept in a pure functional way, with no in-place updating.\n- Support NumPy-style basic indexing only, i.e., `idx` must be\n- `None`, an integer, a `slice` object, or ellipses, or a tuple of the above.\n+ Args:\n+ x: ndarray to be updated.\n+ idx: None, an integer, a slice, an ellipsis, an ndarray with integer dtype,\n+ or a tuple of those indicating the locations of `x` into which to scatter-\n+ update the values in `y`.\n+ y: values to be scattered.\n+ scatter_op: callable, either lax.scatter or lax.scatter_add.\n- TODO(phawkins): support advanced indexing.\n+ Returns:\n+ An ndarray representing an updated `x` after performing the scatter-update.\n\"\"\"\n+ # For more clues on the logic of this implementation, see the code for\n+ # jax.numpy._rewriting_take (which has links to NumPy docs).\nx = np.asarray(x)\ny = np.asarray(y)\n@@ -45,14 +53,42 @@ def _scatter_update(x, idx, y, scatter_op):\ny_shape = np.shape(y)\ny = lax.convert_element_type(y, lax.dtype(x))\n+ # Check if there's advanced indexing going on, and handle differently based on\n+ # whether it is or isn't mixed with basic indexing.\n+ if np._is_advanced_int_indexer_without_slices(idx):\n+ if isinstance(idx, (tuple, list)):\n+ if any(onp.shape(e) for e in idx):\n+ # At least one sequence element in the index list means broadcasting.\n+ idx = np.broadcast_arrays(*idx)\n+ else:\n+ # The index list is a flat list of integers.\n+ idx = [lax.concatenate([lax.reshape(e, (1,)) for e in idx], 0)]\n+ else:\n+ # The indexer is just a single integer array.\n+ idx = [idx]\n+\n+ stacked_idx = np.concatenate(\n+ [np.mod(np.reshape(a, (-1, 1)), np._constant_like(a, x.shape[i]))\n+ for i, a in enumerate(idx)], axis=1)\n+\n+ y = np.broadcast_to(y, idx[0].shape + onp.shape(x)[len(idx):])\n+ y = lax.reshape(y, (stacked_idx.shape[0],) + onp.shape(x)[len(idx):])\n+\n+ dnums = lax.ScatterDimensionNumbers(\n+ update_window_dims=tuple(range(1, y.ndim)),\n+ inserted_window_dims=tuple(range(len(idx))),\n+ scatter_dims_to_operand_dims=tuple(range(len(idx))))\n+ return scatter_op(x, stacked_idx, y, dnums)\n+ elif np._is_advanced_int_indexer(idx):\n+ # TODO(mattjj, phawkins): one of us is going to implement this case someday\n+ msg = \"Unimplemented case for indexed update. Open a feature request!\"\n+ raise NotImplementedError(msg)\n+\n+ # At this point there's no advanced indexing going on, so we process each\n+ # element of the index one at a time to build up a scatter.\nif not isinstance(idx, tuple):\nidx = (idx,)\n- # Test for unsupported advanced indexing and report an error.\n- if any(onp.ndim(elt) != 0 for elt in idx):\n- raise NotImplementedError(\"Unimplemented case for indexed update. Advanced \"\n- \"indexing is not yet implemented.\")\n-\n# Remove ellipses and add trailing slice(None)s.\nidx = np._canonicalize_tuple_index(x, idx)\n@@ -90,6 +126,7 @@ def _scatter_update(x, idx, y, scatter_op):\ni = lax.convert_element_type(i, np.int32)\ni = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,))\nscatter_indices = np.concatenate((scatter_indices, i), -1)\n+ # (a, b, c, 3) -> (a, b, c, 4)\ninserted_window_dims.append(x_axis)\nscatter_dims_to_operand_dims.append(x_axis)\nx_axis += 1\n@@ -189,10 +226,11 @@ def index_add(x, idx, y):\n(e.g., due to concurrency on some hardware platforms).\nArgs:\n- x: an array.\n- idx: a Numpy-style basic index, consisting of `None`, integers, `slice`\n- objects, ellipses, or a tuple of the above. A convenient syntactic sugar\n- for forming indices is via the :data:`jax.ops.index` object.\n+ x: an array with the values to be updated.\n+ idx: a Numpy-style index, consisting of `None`, integers, `slice` objects,\n+ ellipses, ndarrays with integer dtypes, or a tuple of the above. A\n+ convenient syntactic sugar for forming indices is via the\n+ :data:`jax.ops.index` object.\ny: the array of updates. `y` must be broadcastable to the shape of the\narray that would be returned by `x[idx]`.\n@@ -225,10 +263,11 @@ def index_update(x, idx, y):\nupdates on some hardware platforms).\nArgs:\n- x: an array.\n- idx: a Numpy-style basic index, consisting of `None`, integers, `slice`\n- objects, ellipses, or a tuple of the above. A convenient syntactic sugar\n- for forming indices is via the :data:`jax.ops.index` object.\n+ x: an array with the values to be updated.\n+ idx: a Numpy-style index, consisting of `None`, integers, `slice` objects,\n+ ellipses, ndarrays with integer dtypes, or a tuple of the above. A\n+ convenient syntactic sugar for forming indices is via the\n+ :data:`jax.ops.index` object.\ny: the array of updates. `y` must be broadcastable to the shape of the\narray that would be returned by `x[idx]`.\n@@ -244,3 +283,32 @@ def index_update(x, idx, y):\n[1., 1., 1., 6., 6., 6.]], dtype=float32)\n\"\"\"\nreturn _scatter_update(x, idx, y, lax.scatter)\n+\n+def segment_sum(data, segment_ids, num_segments=None):\n+ \"\"\"Computes the sum within segments of an array.\n+\n+ Similar to TensorFlow's segment_sum:\n+ https://www.tensorflow.org/api_docs/python/tf/math/segment_sum\n+\n+ Args:\n+ data: an array with the values to be summed.\n+ segment_ids: an array with integer dtype that indicates the segments of\n+ `data` (along its leading axis) to be summed. Values can be repeated and\n+ need not be sorted. Values outside of the range [0, num_segments) are\n+ wrapped into that range by applying np.mod.\n+ num_segments: optional, an int with positive value indicating the number of\n+ segments. The default is ``max(segment_ids % data.shape[0]) + 1`` but\n+ since `num_segments` determines the size of the output, a static value\n+ must be provided to use `segment_sum` in a `jit`-compiled function.\n+\n+ Returns:\n+ An array with shape ``(num_segments,) + data.shape[1:]`` representing the\n+ segment sums.\n+ \"\"\"\n+ if num_segments is None:\n+ num_segments = np.max(np.mod(segment_ids, data.shape[0])) + 1\n+ num_segments = int(num_segments)\n+\n+ out = np.zeros((num_segments,) + data.shape[1:], dtype=data.dtype)\n+ segment_ids = np.mod(segment_ids, num_segments)\n+ return index_add(out, segment_ids, data)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_indexing_test.py",
"new_path": "tests/lax_numpy_indexing_test.py",
"diff": "@@ -216,6 +216,157 @@ STATIC_INDEXING_GRAD_TESTS = [\n# ]),\n]\n+ADVANCED_INDEXING_TESTS = [\n+ (\"One1DIntArrayIndex\",\n+ [IndexSpec(shape=(3,), indexer=onp.array([0, 1])),\n+ IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])),\n+ IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])),\n+ IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),\n+ IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),\n+ ]),\n+ (\"One2DIntArrayIndex\",\n+ [IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])),\n+ IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1],\n+ [0, 1, -1]])),\n+ IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1],\n+ [-1, -2, 1, 0]])),\n+ ]),\n+ (\"Two1DIntArrayIndicesNoBroadcasting\",\n+ [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),\n+ onp.array([1, 2])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]),\n+ onp.array([-1, 0, -1, 2])]),\n+ ]),\n+ (\"Two1DIntArrayIndicesWithBroadcasting\",\n+ [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),\n+ onp.array([1, 2])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]),\n+ onp.array([-1, 0, -1, 2])]),\n+ ]),\n+ (\"ListOfPythonInts\",\n+ [IndexSpec(shape=(3,), indexer=[0, 1, 0]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),\n+ ]),\n+ (\"ListOfListsOfPythonInts\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]),\n+ ]),\n+ (\"TupleOfListsOfPythonInts\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])),\n+ IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])),\n+ ]),\n+ (\"ListOfPythonIntsAndIntArrays\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[0, 1,\n+ onp.array([[2, 3, 0, 3]])]),\n+ ]),\n+ (\"ListOfListsOfPythonIntsAndIntArrays\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],\n+ onp.array([[2, 3, 0, 3]])]),\n+ ]),\n+]\n+\n+ADVANCED_INDEXING_TESTS_NO_REPEATS = [\n+ (\"One1DIntArrayIndex\",\n+ [IndexSpec(shape=(3,), indexer=onp.array([0, 1])),\n+ IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 0])),\n+ IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 1])),\n+ IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),\n+ IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),\n+ ]),\n+ (\"One2DIntArrayIndex\",\n+ [IndexSpec(shape=(3,), indexer=onp.array([[0, 1]])),\n+ IndexSpec(shape=(6, 6), indexer=onp.array([[1, 2, 0],\n+ [3, 4, -1]])),\n+ ]),\n+ (\"Two1DIntArrayIndicesNoBroadcasting\",\n+ [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),\n+ onp.array([1, 2])]),\n+ IndexSpec(shape=(4, 5, 6), indexer=[onp.array([0, 2, 1, 3]),\n+ onp.array([-1, 0, -2, 1])]),\n+ ]),\n+ (\"Two1DIntArrayIndicesWithBroadcasting\",\n+ [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),\n+ onp.array([1, 2])]),\n+ IndexSpec(shape=(4, 5, 6), indexer=[onp.array([[0, 2, -1, 1]]),\n+ onp.array([-1, 0, -2, 2])]),\n+ ]),\n+ (\"ListOfPythonInts\",\n+ [IndexSpec(shape=(3,), indexer=[0, 2, 1]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),\n+ ]),\n+ (\"ListOfListsOfPythonInts\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0]]]),\n+ ]),\n+ (\"TupleOfListsOfPythonInts\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])),\n+ IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]])),\n+ ]),\n+ (\"ListOfPythonIntsAndIntArrays\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[0, 1,\n+ onp.array([[2, 3, 0]])]),\n+ ]),\n+ (\"ListOfListsOfPythonIntsAndIntArrays\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],\n+ onp.array([[2, 3, 0]])]),\n+ ]),\n+]\n+\n+MIXED_ADVANCED_INDEXING_TESTS = [\n+ (\"SlicesAndOneIntArrayIndex\",\n+ [IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))),\n+ IndexSpec(shape=(2, 3), indexer=(slice(0, 2),\n+ onp.array([0, 2]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n+ onp.array([0, 2]),\n+ slice(None))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n+ onp.array([[0, 2], [1, 1]]),\n+ slice(None))),\n+ ]),\n+ (\"SlicesAndTwoIntArrayIndices\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n+ onp.array([0, 2]),\n+ onp.array([-1, 2]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n+ Ellipsis,\n+ onp.array([-1, 2]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n+ onp.array([-1, 2]),\n+ Ellipsis)),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n+ onp.array([-1, 2]),\n+ slice(1, 3))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n+ slice(1, 3),\n+ onp.array([-1, 2]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]),\n+ slice(None, None, 2),\n+ onp.array([-1, 2, -1]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]),\n+ Ellipsis,\n+ onp.array([[1, 0], [1, 0]]))),\n+ ]),\n+ (\"NonesAndIntArrayIndices\",\n+ [IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]),\n+ None,\n+ onp.array([-1, 2])]),\n+ IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n+ None,\n+ None,\n+ onp.array([-1, 2]))),\n+ IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n+ onp.array([0, 2]),\n+ None,\n+ None,\n+ onp.array([-1, 2]))),\n+ ]),\n+]\n+\nclass IndexingTest(jtu.JaxTestCase):\n\"\"\"Tests for Numpy indexing translation rules.\"\"\"\n@@ -371,56 +522,7 @@ class IndexingTest(jtu.JaxTestCase):\n{\"testcase_name\": \"{}_inshape={}_indexer={}\"\n.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n\"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"indexer\": indexer}\n- for name, index_specs in [\n- (\"One1DIntArrayIndex\",\n- [IndexSpec(shape=(3,), indexer=onp.array([0, 1])),\n- IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])),\n- IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])),\n- IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),\n- IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),\n- ]),\n- (\"One2DIntArrayIndex\",\n- [IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])),\n- IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1],\n- [0, 1, -1]])),\n- IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1],\n- [-1, -2, 1, 0]])),\n- ]),\n- (\"Two1DIntArrayIndicesNoBroadcasting\",\n- [IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),\n- onp.array([1, 2])]),\n- IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]),\n- onp.array([-1, 0, -1, 2])]),\n- ]),\n- (\"Two1DIntArrayIndicesWithBroadcasting\",\n- [IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),\n- onp.array([1, 2])]),\n- IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]),\n- onp.array([-1, 0, -1, 2])]),\n- ]),\n- (\"ListOfPythonInts\",\n- [IndexSpec(shape=(3,), indexer=[0, 1, 0]),\n- IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),\n- ]),\n- (\"ListOfListsOfPythonInts\",\n- [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),\n- IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]),\n- ]),\n- (\"TupleOfListsOfPythonInts\",\n- [IndexSpec(shape=(3, 4, 5), indexer=([0, 1])),\n- IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])),\n- ]),\n- (\"ListOfPythonIntsAndIntArrays\",\n- [IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),\n- IndexSpec(shape=(3, 4, 5), indexer=[0, 1,\n- onp.array([[2, 3, 0, 3]])]),\n- ]),\n- (\"ListOfListsOfPythonIntsAndIntArrays\",\n- [IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),\n- IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],\n- onp.array([[2, 3, 0, 3]])]),\n- ]),\n- ]\n+ for name, index_specs in ADVANCED_INDEXING_TESTS\nfor shape, indexer in index_specs\nfor dtype in all_dtypes\nfor rng in [jtu.rand_default()])\n@@ -492,56 +594,7 @@ class IndexingTest(jtu.JaxTestCase):\n{\"testcase_name\": \"{}_inshape={}_indexer={}\"\n.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),\n\"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"indexer\": indexer}\n- for name, index_specs in [\n- (\"SlicesAndOneIntArrayIndex\",\n- [IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))),\n- IndexSpec(shape=(2, 3), indexer=(slice(0, 2),\n- onp.array([0, 2]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n- onp.array([0, 2]),\n- slice(None))),\n- IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n- onp.array([[0, 2], [1, 1]]),\n- slice(None))),\n- ]),\n- (\"SlicesAndTwoIntArrayIndices\",\n- [IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n- onp.array([0, 2]),\n- onp.array([-1, 2]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n- Ellipsis,\n- onp.array([-1, 2]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n- onp.array([-1, 2]),\n- Ellipsis)),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n- onp.array([-1, 2]),\n- slice(1, 3))),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n- slice(1, 3),\n- onp.array([-1, 2]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]),\n- slice(None, None, 2),\n- onp.array([-1, 2, -1]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]),\n- Ellipsis,\n- onp.array([[1, 0], [1, 0]]))),\n- ]),\n- (\"NonesAndIntArrayIndices\",\n- [IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]),\n- None,\n- onp.array([-1, 2])]),\n- IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),\n- None,\n- None,\n- onp.array([-1, 2]))),\n- IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,\n- onp.array([0, 2]),\n- None,\n- None,\n- onp.array([-1, 2]))),\n- ]),\n- ]\n+ for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS\nfor shape, indexer in index_specs\nfor dtype in all_dtypes\nfor rng in [jtu.rand_default()])\n@@ -706,6 +759,39 @@ class IndexedUpdateTest(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True)\nself._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)\n+ @parameterized.named_parameters(jtu.cases_from_list({\n+ \"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n+ name, jtu.format_shape_dtype_string(shape, dtype), indexer,\n+ jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),\n+ \"shape\": shape, \"dtype\": dtype, \"rng\": rng, \"indexer\": indexer,\n+ \"update_shape\": update_shape, \"update_dtype\": update_dtype,\n+ \"op\": op\n+ } for name, index_specs in ADVANCED_INDEXING_TESTS_NO_REPEATS\n+ for shape, indexer in index_specs\n+ for op in [UpdateOps.UPDATE, UpdateOps.ADD]\n+ for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)\n+ for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))\n+ for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes)\n+ for rng in [jtu.rand_default()]))\n+ def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\n+ rng, indexer, op):\n+ if FLAGS.jax_test_dut == \"cpu\" and not shape:\n+ # TODO(b/127315062): this case causes an XLA crash on CPU. Reenable when\n+ # fixed.\n+ raise unittest.SkipTest(\"Test case crashes on CPU\")\n+ args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\n+ def onp_fn(x, y):\n+ x = x.copy()\n+ if op == UpdateOps.UPDATE:\n+ x[indexer] = y\n+ else:\n+ x[indexer] += y\n+ return x\n+\n+ jax_op = ops.index_update if op == UpdateOps.UPDATE else ops.index_add\n+ jax_fn = lambda x, y: jax_op(x, indexer, y)\n+ self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True)\n+ self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)\n@parameterized.named_parameters(jtu.cases_from_list({\n\"testcase_name\": \"{}_inshape={}_indexer={}_update={}_op={}\".format(\n@@ -729,6 +815,32 @@ class IndexedUpdateTest(jtu.JaxTestCase):\ny = rng(update_shape, update_dtype)\ncheck_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)\n+ def testSegmentSumBehavior(self):\n+ # testAdvancedIndexing compares against NumPy, and as a result doesn't check\n+ # repeated indices. This test is just a simple manual check, based on\n+ # https://www.tensorflow.org/api_docs/python/tf/math/segment_sum\n+ data = onp.array([5, 1, 7, 2, 3, 4, 1, 3])\n+ segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3])\n+\n+ ans = ops.index_add(onp.zeros(onp.max(segment_ids) + 1), segment_ids, data)\n+ expected = onp.array([13, 2, 7, 4])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testSegmentSum(self):\n+ data = onp.array([5, 1, 7, 2, 3, 4, 1, 3])\n+ segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3])\n+\n+ # test with explicit num_segments\n+ ans = ops.segment_sum(data, segment_ids, num_segments=4)\n+ expected = onp.array([13, 2, 7, 4])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ # test without explicit num_segments\n+ ans = ops.segment_sum(data, segment_ids)\n+ expected = onp.array([13, 2, 7, 4])\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+\nif __name__ == \"__main__\":\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | add advanced indexing support to jax index ops
fixes #658
This commit adds advanced indexing support to jax index operations,
namely index_update and index_add, but does *not* add support for mixed
advanced indexing and slicing. That's left as a NotImplementedError.
This commit also added a segment_sum convenience wrapper. |
260,335 | 04.05.2019 16:39:39 | 25,200 | 41a7a9448d58ec5f8900355eb6dc8963e3e1a990 | fix up the is_advanced_int_indexer logic | [
{
"change_type": "MODIFY",
"old_path": "jax/ops/scatter.py",
"new_path": "jax/ops/scatter.py",
"diff": "@@ -26,6 +26,19 @@ from .. import lax\nfrom ..numpy import lax_numpy as np\n+# TODO(mattjj): clean up this logic\n+def _is_advanced_int_indexer(idx):\n+ _int = lambda aval: not aval.shape and onp.issubdtype(aval.dtype, onp.integer)\n+ try:\n+ abstract_idx = core.get_aval(idx)\n+ except TypeError:\n+ abstract_idx = None\n+ out = not (isinstance(abstract_idx, ConcreteArray) and _int(abstract_idx) or\n+ isinstance(abstract_idx, ShapedArray) and _int(abstract_idx) or\n+ isinstance(idx, slice) or\n+ isinstance(idx, tuple) and all(onp.ndim(elt) == 0 for elt in idx))\n+ return out and np._is_advanced_int_indexer(idx)\n+\ndef _scatter_update(x, idx, y, scatter_op):\n\"\"\"Helper for indexed updates.\n@@ -55,6 +68,7 @@ def _scatter_update(x, idx, y, scatter_op):\n# Check if there's advanced indexing going on, and handle differently based on\n# whether it is or isn't mixed with basic indexing.\n+ if _is_advanced_int_indexer(idx):\nif np._is_advanced_int_indexer_without_slices(idx):\nif isinstance(idx, (tuple, list)):\nif any(onp.shape(e) for e in idx):\n@@ -83,6 +97,8 @@ def _scatter_update(x, idx, y, scatter_op):\n# TODO(mattjj, phawkins): one of us is going to implement this case someday\nmsg = \"Unimplemented case for indexed update. Open a feature request!\"\nraise NotImplementedError(msg)\n+ else:\n+ assert False # unreachable\n# At this point there's no advanced indexing going on, so we process each\n# element of the index one at a time to build up a scatter.\n"
}
] | Python | Apache License 2.0 | google/jax | fix up the is_advanced_int_indexer logic |
260,335 | 06.05.2019 14:37:41 | 25,200 | 3ab52646f230c204be76ba4eb1c36ac3bb04857e | reviewer-suggested fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/ops/scatter.py",
"new_path": "jax/ops/scatter.py",
"diff": "@@ -142,7 +142,6 @@ def _scatter_update(x, idx, y, scatter_op):\ni = lax.convert_element_type(i, np.int32)\ni = np.broadcast_to(i, tuple(scatter_indices.shape[:-1]) + (1,))\nscatter_indices = np.concatenate((scatter_indices, i), -1)\n- # (a, b, c, 3) -> (a, b, c, 4)\ninserted_window_dims.append(x_axis)\nscatter_dims_to_operand_dims.append(x_axis)\nx_axis += 1\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_indexing_test.py",
"new_path": "tests/lax_numpy_indexing_test.py",
"diff": "@@ -775,10 +775,6 @@ class IndexedUpdateTest(jtu.JaxTestCase):\nfor rng in [jtu.rand_default()]))\ndef testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,\nrng, indexer, op):\n- if FLAGS.jax_test_dut == \"cpu\" and not shape:\n- # TODO(b/127315062): this case causes an XLA crash on CPU. Reenable when\n- # fixed.\n- raise unittest.SkipTest(\"Test case crashes on CPU\")\nargs_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]\ndef onp_fn(x, y):\nx = x.copy()\n"
}
] | Python | Apache License 2.0 | google/jax | reviewer-suggested fixes |
260,335 | 06.05.2019 16:04:48 | 25,200 | e75118936689524ad08d8e40406dd4ef5cdd9e22 | add optimizer utilities (fixes and | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -79,7 +79,8 @@ from six.moves import reduce\nimport jax.numpy as np\nfrom jax.util import partial, safe_zip, safe_map, unzip2\nfrom jax import tree_util\n-from jax.tree_util import tree_flatten, tree_unflatten, register_pytree_node\n+from jax.tree_util import (tree_map, tree_flatten, tree_unflatten,\n+ register_pytree_node)\nmap = safe_map\nzip = safe_zip\n@@ -376,3 +377,17 @@ def make_schedule(scalar_or_schedule):\nreturn constant(scalar_or_schedule)\nelse:\nraise TypeError(type(scalar_or_schedule))\n+\n+\n+### utilities\n+\n+def l2_norm(tree):\n+ \"\"\"Compute the l2 norm of a pytree of arrays. Useful for weight decay.\"\"\"\n+ leaves, _ = tree_flatten(tree)\n+ return np.sqrt(sum(np.vdot(x, x) for x in leaves))\n+\n+def clip_grads(grad_tree, max_norm):\n+ \"\"\"Clip gradients stored as a pytree of arrays to maximum norm `max_norm`.\"\"\"\n+ norm = l2_norm(grad_tree)\n+ normalize = lambda g: np.where(norm < max_norm, g, g * (max_norm / norm))\n+ return tree_map(normalize, grad_tree)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -20,6 +20,8 @@ from __future__ import print_function\nimport functools\nfrom absl.testing import absltest\n+import numpy as onp\n+\nimport jax.numpy as np\nimport jax.test_util as jtu\nfrom jax import jit, grad\n@@ -213,6 +215,26 @@ class OptimizerTests(jtu.JaxTestCase):\nopt_state = init_fun(np.zeros(3))\nself.assertRaises(TypeError, lambda: update_fun(opt_state))\n+ def testUtilityNorm(self):\n+ x0 = (np.ones(2), (np.ones(3), np.ones(4)))\n+ norm = optimizers.l2_norm(x0)\n+ expected = onp.sqrt(onp.sum(onp.ones(2+3+4)**2))\n+ self.assertAllClose(norm, expected, check_dtypes=False)\n+\n+ def testUtilityClipGrads(self):\n+ g = (np.ones(2), (np.ones(3), np.ones(4)))\n+ norm = optimizers.l2_norm(g)\n+\n+ ans = optimizers.clip_grads(g, 1.1 * norm)\n+ expected = g\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ ans = optimizers.l2_norm(optimizers.clip_grads(g, 0.9 * norm))\n+ expected = 0.9 * norm\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | add optimizer utilities (fixes #244 and #681) |
260,335 | 06.05.2019 16:18:34 | 25,200 | 690301357d85474fdd683252c04e26452026d591 | improve pmap error messages | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -507,10 +507,10 @@ def _pmap_axis_size(args):\nleaves, _ = tree_flatten(args)\naxis_sizes = reduce(set.union, map(_jaxtype_axis_size, leaves), set())\nif len(axis_sizes) == 0:\n- raise TypeError(\"pmap requires a leading axis to map over\")\n+ raise ValueError(\"pmap requires a leading axis to map over.\")\nif len(axis_sizes) > 1:\nmsg = \"pmap requires all leading axes to have equal length, got {}.\"\n- raise TypeError(msg.format(axis_sizes))\n+ raise ValueError(msg.format(axis_sizes))\nreturn axis_sizes.pop()\ndef _jaxtype_axis_size(x):\n@@ -520,7 +520,10 @@ def _aval_axis_size(aval):\nif isinstance(aval, core.AbstractTuple):\nreturn reduce(set.union, map(_aval_axis_size, aval), set())\nelse:\n+ if aval.shape:\nreturn {aval.shape[0]}\n+ else:\n+ raise ValueError(\"pmap can't map over scalars.\")\ndef _serial_pmap(fun, axis_name=None, in_axes=0, out_axes=0):\n"
}
] | Python | Apache License 2.0 | google/jax | improve pmap error messages |
260,335 | 06.05.2019 18:21:13 | 25,200 | 535773fe89f56b30bf94ab79e36d39ee07b5a8f7 | avoid destructuring on len(sharded_device_tuple) | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -355,7 +355,7 @@ class ShardedDeviceTuple(xla.DeviceTuple):\nreturn iter(elts)\ndef __len__(self):\n- return len(self.device_buffers[0].destructure())\n+ return len(self.aval)\ndef __repr__(self):\nreturn 'ShardedDeviceTuple(len={length})'.format(length=len(self))\n"
}
] | Python | Apache License 2.0 | google/jax | avoid destructuring on len(sharded_device_tuple) |
260,335 | 06.05.2019 22:43:31 | 25,200 | 19e0f8de45141c5eeee6a7f17ab7cff58c7e3537 | fix tuple unpacking problems | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -505,7 +505,7 @@ def pmap(fun, axis_name=None):\ndef _pmap_axis_size(args):\nleaves, _ = tree_flatten(args)\n- axis_sizes = reduce(set.union, map(_jaxtype_axis_size, leaves), set())\n+ axis_sizes = reduce(set.union, map(_axis_size, leaves), set())\nif len(axis_sizes) == 0:\nraise ValueError(\"pmap requires a leading axis to map over.\")\nif len(axis_sizes) > 1:\n@@ -513,8 +513,12 @@ def _pmap_axis_size(args):\nraise ValueError(msg.format(axis_sizes))\nreturn axis_sizes.pop()\n-def _jaxtype_axis_size(x):\n- return _aval_axis_size(core.get_aval(x))\n+def _axis_size(x):\n+ if isinstance(x, core.Tracer):\n+ aval = x.aval\n+ else:\n+ aval = xla.abstractify(x)\n+ return _aval_axis_size(aval)\ndef _aval_axis_size(aval):\nif isinstance(aval, core.AbstractTuple):\n@@ -835,10 +839,18 @@ def _argnums_partial_(dyn_argnums, fixed_args, *dyn_args, **kwargs):\ndef _check_args(args):\nfor arg in args:\n- if not (isinstance(arg, core.Tracer) or core.valid_jaxtype(arg)):\n+ if not (isinstance(arg, core.Tracer) or _valid_jaxtype(arg)):\nraise TypeError(\"Argument '{}' of type {} is not a valid JAX type\"\n.format(arg, type(arg)))\n+def _valid_jaxtype(arg):\n+ try:\n+ xla.abstractify(arg)\n+ except TypeError:\n+ return False\n+ else:\n+ return True\n+\ndef custom_transforms(fun):\nname = getattr(fun, '__name__', '<unnamed user primitive>')\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -422,6 +422,7 @@ def valid_jaxtype(x):\nconcrete_aval(x)\nexcept TypeError:\nreturn False\n+ else:\nreturn True\n"
}
] | Python | Apache License 2.0 | google/jax | fix tuple unpacking problems |
260,335 | 06.05.2019 22:43:46 | 25,200 | 9fc47e51f57d1be35539b824ba390e9860a932af | use DeviceTuples in optimizer states again | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -77,6 +77,7 @@ import operator\nfrom six.moves import reduce\nimport jax.numpy as np\n+from jax.core import pack\nfrom jax.util import partial, safe_zip, safe_map, unzip2\nfrom jax import tree_util\nfrom jax.tree_util import (tree_map, tree_flatten, tree_unflatten,\n@@ -97,9 +98,6 @@ zip = safe_zip\n# dispatch to a `jit`-compiled `update_fun`. That JaxTuple-of-JaxTuples is\n# stored together with the tree structure data in an OptimizerState instance.\n-# TODO(mattjj): replace this with core.pack when DeviceTuples are ready\n-pack = tuple\n-\nOptimizerState = namedtuple(\"OptimizerState\",\n[\"packed_state\", \"tree_def\", \"subtree_defs\"])\nregister_pytree_node(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -189,15 +189,14 @@ class OptimizerTests(jtu.JaxTestCase):\nupdate(opt_state, 0.9) # doesn't crash\n- # TODO(mattjj): re-enable when DeviceTuples are ready\n- # def testDeviceTupleState(self):\n- # init_fun, update_fun, _ = optimizers.sgd(0.1)\n- # opt_state = init_fun(np.zeros(3))\n- # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- # self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n- # opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n- # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- # self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\n+ def testDeviceTupleState(self):\n+ init_fun, update_fun, _ = optimizers.sgd(0.1)\n+ opt_state = init_fun(np.zeros(3))\n+ self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n+ opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n+ self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\ndef testUpdateFunStructureMismatchErrorMessage(self):\n@optimizers.optimizer\n"
}
] | Python | Apache License 2.0 | google/jax | use DeviceTuples in optimizer states again |
260,335 | 07.05.2019 15:50:22 | 25,200 | 9788a3584a458750792cd91e550bf2b8f802a4b6 | bump version for pypi, leave DeviceTuples off | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -77,7 +77,6 @@ import operator\nfrom six.moves import reduce\nimport jax.numpy as np\n-from jax.core import pack\nfrom jax.util import partial, safe_zip, safe_map, unzip2\nfrom jax import tree_util\nfrom jax.tree_util import (tree_map, tree_flatten, tree_unflatten,\n@@ -98,6 +97,8 @@ zip = safe_zip\n# dispatch to a `jit`-compiled `update_fun`. That JaxTuple-of-JaxTuples is\n# stored together with the tree structure data in an OptimizerState instance.\n+pack = tuple # TODO(mattjj): replace with core.pack\n+\nOptimizerState = namedtuple(\"OptimizerState\",\n[\"packed_state\", \"tree_def\", \"subtree_defs\"])\nregister_pytree_node(\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/version.py",
"new_path": "jax/version.py",
"diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-__version__ = \"0.1.26\"\n+__version__ = \"0.1.27\"\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -189,14 +189,15 @@ class OptimizerTests(jtu.JaxTestCase):\nupdate(opt_state, 0.9) # doesn't crash\n- def testDeviceTupleState(self):\n- init_fun, update_fun, _ = optimizers.sgd(0.1)\n- opt_state = init_fun(np.zeros(3))\n- self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n- opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n- self.assertIsInstance(opt_state, optimizers.OptimizerState)\n- self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\n+ # TODO(mattjj): re-enable\n+ # def testDeviceTupleState(self):\n+ # init_fun, update_fun, _ = optimizers.sgd(0.1)\n+ # opt_state = init_fun(np.zeros(3))\n+ # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ # self.assertIsInstance(opt_state.packed_state, core.JaxTuple)\n+ # opt_state = jit(update_fun)(0, np.zeros(3), opt_state)\n+ # self.assertIsInstance(opt_state, optimizers.OptimizerState)\n+ # self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)\ndef testUpdateFunStructureMismatchErrorMessage(self):\n@optimizers.optimizer\n"
}
] | Python | Apache License 2.0 | google/jax | bump version for pypi, leave DeviceTuples off |
260,609 | 25.04.2019 00:56:45 | 18,000 | 725a4695389f09777642a5c8d67d4b7d1d0f3b35 | Started work on np.average | [
{
"change_type": "MODIFY",
"old_path": "jax/numpy/lax_numpy.py",
"new_path": "jax/numpy/lax_numpy.py",
"diff": "@@ -978,6 +978,50 @@ def mean(a, axis=None, dtype=None, out=None, keepdims=False):\nsum(a, axis, dtype=dtype, keepdims=keepdims),\nlax.convert_element_type(normalizer, dtype))\n+@_wraps(onp.average)\n+def average(a, axis=None, weights=None, returned=False):\n+ a = asarray(a)\n+\n+ if weights is None: # Treat all weights as 1\n+ avg = mean(a, axis=axis)\n+ if axis is None:\n+ weights_sum = full((), size(a), dtype=avg.dtype)\n+ else:\n+ weights_sum = full_like(avg, avg.shape[axis], dtype=avg.dtype)\n+ else:\n+ weights = asarray(weights)\n+\n+ out_dtype = _result_dtype(onp.average, a, axis, weights, returned=False)\n+\n+ a_shape = shape(a)\n+ a_ndim = len(a_shape)\n+ weights_shape = shape(weights)\n+ axis = _canonicalize_axis(axis, a_ndim)\n+\n+ if a_shape != weights_shape:\n+ # Make sure the dimensions work out\n+ if axis is None:\n+ raise ValueError(\"Axis must be specified when shapes of a and \"\n+ \"weights differ.\")\n+ if len(weights_shape) != 1:\n+ raise ValueError(\"1D weights expected when shapes of a and \"\n+ \"weights differ.\")\n+ if weights_shape[0] != a_shape[axis]:\n+ raise ValueError(\"Length of weights not \"\n+ \"compatible with specified axis.\")\n+\n+ weights = broadcast_to(weights, weights_shape + (a_ndim - 1) * (1,))\n+ weights = moveaxis(weights, 0, axis)\n+\n+ weights_sum = sum(weights, axis=axis, dtype=out_dtype)\n+ avg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum\n+\n+ if returned:\n+ if avg.shape != weights_sum.shape:\n+ weights_sum = broadcast_to(weights_sum, avg.shape)\n+ return avg, weights_sum\n+ return avg\n+\n@_wraps(onp.var)\ndef var(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -994,6 +994,29 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\nself._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"_shape={}_axis={}_weights={}_returned={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype),\n+ (None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),\n+ axis, returned),\n+ \"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype,\n+ \"axis\": axis, \"weights_shape\": weights_shape, \"returned\": returned}\n+ for shape in all_shapes\n+ for dtype in number_dtypes\n+ for axis in set(range(-len(shape), len(shape))) | set([None])\n+ # `weights_shape` is either `None`, same as the averaged axis, or same as\n+ # that of the input\n+ for weights_shape in ([None, shape] if axis is None else [None, (shape[axis],), shape])\n+ for returned in [False, True]))\n+ def testAverage(self, shape, dtype, axis, weights_shape, returned, rng):\n+ onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)\n+ lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)\n+ args_maker = lambda: [rng(shape, dtype),\n+ None if weights_shape is None else rng(weights_shape, dtype)]\n+\n+ self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n+ self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_arg{}\".format(i), \"arg\": arg}\nfor i, arg in enumerate([\n"
}
] | Python | Apache License 2.0 | google/jax | Started work on np.average |
260,609 | 28.04.2019 15:00:11 | 18,000 | 3468e87f02808d497c7c396e6cb76cb60c269178 | Average fixes. Doesn't work for non-empty shapes | [
{
"change_type": "MODIFY",
"old_path": "jax/numpy/lax_numpy.py",
"new_path": "jax/numpy/lax_numpy.py",
"diff": "@@ -987,11 +987,11 @@ def average(a, axis=None, weights=None, returned=False):\nif axis is None:\nweights_sum = full((), size(a), dtype=avg.dtype)\nelse:\n- weights_sum = full_like(avg, avg.shape[axis], dtype=avg.dtype)\n+ weights_sum = full_like(avg, a.shape[axis], dtype=avg.dtype)\nelse:\nweights = asarray(weights)\n- out_dtype = _result_dtype(onp.average, a, axis, weights, returned=False)\n+ out_dtype = _result_dtype(onp.average, a, axis, weights)\na_shape = shape(a)\na_ndim = len(a_shape)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -997,11 +997,12 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_shape={}_axis={}_weights={}_returned={}\".format(\njtu.format_shape_dtype_string(shape, dtype),\n+ axis,\n(None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),\n- axis, returned),\n+ returned),\n\"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype,\n\"axis\": axis, \"weights_shape\": weights_shape, \"returned\": returned}\n- for shape in all_shapes\n+ for shape in nonempty_shapes\nfor dtype in number_dtypes\nfor axis in set(range(-len(shape), len(shape))) | set([None])\n# `weights_shape` is either `None`, same as the averaged axis, or same as\n"
}
] | Python | Apache License 2.0 | google/jax | Average fixes. Doesn't work for non-empty shapes |
260,609 | 07.05.2019 02:17:42 | 18,000 | 4ef154a3afea1541e57e3a84cf20b06eccd306c8 | Fixes, and skip test if ZeroDivisionError | [
{
"change_type": "MODIFY",
"old_path": "jax/numpy/lax_numpy.py",
"new_path": "jax/numpy/lax_numpy.py",
"diff": "@@ -991,12 +991,17 @@ def average(a, axis=None, weights=None, returned=False):\nelse:\nweights = asarray(weights)\n- out_dtype = _result_dtype(onp.average, a, axis, weights)\n+ if issubdtype(a.dtype, integer) or issubdtype(a.dtype, bool_):\n+ out_dtype = xla_bridge.canonicalize_dtype(result_type(a.dtype,\n+ weights.dtype,\n+ floating))\n+ else:\n+ out_dtype = xla_bridge.canonicalize_dtype(result_type(a.dtype, weights.dtype))\na_shape = shape(a)\na_ndim = len(a_shape)\nweights_shape = shape(weights)\n- axis = _canonicalize_axis(axis, a_ndim)\n+ axis = None if axis is None else _canonicalize_axis(axis, a_ndim)\nif a_shape != weights_shape:\n# Make sure the dimensions work out\n@@ -1010,8 +1015,8 @@ def average(a, axis=None, weights=None, returned=False):\nraise ValueError(\"Length of weights not \"\n\"compatible with specified axis.\")\n- weights = broadcast_to(weights, weights_shape + (a_ndim - 1) * (1,))\n- weights = moveaxis(weights, 0, axis)\n+ weights = broadcast_to(weights, (a_ndim - 1) * (1,) + weights_shape)\n+ weights = moveaxis(weights, -1, axis)\nweights_sum = sum(weights, axis=axis, dtype=out_dtype)\navg = sum(multiply(a, weights), axis=axis, dtype=out_dtype) / weights_sum\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -1000,8 +1000,8 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\naxis,\n(None if weights_shape == None else jtu.format_shape_dtype_string(weights_shape, dtype)),\nreturned),\n- \"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype,\n- \"axis\": axis, \"weights_shape\": weights_shape, \"returned\": returned}\n+ \"rng\": jtu.rand_default(), \"shape\": shape, \"dtype\": dtype, \"axis\": axis,\n+ \"weights_shape\": weights_shape, \"returned\": returned}\nfor shape in nonempty_shapes\nfor dtype in number_dtypes\nfor axis in set(range(-len(shape), len(shape))) | set([None])\n@@ -1015,7 +1015,10 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nargs_maker = lambda: [rng(shape, dtype),\nNone if weights_shape is None else rng(weights_shape, dtype)]\n+ try:\nself._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)\n+ except ZeroDivisionError:\n+ self.skipTest(\"don't support checking for ZeroDivisionError\")\nself._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n@parameterized.named_parameters(jtu.cases_from_list(\n"
}
] | Python | Apache License 2.0 | google/jax | Fixes, and skip test if ZeroDivisionError |
260,335 | 26.03.2019 15:27:16 | 25,200 | 14d0c8ed4663155da0f33d070a4aeb91e36ea8f9 | one more zip* needed on _scan_jvp | [
{
"change_type": "MODIFY",
"old_path": "jax/scan.py",
"new_path": "jax/scan.py",
"diff": "@@ -149,8 +149,10 @@ def _scan_jvp(primals, tangents, avals, jaxpr):\n# TODO: plumb symbolic zeros in and out of jvp transformation so we can test\n# that they're the same as the inputs and re-run if not\n- return scan_p.bind(core.pack(consts), init_dual, xs_dual,\n+ ans = scan_p.bind(core.pack(consts), init_dual, xs_dual,\navals=avals, jaxpr=jvp_jaxpr)\n+ (y, y_dot), (carry_out, carry_out_dot) = ans\n+ return core.pack((y, carry_out)), core.pack((y_dot, carry_out_dot))\nscan_p = core.Primitive(\"scan\")\n"
}
] | Python | Apache License 2.0 | google/jax | one more zip* needed on _scan_jvp
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 26.03.2019 15:32:45 | 25,200 | 7c86d60b019af8194adfb2a8f0491100f24b8223 | do some zip* on scan_reference for consistency | [
{
"change_type": "MODIFY",
"old_path": "jax/scan.py",
"new_path": "jax/scan.py",
"diff": "@@ -50,7 +50,8 @@ def scan_reference(f, init, xs):\n(y, carry) = f(x, carry)\nys.append(y)\n- return ys, carry\n+ ys = core.pack(map(np.stack, zip(*ys)))\n+ return ys, np.array(carry)\ndef demote_aval_rank(xs):\nif isinstance(xs, core.AbstractTuple):\n@@ -91,7 +92,8 @@ def update_arrays(i, aval, xs, x):\n# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\ndef scan(f, init, xs):\nconsts, avals, jaxpr = trace_scan_fun(f, init, xs)\n- return scan_p.bind(core.pack(consts), init, xs, avals=avals, jaxpr=jaxpr)\n+ ys, carry = scan_p.bind(core.pack(consts), init, xs, avals=avals, jaxpr=jaxpr)\n+ return ys, carry\ndef trace_scan_fun(f, init, xs):\nf = lu.wrap_init(f)\n@@ -127,6 +129,8 @@ def _scan_jvp(primals, tangents, avals, jaxpr):\nconsts_dot, init_dot, xs_dot = tangents\nf = partial(core.eval_jaxpr, jaxpr)\n+ # TODO: plumb symbolic zeros in and out of jvp transformation so we can test\n+ # that they're the same as the inputs and re-run if not\nconsts_dot = ad.instantiate_zeros(consts, consts_dot)\ninit_dot = ad.instantiate_zeros(init , init_dot)\nxs_dot = ad.instantiate_zeros(xs , xs_dot)\n@@ -147,8 +151,6 @@ def _scan_jvp(primals, tangents, avals, jaxpr):\nxs_dual = core.pack((xs , xs_dot))\nconsts, avals, jvp_jaxpr = trace_scan_fun(f_jvp_c, init_dual, xs_dual)\n- # TODO: plumb symbolic zeros in and out of jvp transformation so we can test\n- # that they're the same as the inputs and re-run if not\nans = scan_p.bind(core.pack(consts), init_dual, xs_dual,\navals=avals, jaxpr=jvp_jaxpr)\n(y, y_dot), (carry_out, carry_out_dot) = ans\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "-from jax.scan import scan\n+from jax.scan import scan, scan_reference\nfrom jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\n@@ -11,26 +11,23 @@ def f(x, carry):\ny = pack((carry**2, -carry))\nreturn pack((y, carry))\n-x = np.array(np.arange(4), dtype=np.float64)\n+print scan(f, 0.0, np.arange(4))\n+print scan_reference(f, 0.0, np.arange(4))\n-[0, 1, 9, 36]\n+# def cumsum(xs):\n+# def f(x, carry):\n+# carry = carry + x\n+# return pack((carry, carry))\n-ans = scan(f, 0.0, np.arange(4))\n+# ys, _ = scan(f, 0.0, xs)\n+# return ys\n-def cumsum(xs):\n- def f(x, carry):\n- carry = carry + x\n- return pack((carry, carry))\n-\n- ys, _ = scan(f, 0.0, xs)\n- return ys\n-\n-x = np.linspace(0, 3, 4)\n+# x = np.linspace(0, 3, 4)\n-print x\n-print np.cumsum(x)\n-print cumsum(x)\n+# print x\n+# print np.cumsum(x)\n+# print cumsum(x)\n-print jvp(np.cumsum, (x,), (x*0.1,))\n-print jvp(cumsum, (x,), (x*0.1,))\n+# print jvp(np.cumsum, (x,), (x*0.1,))\n+# print jvp(cumsum, (x,), (x*0.1,))\n"
}
] | Python | Apache License 2.0 | google/jax | do some zip* on scan_reference for consistency
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 26.03.2019 15:33:47 | 25,200 | 4be3e07769f74e7c719873acdc595d0093a378c2 | un-comment scan_test.py | [
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -13,21 +13,22 @@ def f(x, carry):\nprint scan(f, 0.0, np.arange(4))\nprint scan_reference(f, 0.0, np.arange(4))\n+print\n-# def cumsum(xs):\n-# def f(x, carry):\n-# carry = carry + x\n-# return pack((carry, carry))\n-\n-# ys, _ = scan(f, 0.0, xs)\n-# return ys\n+def cumsum(xs):\n+ def f(x, carry):\n+ carry = carry + x\n+ return pack((carry, carry))\n-# x = np.linspace(0, 3, 4)\n+ ys, _ = scan(f, 0.0, xs)\n+ return ys\n-# print x\n-# print np.cumsum(x)\n-# print cumsum(x)\n+x = np.linspace(0, 3, 4)\n+print np.cumsum(x)\n+print cumsum(x)\n+print\n-# print jvp(np.cumsum, (x,), (x*0.1,))\n-# print jvp(cumsum, (x,), (x*0.1,))\n+print jvp(np.cumsum, (x,), (x*0.1,))\n+print jvp(cumsum, (x,), (x*0.1,))\n+print\n"
}
] | Python | Apache License 2.0 | google/jax | un-comment scan_test.py
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 26.03.2019 15:42:52 | 25,200 | 42d8415869b4b305fe72777c186a15c737c9e871 | add harder scan test case | [
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "+from functools import partial\n+\nfrom jax.scan import scan, scan_reference\nfrom jax.core import pack\nimport jax.core as core\n@@ -7,12 +9,14 @@ from jax import jvp\n# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\ndef f(x, carry):\n- carry = carry + x\n+ carry = carry + np.sin(x)\ny = pack((carry**2, -carry))\nreturn pack((y, carry))\nprint scan(f, 0.0, np.arange(4))\n+print jvp(partial(scan, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\nprint scan_reference(f, 0.0, np.arange(4))\n+print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\nprint\ndef cumsum(xs):\n"
}
] | Python | Apache License 2.0 | google/jax | add harder scan test case
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,474 | 27.03.2019 17:16:54 | 14,400 | 39f181872d63bac13587e9fcc914780bab79fbc7 | Progress on scan partial eval | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -66,6 +66,10 @@ class JaxprTrace(Trace):\nraise TypeError(pv)\ndef process_primitive(self, primitive, tracers, params):\n+ from jax.scan import scan_p\n+ if primitive is scan_p:\n+ return scan_process_primitive(self, *tracers, **params)\n+\ntracers = map(self.instantiate_const, tracers)\navals = [t.aval for t in tracers]\nout_aval = primitive.abstract_eval(*avals, **params)\n@@ -126,6 +130,48 @@ class JaxprTrace(Trace):\nmap_primitives = set()\n+def unzip_scan_jaxpr(jaxpr, consts, init, xs, avals):\n+ f = lu.wrap_init(partial(core.eval_jaxpr, jaxpr))\n+\n+\n+ assert False\n+\n+\n+def scan_process_primitive(trace, consts, init, xs, avals, jaxpr):\n+ jaxpr1, jaxpr2, avals1, avals2, ans_pv = unzip_scan_jaxpr(\n+ jaxpr, consts, init, xs, avals)\n+ const_pv , consts_const = consts\n+ init_pv , inits_const = init\n+ xs_pv , xs_const = xs\n+\n+ ans = scan_p.bind(consts_const, inits_const, xs_const,\n+ avals=avals1, jaxpr=jaxpr1)\n+\n+ params_out = {'avals' : avals2, 'jaxpr' : jaxpr2}\n+ eqn = JaxprEqn([consts, init, xs], None, scan_p, (), False, params_out)\n+ return JaxprTracer(trace, PartialVal((ans, ans_pv)), )\n+\n+ # in_pvs, in_consts = unzip2([t.pval for t in tracers])\n+ # fun, aux = partial_eval(f, self, in_pvs)\n+ # out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params)\n+ # out_pv, jaxpr, env = aux()\n+ # const_tracers = map(self.new_instantiated_const, consts)\n+ # env_tracers = map(self.full_raise, env)\n+ # bound_subjaxpr = (jaxpr, const_tracers, env_tracers)\n+ # eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False, params)\n+ # return JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\n+\n+\n+\n+ # tracers = map(self.instantiate_const, tracers)\n+ # avals = [t.aval for t in tracers]\n+ # out_aval = primitive.abstract_eval(*avals, **params)\n+ # eqn = JaxprEqn(tracers, None, primitive, (), False, params)\n+ # return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)\n+ assert False\n+\n+\n+\ndef remove_axis_from_pv(pv):\nif pv is None:\nreturn pv\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/scan.py",
"new_path": "jax/scan.py",
"diff": "@@ -43,6 +43,9 @@ map = ju.safe_map\n# pro: feels cleaner for transposition\n# pro: accumulation without saving intermediates\n+# design indecision: store intermediates from within f vs *just inputs*\n+# design indecision: *jvp splits scan* or partial eval splits scan\n+\ndef scan_reference(f, init, xs):\ncarry = init\nys = []\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -4,7 +4,7 @@ from jax.scan import scan, scan_reference\nfrom jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\n-from jax import jvp\n+from jax import jvp, linearize\n# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n@@ -36,3 +36,5 @@ print\nprint jvp(np.cumsum, (x,), (x*0.1,))\nprint jvp(cumsum, (x,), (x*0.1,))\nprint\n+print linearize(np.cumsum, x)[1](x*0.1)\n+print linearize(cumsum, x)[1](x*0.1)\n"
}
] | Python | Apache License 2.0 | google/jax | Progress on scan partial eval
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,474 | 01.04.2019 16:03:56 | 14,400 | 1d54d37f75fe8233f09bbf5b916f151d55609a21 | Started building jaxpr-to-jaxpr "initial-style" versions of each transformation (that internally wrap the final style versions). | [
{
"change_type": "ADD",
"old_path": null,
"new_path": "jax/initial_style.py",
"diff": "+from functools import partial\n+\n+import jax.core as core\n+import jax.linear_util as lu\n+import jax.numpy as np\n+import jax.lax as lax\n+\n+from jax.util import curry, unzip2\n+from jax.lax import _abstractify\n+from jax.abstract_arrays import ShapedArray\n+from jax.interpreters import partial_eval as pe\n+from jax.interpreters import ad\n+\n+def pvals_with_zeros(zero_components, aval):\n+ if zero_components is True:\n+ return pe.PartialVal((None, ad.zero))\n+ elif zero_components is False:\n+ return pe.PartialVal((aval, core.unit))\n+ elif isinstance(zero_components, ZeroTuple):\n+ avals, consts = unzip(map, pvals_with_zeros, zero_components, aval)\n+ return pe.PartialVal((AbstractTuple(avals), core.JaxprTracerTuple(consts)))\n+\n+def transpose_jaxpr(jaxpr, avals, tangent_components):\n+ assert False\n+\n+\n+@curry\n+def jaxpr_as_fun(jaxpr, consts, *args):\n+ return core.eval_jaxpr(jaxpr, consts, (), *args)\n+\n+\n+def call_initial(f, *args):\n+ pvals = map(_abstractify, args)\n+ avals = [aval for (aval, _) in pvals]\n+ jaxpr, pval_out, consts = pe.trace_to_jaxpr(\n+ lu.wrap_init(f), pvals, instantiate=True)\n+ return call_initial_p.bind(core.pack(consts), *args, jaxpr=jaxpr)\n+\n+def _call_initial_impl(consts, *args, **kwargs):\n+ jaxpr = kwargs.pop('jaxpr')\n+ return jaxpr_as_fun(jaxpr)(consts, *args)\n+\n+def _call_initial_jvp(primals, tangents, jaxpr):\n+ avals = [aval for (aval, _) in map(_abstractify, primals)]\n+ zeros = map(ad.get_zeros, tangents)\n+ jaxpr_jvp, consts, zeros_out = ad.jvp_jaxpr(jaxpr, avals, zeros)\n+ primal_out, tangent_out = call_initial_p.bind(core.pack(consts),\n+ core.pack(primals),\n+ core.pack(tangents),\n+ jaxpr=jaxpr_jvp)\n+ tangent_out_zeros = ad.put_zeros(ad.TangentTuple, zeros_out, tangent_out)\n+ return primal_out, tangent_out_zeros\n+\n+def _call_initial_partial_eval(trace, *tracers, **kwargs):\n+ jaxpr = kwargs.pop('jaxpr')\n+ in_pvs, in_consts = unzip2([t.pval for t in tracers])\n+ first_components = map(is_const, in_pvs)\n+ avals = map(as_aval, in_pvs, in_consts)\n+ jaxpr_1, jaxpr_2, aval_out, first_components_out = partial_eval_jaxpr(\n+ jaxpr, avals, first_components)\n+ call_initial_p.bind(*in_consts, jaxpr=jaxpr_2)\n+ eqn = core.JaxprEqn(tracers, None, call_initial_p, (), False, dict(jaxpr=jaxpr_2))\n+ return pe.JaxprTracer(self, PartialVal((aval_out, out_const)), eqn)\n+\n+\n+def _call_initial_transpose():\n+ assert False\n+\n+call_initial_p = core.Primitive(\"scan\")\n+call_initial_p.def_impl(_call_initial_impl)\n+ad.primitive_jvps[call_initial_p] = _call_initial_jvp\n+pe.custom_partial_eval_rules[call_initial_p] = _call_initial_partial_eval\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -34,21 +34,23 @@ map = safe_map\ndef identity(x): return x\n-def jvp(fun, has_aux=False):\n+def jvp(fun, has_aux=False, instantiate=True):\nif not has_aux:\n- return jvpfun(jvp_subtrace(fun))\n+ return jvpfun(jvp_subtrace(fun), instantiate)\nelse:\n- fun, aux = jvp_subtrace_aux(fun)\n- return jvpfun(fun), aux\n+ fun, aux = jvp_subtrace_aux(fun, instantiate)\n+ return jvpfun(fun, instantiate), aux\n@transformation\n-def jvpfun(primals, tangents):\n+def jvpfun(instantiate, primals, tangents):\nwith new_master(JVPTrace) as master:\nout_primal, out_tangent = yield master, primals, tangents\ndel master\n+ if instantiate:\nout_tangent = instantiate_zeros(out_primal, out_tangent)\nyield (out_primal, out_tangent)\n+\n@transformation\ndef jvp_subtrace(master, primals, tangents):\ntrace = JVPTrace(master, core.cur_sublevel())\n@@ -445,6 +447,51 @@ def map_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct):\nfreevar_cts = tree_map(lambda x: x.sum(0), freevar_cts)\nreturn cts_out, freevar_cts\n+def jaxpr_as_fun(jaxpr, consts, *args):\n+ return core.eval_jaxpr(jaxpr, consts, (), *args)\n+\n+def get_zeros(tangent):\n+ if tangent is zero:\n+ return True\n+ elif isinstance(tangent, TangentTuple):\n+ return tuple(map(get_zeros, tangent))\n+ else:\n+ return False\n+\n+def put_zeros(pack, iszero, x):\n+ if iszero is True:\n+ return zero\n+ elif iszero is False:\n+ return x\n+ else:\n+ return pack(map(partial(put_zeros, pack), iszero, x))\n+\n+def strip_zeros(unit, pack, iszero, x):\n+ if iszero is True:\n+ return unit\n+ elif iszero is False:\n+ return x\n+ else:\n+ return pack(map(partial(strip_zeros, unit, pack), iszero, x))\n+\n+@transformation_with_aux\n+def f_jvp_traceable(zero_components, primals, tangents):\n+ tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n+ primal_out, tangent_out = yield primals, tangents_zeros\n+ zeros_out = get_zeros(tangent_out)\n+ tangent_out_nozero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n+ yield core.pack((primal_out, tangent_out_nozero)), zeros_out\n+\n+def jvp_jaxpr(jaxpr, avals, zeros):\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr))\n+ f_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\n+ primal_aval = core.AbstractTuple(avals)\n+ tangent_aval = strip_zeros(core.AbstractTuple(()), core.AbstractTuple, zeros, primal_aval)\n+ primal_pvals = pe.PartialVal((primal_aval , core.unit))\n+ tangent_pvals = pe.PartialVal((tangent_aval, core.unit))\n+ jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n+ f_jvp, (primal_pvals, tangent_pvals), instantiate=True)\n+ return jaxpr_out, consts_out, out_zeros()\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\nprimitive_transposes[pe.compiled_call_p] = partial(call_transpose, pe.compiled_call_p)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -66,15 +66,16 @@ class JaxprTrace(Trace):\nraise TypeError(pv)\ndef process_primitive(self, primitive, tracers, params):\n- from jax.scan import scan_p\n- if primitive is scan_p:\n- return scan_process_primitive(self, *tracers, **params)\n-\n+ if primitive in custom_partial_eval_rules:\n+ partial_eval = custom_partial_eval_rules[primitive]\n+ return partial_eval(self, *tracers, **params)\n+ else:\ntracers = map(self.instantiate_const, tracers)\navals = [t.aval for t in tracers]\nout_aval = primitive.abstract_eval(*avals, **params)\n+ partial_val = PartialVal((out_aval, unit))\neqn = JaxprEqn(tracers, None, primitive, (), False, params)\n- return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)\n+ return JaxprTracer(self, partial_val, eqn)\ndef pack(self, tracers):\neqn = JaxprEqn(tracers, None, core.pack_p, (), False, {})\n@@ -211,7 +212,7 @@ def add_axis_to_aval(size, aval):\ndef partial_eval(f, trace, pvs):\n- f = trace_to_subjaxpr(f, trace.master)\n+ f = trace_to_subjaxpr(f, trace.master, False)\nreturn partial_eval_wrapper(f, tuple(pvs))\n@@ -384,8 +385,9 @@ def trace_unwrapped_to_jaxpr(fun, pvals, **kwargs):\ndef trace_to_jaxpr(fun, pvals, **kwargs):\n\"\"\"Traces a function, given abstract inputs, to a jaxpr.\"\"\"\n+ instantiate = kwargs.pop('instantiate', False)\nwith new_master(JaxprTrace) as master:\n- fun = trace_to_subjaxpr(fun, master)\n+ fun = trace_to_subjaxpr(fun, master, instantiate)\njaxpr, (out_pval, consts, env) = fun.call_wrapped(pvals, **kwargs)\nassert not env\ndel master\n@@ -393,12 +395,16 @@ def trace_to_jaxpr(fun, pvals, **kwargs):\nreturn jaxpr, out_pval, consts\n@transformation\n-def trace_to_subjaxpr(master, pvals):\n+def trace_to_subjaxpr(master, instantiate, pvals):\nassert all([isinstance(pv, PartialVal) for pv in pvals]), pvals\ntrace = JaxprTrace(master, core.cur_sublevel())\nin_tracers = map(trace.new_arg, pvals)\nout_tracer = yield in_tracers\nout_tracer = trace.full_raise(out_tracer)\n+\n+ if instantiate:\n+ out_tracer = trace.instantiate_const(out_tracer)\n+\njaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracer)\nout_pval = out_tracer.pval\ndel trace, in_tracers, out_tracer\n@@ -513,7 +519,7 @@ def eval_jaxpr_raw(jaxpr, consts, freevar_vals, *args):\ndef compiled_call_impl(fun, *args, **kwargs):\nwith new_master(JaxprTrace, True) as master:\npvals = map(abstractify, args)\n- jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master).call_wrapped(pvals)\n+ jaxpr, (pval, consts, env) = trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)\njaxpr_ans = eval_jaxpr_raw(jaxpr, consts, env, *args)\nans = merge_pvals(jaxpr_ans, pval)\ndel master, pvals, pval, consts, env, jaxpr_ans, jaxpr\n@@ -523,3 +529,19 @@ compiled_call_p = Primitive('compiled_call')\ncompiled_call = partial(core.call_bind, compiled_call_p)\ncompiled_call_p.def_custom_bind(compiled_call)\ncompiled_call_p.def_impl(compiled_call_impl)\n+\n+\n+\n+def partial_eval_traceable():\n+ assert False\n+\n+def partial_eval_jaxpr(jaxpr, avals, first_components):\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr))\n+ pvals = map(as_pval, avals, first_components)\n+ f_pe = partial_eval_traceable(f)\n+ jaxpr_1, out_pval, consts = trace_to_jaxpr(f_pe, pvals)\n+ return jaxpr_1, jaxpr_2, avals_out, first_components_out\n+\n+\n+\n+custom_partial_eval_rules = {}\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "tests/call_initial_test.py",
"diff": "+from functools import partial\n+\n+from jax.core import pack\n+import jax.core as core\n+import jax.numpy as np\n+from jax import jvp, linearize\n+from jax.initial_style import call_initial\n+\n+\n+def f1(x, y, z):\n+ return core.pack((np.sin(x * y), y, 1.0))\n+\n+# def f1(x, y, z):\n+# return core.pack((np.sin(x * y), y))\n+\n+f2 = partial(call_initial, f1)\n+\n+xs = (1., 2., 3.)\n+xst = (4., 5., 6.)\n+\n+print \"\\neval\"\n+print f1(*xs)\n+print f2(*xs)\n+\n+\n+print \"\\njvp\"\n+print jvp(f1, xs, xst)\n+print jvp(f2, xs, xst)\n+\n+print \"\\nlinearize\"\n+print linearize(f1, *xs)[1](*xst)\n+print linearize(f2, *xs)[1](*xst)\n+\n+\n"
}
] | Python | Apache License 2.0 | google/jax | Started building jaxpr-to-jaxpr "initial-style" versions of each transformation (that internally wrap the final style versions).
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 05.04.2019 12:02:24 | 25,200 | 2be64eb563c7bfe41345f02436741a1895e12d96 | scan pairing w/ dougalm | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -217,7 +217,7 @@ def partial_eval(f, trace, pvs):\n@transformation_with_aux\n-def partial_eval_wrapper(avals, *consts, **kwargs):\n+def partial_eval_wrapper(avals, *consts):\njaxpr, (out_pval, consts, env) = yield (map(PartialVal, zip(avals, consts)),)\nout_pv, out_const = out_pval\nout = pack((out_const, pack(consts)))\n@@ -532,15 +532,67 @@ compiled_call_p.def_impl(compiled_call_impl)\n-def partial_eval_traceable():\n- assert False\n+# @transformation_with_aux\n+# def partial_eval_traceable(first_components, pvals):\n+\n+\n+\n+def unzip_tracer_tuple(pvals):\n+ pvs, consts = unzip2(pvals)\n+ return PartialVal((JaxprTracerTuple(pvs), pack(consts)))\n+\n+def as_pval(aval, is_known, val):\n+ t = type(is_known)\n+ if t is tuple:\n+ return unzip_tracer_tuple(map(as_pval, aval, is_known, val))\n+ elif t is bool:\n+ if is_known:\n+ return PartialVal((None, val))\n+ else:\n+ return PartialVal((aval, core.unit))\n+ else:\n+ raise TypeError(t)\n+\n+def as_pval2(aval, is_known):\n+ t = type(is_known)\n+ if t is tuple:\n+ return unzip_tracer_tuple(map(as_pval2, aval, is_known))\n+ elif t is bool:\n+ if is_known:\n+ return PartialVal((aval, core.unit))\n+ else:\n+ return PartialVal((core.AbstractTuple(()), core.unit))\n+ else:\n+ raise TypeError(t)\n+\n+def jaxpr_as_fun(jaxpr, consts, *args):\n+ consts = core.full_lower(consts)\n+ args = map(core.full_lower, args)\n+ return core.eval_jaxpr(jaxpr, consts, (), *args)\ndef partial_eval_jaxpr(jaxpr, avals, first_components):\n- f = wrap_init(partial(jaxpr_as_fun, jaxpr))\n- pvals = map(as_pval, avals, first_components)\n- f_pe = partial_eval_traceable(f)\n- jaxpr_1, out_pval, consts = trace_to_jaxpr(f_pe, pvals)\n- return jaxpr_1, jaxpr_2, avals_out, first_components_out\n+ f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr))\n+ cell = []\n+\n+ def fun(*vals):\n+ pvals = map(as_pval, avals, first_components, vals)\n+ jaxpr, out_pval, consts = trace_to_jaxpr(f, pvals)\n+ out_pv, out_const = out_pval\n+ out = pack((out_const, pack(consts)))\n+ cell.append((out_pv, jaxpr))\n+ return out\n+\n+ pvals = map(as_pval2, avals, first_components)\n+ jaxpr, out_pval, consts = trace_to_jaxpr(lu.wrap_init(fun), pvals)\n+ assert False # turn these guys into trues and falses\n+\n+ jaxpr_2, out_pval, consts = trace_to_jaxpr(f, pvals)\n+\n+ f_pe, jaxpr_1 = partial_eval_traceable(f)\n+ jaxpr_2, out_pval, consts = trace_to_jaxpr(f_pe, pvals)\n+ first_components_out = undefined\n+ avals_out = undefined\n+ return jaxpr_1(), jaxpr_2, avals_out, first_components_out\n"
},
{
"change_type": "ADD",
"old_path": null,
"new_path": "temp.py",
"diff": "+from jax import make_jaxpr\n+import jax.numpy as np\n+\n+def f(x, y):\n+ return (np.sin(x), np.cos(x) * y, np.tanh(y))\n+ # return (2. * np.sin(x), 3. * np.cos(x) * y, np.tanh(y), 4.)\n+\n+jaxpr = make_jaxpr(f)(2., 3.)\n+\n+\n+import jax.interpreters.partial_eval as pe\n+from jax.abstract_arrays import ShapedArray\n+from jax.core import AbstractTuple\n+\n+avals = (AbstractTuple(()),\n+ ShapedArray((), np.float32),\n+ ShapedArray((), np.float32))\n+pe.partial_eval_jaxpr(jaxpr, avals, ((), True, False))\n"
}
] | Python | Apache License 2.0 | google/jax | scan pairing w/ dougalm
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 05.04.2019 19:44:38 | 25,200 | b60eb29b319d10c17f77ff4de58c61c58c499b07 | call_initial_test passes! | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -18,7 +18,8 @@ def pvals_with_zeros(zero_components, aval):\nreturn pe.PartialVal((aval, core.unit))\nelif isinstance(zero_components, ZeroTuple):\navals, consts = unzip(map, pvals_with_zeros, zero_components, aval)\n- return pe.PartialVal((AbstractTuple(avals), core.JaxprTracerTuple(consts)))\n+ return pe.PartialVal((core.AbstractTuple(avals),\n+ core.JaxprTracerTuple(consts)))\ndef transpose_jaxpr(jaxpr, avals, tangent_components):\nassert False\n@@ -42,25 +43,50 @@ def _call_initial_impl(consts, *args, **kwargs):\ndef _call_initial_jvp(primals, tangents, jaxpr):\navals = [aval for (aval, _) in map(_abstractify, primals)]\n- zeros = map(ad.get_zeros, tangents)\n- jaxpr_jvp, consts, zeros_out = ad.jvp_jaxpr(jaxpr, avals, zeros)\n- primal_out, tangent_out = call_initial_p.bind(core.pack(consts),\n- core.pack(primals),\n- core.pack(tangents),\n- jaxpr=jaxpr_jvp)\n- tangent_out_zeros = ad.put_zeros(ad.TangentTuple, zeros_out, tangent_out)\n+ where_zeros = map(ad.get_zeros, tangents)\n+ nonzero_tangents = ad.strip_zeros(core.unit, core.pack, where_zeros,\n+ tangents)\n+ jaxpr_jvp, consts, where_zeros_out = ad.jvp_jaxpr(jaxpr, avals, where_zeros)\n+ primal_out, tangent_out = call_initial_p.bind(\n+ core.pack(consts), core.pack(primals),\n+ core.pack(nonzero_tangents), jaxpr=jaxpr_jvp)\n+ tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out,\n+ tangent_out)\nreturn primal_out, tangent_out_zeros\n+def is_const(x):\n+ if x is None:\n+ return True\n+ elif type(x) is pe.JaxprTracerTuple:\n+ return tuple(map(is_const, x))\n+ elif isinstance(x, core.AbstractValue):\n+ return False\n+ else:\n+ raise TypeError(type(x))\n+\n+def as_aval(pv, const):\n+ if pv is None:\n+ pv, _ = _abstractify(const)\n+ return pv\n+ elif type(pv) is pe.JaxprTracerTuple:\n+ return map(as_aval, pv, const)\n+ elif isinstance(pv, core.AbstractValue):\n+ return pv\n+ else:\n+ raise TypeError((pv, const))\n+\ndef _call_initial_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfirst_components = map(is_const, in_pvs)\navals = map(as_aval, in_pvs, in_consts)\n- jaxpr_1, jaxpr_2, aval_out, first_components_out = partial_eval_jaxpr(\n+ jaxpr_1, jaxpr_2, out_pv, first_components_out = pe.partial_eval_jaxpr(\njaxpr, avals, first_components)\n- call_initial_p.bind(*in_consts, jaxpr=jaxpr_2)\n- eqn = core.JaxprEqn(tracers, None, call_initial_p, (), False, dict(jaxpr=jaxpr_2))\n- return pe.JaxprTracer(self, PartialVal((aval_out, out_const)), eqn)\n+ out_pv_const, consts = call_initial_p.bind(core.unit, *in_consts, jaxpr=jaxpr_1)\n+ const_tracers = core.pack(map(trace.new_instantiated_const, consts))\n+ eqn = core.JaxprEqn((const_tracers,) + tracers, None, call_initial_p, (), False,\n+ dict(jaxpr=jaxpr_2))\n+ return pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_pv_const)), eqn)\ndef _call_initial_transpose():\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -565,6 +565,16 @@ def as_pval2(aval, is_known):\nelse:\nraise TypeError(t)\n+def isnone(x):\n+ if x is None:\n+ return True\n+ elif type(x) is JaxprTracerTuple:\n+ return tuple(map(isnone, x))\n+ elif isinstance(x, AbstractValue):\n+ return False\n+ else:\n+ raise TypeError(type(x))\n+\ndef jaxpr_as_fun(jaxpr, consts, *args):\nconsts = core.full_lower(consts)\nargs = map(core.full_lower, args)\n@@ -572,8 +582,8 @@ def jaxpr_as_fun(jaxpr, consts, *args):\ndef partial_eval_jaxpr(jaxpr, avals, first_components):\nf = lu.wrap_init(partial(jaxpr_as_fun, jaxpr))\n- cell = []\n+ cell = []\ndef fun(*vals):\npvals = map(as_pval, avals, first_components, vals)\njaxpr, out_pval, consts = trace_to_jaxpr(f, pvals)\n@@ -583,17 +593,11 @@ def partial_eval_jaxpr(jaxpr, avals, first_components):\nreturn out\npvals = map(as_pval2, avals, first_components)\n- jaxpr, out_pval, consts = trace_to_jaxpr(lu.wrap_init(fun), pvals)\n- assert False # turn these guys into trues and falses\n-\n- jaxpr_2, out_pval, consts = trace_to_jaxpr(f, pvals)\n-\n- f_pe, jaxpr_1 = partial_eval_traceable(f)\n- jaxpr_2, out_pval, consts = trace_to_jaxpr(f_pe, pvals)\n- first_components_out = undefined\n- avals_out = undefined\n- return jaxpr_1(), jaxpr_2, avals_out, first_components_out\n-\n+ jaxpr_1, out_pval, consts = trace_to_jaxpr(lu.wrap_init(fun), pvals)\n+ assert not consts\n+ out_pv_2, jaxpr_2 = cell[0]\n+ first_component_out = isnone(out_pv_2)\n+ return jaxpr_1, jaxpr_2, out_pv_2, first_component_out\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "DELETE",
"old_path": "temp.py",
"new_path": null,
"diff": "-from jax import make_jaxpr\n-import jax.numpy as np\n-\n-def f(x, y):\n- return (np.sin(x), np.cos(x) * y, np.tanh(y))\n- # return (2. * np.sin(x), 3. * np.cos(x) * y, np.tanh(y), 4.)\n-\n-jaxpr = make_jaxpr(f)(2., 3.)\n-\n-\n-import jax.interpreters.partial_eval as pe\n-from jax.abstract_arrays import ShapedArray\n-from jax.core import AbstractTuple\n-\n-avals = (AbstractTuple(()),\n- ShapedArray((), np.float32),\n- ShapedArray((), np.float32))\n-pe.partial_eval_jaxpr(jaxpr, avals, ((), True, False))\n"
}
] | Python | Apache License 2.0 | google/jax | call_initial_test passes!
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 08.04.2019 07:42:54 | 25,200 | 97628d144f6d2bfd1a5907c20669e822992f9383 | scan impl works, about to change jvp_jaxpr | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -24,6 +24,8 @@ def pvals_with_zeros(zero_components, aval):\ndef transpose_jaxpr(jaxpr, avals, tangent_components):\nassert False\n+strip_zeros = partial(ad.strip_zeros, core.unit, core.pack)\n+\n@curry\ndef jaxpr_as_fun(jaxpr, consts, *args):\n@@ -33,7 +35,7 @@ def jaxpr_as_fun(jaxpr, consts, *args):\ndef call_initial(f, *args):\npvals = map(_abstractify, args)\navals = [aval for (aval, _) in pvals]\n- jaxpr, pval_out, consts = pe.trace_to_jaxpr(\n+ jaxpr, _, consts = pe.trace_to_jaxpr(\nlu.wrap_init(f), pvals, instantiate=True)\nreturn call_initial_p.bind(core.pack(consts), *args, jaxpr=jaxpr)\n@@ -44,8 +46,7 @@ def _call_initial_impl(consts, *args, **kwargs):\ndef _call_initial_jvp(primals, tangents, jaxpr):\navals = [aval for (aval, _) in map(_abstractify, primals)]\nwhere_zeros = map(ad.get_zeros, tangents)\n- nonzero_tangents = ad.strip_zeros(core.unit, core.pack, where_zeros,\n- tangents)\n+ nonzero_tangents = strip_zeros(where_zeros, tangents)\njaxpr_jvp, consts, where_zeros_out = ad.jvp_jaxpr(jaxpr, avals, where_zeros)\nprimal_out, tangent_out = call_initial_p.bind(\ncore.pack(consts), core.pack(primals),\n@@ -92,7 +93,123 @@ def _call_initial_partial_eval(trace, *tracers, **kwargs):\ndef _call_initial_transpose():\nassert False\n-call_initial_p = core.Primitive(\"scan\")\n+call_initial_p = core.Primitive(\"call_initial\")\ncall_initial_p.def_impl(_call_initial_impl)\nad.primitive_jvps[call_initial_p] = _call_initial_jvp\npe.custom_partial_eval_rules[call_initial_p] = _call_initial_partial_eval\n+\n+\n+###\n+\n+\n+def demote_aval_rank(xs):\n+ if isinstance(xs, core.AbstractTuple):\n+ return core.AbstractTuple(map(demote_aval_rank, xs))\n+ else:\n+ return ShapedArray(xs.shape[1:], xs.dtype)\n+\n+def promote_aval_rank(n, xs):\n+ if isinstance(xs, core.AbstractTuple):\n+ return core.AbstractTuple(map(partial(promote_aval_rank, n), xs))\n+ else:\n+ return ShapedArray((n,) + xs.shape, xs.dtype)\n+\n+def leading_dim_size(xs):\n+ if isinstance(xs, core.JaxTuple):\n+ return leading_dim_size(xs[0])\n+ else:\n+ return xs.shape[0]\n+\n+def empty_arrays(aval):\n+ if isinstance(aval, core.AbstractTuple):\n+ return core.pack(map(empty_arrays, aval))\n+ else:\n+ return lax.full(aval.shape, 0, aval.dtype)\n+\n+def index_arrays(i, aval, xs):\n+ if isinstance(aval, core.AbstractTuple):\n+ return core.pack(map(partial(index_arrays, i), aval, xs))\n+ else:\n+ return lax.dynamic_index_in_dim(xs, i, keepdims=False)\n+\n+def update_arrays(i, aval, xs, x):\n+ if isinstance(aval, core.AbstractTuple):\n+ return core.pack(map(partial(update_arrays, i), aval, xs, x))\n+ else:\n+ return lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n+\n+\n+# scan :: (a -> c -> (b, c)) -> c -> [a] -> ([b], c)\n+def scan_initial(f, init, xs):\n+ carry_pval = carry_aval, _ = _abstractify(init)\n+ xs_aval, _ = _abstractify(xs)\n+ x_aval = demote_aval_rank(xs_aval)\n+ x_pval = pe.PartialVal((x_aval, core.unit))\n+ jaxpr, pval_out, consts = pe.trace_to_jaxpr(\n+ lu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n+ (y_aval, carry_aval_out), _ = pval_out\n+ assert carry_aval == carry_aval_out\n+ consts_aval, _ = unzip2(map(_abstractify, consts))\n+ avals = (consts_aval, x_aval, y_aval, carry_aval)\n+ return scan_initial_p.bind(core.pack(consts), init, xs,\n+ avals=avals, jaxpr=jaxpr)\n+\n+\n+# scan_p :: (d -> a -> c -> (b, c)) -> d -> c -> [a] -> ([b], c)\n+def _scan_initial_impl(consts, init, xs, avals, jaxpr):\n+ # TODO maybe can do this work in the traceable, not every impl call\n+ length = leading_dim_size(xs)\n+ (_, x_aval, y_aval, _) = avals\n+ ys_aval = promote_aval_rank(length, y_aval)\n+\n+ def body_fun(i, vals):\n+ carry, ys = vals\n+ x = index_arrays(i, x_aval, xs)\n+ y, carry_out = jaxpr_as_fun(jaxpr)(consts, x, carry)\n+ ys_out = update_arrays(i, y_aval, ys, y)\n+ return (carry_out, ys_out)\n+\n+ ys_init = empty_arrays(ys_aval)\n+ carry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\n+ return core.pack((ys, carry))\n+\n+\n+def _scan_initial_jvp(primals, tangents, avals, jaxpr):\n+ consts, init, xs = primals\n+ consts_dot, init_dot, xs_dot = tangents\n+ consts_aval, x_aval, y_aval, carry_aval = avals\n+\n+ consts_where_zeros = ad.get_zeros(consts_dot)\n+ nonzero_consts_dot = strip_zeros(consts_where_zeros, consts_dot)\n+\n+ where_init_zeros = ad.get_zeros(init_dot)\n+ nonzero_init_dot = strip_zeros(where_init_zeros, init_dot)\n+\n+ where_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\n+ nonzero_xs_dot = strip_zeros(where_xs_zeros, xs_dot)\n+\n+\n+ jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr(\n+ jaxpr, (consts_aval, carry_aval, x_aval),\n+ (where_consts_zeros, where_init_zeros, where_xs_zeros))\n+ _, where_carry_zeros, _ = where_zeros_out\n+ assert where_carry_zeros == where_init_zeros # TODO while\n+\n+ # TODO we realized consts are tricky... can't just add a new arg every time we\n+ # jvp like in n-ary call\n+\n+ # out = scan_initial_p.bind(\n+ # (ys, ys_dot), (carry, carry_dot) =\n+\n+ # primal_out, tangent_out = call_initial_p.bind(\n+ # core.pack(consts), core.pack(primals),\n+ # core.pack(nonzero_tangents), jaxpr=jaxpr_jvp)\n+ # tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out,\n+ # tangent_out)\n+ # return primal_out, tangent_out_zeros\n+\n+\n+scan_initial_p = core.Primitive(\"scan_initial\")\n+scan_initial_p.def_impl(_scan_initial_impl)\n+ad.primitive_jvps[scan_initial_p] = _scan_initial_jvp\n+# pe.custom_partial_eval_rules[scan_initial_p] = _scan_initial_partial_eval\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/call_initial_test.py",
"new_path": "tests/call_initial_test.py",
"diff": "@@ -30,5 +30,3 @@ print jvp(f2, xs, xst)\nprint \"\\nlinearize\"\nprint linearize(f1, *xs)[1](*xst)\nprint linearize(f2, *xs)[1](*xst)\n-\n-\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "from functools import partial\n-from jax.scan import scan, scan_reference\n+import numpy as onp\n+\n+from jax.scan import scan_reference\n+from jax.initial_style import scan_initial\nfrom jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\n@@ -8,23 +11,14 @@ from jax import jvp, linearize\n# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n-def f(x, carry):\n- carry = carry + np.sin(x)\n- y = pack((carry**2, -carry))\n- return pack((y, carry))\n-\n-print scan(f, 0.0, np.arange(4))\n-print jvp(partial(scan, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-print scan_reference(f, 0.0, np.arange(4))\n-print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-print\n+###\ndef cumsum(xs):\ndef f(x, carry):\ncarry = carry + x\nreturn pack((carry, carry))\n- ys, _ = scan(f, 0.0, xs)\n+ ys, _ = scan_initial(f, 0.0, xs)\nreturn ys\nx = np.linspace(0, 3, 4)\n@@ -33,8 +27,29 @@ print np.cumsum(x)\nprint cumsum(x)\nprint\n-print jvp(np.cumsum, (x,), (x*0.1,))\n-print jvp(cumsum, (x,), (x*0.1,))\n+# print jvp(np.cumsum, (x,), (x*0.1,))\n+# print jvp(cumsum, (x,), (x*0.1,))\n+# print\n+# print linearize(np.cumsum, x)[1](x*0.1)\n+# print linearize(cumsum, x)[1](x*0.1)\n+\n+\n+###\n+\n+\n+def f(x, carry):\n+ carry = carry + np.sin(x)\n+ y = pack((carry**2, -carry))\n+ return pack((y, carry))\n+\n+ys, z = scan_initial(f, 0.0, np.arange(4.))\n+ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n+print onp.allclose(z, z_ref)\n+\n+print ys\n+print ys_ref\n+print z\n+print z_ref\n+# print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+# print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\nprint\n-print linearize(np.cumsum, x)[1](x*0.1)\n-print linearize(cumsum, x)[1](x*0.1)\n"
}
] | Python | Apache License 2.0 | google/jax | scan impl works, about to change jvp_jaxpr
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 08.04.2019 08:04:06 | 25,200 | b208c49cd7b46e3fe43104a3e5a7eab192638b13 | minor fixes, will revise jvp_jaxpr | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -149,7 +149,7 @@ def scan_initial(f, init, xs):\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n(y_aval, carry_aval_out), _ = pval_out\nassert carry_aval == carry_aval_out\n- consts_aval, _ = unzip2(map(_abstractify, consts))\n+ consts_aval, _ = _abstractify(core.pack(consts))\navals = (consts_aval, x_aval, y_aval, carry_aval)\nreturn scan_initial_p.bind(core.pack(consts), init, xs,\navals=avals, jaxpr=jaxpr)\n@@ -179,8 +179,8 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr):\nconsts_dot, init_dot, xs_dot = tangents\nconsts_aval, x_aval, y_aval, carry_aval = avals\n- consts_where_zeros = ad.get_zeros(consts_dot)\n- nonzero_consts_dot = strip_zeros(consts_where_zeros, consts_dot)\n+ where_consts_zeros = ad.get_zeros(consts_dot)\n+ nonzero_consts_dot = strip_zeros(where_consts_zeros, consts_dot)\nwhere_init_zeros = ad.get_zeros(init_dot)\nnonzero_init_dot = strip_zeros(where_init_zeros, init_dot)\n@@ -188,11 +188,13 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr):\nwhere_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\nnonzero_xs_dot = strip_zeros(where_xs_zeros, xs_dot)\n-\njaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr(\njaxpr, (consts_aval, carry_aval, x_aval),\n(where_consts_zeros, where_init_zeros, where_xs_zeros))\n- _, where_carry_zeros, _ = where_zeros_out\n+ _, where_carry_zeros = where_zeros_out\n+ assert not new_consts # TODO\n+\n+ import ipdb; ipdb.set_trace()\nassert where_carry_zeros == where_init_zeros # TODO while\n# TODO we realized consts are tricky... can't just add a new arg every time we\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -8,6 +8,7 @@ from jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\nfrom jax import jvp, linearize\n+from jax import lax\n# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n@@ -15,7 +16,8 @@ from jax import jvp, linearize\ndef cumsum(xs):\ndef f(x, carry):\n- carry = carry + x\n+ # carry = carry + x # TODO\n+ carry = carry + x + 3.14\nreturn pack((carry, carry))\nys, _ = scan_initial(f, 0.0, xs)\n@@ -23,12 +25,12 @@ def cumsum(xs):\nx = np.linspace(0, 3, 4)\n-print np.cumsum(x)\n+# print np.cumsum(x)\nprint cumsum(x)\nprint\n# print jvp(np.cumsum, (x,), (x*0.1,))\n-# print jvp(cumsum, (x,), (x*0.1,))\n+print jvp(cumsum, (x,), (x*0.1,))\n# print\n# print linearize(np.cumsum, x)[1](x*0.1)\n# print linearize(cumsum, x)[1](x*0.1)\n"
}
] | Python | Apache License 2.0 | google/jax | minor fixes, will revise jvp_jaxpr
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 08.04.2019 09:17:45 | 25,200 | 27dede72598eb7ba2badbc952ecd92d14153cd37 | fixed point!!!! | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -188,14 +188,26 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr):\nwhere_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\nnonzero_xs_dot = strip_zeros(where_xs_zeros, xs_dot)\n- jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr(\n+ where_carry_zeros = where_init_zeros\n+ while True:\n+ jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr2(\njaxpr, (consts_aval, carry_aval, x_aval),\n- (where_consts_zeros, where_init_zeros, where_xs_zeros))\n- _, where_carry_zeros = where_zeros_out\n+ (where_consts_zeros, where_carry_zeros, where_xs_zeros))\nassert not new_consts # TODO\n+ _, where_carry_zeros_out = where_zeros_out\n+ if where_carry_zeros_out == where_carry_zeros:\n+ break\n+ else:\n+ where_carry_zeros = zeros_join(where_carry_zeros_out, where_carry_zeros)\nimport ipdb; ipdb.set_trace()\n- assert where_carry_zeros == where_init_zeros # TODO while\n+\n+def zeros_join(a, b):\n+ if type(a) is tuple:\n+ return tuple(map(zeros_join, a, b))\n+ else:\n+ return a and b\n+\n# TODO we realized consts are tricky... can't just add a new arg every time we\n# jvp like in n-ary call\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -483,6 +483,7 @@ def f_jvp_traceable(zero_components, primals, tangents):\nyield core.pack((primal_out, tangent_out_nozero)), zeros_out\ndef jvp_jaxpr(jaxpr, avals, zeros):\n+ # jaxpr :: d | a -> b -> c\nf = wrap_init(partial(jaxpr_as_fun, jaxpr))\nf_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\nprimal_aval = core.AbstractTuple(avals)\n@@ -491,8 +492,41 @@ def jvp_jaxpr(jaxpr, avals, zeros):\ntangent_pvals = pe.PartialVal((tangent_aval, core.unit))\njaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\nf_jvp, (primal_pvals, tangent_pvals), instantiate=True)\n+ # jaxpr_out :: d2 | (d, a, b) -> (d', a', b') -> (c, c')\n+ # consts_out :: d2\n+ # out_zeros :: zeros(c)\nreturn jaxpr_out, consts_out, out_zeros()\n+\n+@transformation_with_aux\n+def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\n+ primals, tangents = unzip2(primal_tangent_pairs)\n+ tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n+ primal_out, tangent_out = yield primals, tangents_zeros\n+ # TODO check output is tuple\n+ zeros_out = get_zeros(tangent_out)\n+ tangent_out_nonzero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n+ primal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\n+ yield pack(primal_tangent_pairs_out), zeros_out\n+\n+def jvp_jaxpr2(jaxpr, avals, zeros):\n+ # jaxpr :: d | a -> b -> (c1, c2)\n+ # avals = (d, a, b)\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr)) # f :: d -> a -> b -> (c1, c2)\n+ f_jvp, out_zeros = f_jvp_traceable2(jvp(f, instantiate=False), zeros)\n+ # f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n+ tangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\n+ zeros, avals)\n+ pt_pvals = [pe.PartialVal((core.AbstractTuple((p_aval, t_aval)), core.unit))\n+ for p_aval, t_aval in zip(avals, tangent_avals)]\n+ jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n+ f_jvp, pt_pvals, instantiate=True)\n+ # jaxpr_out :: d2 | (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n+ # consts_out :: d2\n+ # out_zeros :: (zeros(c1), zeros(c2))\n+ return jaxpr_out, consts_out, out_zeros()\n+\n+\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\nprimitive_transposes[pe.compiled_call_p] = partial(call_transpose, pe.compiled_call_p)\n"
}
] | Python | Apache License 2.0 | google/jax | fixed point!!!!
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 09.04.2019 08:45:34 | 25,200 | 836a71ef7342e7117b1f3023c3405667eacdc9b8 | improved initial transforms, call! | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -6,7 +6,7 @@ import jax.numpy as np\nimport jax.lax as lax\nfrom jax.util import curry, unzip2\n-from jax.lax import _abstractify\n+from jax.lax import _abstractify, _unpack_eqn\nfrom jax.abstract_arrays import ShapedArray\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import ad\n@@ -32,27 +32,31 @@ def jaxpr_as_fun(jaxpr, consts, *args):\nreturn core.eval_jaxpr(jaxpr, consts, (), *args)\n+_call_const = pe.gensym('_consts')\n+\ndef call_initial(f, *args):\npvals = map(_abstractify, args)\navals = [aval for (aval, _) in pvals]\njaxpr, _, consts = pe.trace_to_jaxpr(\nlu.wrap_init(f), pvals, instantiate=True)\n- return call_initial_p.bind(core.pack(consts), *args, jaxpr=jaxpr)\n+ lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _call_const)\n+ lifted_args = (core.pack(consts),) + args\n+ return call_initial_p.bind(*lifted_args, jaxpr=lifted_jaxpr, consts=())\n-def _call_initial_impl(consts, *args, **kwargs):\n+def _call_initial_impl(*args, **kwargs):\njaxpr = kwargs.pop('jaxpr')\n- return jaxpr_as_fun(jaxpr)(consts, *args)\n+ consts = kwargs.pop('consts')\n+ return jaxpr_as_fun(jaxpr, consts)(*args)\n-def _call_initial_jvp(primals, tangents, jaxpr):\n+def _call_initial_jvp(primals, tangents, jaxpr, consts):\navals = [aval for (aval, _) in map(_abstractify, primals)]\nwhere_zeros = map(ad.get_zeros, tangents)\nnonzero_tangents = strip_zeros(where_zeros, tangents)\n- jaxpr_jvp, consts, where_zeros_out = ad.jvp_jaxpr(jaxpr, avals, where_zeros)\n+ jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr(jaxpr, consts, avals, where_zeros)\nprimal_out, tangent_out = call_initial_p.bind(\n- core.pack(consts), core.pack(primals),\n- core.pack(nonzero_tangents), jaxpr=jaxpr_jvp)\n- tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out,\n- tangent_out)\n+ core.pack(primals), core.pack(nonzero_tangents), jaxpr=jaxpr_jvp,\n+ consts=new_consts)\n+ tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out, tangent_out)\nreturn primal_out, tangent_out_zeros\ndef is_const(x):\n@@ -78,15 +82,17 @@ def as_aval(pv, const):\ndef _call_initial_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\n+ consts = kwargs.pop('consts')\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfirst_components = map(is_const, in_pvs)\navals = map(as_aval, in_pvs, in_consts)\n- jaxpr_1, jaxpr_2, out_pv, first_components_out = pe.partial_eval_jaxpr(\n- jaxpr, avals, first_components)\n- out_pv_const, consts = call_initial_p.bind(core.unit, *in_consts, jaxpr=jaxpr_1)\n- const_tracers = core.pack(map(trace.new_instantiated_const, consts))\n- eqn = core.JaxprEqn((const_tracers,) + tracers, None, call_initial_p, (), False,\n- dict(jaxpr=jaxpr_2))\n+ (jaxpr_1, consts_1), (jaxpr_2, consts_2), out_pv, first_components_out = \\\n+ pe.partial_eval_jaxpr(jaxpr, consts, avals, first_components)\n+ out_pv_const, residuals = call_initial_p.bind(\n+ *in_consts, jaxpr=jaxpr_1, consts=consts_1)\n+ residual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\n+ eqn = core.JaxprEqn((residual_tracers,) + tracers, None, call_initial_p, (),\n+ False, dict(jaxpr=jaxpr_2, consts=consts_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_pv_const)), eqn)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -482,9 +482,9 @@ def f_jvp_traceable(zero_components, primals, tangents):\ntangent_out_nozero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\nyield core.pack((primal_out, tangent_out_nozero)), zeros_out\n-def jvp_jaxpr(jaxpr, avals, zeros):\n- # jaxpr :: d | a -> b -> c\n- f = wrap_init(partial(jaxpr_as_fun, jaxpr))\n+def jvp_jaxpr(jaxpr, consts, avals, zeros):\n+ # jaxpr :: a -> b -> c [with consts]\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\nf_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\nprimal_aval = core.AbstractTuple(avals)\ntangent_aval = strip_zeros(core.AbstractTuple(()), core.AbstractTuple, zeros, primal_aval)\n@@ -492,8 +492,7 @@ def jvp_jaxpr(jaxpr, avals, zeros):\ntangent_pvals = pe.PartialVal((tangent_aval, core.unit))\njaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\nf_jvp, (primal_pvals, tangent_pvals), instantiate=True)\n- # jaxpr_out :: d2 | (d, a, b) -> (d', a', b') -> (c, c')\n- # consts_out :: d2\n+ # jaxpr_out :: (a, b) -> (a', b') -> (c, c') [with consts]\n# out_zeros :: zeros(c)\nreturn jaxpr_out, consts_out, out_zeros()\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -580,24 +580,42 @@ def jaxpr_as_fun(jaxpr, consts, *args):\nargs = map(core.full_lower, args)\nreturn core.eval_jaxpr(jaxpr, consts, (), *args)\n-def partial_eval_jaxpr(jaxpr, avals, first_components):\n- f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr))\n+_partial_eval_gensym = gensym('_peval')\n+\n+def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\n+ f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\ncell = []\ndef fun(*vals):\npvals = map(as_pval, avals, first_components, vals)\n- jaxpr, out_pval, consts = trace_to_jaxpr(f, pvals)\n+ jaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\nout_pv, out_const = out_pval\n- out = pack((out_const, pack(consts)))\n- cell.append((out_pv, jaxpr))\n+ out = pack((out_const, pack(consts_2)))\n+ cell.append((out_pv, jaxpr_2))\nreturn out\npvals = map(as_pval2, avals, first_components)\n- jaxpr_1, out_pval, consts = trace_to_jaxpr(lu.wrap_init(fun), pvals)\n- assert not consts\n+ jaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\n+ lu.wrap_init(fun), pvals, instantiate=True)\nout_pv_2, jaxpr_2 = cell[0]\n+ lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\nfirst_component_out = isnone(out_pv_2)\n- return jaxpr_1, jaxpr_2, out_pv_2, first_component_out\n+ return (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, first_component_out\n+\n+def _closure_convert_jaxpr(jaxpr, newvar):\n+ lifted_jaxpr = jaxpr.copy()\n+ lifted_jaxpr.constvars = ()\n+ consts_var = newvar()\n+ lifted_jaxpr.invars = [consts_var] + jaxpr.invars\n+ lifted_jaxpr.eqns = (\n+ [_unpack_eqn(consts_var, jaxpr.constvars)] + list(jaxpr.eqns))\n+ return lifted_jaxpr\n+\n+def _unpack_eqn(invar, outvars):\n+ return core.JaxprEqn([invar], outvars, core.identity_p, (), True, {})\n+\n+def _pack_eqn(invars, outvar):\n+ return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax.py",
"new_path": "jax/lax.py",
"diff": "@@ -48,6 +48,7 @@ from .abstract_arrays import (UnshapedArray, ShapedArray, ConcreteArray,\nfrom .api_util import (pytree_fun_to_jaxtupletree_fun, pytree_to_jaxtupletree,\npytree_fun_to_flatjaxtuple_fun, pytree_to_flatjaxtuple)\nfrom .interpreters import partial_eval as pe\n+from .interpreters.partial_eval import _unpack_eqn, _pack_eqn\nfrom .interpreters import xla\nfrom .interpreters import pxla\nfrom .interpreters import ad\n@@ -960,6 +961,7 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun):\nout = pe.merge_pvals(out, joined_pval)\nreturn tree_unflatten(true_tree(), out)\n+# TODO(mattjj, dougalm): gensym broken under nesting\ndef _revise_cond_jaxpr(new_pval, old_pval, jaxpr, consts):\nnew_pv, new_const = new_pval\nold_pv, old_const = old_pval\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -25,33 +25,33 @@ def cumsum(xs):\nx = np.linspace(0, 3, 4)\n-# print np.cumsum(x)\n-print cumsum(x)\n-print\n-\n-# print jvp(np.cumsum, (x,), (x*0.1,))\n-print jvp(cumsum, (x,), (x*0.1,))\n+# # print np.cumsum(x)\n+# print cumsum(x)\n# print\n-# print linearize(np.cumsum, x)[1](x*0.1)\n-# print linearize(cumsum, x)[1](x*0.1)\n+# # print jvp(np.cumsum, (x,), (x*0.1,))\n+# print jvp(cumsum, (x,), (x*0.1,))\n+# # print\n+# # print linearize(np.cumsum, x)[1](x*0.1)\n+# # print linearize(cumsum, x)[1](x*0.1)\n-###\n+# ###\n-def f(x, carry):\n- carry = carry + np.sin(x)\n- y = pack((carry**2, -carry))\n- return pack((y, carry))\n-\n-ys, z = scan_initial(f, 0.0, np.arange(4.))\n-ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n-print onp.allclose(z, z_ref)\n-\n-print ys\n-print ys_ref\n-print z\n-print z_ref\n-# print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-# print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-print\n+\n+# def f(x, carry):\n+# carry = carry + np.sin(x)\n+# y = pack((carry**2, -carry))\n+# return pack((y, carry))\n+\n+# ys, z = scan_initial(f, 0.0, np.arange(4.))\n+# ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n+# print onp.allclose(z, z_ref)\n+\n+# print ys\n+# print ys_ref\n+# print z\n+# print z_ref\n+# # print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+# # print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+# print\n"
}
] | Python | Apache License 2.0 | google/jax | improved initial transforms, call!
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.04.2019 08:26:27 | 25,200 | 9a22d2ebe12547ce48267f5e8b1d903ee62a3a30 | about to embark on scan jvp for the last time | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -144,6 +144,7 @@ def update_arrays(i, aval, xs, x):\nelse:\nreturn lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n+_scan_const = pe.gensym('_consts')\n# scan :: (a -> c -> (b, c)) -> c -> [a] -> ([b], c)\ndef scan_initial(f, init, xs):\n@@ -155,14 +156,15 @@ def scan_initial(f, init, xs):\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n(y_aval, carry_aval_out), _ = pval_out\nassert carry_aval == carry_aval_out\n+ lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_const)\nconsts_aval, _ = _abstractify(core.pack(consts))\navals = (consts_aval, x_aval, y_aval, carry_aval)\nreturn scan_initial_p.bind(core.pack(consts), init, xs,\n- avals=avals, jaxpr=jaxpr)\n+ avals=avals, jaxpr=lifted_jaxpr, true_consts=())\n# scan_p :: (d -> a -> c -> (b, c)) -> d -> c -> [a] -> ([b], c)\n-def _scan_initial_impl(consts, init, xs, avals, jaxpr):\n+def _scan_initial_impl(consts, init, xs, avals, jaxpr, true_consts):\n# TODO maybe can do this work in the traceable, not every impl call\nlength = leading_dim_size(xs)\n(_, x_aval, y_aval, _) = avals\n@@ -171,7 +173,7 @@ def _scan_initial_impl(consts, init, xs, avals, jaxpr):\ndef body_fun(i, vals):\ncarry, ys = vals\nx = index_arrays(i, x_aval, xs)\n- y, carry_out = jaxpr_as_fun(jaxpr)(consts, x, carry)\n+ y, carry_out = jaxpr_as_fun(jaxpr, true_consts)(consts, x, carry)\nys_out = update_arrays(i, y_aval, ys, y)\nreturn (carry_out, ys_out)\n@@ -180,7 +182,7 @@ def _scan_initial_impl(consts, init, xs, avals, jaxpr):\nreturn core.pack((ys, carry))\n-def _scan_initial_jvp(primals, tangents, avals, jaxpr):\n+def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\nconsts, init, xs = primals\nconsts_dot, init_dot, xs_dot = tangents\nconsts_aval, x_aval, y_aval, carry_aval = avals\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -497,35 +497,6 @@ def jvp_jaxpr(jaxpr, consts, avals, zeros):\nreturn jaxpr_out, consts_out, out_zeros()\n-@transformation_with_aux\n-def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\n- primals, tangents = unzip2(primal_tangent_pairs)\n- tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n- primal_out, tangent_out = yield primals, tangents_zeros\n- # TODO check output is tuple\n- zeros_out = get_zeros(tangent_out)\n- tangent_out_nonzero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n- primal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\n- yield pack(primal_tangent_pairs_out), zeros_out\n-\n-def jvp_jaxpr2(jaxpr, avals, zeros):\n- # jaxpr :: d | a -> b -> (c1, c2)\n- # avals = (d, a, b)\n- f = wrap_init(partial(jaxpr_as_fun, jaxpr)) # f :: d -> a -> b -> (c1, c2)\n- f_jvp, out_zeros = f_jvp_traceable2(jvp(f, instantiate=False), zeros)\n- # f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n- tangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\n- zeros, avals)\n- pt_pvals = [pe.PartialVal((core.AbstractTuple((p_aval, t_aval)), core.unit))\n- for p_aval, t_aval in zip(avals, tangent_avals)]\n- jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n- f_jvp, pt_pvals, instantiate=True)\n- # jaxpr_out :: d2 | (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n- # consts_out :: d2\n- # out_zeros :: (zeros(c1), zeros(c2))\n- return jaxpr_out, consts_out, out_zeros()\n-\n-\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\nprimitive_transposes[pe.compiled_call_p] = partial(call_transpose, pe.compiled_call_p)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -16,8 +16,7 @@ from jax import lax\ndef cumsum(xs):\ndef f(x, carry):\n- # carry = carry + x # TODO\n- carry = carry + x + 3.14\n+ carry = carry + x\nreturn pack((carry, carry))\nys, _ = scan_initial(f, 0.0, xs)\n@@ -25,9 +24,9 @@ def cumsum(xs):\nx = np.linspace(0, 3, 4)\n-# # print np.cumsum(x)\n-# print cumsum(x)\n-# print\n+print np.cumsum(x)\n+print cumsum(x)\n+print\n# # print jvp(np.cumsum, (x,), (x*0.1,))\n# print jvp(cumsum, (x,), (x*0.1,))\n"
}
] | Python | Apache License 2.0 | google/jax | about to embark on scan jvp for the last time
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.04.2019 09:42:17 | 25,200 | 4e2edb5a5fa2eca1b6823b276092cd6a972ff69a | wow that went well | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -25,7 +25,18 @@ def transpose_jaxpr(jaxpr, avals, tangent_components):\nassert False\nstrip_zeros = partial(ad.strip_zeros, core.unit, core.pack)\n+strip_zeros_aval = partial(ad.strip_zeros, core.AbstractTuple(()), core.AbstractTuple)\n+def convert_zeros(keep_symbolic, example, tangent):\n+ if tangent is ad.zero:\n+ if keep_symbolic:\n+ return core.unit\n+ else:\n+ return ad.zeros_like_jaxval(example)\n+ elif type(tangent) is ad.TangentTuple:\n+ return core.pack(map(convert_zeros, keep_symbolic, example, tangent))\n+ else:\n+ return tangent\n@curry\ndef jaxpr_as_fun(jaxpr, consts, *args):\n@@ -188,27 +199,58 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\nconsts_aval, x_aval, y_aval, carry_aval = avals\nwhere_consts_zeros = ad.get_zeros(consts_dot)\n- nonzero_consts_dot = strip_zeros(where_consts_zeros, consts_dot)\n-\nwhere_init_zeros = ad.get_zeros(init_dot)\n- nonzero_init_dot = strip_zeros(where_init_zeros, init_dot)\n-\nwhere_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\n- nonzero_xs_dot = strip_zeros(where_xs_zeros, xs_dot)\nwhere_carry_zeros = where_init_zeros\nwhile True:\njaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr2(\n- jaxpr, (consts_aval, carry_aval, x_aval),\n+ jaxpr, true_consts, (consts_aval, carry_aval, x_aval),\n(where_consts_zeros, where_carry_zeros, where_xs_zeros))\n- assert not new_consts # TODO\n- _, where_carry_zeros_out = where_zeros_out\n+ where_ys_zeros, where_carry_zeros_out = where_zeros_out\nif where_carry_zeros_out == where_carry_zeros:\nbreak\nelse:\nwhere_carry_zeros = zeros_join(where_carry_zeros_out, where_carry_zeros)\n- import ipdb; ipdb.set_trace()\n+ # convert_zeros is like strip_zeros but uses explicit lattice information to\n+ # instantiate zeros in some cases, namely in init_dot based on the fixed point\n+ nonzero_init_dot = convert_zeros(where_carry_zeros, init, init_dot)\n+ nonzero_consts_dot = convert_zeros(where_consts_zeros, consts, consts_dot)\n+ nonzero_xs_dot = convert_zeros(where_xs_zeros, xs, xs_dot)\n+\n+ consts_dual = core.pack((consts, nonzero_consts_dot))\n+ init_dual = core.pack((init, nonzero_init_dot))\n+ xs_dual = core.pack((xs, nonzero_xs_dot))\n+\n+ consts_dual_aval = core.AbstractTuple((consts_aval, strip_zeros_aval(where_consts_zeros, consts_aval)))\n+ x_dual_aval = core.AbstractTuple((x_aval, strip_zeros_aval(where_xs_zeros, x_aval)))\n+ y_dual_aval = core.AbstractTuple((y_aval, strip_zeros_aval(where_ys_zeros, y_aval)))\n+ carry_dual_aval = core.AbstractTuple((carry_aval, strip_zeros_aval(where_carry_zeros, carry_aval)))\n+ avals = (consts_dual_aval, x_dual_aval, y_dual_aval, carry_dual_aval)\n+\n+ ys_dual, carry_out_dual = scan_initial_p.bind(\n+ consts_dual, init_dual, xs_dual, avals=avals, jaxpr=jaxpr_jvp,\n+ true_consts=new_consts)\n+\n+ ys, ys_dot = ys_dual\n+ ys_dot = ad.put_zeros(ad.TangentTuple, where_ys_zeros, ys_dot)\n+\n+ carry_out, carry_out_dot = carry_out_dual\n+ carry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\n+\n+ return core.pack((ys, carry_out)), ad.TangentTuple((ys_dot, carry_out_dot))\n+\n+def instantiate_zeros(example, tangent, keep_symbolic):\n+ if tangent is ad.zero:\n+ if keep_symbolic:\n+ return tangent\n+ else:\n+ return ad.zeros_like_jaxval(example)\n+ elif isinstance(tangent, ad.TangentTuple):\n+ return ad.TangentTuple(map(instantiate_zeros, example, tangent, keep_symbolic))\n+ else:\n+ return tangent\ndef zeros_join(a, b):\nif type(a) is tuple:\n@@ -217,20 +259,6 @@ def zeros_join(a, b):\nreturn a and b\n- # TODO we realized consts are tricky... can't just add a new arg every time we\n- # jvp like in n-ary call\n-\n- # out = scan_initial_p.bind(\n- # (ys, ys_dot), (carry, carry_dot) =\n-\n- # primal_out, tangent_out = call_initial_p.bind(\n- # core.pack(consts), core.pack(primals),\n- # core.pack(nonzero_tangents), jaxpr=jaxpr_jvp)\n- # tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out,\n- # tangent_out)\n- # return primal_out, tangent_out_zeros\n-\n-\nscan_initial_p = core.Primitive(\"scan_initial\")\nscan_initial_p.def_impl(_scan_initial_impl)\nad.primitive_jvps[scan_initial_p] = _scan_initial_jvp\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -497,6 +497,39 @@ def jvp_jaxpr(jaxpr, consts, avals, zeros):\nreturn jaxpr_out, consts_out, out_zeros()\n+# TODO ideas to simplify:\n+# - try jaxpr munging\n+# - try writing as an @transform\n+\n+@transformation_with_aux\n+def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\n+ primals, tangents = unzip2(primal_tangent_pairs)\n+ tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n+ primal_out, tangent_out = yield primals, tangents_zeros\n+ # TODO check output is tuple\n+ zeros_out = get_zeros(tangent_out)\n+ tangent_out_nonzero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n+ primal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\n+ yield pack(primal_tangent_pairs_out), zeros_out\n+\n+def jvp_jaxpr2(jaxpr, consts, avals, zeros):\n+ # jaxpr :: d -> a -> b -> (c1, c2)\n+ # avals = (d, a, b)\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr, consts)) # f :: d -> a -> b -> (c1, c2) [with consts]\n+ f_jvp, out_zeros = f_jvp_traceable2(jvp(f, instantiate=False), zeros)\n+ # f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n+ tangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\n+ zeros, avals)\n+ pt_pvals = [pe.PartialVal((core.AbstractTuple((p_aval, t_aval)), core.unit))\n+ for p_aval, t_aval in zip(avals, tangent_avals)]\n+ jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n+ f_jvp, pt_pvals, instantiate=True)\n+ # jaxpr_out :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2')) [with consts]\n+ # out_zeros :: (zeros(c1), zeros(c2))\n+ return jaxpr_out, consts_out, out_zeros()\n+\n+\n+\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\nprimitive_transposes[pe.compiled_call_p] = partial(call_transpose, pe.compiled_call_p)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -28,9 +28,10 @@ print np.cumsum(x)\nprint cumsum(x)\nprint\n-# # print jvp(np.cumsum, (x,), (x*0.1,))\n-# print jvp(cumsum, (x,), (x*0.1,))\n-# # print\n+print jvp(np.cumsum, (x,), (x*0.1,))\n+print jvp(cumsum, (x,), (x*0.1,))\n+print\n+\n# # print linearize(np.cumsum, x)[1](x*0.1)\n# # print linearize(cumsum, x)[1](x*0.1)\n@@ -38,19 +39,20 @@ print\n# ###\n-# def f(x, carry):\n-# carry = carry + np.sin(x)\n-# y = pack((carry**2, -carry))\n-# return pack((y, carry))\n-\n-# ys, z = scan_initial(f, 0.0, np.arange(4.))\n-# ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n-# print onp.allclose(z, z_ref)\n-\n-# print ys\n-# print ys_ref\n-# print z\n-# print z_ref\n-# # print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-# # print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-# print\n+def f(x, carry):\n+ carry = carry + np.sin(x)\n+ y = pack((carry**2, -carry))\n+ return pack((y, carry))\n+\n+ys, z = scan_initial(f, 0.0, np.arange(4.))\n+ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n+print onp.allclose(z, z_ref)\n+\n+print ys\n+print ys_ref\n+print z\n+print z_ref\n+print\n+print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+print\n"
}
] | Python | Apache License 2.0 | google/jax | wow that went well
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 11.04.2019 14:50:58 | 25,200 | 0a20a9f7743c85a7e51288195d011b2302e673de | progress on scan partial_eval | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -99,12 +99,12 @@ def _call_initial_partial_eval(trace, *tracers, **kwargs):\navals = map(as_aval, in_pvs, in_consts)\n(jaxpr_1, consts_1), (jaxpr_2, consts_2), out_pv, first_components_out = \\\npe.partial_eval_jaxpr(jaxpr, consts, avals, first_components)\n- out_pv_const, residuals = call_initial_p.bind(\n+ out_const, residuals = call_initial_p.bind(\n*in_consts, jaxpr=jaxpr_1, consts=consts_1)\nresidual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\neqn = core.JaxprEqn((residual_tracers,) + tracers, None, call_initial_p, (),\nFalse, dict(jaxpr=jaxpr_2, consts=consts_2))\n- return pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_pv_const)), eqn)\n+ return pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\ndef _call_initial_transpose():\n@@ -211,7 +211,7 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\nif where_carry_zeros_out == where_carry_zeros:\nbreak\nelse:\n- where_carry_zeros = zeros_join(where_carry_zeros_out, where_carry_zeros)\n+ where_carry_zeros = binary_lattice_join(where_carry_zeros_out, where_carry_zeros)\n# convert_zeros is like strip_zeros but uses explicit lattice information to\n# instantiate zeros in some cases, namely in init_dot based on the fixed point\n@@ -252,14 +252,44 @@ def instantiate_zeros(example, tangent, keep_symbolic):\nelse:\nreturn tangent\n-def zeros_join(a, b):\n+def binary_lattice_join(a, b):\nif type(a) is tuple:\n- return tuple(map(zeros_join, a, b))\n+ return tuple(map(binary_lattice_join, a, b))\nelse:\nreturn a and b\n+def _scan_initial_partial_eval(trace, *tracers, **kwargs):\n+ jaxpr = kwargs.pop('jaxpr')\n+ true_consts = kwargs.pop('true_consts')\n+ in_pvs, in_consts = unzip2([t.pval for t in tracers])\n+ fc_consts, fc_init, fc_xs = map(is_const, in_pvs)\n+ avals = map(as_aval, in_pvs, in_consts)\n+\n+ fc_carry = fc_init\n+ while True:\n+ first_components = (fc_consts, fc_carry, fc_xs)\n+ (jaxpr_1, consts_1), (jaxpr_2, consts_2), out_pv, fc_carry_out = \\\n+ pe.partial_eval_jaxpr2(jaxpr, true_consts, avals, first_components)\n+ if fc_carry_out == fc_carry:\n+ break\n+ else:\n+ fc_carry = binary_lattice_join(fc_carry, fc_carry_out)\n+ # TODO lift args according to lattice join result fc_carry\n+ # TODO maybe update tracers too... instantiate_const\n+\n+ (ys, residuals), out_const = scan_initial_p.bind(\n+ *in_consts, avals=avals_1, jaxpr=jaxpr_1, true_consts=consts_1)\n+ residual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\n+ d, c, a = tracers\n+ new_tracers = (d, c, core.pack((residual_tracers, a)))\n+ eqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), False,\n+ dict(jaxpr=jaxpr_2, true_consts=consts_2, avals=avals_2))\n+ import ipdb; ipdb.set_trace()\n+ return pe.JaxprTracer((trace, pe.PartialVal((out_pv, out_const)), eqn))\n+\n+\nscan_initial_p = core.Primitive(\"scan_initial\")\nscan_initial_p.def_impl(_scan_initial_impl)\nad.primitive_jvps[scan_initial_p] = _scan_initial_jvp\n-# pe.custom_partial_eval_rules[scan_initial_p] = _scan_initial_partial_eval\n+pe.custom_partial_eval_rules[scan_initial_p] = _scan_initial_partial_eval\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -583,6 +583,7 @@ def jaxpr_as_fun(jaxpr, consts, *args):\n_partial_eval_gensym = gensym('_peval')\ndef partial_eval_jaxpr(jaxpr, consts, avals, first_components):\n+ # jaxpr :: a -> b -> c\nf = lu.wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\ncell = []\n@@ -590,9 +591,8 @@ def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\npvals = map(as_pval, avals, first_components, vals)\njaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\nout_pv, out_const = out_pval\n- out = pack((out_const, pack(consts_2)))\ncell.append((out_pv, jaxpr_2))\n- return out\n+ return pack((out_const, pack(consts_2)))\npvals = map(as_pval2, avals, first_components)\njaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\n@@ -600,8 +600,11 @@ def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\nout_pv_2, jaxpr_2 = cell[0]\nlifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\nfirst_component_out = isnone(out_pv_2)\n+ # jaxpr_1 :: a1 -> b1 -> (c1, res)\n+ # lifted_jaxpr_2 :: res -> a2 -> b2 -> c2\nreturn (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, first_component_out\n+\ndef _closure_convert_jaxpr(jaxpr, newvar):\nlifted_jaxpr = jaxpr.copy()\nlifted_jaxpr.constvars = ()\n@@ -618,4 +621,43 @@ def _pack_eqn(invars, outvar):\nreturn core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\n+def partial_eval_jaxpr2(jaxpr, consts, avals, first_components):\n+ # jaxpr :: d -> c -> a -> (b, c)\n+ f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\n+\n+ cell = []\n+ def fun(*vals):\n+ pvals = map(as_pval, avals, first_components, vals)\n+ jaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n+ (out_pv_b, out_pv_c), (out_const_b, out_const_c) = out_pval\n+ cell.append(((out_pv_b, out_pv_c), jaxpr_2))\n+ return pack((pack((out_const_b, pack(consts_2))), out_const_c))\n+\n+ pvals = map(as_pval2, avals, first_components)\n+ jaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\n+ lu.wrap_init(fun), pvals, instantiate=True)\n+ out_pv_2, jaxpr_2 = cell[0]\n+\n+ # jaxpr_1 :: d1 -> c1 -> a1 -> ((b1, res), c1)\n+ # jaxpr_2 :: res | d2 -> c2 -> a2 -> (b2, c2)\n+ # lifted_jaxpr_2 :: res -> d2 -> c2 -> a2 -> (b2, c2)\n+ lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\n+ # doubly_lifted_jaxpr_2 :: d2 -> c2 -> (res, a2) -> (b2, c2)\n+ doubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2, _partial_eval_gensym)\n+ out_pv_b, out_pv_c = out_pv_2\n+ first_component_c_out = isnone(out_pv_c)\n+\n+ return ((jaxpr_1, consts_1), (doubly_lifted_jaxpr_2, ()),\n+ out_pv_2, first_component_c_out)\n+\n+def _move_and_pair_arg(jaxpr, newvar):\n+ moved_jaxpr = jaxpr.copy()\n+ res, d, c, a = jaxpr.invars\n+ pair_var = newvar()\n+ moved_jaxpr.invars = [d, c, pair_var]\n+ moved_jaxpr.eqns = (\n+ [_unpack_eqn(pair_var, [res, a])] + list(jaxpr.eqns))\n+ return moved_jaxpr\n+\n+\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -32,8 +32,8 @@ print jvp(np.cumsum, (x,), (x*0.1,))\nprint jvp(cumsum, (x,), (x*0.1,))\nprint\n-# # print linearize(np.cumsum, x)[1](x*0.1)\n-# # print linearize(cumsum, x)[1](x*0.1)\n+print linearize(np.cumsum, x)[1](x*0.1)\n+print linearize(cumsum, x)[1](x*0.1)\n# ###\n"
}
] | Python | Apache License 2.0 | google/jax | progress on scan partial_eval
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 18.04.2019 07:19:04 | 25,200 | d03cdc63975d8d16be2ae57d80c051826fd1ad75 | introduce typedjaxpr to carry around literals etc | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -56,6 +56,13 @@ class Jaxpr(object):\nreturn Jaxpr(self.constvars[:], self.freevars[:], self.invars[:],\nself.outvar, self.eqns[:])\n+class TypedJaxpr(namedtuple('TypedJaxpr', ['jaxpr', 'literals', 'in_avals', 'out_aval'])):\n+ def __init__(self, jaxpr, literals, in_avals, out_aval):\n+ assert type(jaxpr) is Jaxpr\n+ assert len(literals) == len(jaxpr.constvars)\n+ assert len(in_avals) == len(jaxpr.invars)\n+ super(TypedJaxpr, self).__init__(jaxpr, literals, in_avals, out_aval)\n+\nJaxprEqn = namedtuple('JaxprEqn', ['invars', 'outvars', 'primitive',\n'bound_subjaxprs', 'destructure', 'params'])\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "from functools import partial\n+from collections import namedtuple\nimport jax.core as core\nimport jax.linear_util as lu\n@@ -11,6 +12,7 @@ from jax.abstract_arrays import ShapedArray\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import ad\n+\ndef pvals_with_zeros(zero_components, aval):\nif zero_components is True:\nreturn pe.PartialVal((None, ad.zero))\n@@ -85,7 +87,7 @@ def as_aval(pv, const):\npv, _ = _abstractify(const)\nreturn pv\nelif type(pv) is pe.JaxprTracerTuple:\n- return map(as_aval, pv, const)\n+ return core.AbstractTuple(map(as_aval, pv, const))\nelif isinstance(pv, core.AbstractValue):\nreturn pv\nelse:\n@@ -169,22 +171,24 @@ def scan_initial(f, init, xs):\nassert carry_aval == carry_aval_out\nlifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_const)\nconsts_aval, _ = _abstractify(core.pack(consts))\n- avals = (consts_aval, x_aval, y_aval, carry_aval)\n- return scan_initial_p.bind(core.pack(consts), init, xs,\n- avals=avals, jaxpr=lifted_jaxpr, true_consts=())\n+ in_avals = (consts_aval, carry_aval, x_aval)\n+ out_aval = core.AbstractTuple((y_aval, carry_aval))\n+ jaxpr = core.TypedJaxpr(lifted_jaxpr, (), in_avals, out_aval)\n+ return scan_initial_p.bind(core.pack(consts), init, xs, jaxpr=jaxpr)\n# scan_p :: (d -> a -> c -> (b, c)) -> d -> c -> [a] -> ([b], c)\n-def _scan_initial_impl(consts, init, xs, avals, jaxpr, true_consts):\n+def _scan_initial_impl(consts, init, xs, jaxpr):\n# TODO maybe can do this work in the traceable, not every impl call\nlength = leading_dim_size(xs)\n- (_, x_aval, y_aval, _) = avals\n+ _, _, x_aval = jaxpr.in_avals\n+ y_aval, _ = jaxpr.out_aval\nys_aval = promote_aval_rank(length, y_aval)\ndef body_fun(i, vals):\ncarry, ys = vals\nx = index_arrays(i, x_aval, xs)\n- y, carry_out = jaxpr_as_fun(jaxpr, true_consts)(consts, x, carry)\n+ y, carry_out = jaxpr_as_fun(jaxpr.jaxpr, jaxpr.literals)(consts, x, carry)\nys_out = update_arrays(i, y_aval, ys, y)\nreturn (carry_out, ys_out)\n@@ -193,10 +197,11 @@ def _scan_initial_impl(consts, init, xs, avals, jaxpr, true_consts):\nreturn core.pack((ys, carry))\n-def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\n+def _scan_initial_jvp(primals, tangents, jaxpr):\nconsts, init, xs = primals\nconsts_dot, init_dot, xs_dot = tangents\n- consts_aval, x_aval, y_aval, carry_aval = avals\n+ consts_aval, carry_aval, x_aval = jaxpr.in_avals\n+ y_aval, _ = jaxpr.out_aval\nwhere_consts_zeros = ad.get_zeros(consts_dot)\nwhere_init_zeros = ad.get_zeros(init_dot)\n@@ -204,9 +209,8 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\nwhere_carry_zeros = where_init_zeros\nwhile True:\n- jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr2(\n- jaxpr, true_consts, (consts_aval, carry_aval, x_aval),\n- (where_consts_zeros, where_carry_zeros, where_xs_zeros))\n+ where_zeros = (where_consts_zeros, where_carry_zeros, where_xs_zeros)\n+ jaxpr_jvp, where_zeros_out = ad.jvp_jaxpr2(jaxpr, where_zeros)\nwhere_ys_zeros, where_carry_zeros_out = where_zeros_out\nif where_carry_zeros_out == where_carry_zeros:\nbreak\n@@ -223,22 +227,14 @@ def _scan_initial_jvp(primals, tangents, avals, jaxpr, true_consts):\ninit_dual = core.pack((init, nonzero_init_dot))\nxs_dual = core.pack((xs, nonzero_xs_dot))\n- consts_dual_aval = core.AbstractTuple((consts_aval, strip_zeros_aval(where_consts_zeros, consts_aval)))\n- x_dual_aval = core.AbstractTuple((x_aval, strip_zeros_aval(where_xs_zeros, x_aval)))\n- y_dual_aval = core.AbstractTuple((y_aval, strip_zeros_aval(where_ys_zeros, y_aval)))\n- carry_dual_aval = core.AbstractTuple((carry_aval, strip_zeros_aval(where_carry_zeros, carry_aval)))\n- avals = (consts_dual_aval, x_dual_aval, y_dual_aval, carry_dual_aval)\n-\nys_dual, carry_out_dual = scan_initial_p.bind(\n- consts_dual, init_dual, xs_dual, avals=avals, jaxpr=jaxpr_jvp,\n- true_consts=new_consts)\n+ consts_dual, init_dual, xs_dual, jaxpr=jaxpr_jvp)\nys, ys_dot = ys_dual\nys_dot = ad.put_zeros(ad.TangentTuple, where_ys_zeros, ys_dot)\ncarry_out, carry_out_dot = carry_out_dual\ncarry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\n-\nreturn core.pack((ys, carry_out)), ad.TangentTuple((ys_dot, carry_out_dot))\ndef instantiate_zeros(example, tangent, keep_symbolic):\n@@ -253,10 +249,17 @@ def instantiate_zeros(example, tangent, keep_symbolic):\nreturn tangent\ndef binary_lattice_join(a, b):\n- if type(a) is tuple:\n+ t = (type(a), type(b))\n+ if t == (tuple, tuple):\nreturn tuple(map(binary_lattice_join, a, b))\n- else:\n+ elif t == (tuple, bool):\n+ return tuple(map(binary_lattice_join, a, (b,) * len(a)))\n+ elif t == (bool, tuple):\n+ return tuple(map(binary_lattice_join, (a,) * len(b), b))\n+ elif t == (bool, bool):\nreturn a and b\n+ else:\n+ raise TypeError((type(a), type(b)))\ndef _scan_initial_partial_eval(trace, *tracers, **kwargs):\n@@ -269,25 +272,46 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nfc_carry = fc_init\nwhile True:\nfirst_components = (fc_consts, fc_carry, fc_xs)\n- (jaxpr_1, consts_1), (jaxpr_2, consts_2), out_pv, fc_carry_out = \\\n+ full_jaxpr_1, full_jaxpr_2, out_pv, fc_carry_out = \\\npe.partial_eval_jaxpr2(jaxpr, true_consts, avals, first_components)\n+ jaxpr_1, consts_1, avals_1 = full_jaxpr_1\n+ jaxpr_2, consts_2, avals_2 = full_jaxpr_2\nif fc_carry_out == fc_carry:\nbreak\nelse:\nfc_carry = binary_lattice_join(fc_carry, fc_carry_out)\n- # TODO lift args according to lattice join result fc_carry\n- # TODO maybe update tracers too... instantiate_const\n+\n+ consts_tracer, init_tracer, xs_tracer = tracers\n+ lifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\n+ lifted_tracers = consts_tracer, lifted_init_tracer, xs_tracer\n+ in_pvs, in_consts = unzip2([t.pval for t in lifted_tracers])\n+\n+ # TODO first_components reconciled with aval splitting\n+ # TODO avals_2 reconciled with residuals\n(ys, residuals), out_const = scan_initial_p.bind(\n*in_consts, avals=avals_1, jaxpr=jaxpr_1, true_consts=consts_1)\nresidual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\n- d, c, a = tracers\n+ d, c, a = lifted_tracers\nnew_tracers = (d, c, core.pack((residual_tracers, a)))\neqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), False,\ndict(jaxpr=jaxpr_2, true_consts=consts_2, avals=avals_2))\nimport ipdb; ipdb.set_trace()\nreturn pe.JaxprTracer((trace, pe.PartialVal((out_pv, out_const)), eqn))\n+def _lift_tracer(trace, tracer, is_const):\n+ t = type(is_const)\n+ if t is bool:\n+ if not is_const:\n+ return trace.instantiate_const(tracer)\n+ else:\n+ return tracer\n+ elif t is tuple:\n+ tracers = map(trace.full_raise, tracer)\n+ return core.pack(map(partial(_lift_tracer, trace), tracers, is_const))\n+ else:\n+ raise TypeError(t)\n+\nscan_initial_p = core.Primitive(\"scan_initial\")\nscan_initial_p.def_impl(_scan_initial_impl)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -512,21 +512,25 @@ def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\nprimal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\nyield pack(primal_tangent_pairs_out), zeros_out\n-def jvp_jaxpr2(jaxpr, consts, avals, zeros):\n+def jvp_jaxpr2(jaxpr, zeros):\n# jaxpr :: d -> a -> b -> (c1, c2)\n# avals = (d, a, b)\n- f = wrap_init(partial(jaxpr_as_fun, jaxpr, consts)) # f :: d -> a -> b -> (c1, c2) [with consts]\n+ # f :: d -> a -> b -> (c1, c2)\n+ f = wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\nf_jvp, out_zeros = f_jvp_traceable2(jvp(f, instantiate=False), zeros)\n# f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\ntangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\n- zeros, avals)\n+ zeros, jaxpr.in_avals)\npt_pvals = [pe.PartialVal((core.AbstractTuple((p_aval, t_aval)), core.unit))\n- for p_aval, t_aval in zip(avals, tangent_avals)]\n- jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n+ for p_aval, t_aval in zip(jaxpr.in_avals, tangent_avals)]\n+ jaxpr_out, pval_out, literals_out = pe.trace_to_jaxpr(\nf_jvp, pt_pvals, instantiate=True)\n- # jaxpr_out :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2')) [with consts]\n+ # jaxpr_out :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n# out_zeros :: (zeros(c1), zeros(c2))\n- return jaxpr_out, consts_out, out_zeros()\n+ in_avals = tuple(map(core.AbstractTuple, zip(jaxpr.in_avals, tangent_avals)))\n+ out_aval, _ = pval_out\n+ jaxpr_out = core.TypedJaxpr(jaxpr_out, literals_out, in_avals, out_aval)\n+ return jaxpr_out, out_zeros()\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -647,7 +647,8 @@ def partial_eval_jaxpr2(jaxpr, consts, avals, first_components):\nout_pv_b, out_pv_c = out_pv_2\nfirst_component_c_out = isnone(out_pv_c)\n- return ((jaxpr_1, consts_1), (doubly_lifted_jaxpr_2, ()),\n+ avals_1, avals_2 = unzip2(map(_split_avals, first_components, avals))\n+ return ((jaxpr_1, consts_1, avals_1), (doubly_lifted_jaxpr_2, (), avals_2),\nout_pv_2, first_component_c_out)\ndef _move_and_pair_arg(jaxpr, newvar):\n@@ -659,5 +660,18 @@ def _move_and_pair_arg(jaxpr, newvar):\n[_unpack_eqn(pair_var, [res, a])] + list(jaxpr.eqns))\nreturn moved_jaxpr\n+def _split_avals(first_component, aval):\n+ t = type(first_component)\n+ if t is tuple:\n+ assert type(aval) is core.AbstractTuple\n+ return unzip2(map(_split_avals, first_component, aval))\n+ elif t is bool:\n+ if first_component:\n+ return aval, core.AbstractTuple(())\n+ else:\n+ return core.AbstractTuple(()), aval\n+ else:\n+ raise TypeError(t)\n+\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -32,27 +32,27 @@ print jvp(np.cumsum, (x,), (x*0.1,))\nprint jvp(cumsum, (x,), (x*0.1,))\nprint\n-print linearize(np.cumsum, x)[1](x*0.1)\n-print linearize(cumsum, x)[1](x*0.1)\n+# print linearize(np.cumsum, x)[1](x*0.1)\n+# print linearize(cumsum, x)[1](x*0.1)\n# ###\n-def f(x, carry):\n- carry = carry + np.sin(x)\n- y = pack((carry**2, -carry))\n- return pack((y, carry))\n-\n-ys, z = scan_initial(f, 0.0, np.arange(4.))\n-ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n-print onp.allclose(z, z_ref)\n-\n-print ys\n-print ys_ref\n-print z\n-print z_ref\n-print\n-print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-print\n+# def f(x, carry):\n+# carry = carry + np.sin(x)\n+# y = pack((carry**2, -carry))\n+# return pack((y, carry))\n+\n+# ys, z = scan_initial(f, 0.0, np.arange(4.))\n+# ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n+# print onp.allclose(z, z_ref)\n+\n+# print ys\n+# print ys_ref\n+# print z\n+# print z_ref\n+# print\n+# print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+# print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n+# print\n"
}
] | Python | Apache License 2.0 | google/jax | introduce typedjaxpr to carry around literals etc
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 18.04.2019 09:00:11 | 25,200 | 63ebb9a2465b927e7bbad7666e1649dd44f26783 | refactored partial_eval_jaxpr2 | [
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -264,23 +264,21 @@ def binary_lattice_join(a, b):\ndef _scan_initial_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\n- true_consts = kwargs.pop('true_consts')\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfc_consts, fc_init, fc_xs = map(is_const, in_pvs)\n- avals = map(as_aval, in_pvs, in_consts)\nfc_carry = fc_init\nwhile True:\nfirst_components = (fc_consts, fc_carry, fc_xs)\n- full_jaxpr_1, full_jaxpr_2, out_pv, fc_carry_out = \\\n- pe.partial_eval_jaxpr2(jaxpr, true_consts, avals, first_components)\n- jaxpr_1, consts_1, avals_1 = full_jaxpr_1\n- jaxpr_2, consts_2, avals_2 = full_jaxpr_2\n+ jaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr2(jaxpr, first_components)\n+ fc_ys, fc_carry_out = fc_out\nif fc_carry_out == fc_carry:\nbreak\nelse:\nfc_carry = binary_lattice_join(fc_carry, fc_carry_out)\n+ import ipdb; ipdb.set_trace()\n+\nconsts_tracer, init_tracer, xs_tracer = tracers\nlifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\nlifted_tracers = consts_tracer, lifted_init_tracer, xs_tracer\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -533,7 +533,6 @@ def jvp_jaxpr2(jaxpr, zeros):\nreturn jaxpr_out, out_zeros()\n-\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\nprimitive_transposes[pe.compiled_call_p] = partial(call_transpose, pe.compiled_call_p)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -26,7 +26,7 @@ from ..linear_util import thunk, transformation, transformation_with_aux\nfrom ..util import unzip2, safe_zip, safe_map, toposort, partial\nfrom ..core import (Trace, Tracer, new_master, Jaxpr, JaxprEqn, get_aval, pack,\nAbstractValue, AbstractTuple, unit, unitvar, Primitive,\n- call_p)\n+ call_p, TypedJaxpr)\nmap = safe_map\nzip = safe_zip\n@@ -575,6 +575,7 @@ def isnone(x):\nelse:\nraise TypeError(type(x))\n+# TODO revise for typedjaxprs\ndef jaxpr_as_fun(jaxpr, consts, *args):\nconsts = core.full_lower(consts)\nargs = map(core.full_lower, args)\n@@ -621,19 +622,19 @@ def _pack_eqn(invars, outvar):\nreturn core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\n-def partial_eval_jaxpr2(jaxpr, consts, avals, first_components):\n+def partial_eval_jaxpr2(jaxpr, first_components):\n# jaxpr :: d -> c -> a -> (b, c)\n- f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\n+ f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\ncell = []\ndef fun(*vals):\n- pvals = map(as_pval, avals, first_components, vals)\n+ pvals = map(as_pval, jaxpr.in_avals, first_components, vals)\njaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n(out_pv_b, out_pv_c), (out_const_b, out_const_c) = out_pval\ncell.append(((out_pv_b, out_pv_c), jaxpr_2))\nreturn pack((pack((out_const_b, pack(consts_2))), out_const_c))\n- pvals = map(as_pval2, avals, first_components)\n+ pvals = map(as_pval2, jaxpr.in_avals, first_components)\njaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\nlu.wrap_init(fun), pvals, instantiate=True)\nout_pv_2, jaxpr_2 = cell[0]\n@@ -641,15 +642,35 @@ def partial_eval_jaxpr2(jaxpr, consts, avals, first_components):\n# jaxpr_1 :: d1 -> c1 -> a1 -> ((b1, res), c1)\n# jaxpr_2 :: res | d2 -> c2 -> a2 -> (b2, c2)\n# lifted_jaxpr_2 :: res -> d2 -> c2 -> a2 -> (b2, c2)\n- lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\n# doubly_lifted_jaxpr_2 :: d2 -> c2 -> (res, a2) -> (b2, c2)\n+ lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\ndoubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2, _partial_eval_gensym)\nout_pv_b, out_pv_c = out_pv_2\n- first_component_c_out = isnone(out_pv_c)\n+ fc_out = fc_b_out, fc_c_out = isnone(out_pv_b), isnone(out_pv_c)\n+\n+ in_avals_1, in_avals_2 = unzip2(map(_split_avals, first_components,\n+ jaxpr.in_avals))\n+ out_aval_1, out_aval_2 = _split_avals(fc_out, jaxpr.out_aval)\n+\n+ # in_avals_1 is already (d1, c1, a1), and out_aval_2 is already (b2, c2), but\n+ # we must munge:\n+ # 1. form out_aval_1 to include the residuals as ((b1, res), c1)\n+ # 2. form in_avals_2 to include the residuals as (d2, c2, (res, a2))\n+\n+ out_pv, _ = out_pval\n+ (_, res), _ = out_pv\n+ assert isinstance(res, AbstractValue)\n+\n+ b1, c1 = out_aval_1\n+ lifted_out_aval_1 = AbstractTuple((AbstractTuple((b1, res)), c1))\n+\n+ d2, c2, a2 = in_avals_2\n+ lifted_in_avals_2 = AbstractTuple((d2, c2, AbstractTuple((res, a2))))\n- avals_1, avals_2 = unzip2(map(_split_avals, first_components, avals))\n- return ((jaxpr_1, consts_1, avals_1), (doubly_lifted_jaxpr_2, (), avals_2),\n- out_pv_2, first_component_c_out)\n+ typed_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, lifted_out_aval_1)\n+ typed_jaxpr_2 = TypedJaxpr(doubly_lifted_jaxpr_2, (), lifted_in_avals_2,\n+ out_aval_2)\n+ return typed_jaxpr_1, typed_jaxpr_2, fc_out\ndef _move_and_pair_arg(jaxpr, newvar):\nmoved_jaxpr = jaxpr.copy()\n@@ -663,13 +684,13 @@ def _move_and_pair_arg(jaxpr, newvar):\ndef _split_avals(first_component, aval):\nt = type(first_component)\nif t is tuple:\n- assert type(aval) is core.AbstractTuple\n+ assert type(aval) is AbstractTuple\nreturn unzip2(map(_split_avals, first_component, aval))\nelif t is bool:\nif first_component:\n- return aval, core.AbstractTuple(())\n+ return aval, AbstractTuple(())\nelse:\n- return core.AbstractTuple(()), aval\n+ return AbstractTuple(()), aval\nelse:\nraise TypeError(t)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -32,8 +32,8 @@ print jvp(np.cumsum, (x,), (x*0.1,))\nprint jvp(cumsum, (x,), (x*0.1,))\nprint\n-# print linearize(np.cumsum, x)[1](x*0.1)\n-# print linearize(cumsum, x)[1](x*0.1)\n+print linearize(np.cumsum, x)[1](x*0.1)\n+print linearize(cumsum, x)[1](x*0.1)\n# ###\n"
}
] | Python | Apache License 2.0 | google/jax | refactored partial_eval_jaxpr2
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 23.04.2019 09:15:16 | 25,200 | 673682302142de32b8913dd41864e29dd3b5ccfe | victory! patial eval of scan (+ linearize!) | [
{
"change_type": "MODIFY",
"old_path": "jax/abstract_arrays.py",
"new_path": "jax/abstract_arrays.py",
"diff": "@@ -49,6 +49,9 @@ class UnshapedArray(core.AbstractValue):\ndef __eq__(self, other):\nreturn type(self) is type(other) and self.dtype == other.dtype\n+ def __ne__(self, other):\n+ return not self == other\n+\ndef __hash__(self):\nreturn hash(str(self.dtype))\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -24,13 +24,13 @@ import six\nimport types\nfrom . import linear_util as lu\n-from .util import unzip2, safe_zip, safe_map, partial\n+from .util import unzip2, safe_zip, safe_map, partial, curry\nfrom .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\ncheck_leaks = False\n# TODO(dougalm): put this behind a flag that's enabled during testing\n-skip_checks = True # not __debug__ # google doesn't use -O\n+skip_checks = False # not __debug__ # google doesn't use -O\nzip = safe_zip\nmap = safe_map\n@@ -64,6 +64,20 @@ class TypedJaxpr(namedtuple('TypedJaxpr', ['jaxpr', 'literals', 'in_avals', 'out\nsuper(TypedJaxpr, self).__init__(jaxpr, literals, in_avals, out_aval)\n+@curry\n+def jaxpr_as_fun(typed_jaxpr, *args):\n+ from jax.lax import _abstractify\n+ for arg, in_aval in zip(args, typed_jaxpr.in_avals):\n+ arg_aval, _ = _abstractify(arg)\n+ if arg_aval != in_aval:\n+ raise TypeError(\"input type tag mismatch\")\n+ out = eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args)\n+ out_aval, _ = _abstractify(out)\n+ if out_aval != typed_jaxpr.out_aval:\n+ raise TypeError(\"output type tag mismatch\")\n+ return out\n+\n+\nJaxprEqn = namedtuple('JaxprEqn', ['invars', 'outvars', 'primitive',\n'bound_subjaxprs', 'destructure', 'params'])\n@@ -466,6 +480,11 @@ class JaxTuple(tuple):\nclass AbstractTuple(AbstractValue, tuple):\n+ def __new__(cls, elts=()):\n+ elts = tuple(elts)\n+ assert skip_checks or all(isinstance(e, AbstractValue) for e in elts)\n+ return tuple.__new__(cls, elts)\n+\n@staticmethod\ndef _iter(tracer):\nreturn map(full_lower, tracer.unpack())\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -40,10 +40,6 @@ def convert_zeros(keep_symbolic, example, tangent):\nelse:\nreturn tangent\n-@curry\n-def jaxpr_as_fun(jaxpr, consts, *args):\n- return core.eval_jaxpr(jaxpr, consts, (), *args)\n-\n_call_const = pe.gensym('_consts')\n@@ -59,7 +55,7 @@ def call_initial(f, *args):\ndef _call_initial_impl(*args, **kwargs):\njaxpr = kwargs.pop('jaxpr')\nconsts = kwargs.pop('consts')\n- return jaxpr_as_fun(jaxpr, consts)(*args)\n+ return core.jaxpr_as_fun(jaxpr, consts)(*args)\ndef _call_initial_jvp(primals, tangents, jaxpr, consts):\navals = [aval for (aval, _) in map(_abstractify, primals)]\n@@ -159,7 +155,7 @@ def update_arrays(i, aval, xs, x):\n_scan_const = pe.gensym('_consts')\n-# scan :: (a -> c -> (b, c)) -> c -> [a] -> ([b], c)\n+# scan :: (c -> a -> (b, c)) -> c -> [a] -> (c, [b])\ndef scan_initial(f, init, xs):\ncarry_pval = carry_aval, _ = _abstractify(init)\nxs_aval, _ = _abstractify(xs)\n@@ -167,41 +163,40 @@ def scan_initial(f, init, xs):\nx_pval = pe.PartialVal((x_aval, core.unit))\njaxpr, pval_out, consts = pe.trace_to_jaxpr(\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n- (y_aval, carry_aval_out), _ = pval_out\n+ (carry_aval_out, y_aval), _ = pval_out\nassert carry_aval == carry_aval_out\nlifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_const)\nconsts_aval, _ = _abstractify(core.pack(consts))\nin_avals = (consts_aval, carry_aval, x_aval)\n- out_aval = core.AbstractTuple((y_aval, carry_aval))\n+ out_aval = core.AbstractTuple((carry_aval, y_aval))\njaxpr = core.TypedJaxpr(lifted_jaxpr, (), in_avals, out_aval)\n- return scan_initial_p.bind(core.pack(consts), init, xs, jaxpr=jaxpr)\n+ length = leading_dim_size(xs)\n+ return scan_initial_p.bind(core.pack(consts), init, xs,\n+ length=length, jaxpr=jaxpr)\n-# scan_p :: (d -> a -> c -> (b, c)) -> d -> c -> [a] -> ([b], c)\n-def _scan_initial_impl(consts, init, xs, jaxpr):\n- # TODO maybe can do this work in the traceable, not every impl call\n- length = leading_dim_size(xs)\n+def _scan_initial_impl(consts, init, xs, length, jaxpr):\n_, _, x_aval = jaxpr.in_avals\n- y_aval, _ = jaxpr.out_aval\n+ _, y_aval = jaxpr.out_aval\nys_aval = promote_aval_rank(length, y_aval)\ndef body_fun(i, vals):\ncarry, ys = vals\nx = index_arrays(i, x_aval, xs)\n- y, carry_out = jaxpr_as_fun(jaxpr.jaxpr, jaxpr.literals)(consts, x, carry)\n+ carry_out, y = core.jaxpr_as_fun(jaxpr)(consts, carry, x)\nys_out = update_arrays(i, y_aval, ys, y)\nreturn (carry_out, ys_out)\nys_init = empty_arrays(ys_aval)\ncarry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\n- return core.pack((ys, carry))\n+ return core.pack((carry, ys))\n-def _scan_initial_jvp(primals, tangents, jaxpr):\n+def _scan_initial_jvp(primals, tangents, length, jaxpr):\nconsts, init, xs = primals\nconsts_dot, init_dot, xs_dot = tangents\nconsts_aval, carry_aval, x_aval = jaxpr.in_avals\n- y_aval, _ = jaxpr.out_aval\n+ _, y_aval = jaxpr.out_aval\nwhere_consts_zeros = ad.get_zeros(consts_dot)\nwhere_init_zeros = ad.get_zeros(init_dot)\n@@ -211,7 +206,7 @@ def _scan_initial_jvp(primals, tangents, jaxpr):\nwhile True:\nwhere_zeros = (where_consts_zeros, where_carry_zeros, where_xs_zeros)\njaxpr_jvp, where_zeros_out = ad.jvp_jaxpr2(jaxpr, where_zeros)\n- where_ys_zeros, where_carry_zeros_out = where_zeros_out\n+ where_carry_zeros_out, where_ys_zeros = where_zeros_out\nif where_carry_zeros_out == where_carry_zeros:\nbreak\nelse:\n@@ -227,15 +222,15 @@ def _scan_initial_jvp(primals, tangents, jaxpr):\ninit_dual = core.pack((init, nonzero_init_dot))\nxs_dual = core.pack((xs, nonzero_xs_dot))\n- ys_dual, carry_out_dual = scan_initial_p.bind(\n- consts_dual, init_dual, xs_dual, jaxpr=jaxpr_jvp)\n+ carry_out_dual, ys_dual = scan_initial_p.bind(\n+ consts_dual, init_dual, xs_dual, length=length, jaxpr=jaxpr_jvp)\nys, ys_dot = ys_dual\nys_dot = ad.put_zeros(ad.TangentTuple, where_ys_zeros, ys_dot)\ncarry_out, carry_out_dot = carry_out_dual\ncarry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\n- return core.pack((ys, carry_out)), ad.TangentTuple((ys_dot, carry_out_dot))\n+ return core.pack((carry_out, ys)), ad.TangentTuple((carry_out_dot, ys_dot))\ndef instantiate_zeros(example, tangent, keep_symbolic):\nif tangent is ad.zero:\n@@ -264,6 +259,7 @@ def binary_lattice_join(a, b):\ndef _scan_initial_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\n+ length = kwargs.pop('length')\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfc_consts, fc_init, fc_xs = map(is_const, in_pvs)\n@@ -271,31 +267,28 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nwhile True:\nfirst_components = (fc_consts, fc_carry, fc_xs)\njaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr2(jaxpr, first_components)\n- fc_ys, fc_carry_out = fc_out\n+ fc_carry_out, fc_ys = fc_out\nif fc_carry_out == fc_carry:\nbreak\nelse:\nfc_carry = binary_lattice_join(fc_carry, fc_carry_out)\n- import ipdb; ipdb.set_trace()\n-\nconsts_tracer, init_tracer, xs_tracer = tracers\nlifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\nlifted_tracers = consts_tracer, lifted_init_tracer, xs_tracer\nin_pvs, in_consts = unzip2([t.pval for t in lifted_tracers])\n- # TODO first_components reconciled with aval splitting\n- # TODO avals_2 reconciled with residuals\n+ out_pv = _put_known_pvs(fc_out, jaxpr.out_aval)\n- (ys, residuals), out_const = scan_initial_p.bind(\n- *in_consts, avals=avals_1, jaxpr=jaxpr_1, true_consts=consts_1)\n+ out_carry, (ys, residuals) = scan_initial_p.bind(\n+ *in_consts, length=length, jaxpr=jaxpr_1)\n+ out_const = core.pack((out_carry, ys))\nresidual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\nd, c, a = lifted_tracers\n- new_tracers = (d, c, core.pack((residual_tracers, a)))\n+ new_tracers = (d, c, core.pack((a, residual_tracers)))\neqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), False,\n- dict(jaxpr=jaxpr_2, true_consts=consts_2, avals=avals_2))\n- import ipdb; ipdb.set_trace()\n- return pe.JaxprTracer((trace, pe.PartialVal((out_pv, out_const)), eqn))\n+ dict(length=length, jaxpr=jaxpr_2))\n+ return pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\ndef _lift_tracer(trace, tracer, is_const):\nt = type(is_const)\n@@ -310,6 +303,14 @@ def _lift_tracer(trace, tracer, is_const):\nelse:\nraise TypeError(t)\n+def _put_known_pvs(is_known, aval):\n+ if is_known is True:\n+ return None\n+ elif is_known is False:\n+ return aval\n+ else:\n+ return pe.JaxprTracerTuple(map(_put_known_pvs, is_known, aval))\n+\nscan_initial_p = core.Primitive(\"scan_initial\")\nscan_initial_p.def_impl(_scan_initial_impl)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -600,10 +600,10 @@ def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\nlu.wrap_init(fun), pvals, instantiate=True)\nout_pv_2, jaxpr_2 = cell[0]\nlifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\n- first_component_out = isnone(out_pv_2)\n+ fc_out = isnone(out_pv_2)\n# jaxpr_1 :: a1 -> b1 -> (c1, res)\n# lifted_jaxpr_2 :: res -> a2 -> b2 -> c2\n- return (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, first_component_out\n+ return (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, fc_out\ndef _closure_convert_jaxpr(jaxpr, newvar):\n@@ -623,49 +623,50 @@ def _pack_eqn(invars, outvar):\ndef partial_eval_jaxpr2(jaxpr, first_components):\n- # jaxpr :: d -> c -> a -> (b, c)\n- f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\n+ # jaxpr :: d -> c -> a -> (c, b)\n+ f = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\ncell = []\n+ # we do some final-style output munging to place residuals\n+ # fun :: d1 -> c1 -> a1 -> (c1, (b1, res))\ndef fun(*vals):\npvals = map(as_pval, jaxpr.in_avals, first_components, vals)\njaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n- (out_pv_b, out_pv_c), (out_const_b, out_const_c) = out_pval\n- cell.append(((out_pv_b, out_pv_c), jaxpr_2))\n- return pack((pack((out_const_b, pack(consts_2))), out_const_c))\n+ (out_pv_c, out_pv_b), (out_const_c, out_const_b) = out_pval\n+ cell.append((out_pv_c, out_pv_b, jaxpr_2))\n+ return pack((out_const_c, pack((out_const_b, pack(consts_2)))))\npvals = map(as_pval2, jaxpr.in_avals, first_components)\njaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\nlu.wrap_init(fun), pvals, instantiate=True)\n- out_pv_2, jaxpr_2 = cell[0]\n+ out_pv_c, out_pv_b, jaxpr_2 = cell[0]\n- # jaxpr_1 :: d1 -> c1 -> a1 -> ((b1, res), c1)\n- # jaxpr_2 :: res | d2 -> c2 -> a2 -> (b2, c2)\n- # lifted_jaxpr_2 :: res -> d2 -> c2 -> a2 -> (b2, c2)\n- # doubly_lifted_jaxpr_2 :: d2 -> c2 -> (res, a2) -> (b2, c2)\n+ # jaxpr_1 :: d1 -> c1 -> a1 -> (c1, (b1, res))\n+ # jaxpr_2 :: res | d2 -> c2 -> a2 -> (c2, b2)\n+ # lifted_jaxpr_2 :: res -> d2 -> c2 -> a2 -> (c2, b2)\n+ # doubly_lifted_jaxpr_2 :: d2 -> c2 -> (a2, res) -> (c2, b2)\nlifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\ndoubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2, _partial_eval_gensym)\n- out_pv_b, out_pv_c = out_pv_2\n- fc_out = fc_b_out, fc_c_out = isnone(out_pv_b), isnone(out_pv_c)\n+ fc_out = fc_c_out, fc_b_out = isnone(out_pv_c), isnone(out_pv_b)\nin_avals_1, in_avals_2 = unzip2(map(_split_avals, first_components,\njaxpr.in_avals))\nout_aval_1, out_aval_2 = _split_avals(fc_out, jaxpr.out_aval)\n- # in_avals_1 is already (d1, c1, a1), and out_aval_2 is already (b2, c2), but\n+ # in_avals_1 is already (d1, c1, a1), and out_aval_2 is already (c2, b2), but\n# we must munge:\n- # 1. form out_aval_1 to include the residuals as ((b1, res), c1)\n- # 2. form in_avals_2 to include the residuals as (d2, c2, (res, a2))\n+ # 1. form out_aval_1 to include the residuals as (c1, (b1, res))\n+ # 2. form in_avals_2 to include the residuals as (d2, c2, (a2, res))\nout_pv, _ = out_pval\n- (_, res), _ = out_pv\n+ _, (_, res) = out_pv\nassert isinstance(res, AbstractValue)\n- b1, c1 = out_aval_1\n- lifted_out_aval_1 = AbstractTuple((AbstractTuple((b1, res)), c1))\n+ c1, b1 = out_aval_1\n+ lifted_out_aval_1 = AbstractTuple((c1, AbstractTuple((b1, res))))\nd2, c2, a2 = in_avals_2\n- lifted_in_avals_2 = AbstractTuple((d2, c2, AbstractTuple((res, a2))))\n+ lifted_in_avals_2 = (d2, c2, AbstractTuple((a2, res)))\ntyped_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, lifted_out_aval_1)\ntyped_jaxpr_2 = TypedJaxpr(doubly_lifted_jaxpr_2, (), lifted_in_avals_2,\n@@ -678,14 +679,15 @@ def _move_and_pair_arg(jaxpr, newvar):\npair_var = newvar()\nmoved_jaxpr.invars = [d, c, pair_var]\nmoved_jaxpr.eqns = (\n- [_unpack_eqn(pair_var, [res, a])] + list(jaxpr.eqns))\n+ [_unpack_eqn(pair_var, [a, res])] + list(jaxpr.eqns))\nreturn moved_jaxpr\ndef _split_avals(first_component, aval):\nt = type(first_component)\nif t is tuple:\nassert type(aval) is AbstractTuple\n- return unzip2(map(_split_avals, first_component, aval))\n+ avals1, avals2 = unzip2(map(_split_avals, first_component, aval))\n+ return AbstractTuple(avals1), AbstractTuple(avals2)\nelif t is bool:\nif first_component:\nreturn aval, AbstractTuple(())\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/scan.py",
"new_path": "jax/scan.py",
"diff": "@@ -50,7 +50,7 @@ def scan_reference(f, init, xs):\ncarry = init\nys = []\nfor x in xs:\n- (y, carry) = f(x, carry)\n+ (carry, y) = f(x, carry)\nys.append(y)\nys = core.pack(map(np.stack, zip(*ys)))\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -2,7 +2,6 @@ from functools import partial\nimport numpy as onp\n-from jax.scan import scan_reference\nfrom jax.initial_style import scan_initial\nfrom jax.core import pack\nimport jax.core as core\n@@ -10,30 +9,40 @@ import jax.numpy as np\nfrom jax import jvp, linearize\nfrom jax import lax\n-# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n-\n###\n-def cumsum(xs):\n- def f(x, carry):\n- carry = carry + x\n- return pack((carry, carry))\n-\n- ys, _ = scan_initial(f, 0.0, xs)\n- return ys\n-\n-x = np.linspace(0, 3, 4)\n-\n-print np.cumsum(x)\n-print cumsum(x)\n+def scan_reference(f, init, xs):\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ (carry, y) = f(carry, x)\n+ ys.append(y)\n+ ys = np.stack(ys)\n+ return core.pack((np.array(carry), ys))\n+\n+d = np.zeros(2)\n+def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+as_ = np.ones((5, 3))\n+c = np.ones(4)\n+\n+print scan_reference(f, c, as_)\n+print scan_initial(f, c, as_)\nprint\n-print jvp(np.cumsum, (x,), (x*0.1,))\n-print jvp(cumsum, (x,), (x*0.1,))\n+print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\nprint\n-print linearize(np.cumsum, x)[1](x*0.1)\n-print linearize(cumsum, x)[1](x*0.1)\n+print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n+print\n# ###\n"
}
] | Python | Apache License 2.0 | google/jax | victory! patial eval of scan (+ linearize!)
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 24.04.2019 16:40:29 | 25,200 | 1c9035efca61dcddc5c8cd976ba47bb50f55b62e | start scan transpose, but "nonlinear pack"!! | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -61,6 +61,7 @@ class TypedJaxpr(namedtuple('TypedJaxpr', ['jaxpr', 'literals', 'in_avals', 'out\nassert type(jaxpr) is Jaxpr\nassert len(literals) == len(jaxpr.constvars)\nassert len(in_avals) == len(jaxpr.invars)\n+ assert not jaxpr.freevars\nsuper(TypedJaxpr, self).__init__(jaxpr, literals, in_avals, out_aval)\n@@ -70,11 +71,11 @@ def jaxpr_as_fun(typed_jaxpr, *args):\nfor arg, in_aval in zip(args, typed_jaxpr.in_avals):\narg_aval, _ = _abstractify(arg)\nif arg_aval != in_aval:\n- raise TypeError(\"input type tag mismatch\")\n+ raise TypeError(\"input type mismatch\")\nout = eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args)\nout_aval, _ = _abstractify(out)\nif out_aval != typed_jaxpr.out_aval:\n- raise TypeError(\"output type tag mismatch\")\n+ raise TypeError(\"output type mismatch\")\nreturn out\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -23,9 +23,6 @@ def pvals_with_zeros(zero_components, aval):\nreturn pe.PartialVal((core.AbstractTuple(avals),\ncore.JaxprTracerTuple(consts)))\n-def transpose_jaxpr(jaxpr, avals, tangent_components):\n- assert False\n-\nstrip_zeros = partial(ad.strip_zeros, core.unit, core.pack)\nstrip_zeros_aval = partial(ad.strip_zeros, core.AbstractTuple(()), core.AbstractTuple)\n@@ -155,7 +152,7 @@ def update_arrays(i, aval, xs, x):\n_scan_const = pe.gensym('_consts')\n-# scan :: (c -> a -> (b, c)) -> c -> [a] -> (c, [b])\n+# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\ndef scan_initial(f, init, xs):\ncarry_pval = carry_aval, _ = _abstractify(init)\nxs_aval, _ = _abstractify(xs)\n@@ -285,7 +282,14 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nout_const = core.pack((out_carry, ys))\nresidual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\nd, c, a = lifted_tracers\n- new_tracers = (d, c, core.pack((a, residual_tracers)))\n+ new_tracers = (d, c, core.pack((a, residual_tracers))) # TODO nonlin pack\n+ # TODO adapt scan to\n+ # option #1:\n+ # scan :: (d -> c -> a -> b) -> d -> c -> [a] -> [b]\n+ # scan :: (d -> c -> a -> alin -> b) -> d -> c -> [a] -> [alin] -> [b]\n+ # option #2:\n+ # extend jaxpr language to have destructuring tuples of variables in invars\n+ # b = g(a, (x, a))\neqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), False,\ndict(length=length, jaxpr=jaxpr_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n@@ -312,7 +316,44 @@ def _put_known_pvs(is_known, aval):\nreturn pe.JaxprTracerTuple(map(_put_known_pvs, is_known, aval))\n+def _scan_initial_transpose(ct, consts, init, xs, length, jaxpr):\n+ assert consts is None and init is None\n+ import ipdb; ipdb.set_trace() # TODO but xs is also None!\n+\n+ # jaxpr :: d -> c -> (a, res) -> (c, b)\n+ # jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n+ # jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n+ jaxpr_lifted = _move_res_and_uncurry(jaxpr)\n+ jaxpr_lifted_trans = transpose_jaxpr2(jaxpr_lifted)\n+ jaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans)\n+\n+ assert False\n+ # c_bar, bs_bar = ct\n+ # d_bar = zeros\n+\n+ # return scan_initial_p.bind(core.unit,\n+\n+# transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a)\n+def transpose_jaxpr2(jaxpr):\n+ assert len(jaxpr.in_avals) == 2\n+ def transposed(res, b_bar):\n+ _, a_bar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (),\n+ (res, None), b_bar)\n+ return a_bar\n+ return make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\n+\n+def make_typed_jaxpr(py_callable, in_avals):\n+ pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\n+ jaxpr, pval_out, consts = pe.trace_to_jaxpr(\n+ lu.wrap_init(py_callable), pvals, instantiate=True)\n+ out_aval, _ = pval_out\n+ assert isinstance(out_aval, core.AbstractValue)\n+ return core.TypedJaxpr(jaxpr, consts, in_avals, out_aval)\n+\n+\nscan_initial_p = core.Primitive(\"scan_initial\")\nscan_initial_p.def_impl(_scan_initial_impl)\nad.primitive_jvps[scan_initial_p] = _scan_initial_jvp\n+ad.primitive_transposes[scan_initial_p] = _scan_initial_transpose\npe.custom_partial_eval_rules[scan_initial_p] = _scan_initial_partial_eval\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -6,7 +6,7 @@ from jax.initial_style import scan_initial\nfrom jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\n-from jax import jvp, linearize\n+from jax import jvp, linearize, grad\nfrom jax import lax\n###\n@@ -44,6 +44,9 @@ print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\nprint linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\nprint\n+# print grad(lambda c, as_: scan_reference(f, c, as_)[0].sum())(c, as_)\n+print grad(lambda c, as_: list(scan_initial(f, c, as_))[0].sum())(c, as_)\n+print\n# ###\n"
}
] | Python | Apache License 2.0 | google/jax | start scan transpose, but "nonlinear pack"!!
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 25.04.2019 10:43:50 | 25,200 | a17f8e4ca87b818bf0740ed2db5d740f3e18e491 | add jaxpr eqn structured input, transpose progress | [
{
"change_type": "MODIFY",
"old_path": "jax/abstract_arrays.py",
"new_path": "jax/abstract_arrays.py",
"diff": "@@ -176,3 +176,10 @@ array_types = [onp.ndarray, onp.float64, onp.float32, onp.float16,\nfor t in array_types:\ncore.pytype_aval_mappings[t] = ConcreteArray\nad_util.jaxval_zeros_likers[t] = zeros_like_array\n+\n+\n+def zeros_like_shaped_array(aval):\n+ assert isinstance(aval, ShapedArray)\n+ return onp.zeros(aval.shape, dtype=aval.dtype)\n+\n+ad_util.aval_zeros_likers[ShapedArray] = zeros_like_shaped_array\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/ad_util.py",
"new_path": "jax/ad_util.py",
"diff": "@@ -16,7 +16,7 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-from .core import JaxTuple, lattice_join, Primitive\n+from .core import JaxTuple, lattice_join, Primitive, AbstractTuple\nfrom .tree_util import register_pytree_node\nfrom .util import safe_map\n@@ -52,6 +52,15 @@ jaxval_zeros_likers = {}\njaxval_zeros_likers[JaxTuple] = zeros_like_impl_jaxtuple\n+def zeros_like_aval(aval):\n+ return aval_zeros_likers[type(aval)](aval)\n+aval_zeros_likers = {}\n+\n+def zeros_like_abstract_tuple(tup):\n+ return AbstractTuple(map(zeros_like_aval, tup))\n+aval_zeros_likers[AbstractTuple] = zeros_like_abstract_tuple\n+\n+\ndef zeros_like_jaxval(val):\nreturn zeros_like_p.bind(val)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -80,7 +80,8 @@ def jaxpr_as_fun(typed_jaxpr, *args):\nJaxprEqn = namedtuple('JaxprEqn', ['invars', 'outvars', 'primitive',\n- 'bound_subjaxprs', 'destructure', 'params'])\n+ 'bound_subjaxprs', 'restructure',\n+ 'destructure', 'params'])\nclass Primitive(object):\ndef __init__(self, name):\n@@ -137,7 +138,11 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nmap(write, jaxpr.invars, args)\nmap(write, jaxpr.freevars, freevar_vals)\nfor eqn in jaxpr.eqns:\n+ if not eqn.restructure:\nin_vals = map(read, eqn.invars)\n+ else:\n+ in_vals = [pack(map(read, invars)) if type(invars) is tuple else read(invars)\n+ for invars in eqn.invars]\nsubfuns = [partial(eval_jaxpr, subjaxpr, map(read, const_bindings),\nmap(read, freevar_bindings))\nfor subjaxpr, const_bindings, freevar_bindings\n@@ -615,7 +620,11 @@ def check_jaxpr(jaxpr):\nmap(write, jaxpr.freevars)\nmap(write, jaxpr.invars)\nfor eqn in jaxpr.eqns:\n+ if not eqn.restructure:\nmap(read, eqn.invars)\n+ else:\n+ [map(read, invar) if type(invar) is tuple else read(invar)\n+ for invar in eqn.invars]\nfor subjaxpr, constvars, freevars in eqn.bound_subjaxprs:\nmap(read, freevars)\nmap(read_const, constvars)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -11,6 +11,7 @@ from jax.lax import _abstractify, _unpack_eqn\nfrom jax.abstract_arrays import ShapedArray\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import ad\n+from jax import ad_util\ndef pvals_with_zeros(zero_components, aval):\n@@ -98,7 +99,7 @@ def _call_initial_partial_eval(trace, *tracers, **kwargs):\n*in_consts, jaxpr=jaxpr_1, consts=consts_1)\nresidual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\neqn = core.JaxprEqn((residual_tracers,) + tracers, None, call_initial_p, (),\n- False, dict(jaxpr=jaxpr_2, consts=consts_2))\n+ False, False, dict(jaxpr=jaxpr_2, consts=consts_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n@@ -150,7 +151,7 @@ def update_arrays(i, aval, xs, x):\nelse:\nreturn lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n-_scan_const = pe.gensym('_consts')\n+_scan_newvar = pe.gensym('_scan')\n# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\ndef scan_initial(f, init, xs):\n@@ -162,26 +163,27 @@ def scan_initial(f, init, xs):\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n(carry_aval_out, y_aval), _ = pval_out\nassert carry_aval == carry_aval_out\n- lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_const)\n+ lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_newvar)\nconsts_aval, _ = _abstractify(core.pack(consts))\nin_avals = (consts_aval, carry_aval, x_aval)\nout_aval = core.AbstractTuple((carry_aval, y_aval))\njaxpr = core.TypedJaxpr(lifted_jaxpr, (), in_avals, out_aval)\nlength = leading_dim_size(xs)\nreturn scan_initial_p.bind(core.pack(consts), init, xs,\n- length=length, jaxpr=jaxpr)\n+ forward=True, length=length, jaxpr=jaxpr)\n-def _scan_initial_impl(consts, init, xs, length, jaxpr):\n+def _scan_initial_impl(consts, init, xs, forward, length, jaxpr):\n_, _, x_aval = jaxpr.in_avals\n_, y_aval = jaxpr.out_aval\nys_aval = promote_aval_rank(length, y_aval)\ndef body_fun(i, vals):\n+ idx = i if forward else length - i - 1\ncarry, ys = vals\n- x = index_arrays(i, x_aval, xs)\n+ x = index_arrays(idx, x_aval, xs)\ncarry_out, y = core.jaxpr_as_fun(jaxpr)(consts, carry, x)\n- ys_out = update_arrays(i, y_aval, ys, y)\n+ ys_out = update_arrays(idx, y_aval, ys, y)\nreturn (carry_out, ys_out)\nys_init = empty_arrays(ys_aval)\n@@ -189,7 +191,7 @@ def _scan_initial_impl(consts, init, xs, length, jaxpr):\nreturn core.pack((carry, ys))\n-def _scan_initial_jvp(primals, tangents, length, jaxpr):\n+def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\nconsts, init, xs = primals\nconsts_dot, init_dot, xs_dot = tangents\nconsts_aval, carry_aval, x_aval = jaxpr.in_avals\n@@ -220,7 +222,8 @@ def _scan_initial_jvp(primals, tangents, length, jaxpr):\nxs_dual = core.pack((xs, nonzero_xs_dot))\ncarry_out_dual, ys_dual = scan_initial_p.bind(\n- consts_dual, init_dual, xs_dual, length=length, jaxpr=jaxpr_jvp)\n+ consts_dual, init_dual, xs_dual,\n+ forward=forward, length=length, jaxpr=jaxpr_jvp)\nys, ys_dot = ys_dual\nys_dot = ad.put_zeros(ad.TangentTuple, where_ys_zeros, ys_dot)\n@@ -257,6 +260,8 @@ def binary_lattice_join(a, b):\ndef _scan_initial_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\nlength = kwargs.pop('length')\n+ forward = kwargs.pop('forward')\n+ assert not kwargs\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nfc_consts, fc_init, fc_xs = map(is_const, in_pvs)\n@@ -278,20 +283,13 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nout_pv = _put_known_pvs(fc_out, jaxpr.out_aval)\nout_carry, (ys, residuals) = scan_initial_p.bind(\n- *in_consts, length=length, jaxpr=jaxpr_1)\n+ *in_consts, forward=forward, length=length, jaxpr=jaxpr_1)\nout_const = core.pack((out_carry, ys))\n- residual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\n+ residuals_tracer = trace.new_instantiated_const(core.pack(residuals))\nd, c, a = lifted_tracers\n- new_tracers = (d, c, core.pack((a, residual_tracers))) # TODO nonlin pack\n- # TODO adapt scan to\n- # option #1:\n- # scan :: (d -> c -> a -> b) -> d -> c -> [a] -> [b]\n- # scan :: (d -> c -> a -> alin -> b) -> d -> c -> [a] -> [alin] -> [b]\n- # option #2:\n- # extend jaxpr language to have destructuring tuples of variables in invars\n- # b = g(a, (x, a))\n- eqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), False,\n- dict(length=length, jaxpr=jaxpr_2))\n+ new_tracers = (d, c, (a, residuals_tracer))\n+ eqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), True, False,\n+ dict(forward=forward, length=length, jaxpr=jaxpr_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\ndef _lift_tracer(trace, tracer, is_const):\n@@ -316,25 +314,57 @@ def _put_known_pvs(is_known, aval):\nreturn pe.JaxprTracerTuple(map(_put_known_pvs, is_known, aval))\n-def _scan_initial_transpose(ct, consts, init, xs, length, jaxpr):\n+def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\nassert consts is None and init is None\n- import ipdb; ipdb.set_trace() # TODO but xs is also None!\n+ assert type(xs) is tuple\n+ a, res = xs\n+ assert a is None and res is not None\n# jaxpr :: d -> c -> (a, res) -> (c, b)\n# jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n# jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n# jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n- jaxpr_lifted = _move_res_and_uncurry(jaxpr)\n+ jaxpr_lifted = _move_res_and_uncurry(jaxpr, _scan_newvar)\n+ import ipdb; ipdb.set_trace()\njaxpr_lifted_trans = transpose_jaxpr2(jaxpr_lifted)\njaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans)\n- assert False\n- # c_bar, bs_bar = ct\n- # d_bar = zeros\n+ ct_c, ct_bs = ct\n+ carry_ct = core.pack((ct_c, ad_util.zeros_like_aval(jaxpr.in_avals[0])))\n+\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n+ # scan_p.bind :: (d -> c -> a -> (c, b)) -> d -> c -> [a] -> (c, [b])\n+ scan_initial_p.bind(\n+ core.unit, carry_ct, core.pack((ct_bs, res)),\n+ forward=not forward, length=length, jaxpr=jaxpr_trans)\n+\n+def _move_res_and_uncurry(jaxpr, newvar):\n+ # jaxpr :: d -> c -> (a, res) -> (c, b)\n+ # jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n+ assert len(jaxpr.in_avals) == 3\n+ assert type(jaxpr.in_avals[2]) is core.AbstractTuple\n+\n+ d_aval, c_aval, (a_aval, res_aval) = jaxpr.in_avals\n+ in_avals = [res_aval, core.AbstractTuple((d_aval, c_aval, a_aval))]\n+\n+ d, c, a_res = jaxpr.jaxpr.invars\n+ a = newvar()\n+ res = newvar()\n+ d_c_a = newvar()\n+ invars = [res, d_c_a]\n+ eqns = (\n+ [pe._unpack_eqn(d_c_a, [d, c, a]),\n+ pe._pack_eqn([a, res], a_res)]\n+ + list(jaxpr.jaxpr.eqns))\n+\n+ new_jaxpr = core.Jaxpr(jaxpr.jaxpr.constvars, jaxpr.jaxpr.freevars,\n+ invars, jaxpr.jaxpr.outvar, eqns)\n+ core.skip_checks or core.check_jaxpr(new_jaxpr)\n+ return core.TypedJaxpr(new_jaxpr, jaxpr.literals, in_avals, jaxpr.out_aval)\n- # return scan_initial_p.bind(core.unit,\n# transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a)\n+# TODO either top-level restructure, or else munge somehow\ndef transpose_jaxpr2(jaxpr):\nassert len(jaxpr.in_avals) == 2\ndef transposed(res, b_bar):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -145,6 +145,9 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\ndef read_cotangent(v):\nreturn ct_env.get(v, zero)\n+ def read_primal(v):\n+ return primal_env.get(v)\n+\nprimal_env = {v: val for v, val in zip(jaxpr.freevars, freevar_vals)\nif val is not None}\nprimal_env.update(zip(jaxpr.constvars, consts))\n@@ -155,12 +158,16 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\nfor eqn in jaxpr.eqns[::-1]:\ncts_in = map(read_cotangent, eqn.outvars)\nct_in = TangentTuple(cts_in) if eqn.destructure else cts_in[0]\n- invals = map(primal_env.get, eqn.invars)\n+ if not eqn.restructure:\n+ invals = map(read_primal, eqn.invars)\n+ else:\n+ invals = [tuple(map(read_primal, v)) if type(v) is tuple\n+ else read_primal(v) for v in eqn.invars]\nif eqn.bound_subjaxprs:\nsubjaxprs, sub_consts, sub_freevar_vals = unzip3([\n(subjaxpr,\n- map(primal_env.get, const_vars),\n- map(primal_env.get, bound_vars))\n+ map(read_primal, const_vars),\n+ map(read_primal, bound_vars))\nfor subjaxpr, const_vars, bound_vars in eqn.bound_subjaxprs])\ncts_out, ct_free_vars_out = get_primitive_transpose(eqn.primitive)(\neqn.params, subjaxprs, sub_consts, sub_freevar_vals, invals, ct_in)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -74,11 +74,11 @@ class JaxprTrace(Trace):\navals = [t.aval for t in tracers]\nout_aval = primitive.abstract_eval(*avals, **params)\npartial_val = PartialVal((out_aval, unit))\n- eqn = JaxprEqn(tracers, None, primitive, (), False, params)\n+ eqn = JaxprEqn(tracers, None, primitive, (), False, False, params)\nreturn JaxprTracer(self, partial_val, eqn)\ndef pack(self, tracers):\n- eqn = JaxprEqn(tracers, None, core.pack_p, (), False, {})\n+ eqn = JaxprEqn(tracers, None, core.pack_p, (), False, False, {})\npval = pack_pvals([t.pval for t in tracers])\nreturn JaxprTracer(self, pval, eqn)\n@@ -92,7 +92,8 @@ class JaxprTrace(Trace):\nconst_tracers = map(self.new_instantiated_const, consts)\nenv_tracers = map(self.full_raise, env)\nbound_subjaxpr = (jaxpr, const_tracers, env_tracers)\n- eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False, params)\n+ eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,),\n+ False, False, params)\nreturn JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\ndef process_map(self, call_primitive, f, tracers, params):\n@@ -109,7 +110,8 @@ class JaxprTrace(Trace):\njaxpr_converted.invars = list(it.chain(jaxpr.constvars, jaxpr.invars))\ninvars = tuple(it.chain(const_tracers, tracers))\nbound_subjaxpr = (jaxpr_converted, (), env)\n- eqn = JaxprEqn(invars, None, call_primitive, (bound_subjaxpr,), False, params)\n+ eqn = JaxprEqn(invars, None, call_primitive, (bound_subjaxpr,),\n+ False, False, params)\nreturn JaxprTracer(self, PartialVal((out_pv, out_const)), eqn)\ndef post_process_call(self, call_primitive, out_tracer):\n@@ -124,7 +126,8 @@ class JaxprTrace(Trace):\nconst_tracers = map(trace.new_instantiated_const, consts)\nenv_tracers = map(trace.full_raise, env)\nbound_subjaxpr = (jaxpr, const_tracers, env_tracers)\n- eqn = JaxprEqn([], None, call_primitive, (bound_subjaxpr,), False, {})\n+ eqn = JaxprEqn([], None, call_primitive, (bound_subjaxpr,),\n+ False, False, {})\nreturn JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), eqn)\nreturn out, todo\n@@ -149,7 +152,7 @@ def scan_process_primitive(trace, consts, init, xs, avals, jaxpr):\navals=avals1, jaxpr=jaxpr1)\nparams_out = {'avals' : avals2, 'jaxpr' : jaxpr2}\n- eqn = JaxprEqn([consts, init, xs], None, scan_p, (), False, params_out)\n+ eqn = JaxprEqn([consts, init, xs], None, scan_p, (), False, False, params_out)\nreturn JaxprTracer(trace, PartialVal((ans, ans_pv)), )\n# in_pvs, in_consts = unzip2([t.pval for t in tracers])\n@@ -278,7 +281,7 @@ class JaxprTracer(Tracer):\nif isinstance(pv, AbstractValue):\nconst = [unit for _ in range(n)]\nkey = object()\n- eqn = JaxprEqn([self], [None]*n, core.identity_p, (), True, {})\n+ eqn = JaxprEqn([self], [None]*n, core.identity_p, (), False, True, {})\ndef child_tracer(i, pval, c):\nd = Destructuring(i, eqn, key)\nreturn JaxprTracer(self.trace, PartialVal((pval, c)), d).full_lower()\n@@ -416,12 +419,16 @@ ConstVar = namedtuple('ConstVar', ['val'])\nLambdaBinding = namedtuple('LambdaBinding', [])\ndef eqn_tracer_to_var(var, outvars, eqn):\n- invars, _, primitive, bound_subjaxprs, destructure, params = eqn\n+ invars, _, primitive, bound_subjaxprs, restructure, destructure, params = eqn\n+ if not restructure:\ninvars = map(var, invars)\n+ else:\n+ invars = [tuple(map(var, v)) if type(v) is tuple else var(v)\n+ for v in invars]\nnew_bound_subjaxprs = [(j, map(var, c), map(var, f))\nfor j, c, f in bound_subjaxprs]\nreturn JaxprEqn(invars, outvars, primitive,\n- new_bound_subjaxprs, destructure, params)\n+ new_bound_subjaxprs, restructure, destructure, params)\ndef tracers_to_jaxpr(in_tracers, out_tracer):\n@@ -486,36 +493,18 @@ class Var(object):\ndef eqn_parents(eqn):\nsubjaxpr_tracers = [it.chain(c, f) for _, c, f in eqn.bound_subjaxprs]\n+ if not eqn.restructure:\nreturn list(it.chain(eqn.invars, *subjaxpr_tracers))\n+ else:\n+ invars = []\n+ for v in eqn.invars:\n+ if type(v) is tuple:\n+ invars.extend(v)\n+ else:\n+ invars.append(v)\n+ return list(it.chain(invars, *subjaxpr_tracers))\n-def eval_jaxpr_raw(jaxpr, consts, freevar_vals, *args):\n- assert all(map(core.valid_jaxtype, consts))\n- assert all(map(core.valid_jaxtype, freevar_vals))\n- assert all(map(core.valid_jaxtype, args))\n-\n- def read(v):\n- return env[v]\n-\n- def write(v, val):\n- env[v] = val\n-\n- env = {}\n- write(unitvar, unit)\n- map(write, jaxpr.constvars, consts)\n- map(write, jaxpr.invars, args)\n- map(write, jaxpr.freevars, freevar_vals)\n- for eqn in jaxpr.eqns:\n- in_vals = map(read, eqn.invars)\n- subfuns = [partial(core.eval_jaxpr, subjaxpr, map(read, const_bindings),\n- map(read, freevar_bindings))\n- for subjaxpr, const_bindings, freevar_bindings\n- in eqn.bound_subjaxprs]\n- ans = eqn.primitive.impl(*(subfuns + in_vals), **eqn.params) # not bind!\n- outvals = list(ans) if eqn.destructure else [ans]\n- map(write, eqn.outvars, outvals)\n- return read(jaxpr.outvar)\n-\ndef compiled_call_impl(fun, *args, **kwargs):\nwith new_master(JaxprTrace, True) as master:\npvals = map(abstractify, args)\n@@ -616,10 +605,10 @@ def _closure_convert_jaxpr(jaxpr, newvar):\nreturn lifted_jaxpr\ndef _unpack_eqn(invar, outvars):\n- return core.JaxprEqn([invar], outvars, core.identity_p, (), True, {})\n+ return core.JaxprEqn([invar], outvars, core.identity_p, (), False, True, {})\ndef _pack_eqn(invars, outvar):\n- return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\n+ return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, False, {})\ndef partial_eval_jaxpr2(jaxpr, first_components):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -222,7 +222,7 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\nmap(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\nmap(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\n- in_nodes = map(read, eqn.invars)\n+ in_nodes = map(read, eqn.invars) # TODO\nif eqn.primitive in parallel_translation_rules:\nname = eqn.params['axis_name']\nparams = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -163,7 +163,7 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nmap(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\nmap(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\n- in_nodes = map(read, eqn.invars)\n+ in_nodes = map(read, eqn.invars) # TODO\nin_shapes = map(c.GetShape, in_nodes)\nsubcs = [\njaxpr_computation(\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax.py",
"new_path": "jax/lax.py",
"diff": "@@ -3819,10 +3819,10 @@ xla.translations[while_p] = _while_loop_translation_rule\nbatching.primitive_batchers[while_p] = _while_loop_batching_rule\ndef _unpack_eqn(invar, outvars):\n- return core.JaxprEqn([invar], outvars, core.identity_p, (), True, {})\n+ return core.JaxprEqn([invar], outvars, core.identity_p, (), False, True, {})\ndef _pack_eqn(invars, outvar):\n- return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, {})\n+ return core.JaxprEqn(invars, [outvar], core.pack_p, (), False, False, {})\ndef _cond_abstract_eval(pred, true_op, true_consts, false_op, false_consts,\n"
}
] | Python | Apache License 2.0 | google/jax | add jaxpr eqn structured input, transpose progress
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 01.05.2019 15:47:01 | 25,200 | 0988f6d8d5f6cb293e0caed09500669c11c0e59d | pattern unpacking at jaxpr top-level (pair w/
next step is to handle that new complexity in our jaxpr munging... | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -134,9 +134,9 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nenv = {}\nwrite(unitvar, unit)\n- map(write, jaxpr.constvars, consts)\n- map(write, jaxpr.invars, args)\n- map(write, jaxpr.freevars, freevar_vals)\n+ pat_fmap(write, jaxpr.constvars, consts)\n+ pat_fmap(write, jaxpr.invars, args)\n+ pat_fmap(write, jaxpr.freevars, freevar_vals)\nfor eqn in jaxpr.eqns:\nif not eqn.restructure:\nin_vals = map(read, eqn.invars)\n@@ -154,6 +154,14 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nreturn read(jaxpr.outvar)\n+# TODO enforce a specific set of types for jaxpr vars\n+def pat_fmap(f, v, *xs):\n+ if type(v) in (tuple, list):\n+ return tuple(map(partial(pat_fmap, f), v, *xs))\n+ else:\n+ return f(v, *xs)\n+\n+\ndef full_lower(val):\nif isinstance(val, Tracer):\nreturn val.full_lower()\n@@ -614,11 +622,11 @@ def check_jaxpr(jaxpr):\nread_const = partial(read_env, const_env)\nwrite_const= partial(write_env, const_env)\n- map(write_const, jaxpr.constvars)\n+ pat_fmap(write_const, jaxpr.constvars)\nwrite(unitvar)\n- map(write, jaxpr.constvars)\n- map(write, jaxpr.freevars)\n- map(write, jaxpr.invars)\n+ pat_fmap(write, jaxpr.constvars)\n+ pat_fmap(write, jaxpr.freevars)\n+ pat_fmap(write, jaxpr.invars)\nfor eqn in jaxpr.eqns:\nif not eqn.restructure:\nmap(read, eqn.invars)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -7,7 +7,8 @@ import jax.numpy as np\nimport jax.lax as lax\nfrom jax.util import curry, unzip2\n-from jax.lax import _abstractify, _unpack_eqn\n+from jax.api_util import pytree_to_jaxtupletree\n+from jax.lax import _abstractify, _unpack_eqn, _pack_eqn\nfrom jax.abstract_arrays import ShapedArray\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import ad\n@@ -151,8 +152,6 @@ def update_arrays(i, aval, xs, x):\nelse:\nreturn lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n-_scan_newvar = pe.gensym('_scan')\n-\n# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\ndef scan_initial(f, init, xs):\ncarry_pval = carry_aval, _ = _abstractify(init)\n@@ -163,7 +162,7 @@ def scan_initial(f, init, xs):\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n(carry_aval_out, y_aval), _ = pval_out\nassert carry_aval == carry_aval_out\n- lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _scan_newvar)\n+ lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr)\nconsts_aval, _ = _abstractify(core.pack(consts))\nin_avals = (consts_aval, carry_aval, x_aval)\nout_aval = core.AbstractTuple((carry_aval, y_aval))\n@@ -320,63 +319,102 @@ def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\na, res = xs\nassert a is None and res is not None\n- # jaxpr :: d -> c -> (a, res) -> (c, b)\n+ # jaxpr :: d -> c -> (a, res) -> (c, b) # TODO assuming restructuring input\n# jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n# jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n# jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n- jaxpr_lifted = _move_res_and_uncurry(jaxpr, _scan_newvar)\n- import ipdb; ipdb.set_trace()\n- jaxpr_lifted_trans = transpose_jaxpr2(jaxpr_lifted)\n+ jaxpr_lifted = rearrange_binders(\n+ lambda d, c, a_res: (a_res[1], (d, c, a_res[0])), jaxpr)\n+ jaxpr_lifted_trans, out_tree = transpose_jaxpr2(jaxpr_lifted)\njaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans)\n+ import ipdb; ipdb.set_trace()\nct_c, ct_bs = ct\ncarry_ct = core.pack((ct_c, ad_util.zeros_like_aval(jaxpr.in_avals[0])))\n# jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n# scan_p.bind :: (d -> c -> a -> (c, b)) -> d -> c -> [a] -> (c, [b])\n- scan_initial_p.bind(\n+ out = scan_initial_p.bind(\ncore.unit, carry_ct, core.pack((ct_bs, res)),\nforward=not forward, length=length, jaxpr=jaxpr_trans)\n+ import ipdb; ipdb.set_trace()\n-def _move_res_and_uncurry(jaxpr, newvar):\n- # jaxpr :: d -> c -> (a, res) -> (c, b)\n- # jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n- assert len(jaxpr.in_avals) == 3\n- assert type(jaxpr.in_avals[2]) is core.AbstractTuple\n+def rearrange_binders(f, typed_jaxpr):\n+ jaxpr = typed_jaxpr.jaxpr.copy()\n+ jaxpr.invars = f(*jaxpr.invars)\n+ in_avals = f(*typed_jaxpr.in_avals)\n+ core.skip_checks or core.check_jaxpr(jaxpr)\n+ return core.TypedJaxpr(jaxpr, typed_jaxpr.literals, in_avals,\n+ typed_jaxpr.out_aval)\n- d_aval, c_aval, (a_aval, res_aval) = jaxpr.in_avals\n- in_avals = [res_aval, core.AbstractTuple((d_aval, c_aval, a_aval))]\n+_scan_newvar = pe.gensym('_scan')\n- d, c, a_res = jaxpr.jaxpr.invars\n- a = newvar()\n- res = newvar()\n- d_c_a = newvar()\n- invars = [res, d_c_a]\n- eqns = (\n- [pe._unpack_eqn(d_c_a, [d, c, a]),\n- pe._pack_eqn([a, res], a_res)]\n- + list(jaxpr.jaxpr.eqns))\n+def _move_stuff_and_add_add(typed_jaxpr):\n+ # jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n- new_jaxpr = core.Jaxpr(jaxpr.jaxpr.constvars, jaxpr.jaxpr.freevars,\n- invars, jaxpr.jaxpr.outvar, eqns)\n- core.skip_checks or core.check_jaxpr(new_jaxpr)\n- return core.TypedJaxpr(new_jaxpr, jaxpr.literals, in_avals, jaxpr.out_aval)\n+ res_aval, (CTc_aval, CTb_aval) = typed_jaxpr.in_avals\n+ CTd_aval, CTc_aval2, CTa_aval = typed_jaxpr.out_aval\n+ assert CTc_aval == CTc_aval2\n+ in_avals = (core.AbstractTuple(()), core.AbstractTuple((CTc_aval, CTd_aval)),\n+ core.AbstractTuple((CTb_aval, res_aval)))\n+ out_aval = core.AbstractTuple((core.AbstractTuple((CTd_aval, CTc_aval)),\n+ CTa_aval))\n+\n+ jaxpr = typed_jaxpr.jaxpr.copy()\n+\n+ # munge input side\n+ CTc_in = _scan_newvar()\n+ CTb_in = _scan_newvar()\n+ res_in, CTc_CTb_in = jaxpr.invars\n+ jaxpr.invars = ((), (CTc_in, CTd_in), (CTb_in, res_in))\n+ jaxpr.eqns = (\n+ [_pack_eqn([CTc_in, CTb_in], CTc_CTb_in)] +\n+ jaxpr.eqns)\n+\n+ # munge output side\n+ CTd_in = _scan_newvar()\n+ CTd_new = _scan_newvar()\n+ CTd_sum = _scan_newvar()\n+ CTc = _scan_newvar()\n+ CTa = _scan_newvar()\n+ partial_out = _scan_newvar()\n+ outvar = _scan_newvar()\n+ jaxpr.eqns = (\n+ jaxpr.eqns +\n+ [_unpack_eqn(jaxpr.outvar, [CTd_new, CTc, CTa]),\n+ _add_any_eqn(CTd_sum, CTd_new, CTd_in),\n+ _pack_eqn([CTd_sum, CTc], partial_out),\n+ _pack_eqn([partial_out, CTa], outvar)])\n+ jaxpr.outvar = outvar\n+\n+ return core.TypedJaxpr(jaxpr, typed_jaxpr.literals,\n+ in_avals, out_aval)\n# transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a)\n-# TODO either top-level restructure, or else munge somehow\ndef transpose_jaxpr2(jaxpr):\nassert len(jaxpr.in_avals) == 2\n+ nones = core.pat_fmap(lambda _: None, jaxpr.jaxpr.invars[1])\n+\n+ @lu.wrap_init\ndef transposed(res, b_bar):\n_, a_bar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (),\n- (res, None), b_bar)\n+ (res, nones), b_bar)\nreturn a_bar\n- return make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\n-def make_typed_jaxpr(py_callable, in_avals):\n+ @lu.transformation_with_aux\n+ def flatten_out(*args):\n+ ans = yield args\n+ yield pytree_to_jaxtupletree(ans)\n+\n+ transposed, out_tree = flatten_out(transposed)\n+ transposed_jaxpr = make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\n+ return transposed_jaxpr, out_tree()\n+\n+def make_typed_jaxpr(traceable, in_avals):\npvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\n- jaxpr, pval_out, consts = pe.trace_to_jaxpr(\n- lu.wrap_init(py_callable), pvals, instantiate=True)\n+ jaxpr, pval_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True)\nout_aval, _ = pval_out\nassert isinstance(out_aval, core.AbstractValue)\nreturn core.TypedJaxpr(jaxpr, consts, in_avals, out_aval)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -148,13 +148,16 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\ndef read_primal(v):\nreturn primal_env.get(v)\n- primal_env = {v: val for v, val in zip(jaxpr.freevars, freevar_vals)\n- if val is not None}\n- primal_env.update(zip(jaxpr.constvars, consts))\n- primal_env.update((v, val) for v, val in zip(jaxpr.invars, args)\n- if val is not None)\n- ct_env = {jaxpr.outvar: cotangent_in}\n+ def write_primal(v, val):\n+ if val is not None:\n+ primal_env[v] = val\n+\n+ primal_env = {}\n+ core.pat_fmap(write_primal, jaxpr.constvars, consts)\n+ core.pat_fmap(write_primal, jaxpr.freevars, freevar_vals)\n+ core.pat_fmap(write_primal, jaxpr.invars, args)\n+ ct_env = {jaxpr.outvar: cotangent_in}\nfor eqn in jaxpr.eqns[::-1]:\ncts_in = map(read_cotangent, eqn.outvars)\nct_in = TangentTuple(cts_in) if eqn.destructure else cts_in[0]\n@@ -182,9 +185,10 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\ncts_out = [zero for _ in eqn.invars]\nmap(write_cotangent, eqn.invars, cts_out)\n- cotangents_out = [read_cotangent(var) if argval is None else None\n- for var, argval in zip(jaxpr.invars, args)]\n- freevar_cts = map(read_cotangent, jaxpr.freevars)\n+ cotangents_out = core.pat_fmap(\n+ lambda var, argval: read_cotangent(var) if argval is None else None,\n+ jaxpr.invars, args)\n+ freevar_cts = core.pat_fmap(read_cotangent, jaxpr.freevars)\nreturn freevar_cts, cotangents_out\ndef get_primitive_transpose(p):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -595,13 +595,11 @@ def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\nreturn (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, fc_out\n-def _closure_convert_jaxpr(jaxpr, newvar):\n+def _closure_convert_jaxpr(jaxpr):\nlifted_jaxpr = jaxpr.copy()\nlifted_jaxpr.constvars = ()\n- consts_var = newvar()\n- lifted_jaxpr.invars = [consts_var] + jaxpr.invars\n- lifted_jaxpr.eqns = (\n- [_unpack_eqn(consts_var, jaxpr.constvars)] + list(jaxpr.eqns))\n+ lifted_jaxpr.invars = [tuple(jaxpr.constvars)] + list(jaxpr.invars)\n+ core.skip_checks or core.check_jaxpr(lifted_jaxpr)\nreturn lifted_jaxpr\ndef _unpack_eqn(invar, outvars):\n@@ -634,8 +632,8 @@ def partial_eval_jaxpr2(jaxpr, first_components):\n# jaxpr_2 :: res | d2 -> c2 -> a2 -> (c2, b2)\n# lifted_jaxpr_2 :: res -> d2 -> c2 -> a2 -> (c2, b2)\n# doubly_lifted_jaxpr_2 :: d2 -> c2 -> (a2, res) -> (c2, b2)\n- lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\n- doubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2, _partial_eval_gensym)\n+ lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2)\n+ doubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2)\nfc_out = fc_c_out, fc_b_out = isnone(out_pv_c), isnone(out_pv_b)\nin_avals_1, in_avals_2 = unzip2(map(_split_avals, first_components,\n@@ -662,13 +660,11 @@ def partial_eval_jaxpr2(jaxpr, first_components):\nout_aval_2)\nreturn typed_jaxpr_1, typed_jaxpr_2, fc_out\n-def _move_and_pair_arg(jaxpr, newvar):\n+def _move_and_pair_arg(jaxpr):\nmoved_jaxpr = jaxpr.copy()\nres, d, c, a = jaxpr.invars\n- pair_var = newvar()\n- moved_jaxpr.invars = [d, c, pair_var]\n- moved_jaxpr.eqns = (\n- [_unpack_eqn(pair_var, [a, res])] + list(jaxpr.eqns))\n+ moved_jaxpr.invars = [d, c, (a, res)]\n+ core.skip_checks or core.check_jaxpr(moved_jaxpr)\nreturn moved_jaxpr\ndef _split_avals(first_component, aval):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -155,6 +155,7 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nenv = {}\nconsts_env = dict(zip(jaxpr.constvars, const_vals))\nwrite(core.unitvar, c.Tuple())\n+ # TODO update with core.fmap\nif const_vals:\nmap(write, jaxpr.constvars, map(c.Constant, const_vals))\nmap(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n@@ -163,7 +164,7 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nmap(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\nmap(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\n- in_nodes = map(read, eqn.invars) # TODO\n+ in_nodes = map(read, eqn.invars)\nin_shapes = map(c.GetShape, in_nodes)\nsubcs = [\njaxpr_computation(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -32,17 +32,17 @@ def f(c, a):\nas_ = np.ones((5, 3))\nc = np.ones(4)\n-print scan_reference(f, c, as_)\n-print scan_initial(f, c, as_)\n-print\n+# print scan_reference(f, c, as_)\n+# print scan_initial(f, c, as_)\n+# print\n-print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n-print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\n-print\n+# print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+# print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\n+# print\n-print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n-print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n-print\n+# print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+# print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n+# print\n# print grad(lambda c, as_: scan_reference(f, c, as_)[0].sum())(c, as_)\nprint grad(lambda c, as_: list(scan_initial(f, c, as_))[0].sum())(c, as_)\n"
}
] | Python | Apache License 2.0 | google/jax | pattern unpacking at jaxpr top-level (pair w/ @dougalm)
next step is to handle that new complexity in our jaxpr munging...
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 07.05.2019 08:52:08 | 25,200 | 4c2ec3e4427badcf3fb5f0ee03ca09e41c71d15b | ship it | [
{
"change_type": "MODIFY",
"old_path": "jax/ad_util.py",
"new_path": "jax/ad_util.py",
"diff": "@@ -57,7 +57,7 @@ def zeros_like_aval(aval):\naval_zeros_likers = {}\ndef zeros_like_abstract_tuple(tup):\n- return AbstractTuple(map(zeros_like_aval, tup))\n+ return JaxTuple(map(zeros_like_aval, tup))\naval_zeros_likers[AbstractTuple] = zeros_like_abstract_tuple\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -67,11 +67,12 @@ class TypedJaxpr(namedtuple('TypedJaxpr', ['jaxpr', 'literals', 'in_avals', 'out\n@curry\ndef jaxpr_as_fun(typed_jaxpr, *args):\n- from jax.lax import _abstractify\n- for arg, in_aval in zip(args, typed_jaxpr.in_avals):\n+ from jax.lax import _abstractify # TODO\n+ invars = typed_jaxpr.jaxpr.invars\n+ for arg, in_aval, varname in zip(args, typed_jaxpr.in_avals, invars):\narg_aval, _ = _abstractify(arg)\nif arg_aval != in_aval:\n- raise TypeError(\"input type mismatch\")\n+ raise TypeError(\"input type mismatch for arg {}\".format(varname))\nout = eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args)\nout_aval, _ = _abstractify(out)\nif out_aval != typed_jaxpr.out_aval:\n@@ -157,6 +158,9 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\n# TODO enforce a specific set of types for jaxpr vars\ndef pat_fmap(f, v, *xs):\nif type(v) in (tuple, list):\n+ if len(xs) == 1 and xs[0] is None:\n+ return tuple(map(partial(pat_fmap, f), v, [None] * len(v)))\n+ else:\nreturn tuple(map(partial(pat_fmap, f), v, *xs))\nelse:\nreturn f(v, *xs)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -186,9 +186,17 @@ def _scan_initial_impl(consts, init, xs, forward, length, jaxpr):\nreturn (carry_out, ys_out)\nys_init = empty_arrays(ys_aval)\n- carry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\n+ # carry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\n+ carry, ys = fori_loop(0, length, body_fun, (init, ys_init))\nreturn core.pack((carry, ys))\n+# TODO remove\n+def fori_loop(start, stop, body_fun, init_val):\n+ carry = init_val\n+ for i in range(start, stop):\n+ carry = body_fun(i, carry)\n+ return carry\n+\ndef _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\nconsts, init, xs = primals\n@@ -322,22 +330,31 @@ def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\n# jaxpr :: d -> c -> (a, res) -> (c, b) # TODO assuming restructuring input\n# jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n# jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n- # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT c, CT d), CT a)\njaxpr_lifted = rearrange_binders(\nlambda d, c, a_res: (a_res[1], (d, c, a_res[0])), jaxpr)\n- jaxpr_lifted_trans, out_tree = transpose_jaxpr2(jaxpr_lifted)\n+ jaxpr_lifted_trans = transpose_jaxpr2(jaxpr_lifted)\njaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans)\n- import ipdb; ipdb.set_trace()\n- ct_c, ct_bs = ct\n- carry_ct = core.pack((ct_c, ad_util.zeros_like_aval(jaxpr.in_avals[0])))\n+ c_aval, b_aval = jaxpr.out_aval\n+ d_aval, c_aval2, _ = jaxpr.in_avals\n+ assert c_aval == c_aval2\n+ bs_aval = promote_aval_rank(length, b_aval)\n+ ct_d = ad_util.zeros_like_aval(d_aval)\n+ ct_c, ct_bs = ad.instantiate_zeros_aval(core.AbstractTuple((c_aval, bs_aval)), ct)\n+ carry_ct = core.pack((ct_c, ct_d))\n+\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT c, CT d), CT a)\n+ core.check_jaxpr(jaxpr_trans.jaxpr)\n+ unit_aval, (ct_c_aval, ct_d_aval), (ct_b_aval, _) = jaxpr_trans.in_avals\n+ assert core.lattice_join(ct_c_aval, core.get_aval(ct_c)) == ct_c_aval\n+ assert core.lattice_join(ct_d_aval, core.get_aval(ct_d)) == ct_d_aval\n- # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n- # scan_p.bind :: (d -> c -> a -> (c, b)) -> d -> c -> [a] -> (c, [b])\nout = scan_initial_p.bind(\ncore.unit, carry_ct, core.pack((ct_bs, res)),\nforward=not forward, length=length, jaxpr=jaxpr_trans)\n- import ipdb; ipdb.set_trace()\n+ (ct_init, ct_consts), ct_as = out\n+ return ct_consts, ct_init, (ct_as, None)\ndef rearrange_binders(f, typed_jaxpr):\njaxpr = typed_jaxpr.jaxpr.copy()\n@@ -351,21 +368,24 @@ _scan_newvar = pe.gensym('_scan')\ndef _move_stuff_and_add_add(typed_jaxpr):\n# jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n- # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT d, CT c), CT a)\n+ # jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT c, CT d), CT a)\nres_aval, (CTc_aval, CTb_aval) = typed_jaxpr.in_avals\nCTd_aval, CTc_aval2, CTa_aval = typed_jaxpr.out_aval\nassert CTc_aval == CTc_aval2\nin_avals = (core.AbstractTuple(()), core.AbstractTuple((CTc_aval, CTd_aval)),\ncore.AbstractTuple((CTb_aval, res_aval)))\n- out_aval = core.AbstractTuple((core.AbstractTuple((CTd_aval, CTc_aval)),\n+ out_aval = core.AbstractTuple((core.AbstractTuple((CTc_aval, CTd_aval)),\nCTa_aval))\njaxpr = typed_jaxpr.jaxpr.copy()\n+ # TODO assume not restructuring input\n+ assert not any(type(invar) is tuple for invar in jaxpr.invars)\n# munge input side\nCTc_in = _scan_newvar()\nCTb_in = _scan_newvar()\n+ CTd_in = _scan_newvar()\nres_in, CTc_CTb_in = jaxpr.invars\njaxpr.invars = ((), (CTc_in, CTd_in), (CTb_in, res_in))\njaxpr.eqns = (\n@@ -373,7 +393,6 @@ def _move_stuff_and_add_add(typed_jaxpr):\njaxpr.eqns)\n# munge output side\n- CTd_in = _scan_newvar()\nCTd_new = _scan_newvar()\nCTd_sum = _scan_newvar()\nCTc = _scan_newvar()\n@@ -384,33 +403,32 @@ def _move_stuff_and_add_add(typed_jaxpr):\njaxpr.eqns +\n[_unpack_eqn(jaxpr.outvar, [CTd_new, CTc, CTa]),\n_add_any_eqn(CTd_sum, CTd_new, CTd_in),\n- _pack_eqn([CTd_sum, CTc], partial_out),\n+ _pack_eqn([CTc, CTd_sum], partial_out),\n_pack_eqn([partial_out, CTa], outvar)])\njaxpr.outvar = outvar\n+ # TODO should really have a check_typed_jaxpr\n+ core.skip_checks or core.check_jaxpr(jaxpr)\nreturn core.TypedJaxpr(jaxpr, typed_jaxpr.literals,\nin_avals, out_aval)\n+def _add_any_eqn(tot, a, b):\n+ return core.JaxprEqn([a, b], [tot], ad_util.add_jaxvals_p, (), False, False, {})\n+\n# transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a)\ndef transpose_jaxpr2(jaxpr):\nassert len(jaxpr.in_avals) == 2\n- nones = core.pat_fmap(lambda _: None, jaxpr.jaxpr.invars[1])\n@lu.wrap_init\ndef transposed(res, b_bar):\n- _, a_bar = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (),\n- (res, nones), b_bar)\n+ _, (_, a_bar) = ad.backward_pass(jaxpr.jaxpr, jaxpr.literals, (),\n+ (res, None), b_bar)\n+ a_bar = ad.instantiate_zeros_aval(jaxpr.in_avals[1], a_bar)\nreturn a_bar\n- @lu.transformation_with_aux\n- def flatten_out(*args):\n- ans = yield args\n- yield pytree_to_jaxtupletree(ans)\n-\n- transposed, out_tree = flatten_out(transposed)\ntransposed_jaxpr = make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\n- return transposed_jaxpr, out_tree()\n+ return transposed_jaxpr\ndef make_typed_jaxpr(traceable, in_avals):\npvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -21,7 +21,7 @@ import itertools as it\nfrom . import partial_eval as pe\nfrom .. import core as core\nfrom ..core import JaxTuple, Trace, Tracer, new_master, get_aval, pack, call_p, Primitive\n-from ..ad_util import (add_jaxvals, add_jaxvals_p, zeros_like_jaxval,\n+from ..ad_util import (add_jaxvals, add_jaxvals_p, zeros_like_jaxval, zeros_like_aval,\nzeros_like_p, zero, Zero)\nfrom ..util import unzip2, unzip3, safe_map, safe_zip, partial\nfrom ..tree_util import process_pytree, build_tree, register_pytree_node, tree_map\n@@ -185,12 +185,29 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\ncts_out = [zero for _ in eqn.invars]\nmap(write_cotangent, eqn.invars, cts_out)\n- cotangents_out = core.pat_fmap(\n- lambda var, argval: read_cotangent(var) if argval is None else None,\n- jaxpr.invars, args)\nfreevar_cts = core.pat_fmap(read_cotangent, jaxpr.freevars)\n+ cotangents_out = core.pat_fmap(lambda v, _: read_cotangent(v), jaxpr.invars, None)\n+ cotangents_out = tuple(map(pack_cotangents_like_caller, args, cotangents_out))\nreturn freevar_cts, cotangents_out\n+def pack_cotangents_like_caller(arg, ct):\n+ if type(arg) is tuple:\n+ return tuple(map(pack_cotangents_like_caller, arg, ct))\n+ elif arg is None:\n+ return recursively_pack(ct)\n+ else:\n+ return None\n+\n+def recursively_pack(ct):\n+ if type(ct) is tuple:\n+ ct = tuple(map(recursively_pack, ct))\n+ if any(elt is zero or isinstance(elt, TangentTuple) for elt in ct):\n+ return TangentTuple(ct)\n+ else:\n+ return pack(ct)\n+ else:\n+ return ct\n+\ndef get_primitive_transpose(p):\ntry:\nreturn primitive_transposes[p]\n@@ -409,6 +426,14 @@ def instantiate_zeros(example, tangent):\nelse:\nreturn tangent\n+def instantiate_zeros_aval(aval, tangent):\n+ if tangent is zero:\n+ return zeros_like_aval(aval)\n+ elif isinstance(tangent, TangentTuple):\n+ return pack(map(instantiate_zeros_aval, aval, tangent))\n+ else:\n+ return tangent\n+\n@transformation_with_aux\ndef traceable(in_tree_def, new_primals, new_tangents):\nnew_tangents = build_tree(in_tree_def, new_tangents)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -32,19 +32,19 @@ def f(c, a):\nas_ = np.ones((5, 3))\nc = np.ones(4)\n-# print scan_reference(f, c, as_)\n-# print scan_initial(f, c, as_)\n-# print\n+print scan_reference(f, c, as_)\n+print scan_initial(f, c, as_)\n+print\n-# print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n-# print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\n-# print\n+print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\n+print\n-# print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n-# print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n-# print\n+print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n+print\n-# print grad(lambda c, as_: scan_reference(f, c, as_)[0].sum())(c, as_)\n+print grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\nprint grad(lambda c, as_: list(scan_initial(f, c, as_))[0].sum())(c, as_)\nprint\n"
}
] | Python | Apache License 2.0 | google/jax | ship it
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 08.05.2019 07:50:38 | 25,200 | e736a0a9a11b3a513b145834496da0cd8c74219c | cleanup: remove call_initial, add xla pat_fmap | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -142,8 +142,8 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nif not eqn.restructure:\nin_vals = map(read, eqn.invars)\nelse:\n- in_vals = [pack(map(read, invars)) if type(invars) is tuple else read(invars)\n- for invars in eqn.invars]\n+ in_vals = [pack(map(read, invars)) if type(invars) is tuple\n+ else read(invars) for invars in eqn.invars]\nsubfuns = [partial(eval_jaxpr, subjaxpr, map(read, const_bindings),\nmap(read, freevar_bindings))\nfor subjaxpr, const_bindings, freevar_bindings\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -15,19 +15,6 @@ from jax.interpreters import ad\nfrom jax import ad_util\n-def pvals_with_zeros(zero_components, aval):\n- if zero_components is True:\n- return pe.PartialVal((None, ad.zero))\n- elif zero_components is False:\n- return pe.PartialVal((aval, core.unit))\n- elif isinstance(zero_components, ZeroTuple):\n- avals, consts = unzip(map, pvals_with_zeros, zero_components, aval)\n- return pe.PartialVal((core.AbstractTuple(avals),\n- core.JaxprTracerTuple(consts)))\n-\n-strip_zeros = partial(ad.strip_zeros, core.unit, core.pack)\n-strip_zeros_aval = partial(ad.strip_zeros, core.AbstractTuple(()), core.AbstractTuple)\n-\ndef convert_zeros(keep_symbolic, example, tangent):\nif tangent is ad.zero:\nif keep_symbolic:\n@@ -39,34 +26,6 @@ def convert_zeros(keep_symbolic, example, tangent):\nelse:\nreturn tangent\n-\n-_call_const = pe.gensym('_consts')\n-\n-def call_initial(f, *args):\n- pvals = map(_abstractify, args)\n- avals = [aval for (aval, _) in pvals]\n- jaxpr, _, consts = pe.trace_to_jaxpr(\n- lu.wrap_init(f), pvals, instantiate=True)\n- lifted_jaxpr = pe._closure_convert_jaxpr(jaxpr, _call_const)\n- lifted_args = (core.pack(consts),) + args\n- return call_initial_p.bind(*lifted_args, jaxpr=lifted_jaxpr, consts=())\n-\n-def _call_initial_impl(*args, **kwargs):\n- jaxpr = kwargs.pop('jaxpr')\n- consts = kwargs.pop('consts')\n- return core.jaxpr_as_fun(jaxpr, consts)(*args)\n-\n-def _call_initial_jvp(primals, tangents, jaxpr, consts):\n- avals = [aval for (aval, _) in map(_abstractify, primals)]\n- where_zeros = map(ad.get_zeros, tangents)\n- nonzero_tangents = strip_zeros(where_zeros, tangents)\n- jaxpr_jvp, new_consts, where_zeros_out = ad.jvp_jaxpr(jaxpr, consts, avals, where_zeros)\n- primal_out, tangent_out = call_initial_p.bind(\n- core.pack(primals), core.pack(nonzero_tangents), jaxpr=jaxpr_jvp,\n- consts=new_consts)\n- tangent_out_zeros = ad.put_zeros(ad.TangentTuple, where_zeros_out, tangent_out)\n- return primal_out, tangent_out_zeros\n-\ndef is_const(x):\nif x is None:\nreturn True\n@@ -77,44 +36,6 @@ def is_const(x):\nelse:\nraise TypeError(type(x))\n-def as_aval(pv, const):\n- if pv is None:\n- pv, _ = _abstractify(const)\n- return pv\n- elif type(pv) is pe.JaxprTracerTuple:\n- return core.AbstractTuple(map(as_aval, pv, const))\n- elif isinstance(pv, core.AbstractValue):\n- return pv\n- else:\n- raise TypeError((pv, const))\n-\n-def _call_initial_partial_eval(trace, *tracers, **kwargs):\n- jaxpr = kwargs.pop('jaxpr')\n- consts = kwargs.pop('consts')\n- in_pvs, in_consts = unzip2([t.pval for t in tracers])\n- first_components = map(is_const, in_pvs)\n- avals = map(as_aval, in_pvs, in_consts)\n- (jaxpr_1, consts_1), (jaxpr_2, consts_2), out_pv, first_components_out = \\\n- pe.partial_eval_jaxpr(jaxpr, consts, avals, first_components)\n- out_const, residuals = call_initial_p.bind(\n- *in_consts, jaxpr=jaxpr_1, consts=consts_1)\n- residual_tracers = core.pack(map(trace.new_instantiated_const, residuals))\n- eqn = core.JaxprEqn((residual_tracers,) + tracers, None, call_initial_p, (),\n- False, False, dict(jaxpr=jaxpr_2, consts=consts_2))\n- return pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n-\n-\n-def _call_initial_transpose():\n- assert False\n-\n-call_initial_p = core.Primitive(\"call_initial\")\n-call_initial_p.def_impl(_call_initial_impl)\n-ad.primitive_jvps[call_initial_p] = _call_initial_jvp\n-pe.custom_partial_eval_rules[call_initial_p] = _call_initial_partial_eval\n-\n-\n-###\n-\ndef demote_aval_rank(xs):\nif isinstance(xs, core.AbstractTuple):\n@@ -161,7 +82,10 @@ def scan_initial(f, init, xs):\njaxpr, pval_out, consts = pe.trace_to_jaxpr(\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n(carry_aval_out, y_aval), _ = pval_out\n- assert carry_aval == carry_aval_out\n+ if carry_aval != carry_aval_out:\n+ msg = (\"scanned function carry output does not match carry input: \"\n+ \"input carry is {} and output carry is {}\")\n+ raise TypeError(msg.format(carry_aval, carry_aval_out))\nlifted_jaxpr = pe._closure_convert_jaxpr(jaxpr)\nconsts_aval, _ = _abstractify(core.pack(consts))\nin_avals = (consts_aval, carry_aval, x_aval)\n@@ -186,17 +110,9 @@ def _scan_initial_impl(consts, init, xs, forward, length, jaxpr):\nreturn (carry_out, ys_out)\nys_init = empty_arrays(ys_aval)\n- # carry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\n- carry, ys = fori_loop(0, length, body_fun, (init, ys_init))\n+ carry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\nreturn core.pack((carry, ys))\n-# TODO remove\n-def fori_loop(start, stop, body_fun, init_val):\n- carry = init_val\n- for i in range(start, stop):\n- carry = body_fun(i, carry)\n- return carry\n-\ndef _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\nconsts, init, xs = primals\n@@ -211,7 +127,7 @@ def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\nwhere_carry_zeros = where_init_zeros\nwhile True:\nwhere_zeros = (where_consts_zeros, where_carry_zeros, where_xs_zeros)\n- jaxpr_jvp, where_zeros_out = ad.jvp_jaxpr2(jaxpr, where_zeros)\n+ jaxpr_jvp, where_zeros_out = ad.jvp_jaxpr(jaxpr, where_zeros)\nwhere_carry_zeros_out, where_ys_zeros = where_zeros_out\nif where_carry_zeros_out == where_carry_zeros:\nbreak\n@@ -239,17 +155,6 @@ def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\ncarry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\nreturn core.pack((carry_out, ys)), ad.TangentTuple((carry_out_dot, ys_dot))\n-def instantiate_zeros(example, tangent, keep_symbolic):\n- if tangent is ad.zero:\n- if keep_symbolic:\n- return tangent\n- else:\n- return ad.zeros_like_jaxval(example)\n- elif isinstance(tangent, ad.TangentTuple):\n- return ad.TangentTuple(map(instantiate_zeros, example, tangent, keep_symbolic))\n- else:\n- return tangent\n-\ndef binary_lattice_join(a, b):\nt = (type(a), type(b))\nif t == (tuple, tuple):\n@@ -275,7 +180,7 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nfc_carry = fc_init\nwhile True:\nfirst_components = (fc_consts, fc_carry, fc_xs)\n- jaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr2(jaxpr, first_components)\n+ jaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr(jaxpr, first_components)\nfc_carry_out, fc_ys = fc_out\nif fc_carry_out == fc_carry:\nbreak\n@@ -327,13 +232,14 @@ def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\na, res = xs\nassert a is None and res is not None\n- # jaxpr :: d -> c -> (a, res) -> (c, b) # TODO assuming restructuring input\n+ # jaxpr :: d -> c -> (a, res) -> (c, b)\n# jaxpr_lifted :: res -> (d, c, a) -> (c, b)\n# jaxpr_lifted_trans :: res -> (CT c, CT b) -> (CT d, CT c, CT a)\n# jaxpr_trans :: * -> (CT c, CT d) -> (CT b, res) -> ((CT c, CT d), CT a)\n+ assert type(jaxpr.jaxpr.invars[2]) is tuple # assume restructuring\njaxpr_lifted = rearrange_binders(\nlambda d, c, a_res: (a_res[1], (d, c, a_res[0])), jaxpr)\n- jaxpr_lifted_trans = transpose_jaxpr2(jaxpr_lifted)\n+ jaxpr_lifted_trans = _transpose_jaxpr(jaxpr_lifted)\njaxpr_trans = _move_stuff_and_add_add(jaxpr_lifted_trans)\nc_aval, b_aval = jaxpr.out_aval\n@@ -379,7 +285,7 @@ def _move_stuff_and_add_add(typed_jaxpr):\nCTa_aval))\njaxpr = typed_jaxpr.jaxpr.copy()\n- # TODO assume not restructuring input\n+ # assume the jaxpr isn't restructuring any inputs\nassert not any(type(invar) is tuple for invar in jaxpr.invars)\n# munge input side\n@@ -407,7 +313,7 @@ def _move_stuff_and_add_add(typed_jaxpr):\n_pack_eqn([partial_out, CTa], outvar)])\njaxpr.outvar = outvar\n- # TODO should really have a check_typed_jaxpr\n+ # TODO(mattjj): use check_typed_jaxpr\ncore.skip_checks or core.check_jaxpr(jaxpr)\nreturn core.TypedJaxpr(jaxpr, typed_jaxpr.literals,\nin_avals, out_aval)\n@@ -417,7 +323,7 @@ def _add_any_eqn(tot, a, b):\n# transpose_jaxpr :: (res -> a -> b) -> (res -> CT b -> CT a)\n-def transpose_jaxpr2(jaxpr):\n+def _transpose_jaxpr(jaxpr):\nassert len(jaxpr.in_avals) == 2\n@lu.wrap_init\n@@ -427,10 +333,10 @@ def transpose_jaxpr2(jaxpr):\na_bar = ad.instantiate_zeros_aval(jaxpr.in_avals[1], a_bar)\nreturn a_bar\n- transposed_jaxpr = make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\n+ transposed_jaxpr = _make_typed_jaxpr(transposed, (jaxpr.in_avals[0], jaxpr.out_aval))\nreturn transposed_jaxpr\n-def make_typed_jaxpr(traceable, in_avals):\n+def _make_typed_jaxpr(traceable, in_avals):\npvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\njaxpr, pval_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True)\nout_aval, _ = pval_out\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -511,34 +511,7 @@ def strip_zeros(unit, pack, iszero, x):\nreturn pack(map(partial(strip_zeros, unit, pack), iszero, x))\n@transformation_with_aux\n-def f_jvp_traceable(zero_components, primals, tangents):\n- tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n- primal_out, tangent_out = yield primals, tangents_zeros\n- zeros_out = get_zeros(tangent_out)\n- tangent_out_nozero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n- yield core.pack((primal_out, tangent_out_nozero)), zeros_out\n-\n-def jvp_jaxpr(jaxpr, consts, avals, zeros):\n- # jaxpr :: a -> b -> c [with consts]\n- f = wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\n- f_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\n- primal_aval = core.AbstractTuple(avals)\n- tangent_aval = strip_zeros(core.AbstractTuple(()), core.AbstractTuple, zeros, primal_aval)\n- primal_pvals = pe.PartialVal((primal_aval , core.unit))\n- tangent_pvals = pe.PartialVal((tangent_aval, core.unit))\n- jaxpr_out, pval_out, consts_out = pe.trace_to_jaxpr(\n- f_jvp, (primal_pvals, tangent_pvals), instantiate=True)\n- # jaxpr_out :: (a, b) -> (a', b') -> (c, c') [with consts]\n- # out_zeros :: zeros(c)\n- return jaxpr_out, consts_out, out_zeros()\n-\n-\n-# TODO ideas to simplify:\n-# - try jaxpr munging\n-# - try writing as an @transform\n-\n-@transformation_with_aux\n-def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\n+def f_jvp_traceable(zero_components, *primal_tangent_pairs):\nprimals, tangents = unzip2(primal_tangent_pairs)\ntangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\nprimal_out, tangent_out = yield primals, tangents_zeros\n@@ -548,12 +521,12 @@ def f_jvp_traceable2(zero_components, *primal_tangent_pairs):\nprimal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\nyield pack(primal_tangent_pairs_out), zeros_out\n-def jvp_jaxpr2(jaxpr, zeros):\n+def jvp_jaxpr(jaxpr, zeros):\n# jaxpr :: d -> a -> b -> (c1, c2)\n# avals = (d, a, b)\n# f :: d -> a -> b -> (c1, c2)\nf = wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\n- f_jvp, out_zeros = f_jvp_traceable2(jvp(f, instantiate=False), zeros)\n+ f_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\n# f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\ntangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\nzeros, jaxpr.in_avals)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -572,28 +572,6 @@ def jaxpr_as_fun(jaxpr, consts, *args):\n_partial_eval_gensym = gensym('_peval')\n-def partial_eval_jaxpr(jaxpr, consts, avals, first_components):\n- # jaxpr :: a -> b -> c\n- f = lu.wrap_init(partial(jaxpr_as_fun, jaxpr, consts))\n-\n- cell = []\n- def fun(*vals):\n- pvals = map(as_pval, avals, first_components, vals)\n- jaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n- out_pv, out_const = out_pval\n- cell.append((out_pv, jaxpr_2))\n- return pack((out_const, pack(consts_2)))\n-\n- pvals = map(as_pval2, avals, first_components)\n- jaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\n- lu.wrap_init(fun), pvals, instantiate=True)\n- out_pv_2, jaxpr_2 = cell[0]\n- lifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2, _partial_eval_gensym)\n- fc_out = isnone(out_pv_2)\n- # jaxpr_1 :: a1 -> b1 -> (c1, res)\n- # lifted_jaxpr_2 :: res -> a2 -> b2 -> c2\n- return (jaxpr_1, consts_1), (lifted_jaxpr_2, ()), out_pv_2, fc_out\n-\ndef _closure_convert_jaxpr(jaxpr):\nlifted_jaxpr = jaxpr.copy()\n@@ -609,7 +587,7 @@ def _pack_eqn(invars, outvar):\nreturn core.JaxprEqn(invars, [outvar], core.pack_p, (), False, False, {})\n-def partial_eval_jaxpr2(jaxpr, first_components):\n+def partial_eval_jaxpr(jaxpr, first_components):\n# jaxpr :: d -> c -> a -> (c, b)\nf = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -214,6 +214,7 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\nenv = {}\nwrite(core.unitvar, c.Tuple())\n+ assert False # TODO update with pat_fmap\nif const_vals:\nmap(write, jaxpr.constvars, map(c.Constant, const_vals))\nmap(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n@@ -222,7 +223,8 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\nmap(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\nmap(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\n- in_nodes = map(read, eqn.invars) # TODO\n+ assert False # TODO udpate with eqn.restructure\n+ in_nodes = map(read, eqn.invars)\nif eqn.primitive in parallel_translation_rules:\nname = eqn.params['axis_name']\nparams = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -152,19 +152,25 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nassert node is not None\nenv[v] = node\n+ def write_constant(v, val): write(v, c.Constant(val))\n+ def write_param(v, shape): write(v, c.ParameterWithShape(shape))\n+\nenv = {}\n- consts_env = dict(zip(jaxpr.constvars, const_vals))\nwrite(core.unitvar, c.Tuple())\n- # TODO update with core.fmap\n+ core.pat_fmap(write_param, jaxpr.invars, arg_shapes)\nif const_vals:\n- map(write, jaxpr.constvars, map(c.Constant, const_vals))\n- map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n+ core.pat_fmap(write_constant, jaxpr.constvars, const_vals)\n+ core.pat_fmap(write_param, jaxpr.freevars, freevar_shapes)\nelse:\nall_freevars = it.chain(jaxpr.constvars, jaxpr.freevars)\n- map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\n- map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\n+ core.pat_fmap(write_param, all_freevars, freevar_shapes)\n+\nfor eqn in jaxpr.eqns:\n+ if not eqn.restructure:\nin_nodes = map(read, eqn.invars)\n+ else:\n+ in_nodes = [xla_pack(c, map(read, invars)) if type(invars) is tuple\n+ else read(invars) for invars in eqn.invars]\nin_shapes = map(c.GetShape, in_nodes)\nsubcs = [\njaxpr_computation(\n@@ -184,6 +190,9 @@ def xla_destructure(c, ans):\nnum_elements = len(c.GetShape(ans).tuple_shapes())\nreturn [c.GetTupleElement(ans, i) for i in range(num_elements)]\n+def xla_pack(c, xs):\n+ return c.Tuple(*xs)\n+\ndef tuple_constant(c, val, canonicalize_types=True):\nreturn c.Tuple(*map(c.Constant, val))\nxb.register_constant_handler(JaxTuple, tuple_constant)\n@@ -224,8 +233,17 @@ def zeros_like_translation_rule(c, x):\nshape.dimensions())\nreturn _zeros_like(c.GetShape(x))\n+def add_jaxvals_translation_rule(c, x, y):\n+ x_shape, y_shape = map(c.GetShape, (x, y))\n+ if x_shape.is_tuple() and y_shape.is_tuple():\n+ xs = xla_destructure(c, x)\n+ ys = xla_destructure(c, y)\n+ return c.Tuple(*map(partial(add_jaxvals_translation_rule, c), xs, ys))\n+ else:\n+ return c.Add(x, y)\n+\ntranslations[ad_util.zeros_like_p] = zeros_like_translation_rule\n-translations[ad_util.add_jaxvals_p] = lambda c, x, y: c.Add(x, y)\n+translations[ad_util.add_jaxvals_p] = add_jaxvals_translation_rule\ndef canonicalize_pyval_dtype(x):\n"
}
] | Python | Apache License 2.0 | google/jax | cleanup: remove call_initial, add xla pat_fmap |
260,335 | 08.05.2019 08:57:51 | 25,200 | 444cda493a38181cf93fec94e591e6dd06041d65 | add underscores, rename scan_initial -> scan | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -155,7 +155,6 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nreturn read(jaxpr.outvar)\n-# TODO enforce a specific set of types for jaxpr vars\ndef pat_fmap(f, v, *xs):\nif type(v) in (tuple, list):\nif len(xs) == 1 and xs[0] is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/initial_style.py",
"new_path": "jax/initial_style.py",
"diff": "@@ -15,69 +15,69 @@ from jax.interpreters import ad\nfrom jax import ad_util\n-def convert_zeros(keep_symbolic, example, tangent):\n+def _convert_zeros(keep_symbolic, example, tangent):\nif tangent is ad.zero:\nif keep_symbolic:\nreturn core.unit\nelse:\nreturn ad.zeros_like_jaxval(example)\nelif type(tangent) is ad.TangentTuple:\n- return core.pack(map(convert_zeros, keep_symbolic, example, tangent))\n+ return core.pack(map(_convert_zeros, keep_symbolic, example, tangent))\nelse:\nreturn tangent\n-def is_const(x):\n+def _is_const(x):\nif x is None:\nreturn True\nelif type(x) is pe.JaxprTracerTuple:\n- return tuple(map(is_const, x))\n+ return tuple(map(_is_const, x))\nelif isinstance(x, core.AbstractValue):\nreturn False\nelse:\nraise TypeError(type(x))\n-def demote_aval_rank(xs):\n+def _demote_aval_rank(xs):\nif isinstance(xs, core.AbstractTuple):\n- return core.AbstractTuple(map(demote_aval_rank, xs))\n+ return core.AbstractTuple(map(_demote_aval_rank, xs))\nelse:\nreturn ShapedArray(xs.shape[1:], xs.dtype)\n-def promote_aval_rank(n, xs):\n+def _promote_aval_rank(n, xs):\nif isinstance(xs, core.AbstractTuple):\n- return core.AbstractTuple(map(partial(promote_aval_rank, n), xs))\n+ return core.AbstractTuple(map(partial(_promote_aval_rank, n), xs))\nelse:\nreturn ShapedArray((n,) + xs.shape, xs.dtype)\n-def leading_dim_size(xs):\n+def _leading_dim_size(xs):\nif isinstance(xs, core.JaxTuple):\n- return leading_dim_size(xs[0])\n+ return _leading_dim_size(xs[0])\nelse:\nreturn xs.shape[0]\n-def empty_arrays(aval):\n+def _empty_arrays(aval):\nif isinstance(aval, core.AbstractTuple):\n- return core.pack(map(empty_arrays, aval))\n+ return core.pack(map(_empty_arrays, aval))\nelse:\nreturn lax.full(aval.shape, 0, aval.dtype)\n-def index_arrays(i, aval, xs):\n+def _index_arrays(i, aval, xs):\nif isinstance(aval, core.AbstractTuple):\n- return core.pack(map(partial(index_arrays, i), aval, xs))\n+ return core.pack(map(partial(_index_arrays, i), aval, xs))\nelse:\nreturn lax.dynamic_index_in_dim(xs, i, keepdims=False)\n-def update_arrays(i, aval, xs, x):\n+def _update_arrays(i, aval, xs, x):\nif isinstance(aval, core.AbstractTuple):\n- return core.pack(map(partial(update_arrays, i), aval, xs, x))\n+ return core.pack(map(partial(_update_arrays, i), aval, xs, x))\nelse:\nreturn lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\n-def scan_initial(f, init, xs):\n+def scan(f, init, xs):\ncarry_pval = carry_aval, _ = _abstractify(init)\nxs_aval, _ = _abstractify(xs)\n- x_aval = demote_aval_rank(xs_aval)\n+ x_aval = _demote_aval_rank(xs_aval)\nx_pval = pe.PartialVal((x_aval, core.unit))\njaxpr, pval_out, consts = pe.trace_to_jaxpr(\nlu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n@@ -91,30 +91,30 @@ def scan_initial(f, init, xs):\nin_avals = (consts_aval, carry_aval, x_aval)\nout_aval = core.AbstractTuple((carry_aval, y_aval))\njaxpr = core.TypedJaxpr(lifted_jaxpr, (), in_avals, out_aval)\n- length = leading_dim_size(xs)\n- return scan_initial_p.bind(core.pack(consts), init, xs,\n+ length = _leading_dim_size(xs)\n+ return scan_p.bind(core.pack(consts), init, xs,\nforward=True, length=length, jaxpr=jaxpr)\n-def _scan_initial_impl(consts, init, xs, forward, length, jaxpr):\n+def _scan_impl(consts, init, xs, forward, length, jaxpr):\n_, _, x_aval = jaxpr.in_avals\n_, y_aval = jaxpr.out_aval\n- ys_aval = promote_aval_rank(length, y_aval)\n+ ys_aval = _promote_aval_rank(length, y_aval)\ndef body_fun(i, vals):\nidx = i if forward else length - i - 1\ncarry, ys = vals\n- x = index_arrays(idx, x_aval, xs)\n+ x = _index_arrays(idx, x_aval, xs)\ncarry_out, y = core.jaxpr_as_fun(jaxpr)(consts, carry, x)\n- ys_out = update_arrays(idx, y_aval, ys, y)\n+ ys_out = _update_arrays(idx, y_aval, ys, y)\nreturn (carry_out, ys_out)\n- ys_init = empty_arrays(ys_aval)\n+ ys_init = _empty_arrays(ys_aval)\ncarry, ys = lax.fori_loop(0, length, body_fun, (init, ys_init))\nreturn core.pack((carry, ys))\n-def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\n+def _scan_jvp(primals, tangents, forward, length, jaxpr):\nconsts, init, xs = primals\nconsts_dot, init_dot, xs_dot = tangents\nconsts_aval, carry_aval, x_aval = jaxpr.in_avals\n@@ -132,19 +132,19 @@ def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\nif where_carry_zeros_out == where_carry_zeros:\nbreak\nelse:\n- where_carry_zeros = binary_lattice_join(where_carry_zeros_out, where_carry_zeros)\n+ where_carry_zeros = _binary_lattice_join(where_carry_zeros_out, where_carry_zeros)\n# convert_zeros is like strip_zeros but uses explicit lattice information to\n# instantiate zeros in some cases, namely in init_dot based on the fixed point\n- nonzero_init_dot = convert_zeros(where_carry_zeros, init, init_dot)\n- nonzero_consts_dot = convert_zeros(where_consts_zeros, consts, consts_dot)\n- nonzero_xs_dot = convert_zeros(where_xs_zeros, xs, xs_dot)\n+ nonzero_init_dot = _convert_zeros(where_carry_zeros, init, init_dot)\n+ nonzero_consts_dot = _convert_zeros(where_consts_zeros, consts, consts_dot)\n+ nonzero_xs_dot = _convert_zeros(where_xs_zeros, xs, xs_dot)\nconsts_dual = core.pack((consts, nonzero_consts_dot))\ninit_dual = core.pack((init, nonzero_init_dot))\nxs_dual = core.pack((xs, nonzero_xs_dot))\n- carry_out_dual, ys_dual = scan_initial_p.bind(\n+ carry_out_dual, ys_dual = scan_p.bind(\nconsts_dual, init_dual, xs_dual,\nforward=forward, length=length, jaxpr=jaxpr_jvp)\n@@ -155,27 +155,27 @@ def _scan_initial_jvp(primals, tangents, forward, length, jaxpr):\ncarry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\nreturn core.pack((carry_out, ys)), ad.TangentTuple((carry_out_dot, ys_dot))\n-def binary_lattice_join(a, b):\n+def _binary_lattice_join(a, b):\nt = (type(a), type(b))\nif t == (tuple, tuple):\n- return tuple(map(binary_lattice_join, a, b))\n+ return tuple(map(_binary_lattice_join, a, b))\nelif t == (tuple, bool):\n- return tuple(map(binary_lattice_join, a, (b,) * len(a)))\n+ return tuple(map(_binary_lattice_join, a, (b,) * len(a)))\nelif t == (bool, tuple):\n- return tuple(map(binary_lattice_join, (a,) * len(b), b))\n+ return tuple(map(_binary_lattice_join, (a,) * len(b), b))\nelif t == (bool, bool):\nreturn a and b\nelse:\nraise TypeError((type(a), type(b)))\n-def _scan_initial_partial_eval(trace, *tracers, **kwargs):\n+def _scan_partial_eval(trace, *tracers, **kwargs):\njaxpr = kwargs.pop('jaxpr')\nlength = kwargs.pop('length')\nforward = kwargs.pop('forward')\nassert not kwargs\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\n- fc_consts, fc_init, fc_xs = map(is_const, in_pvs)\n+ fc_consts, fc_init, fc_xs = map(_is_const, in_pvs)\nfc_carry = fc_init\nwhile True:\n@@ -185,7 +185,7 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nif fc_carry_out == fc_carry:\nbreak\nelse:\n- fc_carry = binary_lattice_join(fc_carry, fc_carry_out)\n+ fc_carry = _binary_lattice_join(fc_carry, fc_carry_out)\nconsts_tracer, init_tracer, xs_tracer = tracers\nlifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\n@@ -194,13 +194,13 @@ def _scan_initial_partial_eval(trace, *tracers, **kwargs):\nout_pv = _put_known_pvs(fc_out, jaxpr.out_aval)\n- out_carry, (ys, residuals) = scan_initial_p.bind(\n+ out_carry, (ys, residuals) = scan_p.bind(\n*in_consts, forward=forward, length=length, jaxpr=jaxpr_1)\nout_const = core.pack((out_carry, ys))\nresiduals_tracer = trace.new_instantiated_const(core.pack(residuals))\nd, c, a = lifted_tracers\nnew_tracers = (d, c, (a, residuals_tracer))\n- eqn = core.JaxprEqn(new_tracers, None, scan_initial_p, (), True, False,\n+ eqn = core.JaxprEqn(new_tracers, None, scan_p, (), True, False,\ndict(forward=forward, length=length, jaxpr=jaxpr_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n@@ -226,7 +226,7 @@ def _put_known_pvs(is_known, aval):\nreturn pe.JaxprTracerTuple(map(_put_known_pvs, is_known, aval))\n-def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\n+def _scan_transpose(ct, consts, init, xs, forward, length, jaxpr):\nassert consts is None and init is None\nassert type(xs) is tuple\na, res = xs\n@@ -245,7 +245,7 @@ def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\nc_aval, b_aval = jaxpr.out_aval\nd_aval, c_aval2, _ = jaxpr.in_avals\nassert c_aval == c_aval2\n- bs_aval = promote_aval_rank(length, b_aval)\n+ bs_aval = _promote_aval_rank(length, b_aval)\nct_d = ad_util.zeros_like_aval(d_aval)\nct_c, ct_bs = ad.instantiate_zeros_aval(core.AbstractTuple((c_aval, bs_aval)), ct)\ncarry_ct = core.pack((ct_c, ct_d))\n@@ -256,7 +256,7 @@ def _scan_initial_transpose(ct, consts, init, xs, forward, length, jaxpr):\nassert core.lattice_join(ct_c_aval, core.get_aval(ct_c)) == ct_c_aval\nassert core.lattice_join(ct_d_aval, core.get_aval(ct_d)) == ct_d_aval\n- out = scan_initial_p.bind(\n+ out = scan_p.bind(\ncore.unit, carry_ct, core.pack((ct_bs, res)),\nforward=not forward, length=length, jaxpr=jaxpr_trans)\n(ct_init, ct_consts), ct_as = out\n@@ -344,8 +344,8 @@ def _make_typed_jaxpr(traceable, in_avals):\nreturn core.TypedJaxpr(jaxpr, consts, in_avals, out_aval)\n-scan_initial_p = core.Primitive(\"scan_initial\")\n-scan_initial_p.def_impl(_scan_initial_impl)\n-ad.primitive_jvps[scan_initial_p] = _scan_initial_jvp\n-ad.primitive_transposes[scan_initial_p] = _scan_initial_transpose\n-pe.custom_partial_eval_rules[scan_initial_p] = _scan_initial_partial_eval\n+scan_p = core.Primitive(\"scan\")\n+scan_p.def_impl(_scan_impl)\n+ad.primitive_jvps[scan_p] = _scan_jvp\n+ad.primitive_transposes[scan_p] = _scan_transpose\n+pe.custom_partial_eval_rules[scan_p] = _scan_partial_eval\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/scan_test.py",
"new_path": "tests/scan_test.py",
"diff": "@@ -2,7 +2,7 @@ from functools import partial\nimport numpy as onp\n-from jax.initial_style import scan_initial\n+from jax.initial_style import scan\nfrom jax.core import pack\nimport jax.core as core\nimport jax.numpy as np\n@@ -33,19 +33,19 @@ as_ = np.ones((5, 3))\nc = np.ones(4)\nprint scan_reference(f, c, as_)\n-print scan_initial(f, c, as_)\n+print scan(f, c, as_)\nprint\nprint jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n-print jvp(lambda c, as_: scan_initial(f, c, as_), (c, as_), (c, as_))[1]\n+print jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))[1]\nprint\nprint linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n-print linearize(lambda c, as_: scan_initial(f, c, as_), c, as_)[1](c, as_)\n+print linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)\nprint\nprint grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n-print grad(lambda c, as_: list(scan_initial(f, c, as_))[0].sum())(c, as_)\n+print grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)\nprint\n# ###\n"
}
] | Python | Apache License 2.0 | google/jax | add underscores, rename scan_initial -> scan |
260,335 | 08.05.2019 11:25:03 | 25,200 | e19faa7cda24badffd69322fad0056e74d06cb41 | add scan docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -42,18 +42,15 @@ from jax import ad_util\n### fori_loop and while_loop\ndef fori_loop(lower, upper, body_fun, init_val):\n- \"\"\"Loop from `lower` to `upper` by reduction to `while_loop`.\n+ \"\"\"Loop from ``lower`` to ``upper`` by reduction to ``while_loop``.\n- Arguments:\n- lower: loop index lower bound (inclusive)\n- upper: loop index upper bound (exclusive)\n- body_fun: function of type (int, T) -> T, where T is the type of `init_val`\n- init_val: initial loop value, of type T\n+ The type signature in brief is\n- Returns:\n- Loop value from the final iteration, of type T.\n+ .. code-block:: haskell\n+\n+ fori_loop :: Int -> Int -> ((int, a) -> a) -> a -> a\n- The semantics of `fori_loop` are given by this Python implementation::\n+ The semantics of ``fori_loop`` are given by this Python implementation::\ndef fori_loop(lower, upper, body_fun, init_val):\nval = init_val\n@@ -61,8 +58,17 @@ def fori_loop(lower, upper, body_fun, init_val):\nval = body_fun(i, val)\nreturn val\n- Unlike that pure Python version, `fori_loop` is implemented in terms of a call\n- to `while_loop`. See the docstring for `while_loop` for more information.\n+ Unlike that Python version, ``fori_loop`` is implemented in terms of a call to\n+ ``while_loop``. See the docstring for ``while_loop`` for more information.\n+\n+ Args:\n+ lower: an integer representing the loop index lower bound (inclusive)\n+ upper: an integer representing the loop index upper bound (exclusive)\n+ body_fun: function of type ``(int, a) -> a``.\n+ init_val: initial loop carry value of type ``a``.\n+\n+ Returns:\n+ Loop value from the final iteration, of type ``a``.\n\"\"\"\ndef while_cond_fun(loop_carry):\ni, _ = loop_carry\n@@ -77,18 +83,15 @@ def fori_loop(lower, upper, body_fun, init_val):\ndef while_loop(cond_fun, body_fun, init_val):\n- \"\"\"Call `body_fun` repeatedly in a loop while `cond_fun` is True.\n+ \"\"\"Call ``body_fun`` repeatedly in a loop while ``cond_fun`` is True.\n- Arguments:\n- cond_fun: pure function of type `T -> Bool`.\n- body_fun: pure function of type `T -> T`.\n- init_val: value of type `T`, a type that can be a scalar, array, or any\n- (nested) Python tuple/list/dict thereof.\n+ The type signature in brief is\n- Returns:\n- The output from the final iteration of body_fun, of type `T`.\n+ .. code-block:: haskell\n+\n+ while_loop :: (a -> Bool) -> (a -> a) -> a -> a\n- The semantics of `while_loop` are given by this Python implementation::\n+ The semantics of ``while_loop`` are given by this Python implementation::\ndef while_loop(cond_fun, body_fun, init_val):\nval = init_val\n@@ -96,15 +99,24 @@ def while_loop(cond_fun, body_fun, init_val):\nval = body_fun(val)\nreturn val\n- Unlike that pure Python version, `while_loop` is a JAX primitive and is\n- lowered to a single XLA While HLO. That makes it useful for reducing\n- compilation times for jit-compiled functions, since native Python loop\n- constructs in an `@jit` function are unrolled, leading to large XLA\n- computations.\n+ Unlike that Python version, ``while_loop`` is a JAX primitive and is lowered\n+ to a single XLA While HLO. That makes it useful for reducing compilation times\n+ for jit-compiled functions, since native Python loop constructs in an ``@jit``\n+ function are unrolled, leading to large XLA computations.\nAnother difference from using Python-native loop constructs is that\n- `while_loop` is not (yet) reverse-mode differentiable because XLA computations\n+ ``while_loop`` is not reverse-mode differentiable because XLA computations\nrequire static bounds on memory requirements.\n+\n+ Args:\n+ cond_fun: function of type ``a -> Bool``.\n+ body_fun: function of type ``a -> a``.\n+ init_val: value of type ``a``, a type that can be a scalar, array, or any\n+ pytree (nested Python tuple/list/dict) thereof, representing the initial\n+ loop carry value.\n+\n+ Returns:\n+ The output from the final iteration of body_fun, of type ``a``.\n\"\"\"\ninit_val_flat, in_tree = pytree_to_jaxtupletree(init_val)\nflat_body_fun, out_tree = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(body_fun), (in_tree,))\n@@ -472,6 +484,58 @@ def _update_arrays(i, aval, xs, x):\n# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\ndef scan(f, init, xs):\n+ \"\"\"Scan a function over array axes while carrying along state.\n+\n+ Scan is similar to a fold, but also returns the successive reduced values. Its\n+ type signature in brief is\n+\n+ .. code-block:: haskell\n+\n+ scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\n+\n+ where we use [t] here to denote the type t with an additional leading axis.\n+ That is, if t is an array type then [t] represents the type with an additional\n+ leading axis, and if t is a pytree (container) type with array leaves then [t]\n+ represents the type type with the same pytree structure and corresponding\n+ leaves each with an additional leading axis.\n+\n+ When both ``a`` and ``b`` are array types, the semantics of ``scan`` are given\n+ by this Python implementation::\n+\n+ def scan(f, init, xs):\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ carry, y = f(carry, x)\n+ ys.append(y)\n+ return carry, np.stack(ys)\n+\n+ Unlike that Python version, both ``a`` and ``b`` may be arbitrary pytree\n+ types, and so multiple arrays can be scanned over at once.\n+\n+ Also unlike that Python version, ``scan`` is a JAX primitive and is lowered to\n+ a single XLA While HLO. That makes it useful for reducing compilation times\n+ for jit-compiled functions, since native Python loop constructs in an ``@jit``\n+ function are unrolled, leading to large XLA computations.\n+\n+ Args:\n+ f: a Python function to be scanned of type ``c -> a -> (c, b)``, meaning\n+ that ``f`` accepts two arguments where the first is a value of the loop\n+ carry and the second is a slice of ``xs`` along its leading axis, and that\n+ ``f`` returns a pair where the first element represents a new value for\n+ the loop carry and the second represents a slice of the output.\n+ init: an initial loop carry value of type ``c``, which can be a scalar,\n+ array, or any pytree (nested Python tuple/list/dict) thereof, representing\n+ the initial loop carry value.\n+ xs: the value of type ``[a]`` over which to scan along the leading axis,\n+ where ``[a]`` can be an array or any pytree (nested Python\n+ tuple/list/dict) thereof with consistent leading axis sizes.\n+\n+ Returns:\n+ A pair of type ``(c, [b])`` where the first element represents the final\n+ loop carry value and the second element represents the stacked outputs of\n+ the second output of ``f`` when scanned over the leading axis of the inputs.\n+ \"\"\"\ncarry_pval = carry_aval, _ = _abstractify(init)\nxs_aval, _ = _abstractify(xs)\nx_aval = _demote_aval_rank(xs_aval)\n@@ -710,10 +774,9 @@ def _move_stuff_and_add_add(typed_jaxpr):\npe._pack_eqn([partial_out, CTa], outvar)])\njaxpr.outvar = outvar\n- # TODO(mattjj): use check_typed_jaxpr\n+ # TODO(mattjj): add a check_typed_jaxpr and use it here\ncore.skip_checks or core.check_jaxpr(jaxpr)\n- return core.TypedJaxpr(jaxpr, typed_jaxpr.literals,\n- in_avals, out_aval)\n+ return core.TypedJaxpr(jaxpr, typed_jaxpr.literals, in_avals, out_aval)\ndef _add_any_eqn(tot, a, b):\nreturn core.JaxprEqn([a, b], [tot], ad_util.add_jaxvals_p, (), False, False, {})\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -418,6 +418,8 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nout = lax.while_loop(cond, body, (33, 4))\nself.assertEqual(out, (7, 10))\n+ # def testScanImpl(self):\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | add scan docstring |
260,335 | 08.05.2019 11:37:27 | 25,200 | cdedf452586c04fd75ab9eff17fd6308bebbc99e | move scan tests into tests/lax_control_flow_test.py | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -809,4 +809,3 @@ scan_p.def_impl(_scan_impl)\nad.primitive_jvps[scan_p] = _scan_jvp\nad.primitive_transposes[scan_p] = _scan_transpose\npe.custom_partial_eval_rules[scan_p] = _scan_partial_eval\n-\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -24,8 +24,19 @@ import numpy as onp\nimport numpy.random as npr\nfrom jax import api\n+from jax import core\nfrom jax import lax\nfrom jax import test_util as jtu\n+import jax.numpy as np # scan tests use numpy\n+\n+def scan_reference(f, init, xs):\n+ carry = init\n+ ys = []\n+ for x in xs:\n+ (carry, y) = f(carry, x)\n+ ys.append(lax.reshape(y, (1,) + onp.shape(y)))\n+ ys = lax.concatenate(ys, 0)\n+ return core.pack((carry, ys))\nclass LaxControlFlowTest(jtu.JaxTestCase):\n@@ -418,7 +429,73 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nout = lax.while_loop(cond, body, (33, 4))\nself.assertEqual(out, (7, 10))\n- # def testScanImpl(self):\n+ def testScanImpl(self):\n+ d = np.zeros(2)\n+ def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+ as_ = np.ones((5, 3))\n+ c = np.ones(4)\n+\n+ ans = lax.scan(f, c, as_)\n+ expected = scan_reference(f, c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testScanJVP(self):\n+ d = np.zeros(2)\n+ def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+ as_ = np.ones((5, 3))\n+ c = np.ones(4)\n+\n+ ans = api.jvp(lambda c, as_: lax.scan(f, c, as_), (c, as_), (c, as_))[1]\n+ expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testScanLinearize(self):\n+ d = np.zeros(2)\n+ def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+ as_ = np.ones((5, 3))\n+ c = np.ones(4)\n+\n+ ans = api.linearize(lambda c, as_: lax.scan(f, c, as_), c, as_)[1](c, as_)\n+ expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testScanGrad(self):\n+ d = np.zeros(2)\n+ def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+ as_ = np.ones((5, 3))\n+ c = np.ones(4)\n+\n+ ans = api.grad(lambda c, as_: list( lax.scan(f, c, as_))[0].sum())(c, as_)\n+ expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\nif __name__ == '__main__':\n"
},
{
"change_type": "DELETE",
"old_path": "tests/scan_test.py",
"new_path": null,
"diff": "-from functools import partial\n-\n-import numpy as onp\n-\n-from jax.core import pack\n-from jax.lax import scan\n-import jax.numpy as np\n-from jax import jvp, linearize, grad\n-\n-###\n-\n-def scan_reference(f, init, xs):\n- carry = init\n- ys = []\n- for x in xs:\n- (carry, y) = f(carry, x)\n- ys.append(y)\n- ys = np.stack(ys)\n- return pack((np.array(carry), ys))\n-\n-d = np.zeros(2)\n-def f(c, a):\n- assert a.shape == (3,)\n- assert c.shape == (4,)\n- b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- c = np.sin(c * b)\n- assert b.shape == ()\n- return pack((c, b))\n-\n-as_ = np.ones((5, 3))\n-c = np.ones(4)\n-\n-print scan_reference(f, c, as_)\n-print scan(f, c, as_)\n-print\n-\n-print jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n-print jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))[1]\n-print\n-\n-print linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n-print linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)\n-print\n-\n-print grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n-print grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)\n-print\n-\n-# ###\n-\n-\n-# def f(x, carry):\n-# carry = carry + np.sin(x)\n-# y = pack((carry**2, -carry))\n-# return pack((y, carry))\n-\n-# ys, z = scan_initial(f, 0.0, np.arange(4.))\n-# ys_ref, z_ref = scan_reference(f, 0.0, np.arange(4.))\n-# print onp.allclose(z, z_ref)\n-\n-# print ys\n-# print ys_ref\n-# print z\n-# print z_ref\n-# print\n-# print jvp(partial(scan_initial, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-# print jvp(partial(scan_reference, f), (0.0, np.arange(4.)), (1., np.array([0.3, 0.2, 0.1, 0.1])))\n-# print\n"
}
] | Python | Apache License 2.0 | google/jax | move scan tests into tests/lax_control_flow_test.py |
260,335 | 08.05.2019 13:08:05 | 25,200 | a8ebc249a382f33ab0c3325325ceadbe053dc381 | tweaks to scan docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -482,12 +482,11 @@ def _update_arrays(i, aval, xs, x):\nelse:\nreturn lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n-# scan :: (c -> a -> (c, b)) -> c -> [a] -> (c, [b])\n+\ndef scan(f, init, xs):\n- \"\"\"Scan a function over array axes while carrying along state.\n+ \"\"\"Scan a function over leading array axes while carrying along state.\n- Scan is similar to a fold, but also returns the successive reduced values. Its\n- type signature in brief is\n+ The type signature in brief is\n.. code-block:: haskell\n@@ -496,8 +495,8 @@ def scan(f, init, xs):\nwhere we use [t] here to denote the type t with an additional leading axis.\nThat is, if t is an array type then [t] represents the type with an additional\nleading axis, and if t is a pytree (container) type with array leaves then [t]\n- represents the type type with the same pytree structure and corresponding\n- leaves each with an additional leading axis.\n+ represents the type with the same pytree structure and corresponding leaves\n+ each with an additional leading axis.\nWhen both ``a`` and ``b`` are array types, the semantics of ``scan`` are given\nby this Python implementation::\n"
}
] | Python | Apache License 2.0 | google/jax | tweaks to scan docstring |
260,335 | 08.05.2019 13:15:21 | 25,200 | 54585743a3f646f96bc961d44f1a03a1f1b00ea4 | tweak scan docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -510,7 +510,8 @@ def scan(f, init, xs):\nreturn carry, np.stack(ys)\nUnlike that Python version, both ``a`` and ``b`` may be arbitrary pytree\n- types, and so multiple arrays can be scanned over at once.\n+ types, and so multiple arrays can be scanned over at once and produce multiple\n+ output arrays.\nAlso unlike that Python version, ``scan`` is a JAX primitive and is lowered to\na single XLA While HLO. That makes it useful for reducing compilation times\n"
}
] | Python | Apache License 2.0 | google/jax | tweak scan docstring |
260,335 | 08.05.2019 16:27:23 | 25,200 | 25ac1987986831dc12aee5a5db9f8bcf9b9c13e0 | must handle literals + closure conversion better.. | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -64,7 +64,7 @@ def jvp_subtrace(master, primals, tangents):\nyield (out_primal, out_tangent)\n@transformation_with_aux\n-def jvp_subtrace_aux(master, primals, tangents):\n+def jvp_subtrace_aux(instantiate, master, primals, tangents):\ntrace = JVPTrace(master, core.cur_sublevel())\nfor x in list(primals) + list(tangents):\nif isinstance(x, Tracer):\n@@ -73,6 +73,8 @@ def jvp_subtrace_aux(master, primals, tangents):\nout_tracer, aux_tracer = map(trace.full_raise, (ans, aux))\nout_primal, out_tangent = out_tracer.primal, out_tracer.tangent\naux = aux_tracer.primal # ignore aux tangent\n+ if instantiate:\n+ out_tangent = instantiate_zeros(out_primal, out_tangent)\nyield (out_primal, out_tangent), aux\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -560,6 +560,7 @@ def isnone(x):\ndef _closure_convert_jaxpr(jaxpr):\n+ core.skip_checks or core.check_jaxpr(jaxpr)\nlifted_jaxpr = jaxpr.copy()\nlifted_jaxpr.constvars = ()\nlifted_jaxpr.invars = [tuple(jaxpr.constvars)] + list(jaxpr.invars)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -268,36 +268,33 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nassert node is not None\nenv[v] = node\n- def write_constant(v, val): write(v, c.Constant(val))\n- def write_param(v, shape): write(v, c.ParameterWithShape(shape))\n-\nenv = {}\nwrite(core.unitvar, c.Tuple())\n- core.pat_fmap(write_param, jaxpr.invars, arg_shapes)\nif const_vals:\n- core.pat_fmap(write_constant, jaxpr.constvars, const_vals)\n- core.pat_fmap(write_param, jaxpr.freevars, freevar_shapes)\n+ _map(write, jaxpr.constvars, map(c.Constant, const_vals))\n+ _map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\nelse:\nall_freevars = it.chain(jaxpr.constvars, jaxpr.freevars)\n- core.pat_fmap(write_param, all_freevars, freevar_shapes)\n-\n+ _map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\n+ _map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\nif not eqn.restructure:\n- in_nodes = map(read, eqn.invars)\n+ in_nodes = list(map(read, eqn.invars))\nelse:\nin_nodes = [xla_pack(c, map(read, invars)) if type(invars) is tuple\nelse read(invars) for invars in eqn.invars]\n- in_shapes = map(c.GetShape, in_nodes)\n+ in_shapes = _map(c.GetShape, in_nodes)\nsubcs = [\njaxpr_computation(\nsubjaxpr, (),\n- tuple(map(c.GetShape, map(read, const_bindings + freevar_bindings))),\n- *map(c.GetShape, in_nodes))\n+ _map(c.GetShape, map(read, const_bindings + freevar_bindings)),\n+ *in_shapes)\nfor subjaxpr, const_bindings, freevar_bindings in eqn.bound_subjaxprs]\n- subfuns = [(subc, tuple(map(read, const_bindings + freevar_bindings)))\n+ subfuns = [(subc, _map(read, const_bindings + freevar_bindings))\nfor subc, (_, const_bindings, freevar_bindings)\nin zip(subcs, eqn.bound_subjaxprs)]\nans = translation_rule(eqn.primitive)(c, *(subfuns + in_nodes), **eqn.params)\n+ c.GetShape(ans) # force xla to do shape error checking\nout_nodes = xla_destructure(c, ans) if eqn.destructure else [ans]\n_map(write, eqn.outvars, out_nodes)\nreturn c.Build(read(jaxpr.outvar))\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -41,393 +41,393 @@ def scan_reference(f, init, xs):\nclass LaxControlFlowTest(jtu.JaxTestCase):\n- def testWhileWithTuple(self):\n- limit = 10\n+ # def testWhileWithTuple(self):\n+ # limit = 10\n- def loop_cond(state):\n- pos, _ = state\n- return lax.lt(pos, limit)\n+ # def loop_cond(state):\n+ # pos, _ = state\n+ # return lax.lt(pos, limit)\n- def loop_body(state):\n- pos, count = state\n- return (lax.add(pos, 1), lax.add(count, 1))\n+ # def loop_body(state):\n+ # pos, count = state\n+ # return (lax.add(pos, 1), lax.add(count, 1))\n- def loop(init):\n- result = lax.while_loop(loop_cond, loop_body, (init, 0))\n- _, count = result\n- return count\n-\n- cloop = api.jit(loop)\n-\n- self.assertEqual(loop(2), limit - 2)\n- self.assertEqual(cloop(2), limit - 2)\n- self.assertEqual(cloop(2), limit - 2)\n- self.assertEqual(cloop(3), limit - 3)\n-\n- def testNestedWhile(self):\n-\n- def outer_loop(num): # pylint: disable=missing-docstring\n- def cond_fun(state):\n- num, i, _ = state\n- return lax.lt(i, num)\n-\n- def body_fun(state):\n- num, i, count = state\n- return (num, lax.add(i, 1), inner_loop(i, count))\n-\n- init_val = (num, 0, 0)\n- _, i, count = lax.while_loop(cond_fun, body_fun, init_val)\n- return (i, count)\n-\n- def inner_loop(i, count): # pylint: disable=missing-docstring\n- def cond_fun(state):\n- i, j, _ = state\n- return lax.le(j, i)\n-\n- def body_fun(state):\n- i, j, count = state\n- return (i, lax.add(j, 1), lax.add(count, 1))\n+ # def loop(init):\n+ # result = lax.while_loop(loop_cond, loop_body, (init, 0))\n+ # _, count = result\n+ # return count\n+\n+ # cloop = api.jit(loop)\n+\n+ # self.assertEqual(loop(2), limit - 2)\n+ # self.assertEqual(cloop(2), limit - 2)\n+ # self.assertEqual(cloop(2), limit - 2)\n+ # self.assertEqual(cloop(3), limit - 3)\n+\n+ # def testNestedWhile(self):\n+\n+ # def outer_loop(num): # pylint: disable=missing-docstring\n+ # def cond_fun(state):\n+ # num, i, _ = state\n+ # return lax.lt(i, num)\n+\n+ # def body_fun(state):\n+ # num, i, count = state\n+ # return (num, lax.add(i, 1), inner_loop(i, count))\n+\n+ # init_val = (num, 0, 0)\n+ # _, i, count = lax.while_loop(cond_fun, body_fun, init_val)\n+ # return (i, count)\n+\n+ # def inner_loop(i, count): # pylint: disable=missing-docstring\n+ # def cond_fun(state):\n+ # i, j, _ = state\n+ # return lax.le(j, i)\n+\n+ # def body_fun(state):\n+ # i, j, count = state\n+ # return (i, lax.add(j, 1), lax.add(count, 1))\n- init_val = (i, 0, count)\n- _, _, count = lax.while_loop(cond_fun, body_fun, init_val)\n- return count\n+ # init_val = (i, 0, count)\n+ # _, _, count = lax.while_loop(cond_fun, body_fun, init_val)\n+ # return count\n- cloop = api.jit(outer_loop)\n-\n- self.assertEqual(outer_loop(3), (3, 6))\n- self.assertEqual(cloop(3), (3, 6))\n- self.assertEqual(cloop(3), (3, 6))\n- self.assertEqual(cloop(2), (2, 3))\n- self.assertEqual(cloop(4), (4, 10))\n-\n- def testWhileWithClosure(self):\n-\n- def loop(init, local_limit, inc):\n-\n- def loop_cond(state):\n- pos, _ = state\n- return lax.lt(pos, local_limit)\n-\n- def loop_body(state):\n- effect[0] = True\n- pos, count = state\n- return (lax.add(pos, 1), lax.add(count, inc))\n-\n- result = lax.while_loop(loop_cond, loop_body, (init, 0))\n- _, count = result\n- return count\n-\n- cloop = api.jit(loop)\n-\n- limit = 10\n- effect = [False]\n- self.assertEqual(loop(2, limit, 1), limit - 2)\n- assert effect[0]\n- effect[0] = False\n- self.assertEqual(cloop(2, limit, 1), limit - 2)\n- assert effect[0]\n- effect[0] = False\n- self.assertEqual(cloop(2, limit, 1), limit - 2)\n- self.assertEqual(cloop(3, limit, 1), limit - 3)\n- assert not effect[0]\n-\n- def testWhileWithClosureJit(self):\n-\n- def loop(init, local_limit, inc):\n-\n- def loop_cond(state):\n- pos, _ = state\n- return lax.lt(pos, local_limit)\n-\n- def loop_body(state):\n- effect[0] = True\n- pos, count = state\n- f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))\n- return api.jit(f)(pos, inc)\n-\n- result = lax.while_loop(loop_cond, loop_body, (init, 0))\n- _, count = result\n- return count\n-\n- cloop = api.jit(loop)\n-\n- limit = 10\n- effect = [False]\n- self.assertEqual(loop(2, limit, 1), limit - 2)\n- assert effect[0]\n- effect[0] = False\n- self.assertEqual(cloop(2, limit, 1), limit - 2)\n- assert effect[0]\n- effect[0] = False\n- self.assertEqual(cloop(2, limit, 1), limit - 2)\n- self.assertEqual(cloop(3, limit, 1), limit - 3)\n- assert not effect[0]\n-\n- def testNestedWhileWithDynamicUpdateSlice(self):\n- num = 5\n-\n- def update_entry(arr, val, i, j):\n- val = lax.reshape(val, [1, 1])\n- return lax.dynamic_update_slice(arr, val, (i, j))\n-\n- def outer_loop(arr): # pylint: disable=missing-docstring\n-\n- def cond_fun(state):\n- i, num, _, _ = state\n- return lax.lt(i, num)\n-\n- def body_fun(state):\n- i, num, arr, out = state\n- return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))\n-\n- out = onp.zeros(arr.shape, dtype=arr.dtype)\n- init_val = (0, num, arr, out)\n- _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\n- return out\n-\n- def inner_loop(i, arr, out): # pylint: disable=missing-docstring\n-\n- def cond_fun(state):\n- i, j, _, _ = state\n- return lax.le(j, i)\n-\n- def body_fun(state):\n- i, j, arr, out = state\n- arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n- arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)\n- out = update_entry(out, arr_i_j, i, j)\n- return (i, lax.add(j, 1), arr, out)\n-\n- init_val = (i, 0, arr, out)\n- _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\n- return out\n-\n- cloop = api.jit(outer_loop)\n- arr = npr.RandomState(0).randn(5, 5)\n- self.assertAllClose(outer_loop(arr), onp.tril(arr), check_dtypes=False)\n- self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)\n- self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)\n-\n- def testLoopWithConjunctionCondition(self):\n- def sum_first_n(arr, num): # pylint: disable=missing-docstring\n- def cond_fun(state):\n- arr, num, i, _ = state\n- return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))\n-\n- def body_fun(state):\n- arr, num, i, total = state\n- arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n- return (arr, num, lax.add(i, 1), lax.add(total, arr_i))\n-\n- init_val = (arr, num, 0, 0.)\n- _, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)\n- return total\n-\n- cfun = api.jit(sum_first_n)\n- x = npr.RandomState(0).randn(10)\n-\n- for num in [0, 5, 10, 15]:\n- self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n- check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n-\n- def testForiLoopBasic(self):\n- def count(num):\n- def body_fun(i, tot):\n- return lax.add(tot, i)\n- return lax.fori_loop(0, num, body_fun, 0)\n-\n- cfun = api.jit(count)\n-\n- self.assertEqual(count(2), 1)\n- self.assertEqual(count(2), cfun(2))\n- self.assertEqual(count(3), 3)\n- self.assertEqual(count(3), cfun(3))\n- self.assertEqual(count(4), 6)\n- self.assertEqual(count(4), cfun(4))\n-\n- def testForiLoopClosure(self):\n- def count(num):\n- def body_fun(i, tot):\n- return lax.add(num, lax.add(tot, i))\n- return lax.fori_loop(0, num, body_fun, 0)\n-\n- cfun = api.jit(count)\n-\n- self.assertEqual(count(2), 1 + 2**2)\n- self.assertEqual(count(2), cfun(2))\n- self.assertEqual(count(3), 3 + 3**2)\n- self.assertEqual(count(3), cfun(3))\n- self.assertEqual(count(4), 6 + 4**2)\n- self.assertEqual(count(4), cfun(4))\n-\n- def testForiLoopTupleState(self):\n- def sum_first_n(arr, num):\n- def body_fun(i, state):\n- arr, total = state\n- arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n- return (arr, lax.add(total, arr_i))\n-\n- init_val = (arr, 0.)\n- _, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,\n- init_val)\n- return total\n-\n- cfun = api.jit(sum_first_n)\n- x = npr.RandomState(0).randn(10)\n-\n- for num in [0, 5, 10, 15]:\n- self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n- check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n-\n- def testForiLoopDictState(self):\n- def sum_first_n(arr, num):\n- def body_fun(i, state):\n- arr, total = state['arr'], state['total']\n- arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n- return {'arr': arr, 'total': lax.add(total, arr_i)}\n-\n- init_val = {'arr': arr, 'total': 0.}\n- out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)\n- return out_val['total']\n-\n- cfun = api.jit(sum_first_n)\n- x = npr.RandomState(0).randn(10)\n-\n- for num in [0, 5, 10, 15]:\n- self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n- check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n-\n- def testForiLoopEmptyTupleInState(self):\n- def sum_first_n(arr, num):\n- def body_fun(i, state):\n- arr, total, _ = state\n- arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n- return (arr, lax.add(total, arr_i), ())\n-\n- init_val = (arr, 0., ())\n- _, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)\n- return tot\n-\n- cfun = api.jit(sum_first_n)\n- x = npr.RandomState(0).randn(10)\n-\n- for num in [0, 5, 10, 15]:\n- self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n- check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n- self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n-\n- def testCond(self):\n- def fun(x):\n- if x < 3:\n- return (x, x)\n- else:\n- y = lax.mul(2, x)\n- return y, lax.mul(2, y)\n-\n- @api.jit\n- def cfun(x):\n- def false_fun(x):\n- y = lax.mul(2, x)\n- return y, lax.mul(2, y)\n- return lax.cond(lax.lt(x, 3), x, lambda x: (x, x), x, false_fun)\n-\n- self.assertEqual(fun(0), cfun(0))\n- self.assertEqual(fun(0), (0, 0))\n- self.assertEqual(fun(1), cfun(1))\n- self.assertEqual(fun(1), (1, 1))\n- self.assertEqual(fun(2), cfun(2))\n- self.assertEqual(fun(2), (2, 2))\n- self.assertEqual(fun(3), cfun(3))\n- self.assertEqual(fun(3), (6, 12))\n- self.assertEqual(fun(4), cfun(4))\n- self.assertEqual(fun(4), (8, 16))\n-\n- def testNestedCond(self):\n- def fun(x):\n- if x < 2:\n- return lax.mul(2, x)\n- else:\n- if x < 5:\n- return lax.mul(3, x)\n- else:\n- return lax.mul(4, x)\n-\n- @api.jit\n- def cfun(x):\n- return lax.cond(\n- lax.lt(x, 2),\n- x, lambda x: lax.mul(2, x),\n- x, lambda x: lax.cond(lax.lt(x, 5),\n- x, lambda x: lax.mul(3, x),\n- 4, lambda y: lax.mul(y, x)))\n-\n- self.assertEqual(cfun(1), 2)\n- self.assertEqual(cfun(3), 9)\n- self.assertEqual(cfun(6), 24)\n- self.assertEqual(cfun(1), fun(1))\n- self.assertEqual(cfun(3), fun(3))\n- self.assertEqual(cfun(6), fun(6))\n-\n- def testCondOneBranchConstant(self):\n- def fun(x):\n- if x < 3:\n- return 5.\n- else:\n- return x\n-\n- @api.jit\n- def cfun(x):\n- return lax.cond(lax.lt(x, 3), x, lambda x: 5, x, lambda x: x)\n-\n- self.assertEqual(fun(0), cfun(0))\n- self.assertEqual(cfun(0), 5)\n- self.assertEqual(fun(4), cfun(4))\n- self.assertEqual(cfun(4), 4)\n-\n- def testCondOneBranchConstantTuple(self):\n- def fun(x):\n- if x < 3:\n- return (1., 2., 3.)\n- else:\n- return (x, 2., 4.)\n-\n- @api.jit\n- def cfun(x):\n- return lax.cond(lax.lt(x, 3),\n- x, lambda x: (1, 2., 3.),\n- x, lambda x: (x, 2., 4.))\n-\n- self.assertEqual(fun(0), cfun(0))\n- self.assertEqual(cfun(0), (1, 2., 3.))\n- self.assertEqual(fun(4), cfun(4))\n- self.assertEqual(cfun(4), (4, 2., 4.))\n-\n- def testIssue514(self):\n- # just check this doesn't crash\n- lax.cond(True,\n- (0, 0), lambda x: (x[0], 0),\n- (1, 1), lambda x: x)\n-\n- def testIssue649(self):\n- from jax import lax\n-\n- def body(x):\n- a, b = x\n- return (7, b + 1)\n-\n- def cond(x):\n- a, b = x\n- return b < 10\n-\n- out = lax.while_loop(cond, body, (33, 4))\n- self.assertEqual(out, (7, 10))\n+ # cloop = api.jit(outer_loop)\n+\n+ # self.assertEqual(outer_loop(3), (3, 6))\n+ # self.assertEqual(cloop(3), (3, 6))\n+ # self.assertEqual(cloop(3), (3, 6))\n+ # self.assertEqual(cloop(2), (2, 3))\n+ # self.assertEqual(cloop(4), (4, 10))\n+\n+ # def testWhileWithClosure(self):\n+\n+ # def loop(init, local_limit, inc):\n+\n+ # def loop_cond(state):\n+ # pos, _ = state\n+ # return lax.lt(pos, local_limit)\n+\n+ # def loop_body(state):\n+ # effect[0] = True\n+ # pos, count = state\n+ # return (lax.add(pos, 1), lax.add(count, inc))\n+\n+ # result = lax.while_loop(loop_cond, loop_body, (init, 0))\n+ # _, count = result\n+ # return count\n+\n+ # cloop = api.jit(loop)\n+\n+ # limit = 10\n+ # effect = [False]\n+ # self.assertEqual(loop(2, limit, 1), limit - 2)\n+ # assert effect[0]\n+ # effect[0] = False\n+ # self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ # assert effect[0]\n+ # effect[0] = False\n+ # self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ # self.assertEqual(cloop(3, limit, 1), limit - 3)\n+ # assert not effect[0]\n+\n+ # def testWhileWithClosureJit(self):\n+\n+ # def loop(init, local_limit, inc):\n+\n+ # def loop_cond(state):\n+ # pos, _ = state\n+ # return lax.lt(pos, local_limit)\n+\n+ # def loop_body(state):\n+ # effect[0] = True\n+ # pos, count = state\n+ # f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))\n+ # return api.jit(f)(pos, inc)\n+\n+ # result = lax.while_loop(loop_cond, loop_body, (init, 0))\n+ # _, count = result\n+ # return count\n+\n+ # cloop = api.jit(loop)\n+\n+ # limit = 10\n+ # effect = [False]\n+ # self.assertEqual(loop(2, limit, 1), limit - 2)\n+ # assert effect[0]\n+ # effect[0] = False\n+ # self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ # assert effect[0]\n+ # effect[0] = False\n+ # self.assertEqual(cloop(2, limit, 1), limit - 2)\n+ # self.assertEqual(cloop(3, limit, 1), limit - 3)\n+ # assert not effect[0]\n+\n+ # def testNestedWhileWithDynamicUpdateSlice(self):\n+ # num = 5\n+\n+ # def update_entry(arr, val, i, j):\n+ # val = lax.reshape(val, [1, 1])\n+ # return lax.dynamic_update_slice(arr, val, (i, j))\n+\n+ # def outer_loop(arr): # pylint: disable=missing-docstring\n+\n+ # def cond_fun(state):\n+ # i, num, _, _ = state\n+ # return lax.lt(i, num)\n+\n+ # def body_fun(state):\n+ # i, num, arr, out = state\n+ # return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))\n+\n+ # out = onp.zeros(arr.shape, dtype=arr.dtype)\n+ # init_val = (0, num, arr, out)\n+ # _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\n+ # return out\n+\n+ # def inner_loop(i, arr, out): # pylint: disable=missing-docstring\n+\n+ # def cond_fun(state):\n+ # i, j, _, _ = state\n+ # return lax.le(j, i)\n+\n+ # def body_fun(state):\n+ # i, j, arr, out = state\n+ # arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n+ # arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)\n+ # out = update_entry(out, arr_i_j, i, j)\n+ # return (i, lax.add(j, 1), arr, out)\n+\n+ # init_val = (i, 0, arr, out)\n+ # _, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)\n+ # return out\n+\n+ # cloop = api.jit(outer_loop)\n+ # arr = npr.RandomState(0).randn(5, 5)\n+ # self.assertAllClose(outer_loop(arr), onp.tril(arr), check_dtypes=False)\n+ # self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)\n+ # self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)\n+\n+ # def testLoopWithConjunctionCondition(self):\n+ # def sum_first_n(arr, num): # pylint: disable=missing-docstring\n+ # def cond_fun(state):\n+ # arr, num, i, _ = state\n+ # return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))\n+\n+ # def body_fun(state):\n+ # arr, num, i, total = state\n+ # arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n+ # return (arr, num, lax.add(i, 1), lax.add(total, arr_i))\n+\n+ # init_val = (arr, num, 0, 0.)\n+ # _, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)\n+ # return total\n+\n+ # cfun = api.jit(sum_first_n)\n+ # x = npr.RandomState(0).randn(10)\n+\n+ # for num in [0, 5, 10, 15]:\n+ # self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n+ # check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+\n+ # def testForiLoopBasic(self):\n+ # def count(num):\n+ # def body_fun(i, tot):\n+ # return lax.add(tot, i)\n+ # return lax.fori_loop(0, num, body_fun, 0)\n+\n+ # cfun = api.jit(count)\n+\n+ # self.assertEqual(count(2), 1)\n+ # self.assertEqual(count(2), cfun(2))\n+ # self.assertEqual(count(3), 3)\n+ # self.assertEqual(count(3), cfun(3))\n+ # self.assertEqual(count(4), 6)\n+ # self.assertEqual(count(4), cfun(4))\n+\n+ # def testForiLoopClosure(self):\n+ # def count(num):\n+ # def body_fun(i, tot):\n+ # return lax.add(num, lax.add(tot, i))\n+ # return lax.fori_loop(0, num, body_fun, 0)\n+\n+ # cfun = api.jit(count)\n+\n+ # self.assertEqual(count(2), 1 + 2**2)\n+ # self.assertEqual(count(2), cfun(2))\n+ # self.assertEqual(count(3), 3 + 3**2)\n+ # self.assertEqual(count(3), cfun(3))\n+ # self.assertEqual(count(4), 6 + 4**2)\n+ # self.assertEqual(count(4), cfun(4))\n+\n+ # def testForiLoopTupleState(self):\n+ # def sum_first_n(arr, num):\n+ # def body_fun(i, state):\n+ # arr, total = state\n+ # arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n+ # return (arr, lax.add(total, arr_i))\n+\n+ # init_val = (arr, 0.)\n+ # _, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,\n+ # init_val)\n+ # return total\n+\n+ # cfun = api.jit(sum_first_n)\n+ # x = npr.RandomState(0).randn(10)\n+\n+ # for num in [0, 5, 10, 15]:\n+ # self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n+ # check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+\n+ # def testForiLoopDictState(self):\n+ # def sum_first_n(arr, num):\n+ # def body_fun(i, state):\n+ # arr, total = state['arr'], state['total']\n+ # arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n+ # return {'arr': arr, 'total': lax.add(total, arr_i)}\n+\n+ # init_val = {'arr': arr, 'total': 0.}\n+ # out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)\n+ # return out_val['total']\n+\n+ # cfun = api.jit(sum_first_n)\n+ # x = npr.RandomState(0).randn(10)\n+\n+ # for num in [0, 5, 10, 15]:\n+ # self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n+ # check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+\n+ # def testForiLoopEmptyTupleInState(self):\n+ # def sum_first_n(arr, num):\n+ # def body_fun(i, state):\n+ # arr, total, _ = state\n+ # arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)\n+ # return (arr, lax.add(total, arr_i), ())\n+\n+ # init_val = (arr, 0., ())\n+ # _, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)\n+ # return tot\n+\n+ # cfun = api.jit(sum_first_n)\n+ # x = npr.RandomState(0).randn(10)\n+\n+ # for num in [0, 5, 10, 15]:\n+ # self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),\n+ # check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+ # self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)\n+\n+ # def testCond(self):\n+ # def fun(x):\n+ # if x < 3:\n+ # return (x, x)\n+ # else:\n+ # y = lax.mul(2, x)\n+ # return y, lax.mul(2, y)\n+\n+ # @api.jit\n+ # def cfun(x):\n+ # def false_fun(x):\n+ # y = lax.mul(2, x)\n+ # return y, lax.mul(2, y)\n+ # return lax.cond(lax.lt(x, 3), x, lambda x: (x, x), x, false_fun)\n+\n+ # self.assertEqual(fun(0), cfun(0))\n+ # self.assertEqual(fun(0), (0, 0))\n+ # self.assertEqual(fun(1), cfun(1))\n+ # self.assertEqual(fun(1), (1, 1))\n+ # self.assertEqual(fun(2), cfun(2))\n+ # self.assertEqual(fun(2), (2, 2))\n+ # self.assertEqual(fun(3), cfun(3))\n+ # self.assertEqual(fun(3), (6, 12))\n+ # self.assertEqual(fun(4), cfun(4))\n+ # self.assertEqual(fun(4), (8, 16))\n+\n+ # def testNestedCond(self):\n+ # def fun(x):\n+ # if x < 2:\n+ # return lax.mul(2, x)\n+ # else:\n+ # if x < 5:\n+ # return lax.mul(3, x)\n+ # else:\n+ # return lax.mul(4, x)\n+\n+ # @api.jit\n+ # def cfun(x):\n+ # return lax.cond(\n+ # lax.lt(x, 2),\n+ # x, lambda x: lax.mul(2, x),\n+ # x, lambda x: lax.cond(lax.lt(x, 5),\n+ # x, lambda x: lax.mul(3, x),\n+ # 4, lambda y: lax.mul(y, x)))\n+\n+ # self.assertEqual(cfun(1), 2)\n+ # self.assertEqual(cfun(3), 9)\n+ # self.assertEqual(cfun(6), 24)\n+ # self.assertEqual(cfun(1), fun(1))\n+ # self.assertEqual(cfun(3), fun(3))\n+ # self.assertEqual(cfun(6), fun(6))\n+\n+ # def testCondOneBranchConstant(self):\n+ # def fun(x):\n+ # if x < 3:\n+ # return 5.\n+ # else:\n+ # return x\n+\n+ # @api.jit\n+ # def cfun(x):\n+ # return lax.cond(lax.lt(x, 3), x, lambda x: 5, x, lambda x: x)\n+\n+ # self.assertEqual(fun(0), cfun(0))\n+ # self.assertEqual(cfun(0), 5)\n+ # self.assertEqual(fun(4), cfun(4))\n+ # self.assertEqual(cfun(4), 4)\n+\n+ # def testCondOneBranchConstantTuple(self):\n+ # def fun(x):\n+ # if x < 3:\n+ # return (1., 2., 3.)\n+ # else:\n+ # return (x, 2., 4.)\n+\n+ # @api.jit\n+ # def cfun(x):\n+ # return lax.cond(lax.lt(x, 3),\n+ # x, lambda x: (1, 2., 3.),\n+ # x, lambda x: (x, 2., 4.))\n+\n+ # self.assertEqual(fun(0), cfun(0))\n+ # self.assertEqual(cfun(0), (1, 2., 3.))\n+ # self.assertEqual(fun(4), cfun(4))\n+ # self.assertEqual(cfun(4), (4, 2., 4.))\n+\n+ # def testIssue514(self):\n+ # # just check this doesn't crash\n+ # lax.cond(True,\n+ # (0, 0), lambda x: (x[0], 0),\n+ # (1, 1), lambda x: x)\n+\n+ # def testIssue649(self):\n+ # from jax import lax\n+\n+ # def body(x):\n+ # a, b = x\n+ # return (7, b + 1)\n+\n+ # def cond(x):\n+ # a, b = x\n+ # return b < 10\n+\n+ # out = lax.while_loop(cond, body, (33, 4))\n+ # self.assertEqual(out, (7, 10))\ndef testScanImpl(self):\nd = np.zeros(2)\n@@ -446,6 +446,20 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = scan_reference(f, c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ # ans = api.jit(lax.scan, (0,))(f, c, as_)\n+ # expected = scan_reference(f, c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ f = api.jit(f)\n+\n+ ans = lax.scan(f, c, as_)\n+ expected = scan_reference(f, c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ # ans = api.jit(lax.scan, (0,))(f, c, as_)\n+ # expected = scan_reference(f, c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n+\ndef testScanJVP(self):\nd = np.zeros(2)\ndef f(c, a):\n"
}
] | Python | Apache License 2.0 | google/jax | must handle literals + closure conversion better.. |
260,335 | 08.05.2019 17:41:36 | 25,200 | c08b9fee47f5c49a8ab6708f83488c7dc1a92910 | remove const_env from check_jaxpr, add scan trans | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -642,11 +642,6 @@ def check_jaxpr(jaxpr):\nread = partial(read_env, env)\nwrite = partial(write_env, env)\n- const_env = set()\n- read_const = partial(read_env, const_env)\n- write_const= partial(write_env, const_env)\n-\n- pat_fmap(write_const, jaxpr.constvars)\nwrite(unitvar)\npat_fmap(write, jaxpr.constvars)\npat_fmap(write, jaxpr.freevars)\n@@ -659,7 +654,7 @@ def check_jaxpr(jaxpr):\nfor invar in eqn.invars]\nfor subjaxpr, constvars, freevars in eqn.bound_subjaxprs:\nmap(read, freevars)\n- map(read_const, constvars)\n+ map(read, constvars)\ncheck_jaxpr(subjaxpr)\nmap(write, eqn.outvars)\nread(jaxpr.outvar)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -809,3 +809,4 @@ scan_p.def_impl(_scan_impl)\nad.primitive_jvps[scan_p] = _scan_jvp\nad.primitive_transposes[scan_p] = _scan_transpose\npe.custom_partial_eval_rules[scan_p] = _scan_partial_eval\n+xla.translations[scan_p] = partial(xla.lower_fun, _scan_impl)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -439,6 +439,8 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n+ f_jit = api.jit(f)\n+\nas_ = np.ones((5, 3))\nc = np.ones(4)\n@@ -446,18 +448,17 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = scan_reference(f, c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- # ans = api.jit(lax.scan, (0,))(f, c, as_)\n- # expected = scan_reference(f, c, as_)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- f = api.jit(f)\n-\n- ans = lax.scan(f, c, as_)\n+ ans = api.jit(lax.scan, (0,))(f, c, as_)\nexpected = scan_reference(f, c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- # ans = api.jit(lax.scan, (0,))(f, c, as_)\n- # expected = scan_reference(f, c, as_)\n+ ans = lax.scan(f_jit, c, as_)\n+ expected = scan_reference(f_jit, c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ # TODO(mattjj): debug!\n+ # ans = api.jit(lax.scan, (0,))(f_jit, c, as_)\n+ # expected = scan_reference(f_jit, c, as_)\n# self.assertAllClose(ans, expected, check_dtypes=False)\ndef testScanJVP(self):\n"
}
] | Python | Apache License 2.0 | google/jax | remove const_env from check_jaxpr, add scan trans |
260,335 | 09.05.2019 11:40:19 | 25,200 | ea8e414a8399ec3bb12e6cbcc561e09cb7c80d99 | improve jax.random shape error messages | [
{
"change_type": "MODIFY",
"old_path": "jax/random.py",
"new_path": "jax/random.py",
"diff": "@@ -218,6 +218,14 @@ def _random_bits(key, bit_width, shape):\n### random samplers\n+def _check_shape(name, shape):\n+ try:\n+ shape = tuple(map(int, shape))\n+ except TypeError:\n+ msg = \"{} requires a concrete tuple of integers as shape argument, got {}.\"\n+ raise ValueError(msg.format(name, shape))\n+\n+\ndef uniform(key, shape, dtype=onp.float32, minval=0., maxval=1.):\n\"\"\"Sample uniform random values in [minval, maxval) with given shape/dtype.\n@@ -235,6 +243,7 @@ def uniform(key, shape, dtype=onp.float32, minval=0., maxval=1.):\n@partial(jit, static_argnums=(1, 2))\ndef _uniform(key, shape, dtype, minval, maxval):\n+ _check_shape(\"uniform\", shape)\nif not onp.issubdtype(dtype, onp.floating):\nraise TypeError(\"uniform only accepts floating point dtypes.\")\n@@ -279,6 +288,7 @@ def randint(key, shape, minval, maxval, dtype=onp.int32):\n@partial(jit, static_argnums=(1, 4))\ndef _randint(key, shape, minval, maxval, dtype=onp.int32):\n+ _check_shape(\"randint\", shape)\nif not onp.issubdtype(dtype, onp.integer):\nraise TypeError(\"randint only accepts integer dtypes.\")\n@@ -374,6 +384,7 @@ def normal(key, shape, dtype=onp.float32):\n@partial(jit, static_argnums=(1, 2))\ndef _normal(key, shape, dtype):\n+ _check_shape(\"normal\", shape)\nlo = onp.nextafter(onp.array(-1., dtype), 0., dtype=dtype)\nhi = onp.array(1., dtype)\nu = uniform(key, shape, dtype, lo, hi)\n@@ -397,6 +408,7 @@ def bernoulli(key, p=onp.float32(0.5), shape=()):\n@partial(jit, static_argnums=(2,))\ndef _bernoulli(key, p, shape):\n+ _check_shape(\"bernoulli\", shape)\nshape = shape or onp.shape(p)\nif not onp.issubdtype(onp.float32, lax.dtype(p)):\np = lax.convert_element_type(p, onp.float32)\n@@ -425,6 +437,7 @@ def beta(key, a, b, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(3, 4))\ndef _beta(key, a, b, shape, dtype):\n+ _check_shape(\"beta\", shape)\na = lax.convert_element_type(a, dtype)\nb = lax.convert_element_type(b, dtype)\nshape = shape or lax.broadcast_shapes(np.shape(a), np.shape(b))\n@@ -450,6 +463,7 @@ def cauchy(key, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(1, 2))\ndef _cauchy(key, shape, dtype):\n+ _check_shape(\"cauchy\", shape)\nu = uniform(key, shape, dtype)\npi = _constant_like(u, onp.pi)\nreturn lax.tan(lax.mul(pi, lax.sub(u, _constant_like(u, 0.5))))\n@@ -473,6 +487,7 @@ def dirichlet(key, alpha, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(2, 3))\ndef _dirichlet(key, alpha, shape, dtype):\n+ _check_shape(\"dirichlet\", shape)\nalpha = asarray(alpha, dtype)\nshape = shape or alpha.shape[:-1]\ngamma_samples = gamma(key, alpha, shape + alpha.shape[-1:], dtype)\n@@ -495,6 +510,7 @@ def exponential(key, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(1, 2))\ndef _exponential(key, shape, dtype):\n+ _check_shape(\"exponential\", shape)\nu = uniform(key, shape, dtype)\n# taking 1 - u to move the domain of log to (0, 1] instead of [0, 1)\nreturn lax.neg(lax.log(lax.sub(_constant_like(u, 1), u)))\n@@ -568,6 +584,7 @@ def gamma(key, a, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(2, 3))\ndef _gamma(key, a, shape=(), dtype=onp.float32):\n+ _check_shape(\"gamma\", shape)\na = lax.convert_element_type(a, dtype)\nshape = shape or onp.shape(a)\nif onp.shape(a) != shape:\n@@ -594,6 +611,7 @@ def gumbel(key, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(1, 2))\ndef _gumbel(key, shape, dtype):\n+ _check_shape(\"gumbel\", shape)\nreturn -np.log(-np.log(uniform(key, shape, dtype)))\n@@ -613,6 +631,7 @@ def laplace(key, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(1, 2))\ndef _laplace(key, shape, dtype):\n+ _check_shape(\"laplace\", shape)\nu = uniform(key, shape, dtype, minval=-1., maxval=1.)\nreturn lax.mul(lax.sign(u), lax.log1p(lax.neg(lax.abs(u))))\n@@ -635,6 +654,7 @@ def pareto(key, b, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(2, 3))\ndef _pareto(key, b, shape, dtype):\n+ _check_shape(\"pareto\", shape)\nb = lax.convert_element_type(b, dtype)\nshape = shape or onp.shape(b)\nif onp.shape(b) != shape:\n@@ -661,6 +681,7 @@ def t(key, df, shape=(), dtype=onp.float32):\n@partial(jit, static_argnums=(2, 3))\ndef _t(key, df, shape, dtype):\n+ _check_shape(\"t\", shape)\ndf = lax.convert_element_type(df, dtype)\nshape = shape or onp.shape(df)\nkey_n, key_g = split(key)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/random_test.py",
"new_path": "tests/random_test.py",
"diff": "@@ -17,6 +17,7 @@ from __future__ import division\nfrom __future__ import print_function\nfrom unittest import SkipTest\n+import re\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n@@ -343,6 +344,20 @@ class LaxRandomTest(jtu.JaxTestCase):\nkeys = [random.fold_in(key, i) for i in range(10)]\nassert onp.unique(onp.ravel(keys)).shape == (20,)\n+ def testStaticShapeErrors(self):\n+ @api.jit\n+ def feature_map(n, d, sigma=1.0, seed=123):\n+ key = random.PRNGKey(seed)\n+ W = random.normal(key, (d, n)) / sigma\n+ w = random.normal(key, (d, )) / sigma\n+ b = 2 * np.pi * random.uniform(key, (d, ))\n+\n+ phi = lambda x, t: np.sqrt(2.0 / d) * np.cos(np.matmul(W, x) + w*t + b)\n+ return phi\n+\n+ self.assertRaisesRegex(ValueError, re.compile(r'.*requires a concrete.*'),\n+ lambda: feature_map(5, 3))\n+\nif __name__ == \"__main__\":\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | improve jax.random shape error messages |
260,335 | 09.05.2019 15:46:34 | 25,200 | 37f9c26858c241316ee607e1b08f1847703837ed | add collectives: allreduce min/max, permute | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_parallel.py",
"new_path": "jax/lax/lax_parallel.py",
"diff": "@@ -20,8 +20,10 @@ from jax.abstract_arrays import ShapedArray\nfrom jax.core import Primitive\nfrom jax.interpreters import ad\nfrom jax.interpreters import parallel\n+from jax.interpreters import xla\nfrom jax.interpreters import pxla\n-from jax.util import partial\n+from jax.util import partial, unzip2\n+from jax.lib import xla_bridge\n### parallel traceables\n@@ -32,6 +34,12 @@ def psum(x, axis_name):\ndef pmax(x, axis_name):\nreturn pmax_p.bind(x, axis_name=axis_name)\n+def pmin(x, axis_name):\n+ return pmin_p.bind(x, axis_name=axis_name)\n+\n+def ppermute(x, axis_name, perm):\n+ return ppermute_p.bind(x, axis_name=axis_name, perm=perm)\n+\ndef pswapaxes(x, axis_name, axis):\n\"\"\"Analogue to `np.swapaxes` involving a hidden axis.\n@@ -72,43 +80,74 @@ def _unbound_name_error(primitive_name, *args, **kwargs):\ndef PmapPrimitive(name):\nprim = Primitive(name)\nprim.def_impl(partial(_unbound_name_error, name))\n- prim.def_abstract_eval(lambda x, *args, **kwargs: x)\n+ prim.def_abstract_eval(lambda x, *args, **params: x)\nreturn prim\n-def _psum_serial_pmap_rule(vals, axes):\n+def _allreduce_serial_pmap_rule(reducer, vals, axes):\nval, = vals\naxis, = axes\n- return lax._reduce_sum(val, [axis]), None\n+ return reducer(val, [axis]), None\n-def _psum_transpose_rule(t, axis_name):\n- return [t]\n+def _allreduce_translation_rule(prim, c, val, device_groups):\n+ dtype = c.GetShape(val).numpy_dtype()\n+ scalar = xla_bridge.Shape.array_shape(dtype, ())\n+ computation = xla.primitive_computation(prim, scalar, scalar)\n+ return c.AllReduce(val, computation, replica_groups=device_groups)\n-def _psum_parallel_translation_rule(c, val, device_groups):\n- if len(device_groups) > 1:\n- return c.CrossReplicaSum(val, device_groups)\n- else:\n- return c.CrossReplicaSum(val)\npsum_p = PmapPrimitive('psum')\n-psum_p.def_impl(partial(_unbound_name_error, 'psum'))\n-psum_p.def_abstract_eval(lambda x, *args, **kwargs: x)\n-parallel.serial_pmap_primitive_rules[psum_p] = _psum_serial_pmap_rule\n-pxla.parallel_translation_rules[psum_p] = _psum_parallel_translation_rule\n-ad.deflinear(psum_p, _psum_transpose_rule)\nparallel.defreducer(lax.reduce_sum_p, psum_p)\n+parallel.serial_pmap_primitive_rules[psum_p] = \\\n+ partial(_allreduce_serial_pmap_rule, lax._reduce_sum)\n+# TODO(mattjj): replace translation rule when we update jaxlib\n+# pxla.parallel_translation_rules[psum_p] = \\\n+# partial(_allreduce_translation_rule, lax.add_p)\n+pxla.parallel_translation_rules[psum_p] = \\\n+ lambda c, val, device_groups: c.CrossReplicaSum(val, device_groups)\n+ad.deflinear(psum_p, lambda t, axis_name: [t])\n-def _pmax_serial_pmap_rule(vals, axes):\n- val, = vals\n- axis, = axes\n- return lax._reduce_max(val, [axis]), None\npmax_p = PmapPrimitive('pmax')\n-pmax_p.def_impl(partial(_unbound_name_error, 'pmax'))\n-pmax_p.def_abstract_eval(lambda x, *args, **kwargs: x)\n-parallel.serial_pmap_primitive_rules[pmax_p] = _pmax_serial_pmap_rule\nparallel.defreducer(lax.reduce_max_p, pmax_p)\n+parallel.serial_pmap_primitive_rules[pmax_p] = \\\n+ partial(_allreduce_serial_pmap_rule, lax._reduce_max)\n+pxla.parallel_translation_rules[pmax_p] = \\\n+ partial(_allreduce_translation_rule, lax.max_p)\n+\n+\n+pmin_p = PmapPrimitive('pmin')\n+parallel.defreducer(lax.reduce_min_p, pmin_p)\n+parallel.serial_pmap_primitive_rules[pmin_p] = \\\n+ partial(_allreduce_serial_pmap_rule, lax._reduce_min)\n+pxla.parallel_translation_rules[pmin_p] = \\\n+ partial(_allreduce_translation_rule, lax.min_p)\n+\n+\n+def _ppermute_translation_rule(c, x, device_groups, perm):\n+ group_size = len(perm)\n+ if not all(len(grp) == group_size for grp in device_groups):\n+ msg = (\"ppermute permutation must match device group size, got permutation \"\n+ \"{} for device_groups {}.\".format(perm, device_groups))\n+ raise ValueError(msg)\n+ if not all(0 <= i < group_size and 0 <= j < group_size for i, j in perm):\n+ msg = (\"ppermute permutation elements must take on values between 0 and \"\n+ \"the group size {}, but got {}.\")\n+ raise ValueError(msg.format(group_size, perm))\n+ sources, dests = unzip2(perm)\n+ if not (len(sources) == len(set(sources)) and len(dests) == len(set(dests))):\n+ msg = \"ppermute sources and destinations must be unique, got {}.\"\n+ raise ValueError(msg.format(perm))\n+\n+ full_perm = []\n+ for grp in device_groups:\n+ grp = list(sorted(grp))\n+ full_perm.extend((grp[src], grp[dst]) for src, dst in perm)\n+ return c.CollectivePermute(x, full_perm)\n+\n+ppermute_p = PmapPrimitive('ppermute')\n+pxla.parallel_translation_rules[ppermute_p] = _ppermute_translation_rule\ndef _pswapaxes_serial_pmap_rule(vals, axes, axis):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/pmap_test.py",
"new_path": "tests/pmap_test.py",
"diff": "@@ -280,6 +280,106 @@ class PmapTest(jtu.JaxTestCase):\nw = jit(lambda x: list(x)[0])(y)\nself.assertAllClose(w, x, check_dtypes=False)\n+ @jtu.skip_on_devices(\"cpu\", \"gpu\")\n+ def testCollectivePermute(self):\n+ device_count = xla_bridge.device_count()\n+ if device_count != 2:\n+ raise SkipTest(\"skipping because device_count != 2\")\n+\n+ f = lambda x: lax.ppermute(x, perm=[(0, 1), (1, 0)], axis_name='i')\n+ f = pmap(f, 'i')\n+\n+ x = np.arange(4 * device_count).reshape((device_count, 4))\n+ ans = f(x)\n+ expected = x[::-1]\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ @jtu.skip_on_devices(\"cpu\", \"gpu\")\n+ def testRule30(self):\n+ device_count = xla_bridge.device_count()\n+ if device_count != 2:\n+ raise SkipTest(\"skipping because device_count != 2\")\n+\n+ def update_board(board):\n+ # rule 30: https://en.wikipedia.org/wiki/Rule_30\n+ left = board[:-2]\n+ right = board[2:]\n+ center = board[1:-1]\n+ return lax.bitwise_xor(left, lax.bitwise_or(center, right))\n+\n+ @partial(pmap, axis_name='i')\n+ def step(board_slice):\n+ left, right = board_slice[:1], board_slice[-1:]\n+ left = lax.ppermute(left, perm=[(0, 1), (1, 0)], axis_name='i')\n+ right = lax.ppermute(right, perm=[(0, 1), (1, 0)], axis_name='i')\n+ left, right = right, left\n+\n+ enlarged_board_slice = np.concatenate([left, board_slice, right])\n+ return update_board(enlarged_board_slice)\n+\n+ board = onp.zeros(40, dtype=bool)\n+ board[board.shape[0] // 2] = True\n+ reshaped_board = board.reshape((device_count, -1))\n+\n+ boards = []\n+ def print_board(board):\n+ boards.append(''.join('*' if x else ' ' for x in board.ravel()))\n+\n+ print_board(reshaped_board)\n+ for _ in range(20):\n+ reshaped_board = step(reshaped_board)\n+ print_board(reshaped_board)\n+\n+ ans = '\\n'.join(boards)\n+ expected = '\\n'.join((\n+ ' * ',\n+ ' *** ',\n+ ' ** * ',\n+ ' ** **** ',\n+ ' ** * * ',\n+ ' ** **** *** ',\n+ ' ** * * * ',\n+ ' ** **** ****** ',\n+ ' ** * *** * ',\n+ ' ** **** ** * *** ',\n+ ' ** * * **** ** * ',\n+ ' ** **** ** * * **** ',\n+ ' ** * *** ** ** * * ',\n+ ' ** **** ** *** *** ** *** ',\n+ ' ** * * *** * *** * * ',\n+ ' ** **** ** * * ***** ******* ',\n+ ' ** * *** **** * *** * ',\n+ ' ** **** ** *** ** ** * *** ',\n+ ' ** * * *** * ** *** **** ** * ',\n+ ' ** **** ** * ****** * * *** ****',\n+ ' * * *** **** **** *** ** * ',\n+ ))\n+\n+ print(ans)\n+ self.assertEqual(ans, expected)\n+\n+ @jtu.skip_on_devices(\"cpu\", \"gpu\")\n+ def testReduceMax(self):\n+ f = pmap(lambda x: x - lax.pmax(x, 'i'), axis_name='i')\n+\n+ shape = (xla_bridge.device_count(), 4)\n+ x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)\n+ expected = x - onp.max(x, 0)\n+\n+ ans = f(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ @jtu.skip_on_devices(\"cpu\", \"gpu\")\n+ def testReduceMin(self):\n+ f = pmap(lambda x: x - lax.pmin(x, 'i'), axis_name='i')\n+\n+ shape = (xla_bridge.device_count(), 4)\n+ x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)\n+ expected = x - onp.min(x, 0)\n+\n+ ans = f(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | add collectives: allreduce min/max, permute
Co-authored-by: Peter Hawkins <phawkins@google.com> |
260,335 | 09.05.2019 20:00:24 | 25,200 | 5aea10c7b39b74a95a1bd7576d26b159503aa9ce | make static_argnums cache on value when possible
fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -46,7 +46,7 @@ from .tree_util import (process_pytree, node_types, build_tree, PyTreeDef,\ntree_map, tree_flatten, tree_unflatten, tree_structure,\ntree_transpose, leaf)\nfrom .util import (unzip2, unzip3, curry, partial, safe_map, safe_zip,\n- WrapHashably, prod)\n+ WrapHashably, Hashable, prod)\nfrom .lib.xla_bridge import canonicalize_dtype, device_count\nfrom .abstract_arrays import ShapedArray\nfrom .interpreters import partial_eval as pe\n@@ -824,11 +824,19 @@ def _argnums_partial(f, dyn_argnums, args):\ndyn_argnums = (dyn_argnums,)\nelse:\ndyn_argnums = tuple(dyn_argnums)\n- fixed_args = tuple([None if i in dyn_argnums else WrapHashably(arg)\n+ fixed_args = tuple([None if i in dyn_argnums else _wrap_hashably(arg)\nfor i, arg in enumerate(args)])\ndyn_args = tuple(args[i] for i in dyn_argnums)\nreturn _argnums_partial_(f, dyn_argnums, fixed_args), dyn_args\n+def _wrap_hashably(arg):\n+ try:\n+ hash(arg)\n+ except TypeError:\n+ return WrapHashably(arg)\n+ else:\n+ return Hashable(arg)\n+\n@lu.transformation\ndef _argnums_partial_(dyn_argnums, fixed_args, *dyn_args, **kwargs):\nargs = [None if arg is None else arg.val for arg in fixed_args]\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/util.py",
"new_path": "jax/util.py",
"diff": "@@ -189,6 +189,8 @@ def prod(xs):\nclass WrapHashably(object):\n+ __slots__ = [\"val\"]\n+\ndef __init__(self, val):\nself.val = val\n@@ -198,6 +200,18 @@ class WrapHashably(object):\ndef __eq__(self, other):\nreturn self.val is other.val\n+class Hashable(object):\n+ __slots__ = [\"val\"]\n+\n+ def __init__(self, val):\n+ self.val = val\n+\n+ def __hash__(self):\n+ return hash(self.val)\n+\n+ def __eq__(self, other):\n+ return self.val == other.val\n+\ndef get_module_functions(module):\n"
}
] | Python | Apache License 2.0 | google/jax | make static_argnums cache on value when possible
fixes #691 |
260,335 | 09.05.2019 07:23:39 | 25,200 | 085f06e4b61adc012da1590d55e7aabcb07230ae | add some PartialVal invariants | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -72,11 +72,13 @@ def jaxpr_as_fun(typed_jaxpr, *args):\nfor arg, in_aval, varname in zip(args, typed_jaxpr.in_avals, invars):\narg_aval, _ = _abstractify(arg)\nif arg_aval != in_aval:\n- raise TypeError(\"input type mismatch for arg {}\".format(varname))\n+ msg = \"input type mismatch for arg {}: arg {} for parameter {}.\"\n+ raise TypeError(msg.format(varname, arg_aval, in_aval))\nout = eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args)\nout_aval, _ = _abstractify(out)\nif out_aval != typed_jaxpr.out_aval:\n- raise TypeError(\"output type mismatch\")\n+ msg = \"output type mismatch: output value {} for output type {}.\"\n+ raise TypeError(msg.format(out_aval, typed_jaxpr.out_aval))\nreturn out\n@@ -538,9 +540,12 @@ class AbstractTuple(AbstractValue, tuple):\ndef __repr__(self):\nreturn '({})'.format(','.join(map(repr, self)))\n- def __bool__(self, ignored_tracer):\n+ def _bool(self, ignored_tracer):\nreturn bool(self)\n- __nonzero__ = __bool__\n+ _nonzero = _bool\n+\n+ def _eq(self, self_traced, other):\n+ return tuple(self_traced) == tuple(other)\nunit = JaxTuple(())\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -32,6 +32,19 @@ map = safe_map\nzip = safe_zip\ndef identity(x): return x\n+# A partial value (pval) is modeled as a pair (pv, const), as per\n+# type PVal = (PV, Const)\n+# data PV = NonePV | AbstractPV AbstractValue | JaxprTracerTuple [PV]\n+# type Const = MaybeTraced JaxType\n+# where the NonePV arm indicates a known (constant) value, the AbstractPV arm\n+# indicates an unknown value, and the JaxprTracerTuple indicates a finer-grained\n+# representation that might be a mixture.\n+# There are two additional invariants:\n+# 1. when the pv is a JaxprTracerTuple, then the const is a JaxTuple of the\n+# same length (or a traced version);\n+# 2. when the pv is an AbstractValue, then the const must be unit.\n+\n+\nclass JaxprTrace(Trace):\ndef pure(self, val):\nreturn self.new_const(val)\n@@ -126,7 +139,7 @@ class JaxprTrace(Trace):\nenv_tracers = map(trace.full_raise, env)\nbound_subjaxpr = (jaxpr, const_tracers, env_tracers)\neqn = JaxprEqn([], None, call_primitive, (bound_subjaxpr,),\n- False, False, {})\n+ False, False, params)\nreturn JaxprTracer(trace, PartialVal((out_pv, out_pv_const)), eqn)\nreturn out, todo\n@@ -297,10 +310,16 @@ Destructuring = namedtuple('Destructuring', ['i', 'eqn', 'key'])\nclass PartialVal(tuple):\ndef __new__(cls, xs):\n- assert core.skip_checks or (\n- isinstance(xs[0], valid_pv_types)\n- and isinstance(xs[1], core.Tracer) or core.valid_jaxtype(xs[1])\n- ), xs\n+ pv, const = xs\n+ if not core.skip_checks:\n+ # type checks\n+ assert isinstance(pv, valid_pv_types), xs\n+ assert isinstance(const, core.Tracer) or core.valid_jaxtype(const), xs\n+ # invariant checks\n+ if type(pv) is JaxprTracerTuple:\n+ assert len(pv) == len(const), xs\n+ if isinstance(pv, AbstractValue):\n+ assert const == core.unit, xs\nreturn tuple.__new__(cls, xs)\nvalid_pv_types = (AbstractValue, JaxprTracerTuple, type(None))\n@@ -371,15 +390,15 @@ def partial_val_aval(pv, const):\ndef pack_pvals(pvals):\npvs, consts = unzip2(pvals)\nif all(pv is None for pv in pvs):\n- pv_out = None\n+ return PartialVal((None, pack(consts)))\nelif all(isinstance(pv, AbstractValue) for pv in pvs):\npv_out = AbstractTuple(pvs)\n+ return PartialVal((pv_out, unit))\nelse:\npv_out = JaxprTracerTuple(pvs)\nreturn PartialVal((pv_out, pack(consts)))\n-\ndef abstractify(x):\nreturn PartialVal((core.concrete_aval(x), unit))\n@@ -584,7 +603,11 @@ def partial_eval_jaxpr(jaxpr, first_components):\ndef fun(*vals):\npvals = map(as_pval, jaxpr.in_avals, first_components, vals)\njaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n- (out_pv_c, out_pv_b), (out_const_c, out_const_b) = out_pval\n+ (out_pv_c, out_pv_b), out_const = out_pval\n+ if out_const is core.unit:\n+ out_const_c, out_const_b = core.unit, core.unit\n+ else:\n+ out_const_c, out_const_b = out_const\ncell.append((out_pv_c, out_pv_b, jaxpr_2))\nreturn pack((out_const_c, pack((out_const_b, pack(consts_2)))))\n@@ -646,5 +669,12 @@ def _split_avals(first_component, aval):\nelse:\nraise TypeError(t)\n+# TODO do we want this?\n+# def _abstract_unit_tree_like(aval):\n+# if type(aval) is AbstractTuple:\n+# return AbstractTuple(map(_abstract_unit_tree_like, aval))\n+# else:\n+# return AbstractTuple(())\n+\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -34,10 +34,13 @@ from jax.interpreters import batching\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import xla\nfrom jax.interpreters import ad\n-from jax.util import partial, unzip2\n+from jax.util import partial, unzip2, safe_map, safe_zip\nfrom jax.tree_util import build_tree, tree_unflatten\nfrom jax import ad_util\n+map = safe_map\n+zip = safe_zip\n+\n### fori_loop and while_loop\n@@ -447,12 +450,14 @@ def _is_const(x):\ndef _demote_aval_rank(xs):\n+ assert isinstance(xs, core.AbstractValue)\nif isinstance(xs, core.AbstractTuple):\nreturn core.AbstractTuple(map(_demote_aval_rank, xs))\nelse:\nreturn ShapedArray(xs.shape[1:], xs.dtype)\ndef _promote_aval_rank(n, xs):\n+ assert isinstance(xs, core.AbstractValue)\nif isinstance(xs, core.AbstractTuple):\nreturn core.AbstractTuple(map(partial(_promote_aval_rank, n), xs))\nelse:\n@@ -465,18 +470,21 @@ def _leading_dim_size(xs):\nreturn xs.shape[0]\ndef _empty_arrays(aval):\n+ assert isinstance(aval, core.AbstractValue)\nif isinstance(aval, core.AbstractTuple):\nreturn core.pack(map(_empty_arrays, aval))\nelse:\nreturn lax.full(aval.shape, 0, aval.dtype)\ndef _index_arrays(i, aval, xs):\n+ assert isinstance(aval, core.AbstractValue)\nif isinstance(aval, core.AbstractTuple):\nreturn core.pack(map(partial(_index_arrays, i), aval, xs))\nelse:\nreturn lax.dynamic_index_in_dim(xs, i, keepdims=False)\ndef _update_arrays(i, aval, xs, x):\n+ assert isinstance(aval, core.AbstractValue)\nif isinstance(aval, core.AbstractTuple):\nreturn core.pack(map(partial(_update_arrays, i), aval, xs, x))\nelse:\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -429,34 +429,33 @@ class LaxControlFlowTest(jtu.JaxTestCase):\n# out = lax.while_loop(cond, body, (33, 4))\n# self.assertEqual(out, (7, 10))\n- def testScanImpl(self):\n- d = np.zeros(2)\n- def f(c, a):\n- assert a.shape == (3,)\n- assert c.shape == (4,)\n- b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- c = np.sin(c * b)\n- assert b.shape == ()\n- return core.pack((c, b))\n-\n- f_jit = api.jit(f)\n-\n- as_ = np.ones((5, 3))\n- c = np.ones(4)\n-\n- ans = lax.scan(f, c, as_)\n- expected = scan_reference(f, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+ # def testScanImpl(self):\n+ # d = np.zeros(2)\n+ # def f(c, a):\n+ # assert a.shape == (3,)\n+ # assert c.shape == (4,)\n+ # b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ # c = np.sin(c * b)\n+ # assert b.shape == ()\n+ # return core.pack((c, b))\n+\n+ # f_jit = api.jit(f)\n+\n+ # as_ = np.ones((5, 3))\n+ # c = np.ones(4)\n+\n+ # ans = lax.scan(f, c, as_)\n+ # expected = scan_reference(f, c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.jit(lax.scan, (0,))(f, c, as_)\n- expected = scan_reference(f, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+ # ans = api.jit(lax.scan, (0,))(f, c, as_)\n+ # expected = scan_reference(f, c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n- ans = lax.scan(f_jit, c, as_)\n- expected = scan_reference(f_jit, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+ # ans = lax.scan(f_jit, c, as_)\n+ # expected = scan_reference(f_jit, c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n- # TODO(mattjj): debug!\n# ans = api.jit(lax.scan, (0,))(f_jit, c, as_)\n# expected = scan_reference(f_jit, c, as_)\n# self.assertAllClose(ans, expected, check_dtypes=False)\n@@ -471,46 +470,52 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n- as_ = np.ones((5, 3))\n- c = np.ones(4)\n-\n- ans = api.jvp(lambda c, as_: lax.scan(f, c, as_), (c, as_), (c, as_))[1]\n- expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- def testScanLinearize(self):\n- d = np.zeros(2)\n- def f(c, a):\n- assert a.shape == (3,)\n- assert c.shape == (4,)\n- b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- c = np.sin(c * b)\n- assert b.shape == ()\n- return core.pack((c, b))\n+ f_jit = api.jit(f)\nas_ = np.ones((5, 3))\nc = np.ones(4)\n- ans = api.linearize(lambda c, as_: lax.scan(f, c, as_), c, as_)[1](c, as_)\n- expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+ # ans = api.jvp(lambda c, as_: lax.scan(f, c, as_), (c, as_), (c, as_))[1]\n+ # expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n- def testScanGrad(self):\n- d = np.zeros(2)\n- def f(c, a):\n- assert a.shape == (3,)\n- assert c.shape == (4,)\n- b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- c = np.sin(c * b)\n- assert b.shape == ()\n- return core.pack((c, b))\n+ ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), (c, as_), (c, as_))[1]\n+ expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n- as_ = np.ones((5, 3))\n- c = np.ones(4)\n+ # def testScanLinearize(self):\n+ # d = np.zeros(2)\n+ # def f(c, a):\n+ # assert a.shape == (3,)\n+ # assert c.shape == (4,)\n+ # b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ # c = np.sin(c * b)\n+ # assert b.shape == ()\n+ # return core.pack((c, b))\n+\n+ # as_ = np.ones((5, 3))\n+ # c = np.ones(4)\n+\n+ # ans = api.linearize(lambda c, as_: lax.scan(f, c, as_), c, as_)[1](c, as_)\n+ # expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.grad(lambda c, as_: list( lax.scan(f, c, as_))[0].sum())(c, as_)\n- expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+ # def testScanGrad(self):\n+ # d = np.zeros(2)\n+ # def f(c, a):\n+ # assert a.shape == (3,)\n+ # assert c.shape == (4,)\n+ # b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ # c = np.sin(c * b)\n+ # assert b.shape == ()\n+ # return core.pack((c, b))\n+\n+ # as_ = np.ones((5, 3))\n+ # c = np.ones(4)\n+\n+ # ans = api.grad(lambda c, as_: list( lax.scan(f, c, as_))[0].sum())(c, as_)\n+ # expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\nif __name__ == '__main__':\n"
}
] | Python | Apache License 2.0 | google/jax | add some PartialVal invariants
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 09.05.2019 07:55:19 | 25,200 | 360e39756f19a530830046919336b8a1e34e82f3 | must guarantee progress on lattice... | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -30,7 +30,7 @@ from .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\ncheck_leaks = False\n# TODO(dougalm): put this behind a flag that's enabled during testing\n-skip_checks = False # not __debug__ # google doesn't use -O\n+skip_checks = True # not __debug__ # google doesn't use -O\nzip = safe_zip\nmap = safe_map\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -427,6 +427,9 @@ class DeviceTuple(DeviceValue):\ndef __repr__(self):\nreturn 'DeviceTuple(len={length})'.format(length=len(self))\n+ def __eq__(self, other):\n+ return tuple(self) == tuple(other)\n+\n# DeviceValues don't need to be dtype-canonicalized because we assume values on\n# the device have already been canonicalized.\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -594,14 +594,17 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr):\nwhere_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\nwhere_carry_zeros = where_init_zeros\n- while True:\n+ for i in range(1000):\nwhere_zeros = (where_consts_zeros, where_carry_zeros, where_xs_zeros)\njaxpr_jvp, where_zeros_out = ad.jvp_jaxpr(jaxpr, where_zeros)\n+ # TODO instantiate_as_far_as=(where_carry_zeros, True/False)\nwhere_carry_zeros_out, where_ys_zeros = where_zeros_out\nif where_carry_zeros_out == where_carry_zeros:\nbreak\nelse:\nwhere_carry_zeros = _binary_lattice_join(where_carry_zeros_out, where_carry_zeros)\n+ else:\n+ raise FixedPointError\n# convert_zeros is like strip_zeros but uses explicit lattice information to\n# instantiate zeros in some cases, namely in init_dot based on the fixed point\n@@ -647,14 +650,17 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\nfc_consts, fc_init, fc_xs = map(_is_const, in_pvs)\nfc_carry = fc_init\n- while True:\n+ for i in range(1000):\nfirst_components = (fc_consts, fc_carry, fc_xs)\njaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr(jaxpr, first_components)\n+ # TODO instantiate_as_far_as=(fc_carry, True/False)\nfc_carry_out, fc_ys = fc_out\nif fc_carry_out == fc_carry:\nbreak\nelse:\nfc_carry = _binary_lattice_join(fc_carry, fc_carry_out)\n+ else:\n+ raise FixedPointError\nconsts_tracer, init_tracer, xs_tracer = tracers\nlifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\n@@ -812,6 +818,9 @@ def _make_typed_jaxpr(traceable, in_avals):\nreturn core.TypedJaxpr(jaxpr, consts, in_avals, out_aval)\n+class FixedPointError(Exception): pass\n+\n+\nscan_p = core.Primitive(\"scan\")\nscan_p.def_impl(_scan_impl)\nad.primitive_jvps[scan_p] = _scan_jvp\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -460,28 +460,36 @@ class LaxControlFlowTest(jtu.JaxTestCase):\n# expected = scan_reference(f_jit, c, as_)\n# self.assertAllClose(ans, expected, check_dtypes=False)\n- def testScanJVP(self):\n- d = np.zeros(2)\n- def f(c, a):\n- assert a.shape == (3,)\n- assert c.shape == (4,)\n- b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- c = np.sin(c * b)\n- assert b.shape == ()\n- return core.pack((c, b))\n+# def testScanJVP(self):\n+# d = np.zeros(2)\n+# def f(c, a):\n+# assert a.shape == (3,)\n+# assert c.shape == (4,)\n+# b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+# c = np.sin(c * b)\n+# assert b.shape == ()\n+# return core.pack((c, b))\n- f_jit = api.jit(f)\n+# f_jit = api.jit(f)\n- as_ = np.ones((5, 3))\n- c = np.ones(4)\n+# as_ = np.ones((5, 3))\n+# c = np.ones(4)\n# ans = api.jvp(lambda c, as_: lax.scan(f, c, as_), (c, as_), (c, as_))[1]\n# expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n# self.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), (c, as_), (c, as_))[1]\n- expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n- self.assertAllClose(ans, expected, check_dtypes=False)\n+# ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), (c, as_), (c, as_))[1]\n+# expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+# ans = api.jvp(lambda c, as_: lax.scan(f_jit, c, as_), (c, as_), (c, as_))[1]\n+# expected = api.jvp(lambda c, as_: scan_reference(f_jit, c, as_), (c, as_), (c, as_))[1]\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+# ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f_jit, c, as_), (c, as_), (c, as_))[1]\n+# expected = api.jvp(lambda c, as_: scan_reference(f_jit, c, as_), (c, as_), (c, as_))[1]\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n# def testScanLinearize(self):\n# d = np.zeros(2)\n@@ -493,6 +501,8 @@ class LaxControlFlowTest(jtu.JaxTestCase):\n# assert b.shape == ()\n# return core.pack((c, b))\n+# f_jit = api.jit(f)\n+\n# as_ = np.ones((5, 3))\n# c = np.ones(4)\n@@ -500,23 +510,49 @@ class LaxControlFlowTest(jtu.JaxTestCase):\n# expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n# self.assertAllClose(ans, expected, check_dtypes=False)\n- # def testScanGrad(self):\n- # d = np.zeros(2)\n- # def f(c, a):\n- # assert a.shape == (3,)\n- # assert c.shape == (4,)\n- # b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n- # c = np.sin(c * b)\n- # assert b.shape == ()\n- # return core.pack((c, b))\n+# ans = api.linearize(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), c, as_)[1](c, as_)\n+# expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n- # as_ = np.ones((5, 3))\n- # c = np.ones(4)\n+# ans = api.linearize(lambda c, as_: lax.scan(f_jit, c, as_), c, as_)[1](c, as_)\n+# expected = api.linearize(lambda c, as_: scan_reference(f_jit, c, as_), c, as_)[1](c, as_)\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+# ans = api.linearize(lambda c, as_: api.jit(lax.scan, (0,))(f_jit, c, as_), c, as_)[1](c, as_)\n+# expected = api.linearize(lambda c, as_: scan_reference(f_jit, c, as_), c, as_)[1](c, as_)\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ def testScanGrad(self):\n+ d = np.zeros(2)\n+ def f(c, a):\n+ assert a.shape == (3,)\n+ assert c.shape == (4,)\n+ b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\n+ c = np.sin(c * b)\n+ assert b.shape == ()\n+ return core.pack((c, b))\n+\n+ f_jit = api.jit(f)\n+\n+ as_ = np.ones((5, 3))\n+ c = np.ones(4)\n# ans = api.grad(lambda c, as_: list( lax.scan(f, c, as_))[0].sum())(c, as_)\n# expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n# self.assertAllClose(ans, expected, check_dtypes=False)\n+ ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f, c, as_))[0].sum())(c, as_)\n+ expected = api.grad(lambda c, as_: list( scan_reference(f, c, as_))[0].sum())(c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ # ans = api.grad(lambda c, as_: list( lax.scan(f_jit, c, as_))[0].sum())(c, as_)\n+ # expected = api.grad(lambda c, as_: list(scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n+ # self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+# ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f_jit, c, as_))[0].sum())(c, as_)\n+# expected = api.grad(lambda c, as_: list( scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n+# self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | must guarantee progress on lattice...
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.05.2019 08:20:40 | 25,200 | 643c26fd77c665befc45bdada81ea00fea331095 | flip lattice conventions to confuse our enemies | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -533,60 +533,60 @@ def map_transpose(primitive, params, jaxpr, consts, freevar_vals, args, ct):\ndef jaxpr_as_fun(jaxpr, consts, *args):\nreturn core.eval_jaxpr(jaxpr, consts, (), *args)\n-def get_zeros(tangent):\n+def get_nonzeros(tangent):\nif tangent is zero:\n- return True\n+ return False\nelif isinstance(tangent, TangentTuple):\n- return tuple(map(get_zeros, tangent))\n+ return tuple(map(get_nonzeros, tangent))\nelse:\n- return False\n+ return True\n-def put_zeros(pack, iszero, x):\n- if iszero is True:\n- return zero\n- elif iszero is False:\n+def put_zeros(pack, isnonzero, x):\n+ if isnonzero is True:\nreturn x\n+ elif isnonzero is False:\n+ return zero\nelse:\n- return pack(map(partial(put_zeros, pack), iszero, x))\n+ return pack(map(partial(put_zeros, pack), isnonzero, x))\n-def strip_zeros(unit, pack, iszero, x):\n- if iszero is True:\n- return unit\n- elif iszero is False:\n+def strip_zeros(unit, pack, isnonzero, x):\n+ if isnonzero is True:\nreturn x\n+ elif isnonzero is False:\n+ return unit\nelse:\n- return pack(map(partial(strip_zeros, unit, pack), iszero, x))\n+ return pack(map(partial(strip_zeros, unit, pack), isnonzero, x))\n@transformation_with_aux\n-def f_jvp_traceable(zero_components, *primal_tangent_pairs):\n+def f_jvp_traceable(nonzero_components, *primal_tangent_pairs):\nprimals, tangents = unzip2(primal_tangent_pairs)\n- tangents_zeros = map(partial(put_zeros, TangentTuple), zero_components, tangents)\n+ tangents_zeros = map(partial(put_zeros, TangentTuple), nonzero_components, tangents)\nprimal_out, tangent_out = yield (primals, tangents_zeros), {}\n# TODO check output is tuple\n- zeros_out = get_zeros(tangent_out)\n- tangent_out_nonzero = strip_zeros(core.unit, pack, zeros_out, tangent_out)\n+ nonzeros_out = get_nonzeros(tangent_out)\n+ tangent_out_nonzero = strip_zeros(core.unit, pack, nonzeros_out, tangent_out)\nprimal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\n- yield pack(primal_tangent_pairs_out), zeros_out\n+ yield pack(primal_tangent_pairs_out), nonzeros_out\n-def jvp_jaxpr(jaxpr, zeros):\n+def jvp_jaxpr(jaxpr, nonzeros):\n# jaxpr :: d -> a -> b -> (c1, c2)\n# avals = (d, a, b)\n# f :: d -> a -> b -> (c1, c2)\nf = wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\n- f_jvp, out_zeros = f_jvp_traceable(jvp(f, instantiate=False), zeros)\n+ f_jvp, out_nonzeros = f_jvp_traceable(jvp(f, instantiate=False), nonzeros)\n# f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\ntangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\n- zeros, jaxpr.in_avals)\n+ nonzeros, jaxpr.in_avals)\npt_pvals = [pe.PartialVal((core.AbstractTuple((p_aval, t_aval)), core.unit))\nfor p_aval, t_aval in zip(jaxpr.in_avals, tangent_avals)]\njaxpr_out, pval_out, literals_out = pe.trace_to_jaxpr(\nf_jvp, pt_pvals, instantiate=True)\n# jaxpr_out :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\n- # out_zeros :: (zeros(c1), zeros(c2))\n+ # out_nonzeros :: (nonzeros(c1), nonzeros(c2))\nin_avals = tuple(map(core.AbstractTuple, zip(jaxpr.in_avals, tangent_avals)))\nout_aval, _ = pval_out\njaxpr_out = core.TypedJaxpr(jaxpr_out, literals_out, in_avals, out_aval)\n- return jaxpr_out, out_zeros()\n+ return jaxpr_out, out_nonzeros()\nprimitive_transposes[core.call_p] = partial(call_transpose, call_p)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -543,41 +543,40 @@ def unzip_tracer_tuple(pvals):\npvs, consts = unzip2(pvals)\nreturn PartialVal((JaxprTracerTuple(pvs), pack(consts)))\n-def as_pval(aval, is_known, val):\n- t = type(is_known)\n+def as_pval(aval, is_unknown, val):\n+ t = type(is_unknown)\nif t is tuple:\n- return unzip_tracer_tuple(map(as_pval, aval, is_known, val))\n+ return unzip_tracer_tuple(map(as_pval, aval, is_unknown, val))\nelif t is bool:\n- if is_known:\n- return PartialVal((None, val))\n- else:\n+ if is_unknown:\nreturn PartialVal((aval, core.unit))\n+ else:\n+ return PartialVal((None, val))\nelse:\nraise TypeError(t)\n-def as_pval2(aval, is_known):\n- t = type(is_known)\n+def as_pval2(aval, is_unknown):\n+ t = type(is_unknown)\nif t is tuple:\n- return unzip_tracer_tuple(map(as_pval2, aval, is_known))\n+ return unzip_tracer_tuple(map(as_pval2, aval, is_unknown))\nelif t is bool:\n- if is_known:\n- return PartialVal((aval, core.unit))\n- else:\n+ if is_unknown:\nreturn PartialVal((core.AbstractTuple(()), core.unit))\n+ else:\n+ return PartialVal((aval, core.unit))\nelse:\nraise TypeError(t)\n-def isnone(x):\n+def unknown(x):\nif x is None:\n- return True\n- elif type(x) is JaxprTracerTuple:\n- return tuple(map(isnone, x))\n- elif isinstance(x, AbstractValue):\nreturn False\n+ elif type(x) is JaxprTracerTuple:\n+ return tuple(map(unknown, x))\n+ elif isinstance(x, core.AbstractValue):\n+ return True\nelse:\nraise TypeError(type(x))\n-\ndef _closure_convert_jaxpr(jaxpr):\ncore.skip_checks or core.check_jaxpr(jaxpr)\nlifted_jaxpr = jaxpr.copy()\n@@ -593,7 +592,7 @@ def _pack_eqn(invars, outvar):\nreturn core.JaxprEqn(invars, [outvar], core.pack_p, (), False, False, {})\n-def partial_eval_jaxpr(jaxpr, first_components):\n+def partial_eval_jaxpr(jaxpr, second_components):\n# jaxpr :: d -> c -> a -> (c, b)\nf = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n@@ -601,7 +600,7 @@ def partial_eval_jaxpr(jaxpr, first_components):\n# we do some final-style output munging to place residuals\n# fun :: d1 -> c1 -> a1 -> (c1, (b1, res))\ndef fun(*vals):\n- pvals = map(as_pval, jaxpr.in_avals, first_components, vals)\n+ pvals = map(as_pval, jaxpr.in_avals, second_components, vals)\njaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n(out_pv_c, out_pv_b), out_const = out_pval\nif out_const is core.unit:\n@@ -611,7 +610,7 @@ def partial_eval_jaxpr(jaxpr, first_components):\ncell.append((out_pv_c, out_pv_b, jaxpr_2))\nreturn pack((out_const_c, pack((out_const_b, pack(consts_2)))))\n- pvals = map(as_pval2, jaxpr.in_avals, first_components)\n+ pvals = map(as_pval2, jaxpr.in_avals, second_components)\njaxpr_1, out_pval, consts_1 = trace_to_jaxpr(\nlu.wrap_init(fun), pvals, instantiate=True)\nout_pv_c, out_pv_b, jaxpr_2 = cell[0]\n@@ -622,11 +621,11 @@ def partial_eval_jaxpr(jaxpr, first_components):\n# doubly_lifted_jaxpr_2 :: d2 -> c2 -> (a2, res) -> (c2, b2)\nlifted_jaxpr_2 = _closure_convert_jaxpr(jaxpr_2)\ndoubly_lifted_jaxpr_2 = _move_and_pair_arg(lifted_jaxpr_2)\n- fc_out = fc_c_out, fc_b_out = isnone(out_pv_c), isnone(out_pv_b)\n+ sc_out = sc_c_out, sc_b_out = unknown(out_pv_c), unknown(out_pv_b)\n- in_avals_1, in_avals_2 = unzip2(map(_split_avals, first_components,\n+ in_avals_1, in_avals_2 = unzip2(map(_split_avals, second_components,\njaxpr.in_avals))\n- out_aval_1, out_aval_2 = _split_avals(fc_out, jaxpr.out_aval)\n+ out_aval_1, out_aval_2 = _split_avals(sc_out, jaxpr.out_aval)\n# in_avals_1 is already (d1, c1, a1), and out_aval_2 is already (c2, b2), but\n# we must munge:\n@@ -646,7 +645,7 @@ def partial_eval_jaxpr(jaxpr, first_components):\ntyped_jaxpr_1 = TypedJaxpr(jaxpr_1, consts_1, in_avals_1, lifted_out_aval_1)\ntyped_jaxpr_2 = TypedJaxpr(doubly_lifted_jaxpr_2, (), lifted_in_avals_2,\nout_aval_2)\n- return typed_jaxpr_1, typed_jaxpr_2, fc_out\n+ return typed_jaxpr_1, typed_jaxpr_2, sc_out\ndef _move_and_pair_arg(jaxpr):\nmoved_jaxpr = jaxpr.copy()\n@@ -655,26 +654,19 @@ def _move_and_pair_arg(jaxpr):\ncore.skip_checks or core.check_jaxpr(moved_jaxpr)\nreturn moved_jaxpr\n-def _split_avals(first_component, aval):\n- t = type(first_component)\n+def _split_avals(second_component, aval):\n+ t = type(second_component)\nif t is tuple:\nassert type(aval) is AbstractTuple\n- avals1, avals2 = unzip2(map(_split_avals, first_component, aval))\n+ avals1, avals2 = unzip2(map(_split_avals, second_component, aval))\nreturn AbstractTuple(avals1), AbstractTuple(avals2)\nelif t is bool:\n- if first_component:\n- return aval, AbstractTuple(())\n- else:\n+ if second_component:\nreturn AbstractTuple(()), aval\n+ else:\n+ return aval, AbstractTuple(())\nelse:\nraise TypeError(t)\n-# TODO do we want this?\n-# def _abstract_unit_tree_like(aval):\n-# if type(aval) is AbstractTuple:\n-# return AbstractTuple(map(_abstract_unit_tree_like, aval))\n-# else:\n-# return AbstractTuple(())\n-\ncustom_partial_eval_rules = {}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -427,28 +427,17 @@ def _maybe_tracer_tuple_to_abstract_tuple(tup):\n### scan\n-def _convert_zeros(keep_symbolic, example, tangent):\n+def _convert_zeros(convert_symbolic, example, tangent):\nif tangent is ad.zero:\n- if keep_symbolic:\n+ if not convert_symbolic:\nreturn core.unit\nelse:\nreturn ad.zeros_like_jaxval(example)\nelif type(tangent) is ad.TangentTuple:\n- return core.pack(map(_convert_zeros, keep_symbolic, example, tangent))\n+ return core.pack(map(_convert_zeros, convert_symbolic, example, tangent))\nelse:\nreturn tangent\n-def _is_const(x):\n- if x is None:\n- return True\n- elif type(x) is pe.JaxprTracerTuple:\n- return tuple(map(_is_const, x))\n- elif isinstance(x, core.AbstractValue):\n- return False\n- else:\n- raise TypeError(type(x))\n-\n-\ndef _demote_aval_rank(xs):\nassert isinstance(xs, core.AbstractValue)\nif isinstance(xs, core.AbstractTuple):\n@@ -589,28 +578,27 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr):\nconsts_aval, carry_aval, x_aval = jaxpr.in_avals\n_, y_aval = jaxpr.out_aval\n- where_consts_zeros = ad.get_zeros(consts_dot)\n- where_init_zeros = ad.get_zeros(init_dot)\n- where_xs_zeros = ad.get_zeros(xs_dot) # same as where_x_zeros b/c arrays\n+ consts_nonzeros = ad.get_nonzeros(consts_dot)\n+ init_nonzeros = ad.get_nonzeros(init_dot)\n+ xs_nonzeros = ad.get_nonzeros(xs_dot) # same as x_nonzeros b/c arrays\n- where_carry_zeros = where_init_zeros\n- for i in range(1000):\n- where_zeros = (where_consts_zeros, where_carry_zeros, where_xs_zeros)\n- jaxpr_jvp, where_zeros_out = ad.jvp_jaxpr(jaxpr, where_zeros)\n- # TODO instantiate_as_far_as=(where_carry_zeros, True/False)\n- where_carry_zeros_out, where_ys_zeros = where_zeros_out\n- if where_carry_zeros_out == where_carry_zeros:\n+ carry_nonzeros = init_nonzeros\n+ for _ in range(1000):\n+ nonzeros = (consts_nonzeros, carry_nonzeros, xs_nonzeros)\n+ jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(jaxpr, nonzeros)\n+ carry_nonzeros_out, ys_nonzeros = nonzeros_out\n+ if carry_nonzeros_out == carry_nonzeros:\nbreak\nelse:\n- where_carry_zeros = _binary_lattice_join(where_carry_zeros_out, where_carry_zeros)\n+ carry_nonzeros = _binary_lattice_join(carry_nonzeros_out, carry_nonzeros)\nelse:\nraise FixedPointError\n# convert_zeros is like strip_zeros but uses explicit lattice information to\n# instantiate zeros in some cases, namely in init_dot based on the fixed point\n- nonzero_init_dot = _convert_zeros(where_carry_zeros, init, init_dot)\n- nonzero_consts_dot = _convert_zeros(where_consts_zeros, consts, consts_dot)\n- nonzero_xs_dot = _convert_zeros(where_xs_zeros, xs, xs_dot)\n+ nonzero_init_dot = _convert_zeros(carry_nonzeros, init, init_dot)\n+ nonzero_consts_dot = _convert_zeros(consts_nonzeros, consts, consts_dot)\n+ nonzero_xs_dot = _convert_zeros(xs_nonzeros, xs, xs_dot)\nconsts_dual = core.pack((consts, nonzero_consts_dot))\ninit_dual = core.pack((init, nonzero_init_dot))\n@@ -621,10 +609,10 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr):\nforward=forward, length=length, jaxpr=jaxpr_jvp)\nys, ys_dot = ys_dual\n- ys_dot = ad.put_zeros(ad.TangentTuple, where_ys_zeros, ys_dot)\n+ ys_dot = ad.put_zeros(ad.TangentTuple, ys_nonzeros, ys_dot)\ncarry_out, carry_out_dot = carry_out_dual\n- carry_out_dot = ad.put_zeros(ad.TangentTuple, where_carry_zeros_out, carry_out_dot)\n+ carry_out_dot = ad.put_zeros(ad.TangentTuple, carry_nonzeros_out, carry_out_dot)\nreturn core.pack((carry_out, ys)), ad.TangentTuple((carry_out_dot, ys_dot))\ndef _binary_lattice_join(a, b):\n@@ -636,38 +624,38 @@ def _binary_lattice_join(a, b):\nelif t == (bool, tuple):\nreturn tuple(map(_binary_lattice_join, (a,) * len(b), b))\nelif t == (bool, bool):\n- return a and b\n+ return a or b\nelse:\nraise TypeError((type(a), type(b)))\ndef _scan_partial_eval(trace, *tracers, **kwargs):\n+ # Implements the Rumsfeld Transform: turn uknown unknowns into known unknowns\njaxpr = kwargs.pop('jaxpr')\nlength = kwargs.pop('length')\nforward = kwargs.pop('forward')\nassert not kwargs\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\n- fc_consts, fc_init, fc_xs = map(_is_const, in_pvs)\n+ sc_consts, sc_init, sc_xs = map(pe.unknown, in_pvs)\n- fc_carry = fc_init\n+ sc_carry = sc_init\nfor i in range(1000):\n- first_components = (fc_consts, fc_carry, fc_xs)\n- jaxpr_1, jaxpr_2, fc_out = pe.partial_eval_jaxpr(jaxpr, first_components)\n- # TODO instantiate_as_far_as=(fc_carry, True/False)\n- fc_carry_out, fc_ys = fc_out\n- if fc_carry_out == fc_carry:\n+ second_components = (sc_consts, sc_carry, sc_xs)\n+ jaxpr_1, jaxpr_2, sc_out = pe.partial_eval_jaxpr(jaxpr, second_components)\n+ sc_carry_out, sc_ys = sc_out\n+ if sc_carry_out == sc_carry:\nbreak\nelse:\n- fc_carry = _binary_lattice_join(fc_carry, fc_carry_out)\n+ sc_carry = _binary_lattice_join(sc_carry, sc_carry_out)\nelse:\nraise FixedPointError\nconsts_tracer, init_tracer, xs_tracer = tracers\n- lifted_init_tracer = _lift_tracer(trace, init_tracer, fc_carry)\n+ lifted_init_tracer = _lift_tracer(trace, init_tracer, sc_carry)\nlifted_tracers = consts_tracer, lifted_init_tracer, xs_tracer\nin_pvs, in_consts = unzip2([t.pval for t in lifted_tracers])\n- out_pv = _put_known_pvs(fc_out, jaxpr.out_aval)\n+ out_pv = _put_known_pvs(sc_out, jaxpr.out_aval)\nout_carry, (ys, residuals) = scan_p.bind(\n*in_consts, forward=forward, length=length, jaxpr=jaxpr_1)\n@@ -679,26 +667,26 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\ndict(forward=forward, length=length, jaxpr=jaxpr_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n-def _lift_tracer(trace, tracer, is_const):\n- t = type(is_const)\n+def _lift_tracer(trace, tracer, is_unkown):\n+ t = type(is_unkown)\nif t is bool:\n- if not is_const:\n+ if is_unkown:\nreturn trace.instantiate_const(tracer)\nelse:\nreturn tracer\nelif t is tuple:\ntracers = map(trace.full_raise, tracer)\n- return core.pack(map(partial(_lift_tracer, trace), tracers, is_const))\n+ return core.pack(map(partial(_lift_tracer, trace), tracers, is_unkown))\nelse:\nraise TypeError(t)\n-def _put_known_pvs(is_known, aval):\n- if is_known is True:\n+def _put_known_pvs(is_unknown, aval):\n+ if is_unknown is False:\nreturn None\n- elif is_known is False:\n+ elif is_unknown is True:\nreturn aval\nelse:\n- return pe.JaxprTracerTuple(map(_put_known_pvs, is_known, aval))\n+ return pe.JaxprTracerTuple(map(_put_known_pvs, is_unknown, aval))\ndef _scan_transpose(ct, consts, init, xs, forward, length, jaxpr):\n"
}
] | Python | Apache License 2.0 | google/jax | flip lattice conventions to confuse our enemies
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.05.2019 08:58:05 | 25,200 | a300319fd5df24ce2cc8a939a96faea498276e74 | add lattice instantiate-at logic, scan works! | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -47,8 +47,7 @@ def jvpfun(instantiate, primals, tangents):\nwith new_master(JVPTrace) as master:\nout_primal, out_tangent = yield (master, primals, tangents), {}\ndel master\n- if instantiate:\n- out_tangent = instantiate_zeros(out_primal, out_tangent)\n+ out_tangent = instantiate_zeros_at(instantiate, out_primal, out_tangent)\nyield (out_primal, out_tangent)\n@@ -73,8 +72,7 @@ def jvp_subtrace_aux(instantiate, master, primals, tangents):\nout_tracer, aux_tracer = map(trace.full_raise, (ans, aux))\nout_primal, out_tangent = out_tracer.primal, out_tracer.tangent\naux = aux_tracer.primal # ignore aux tangent\n- if instantiate:\n- out_tangent = instantiate_zeros(out_primal, out_tangent)\n+ out_tangent = instantiate_zeros_at(instantiate, out_primal, out_tangent)\nyield (out_primal, out_tangent), aux\n@@ -465,6 +463,19 @@ deflinear(core.pack_p, lambda t: list(t) if t is not zero else zero)\ndeflinear(add_jaxvals_p, lambda t: (t, t))\n+def instantiate_zeros_at(instantiate, example, tangent):\n+ t = type(instantiate)\n+ if t is tuple:\n+ # note to future selves: it wasn't clear whether to pack here\n+ return TangentTuple(map(instantiate_zeros_at, instantiate, example, tangent))\n+ elif t is bool:\n+ if instantiate:\n+ return instantiate_zeros(example, tangent)\n+ else:\n+ return tangent\n+ else:\n+ raise TypeError(t)\n+\ndef instantiate_zeros(example, tangent):\nif tangent is zero:\nreturn zeros_like_jaxval(example)\n@@ -568,12 +579,12 @@ def f_jvp_traceable(nonzero_components, *primal_tangent_pairs):\nprimal_tangent_pairs_out = [pack((p, t)) for p, t in zip(primal_out, tangent_out_nonzero)]\nyield pack(primal_tangent_pairs_out), nonzeros_out\n-def jvp_jaxpr(jaxpr, nonzeros):\n+def jvp_jaxpr(jaxpr, nonzeros, instantiate):\n# jaxpr :: d -> a -> b -> (c1, c2)\n# avals = (d, a, b)\n# f :: d -> a -> b -> (c1, c2)\nf = wrap_init(partial(jaxpr_as_fun, jaxpr.jaxpr, jaxpr.literals))\n- f_jvp, out_nonzeros = f_jvp_traceable(jvp(f, instantiate=False), nonzeros)\n+ f_jvp, out_nonzeros = f_jvp_traceable(jvp(f, instantiate=instantiate), nonzeros)\n# f_jvp :: (d, d') -> (a, a') -> (b, b') -> ((c1, c1'), (c2, c2'))\ntangent_avals = map(partial(strip_zeros, core.AbstractTuple(()), core.AbstractTuple),\nnonzeros, jaxpr.in_avals)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -424,14 +424,25 @@ def trace_to_subjaxpr(master, instantiate, pvals):\nout_tracer = yield in_tracers, {}\nout_tracer = trace.full_raise(out_tracer)\n- if instantiate:\n- out_tracer = trace.instantiate_const(out_tracer)\n+ out_tracer = instantiate_const_at(trace, instantiate, out_tracer)\njaxpr, consts, env = tracers_to_jaxpr(in_tracers, out_tracer)\nout_pval = out_tracer.pval\ndel trace, in_tracers, out_tracer\nyield jaxpr, (out_pval, consts, env)\n+def instantiate_const_at(trace, instantiate, tracer):\n+ t = type(instantiate)\n+ if t is tuple:\n+ return pack(map(partial(instantiate_const_at, trace), instantiate, tracer))\n+ elif t is bool:\n+ if instantiate:\n+ return trace.instantiate_const(tracer)\n+ else:\n+ return tracer\n+ else:\n+ raise TypeError(t)\n+\nFreeVar = namedtuple('FreeVar', ['val'])\nConstVar = namedtuple('ConstVar', ['val'])\n@@ -592,7 +603,7 @@ def _pack_eqn(invars, outvar):\nreturn core.JaxprEqn(invars, [outvar], core.pack_p, (), False, False, {})\n-def partial_eval_jaxpr(jaxpr, second_components):\n+def partial_eval_jaxpr(jaxpr, second_components, instantiate):\n# jaxpr :: d -> c -> a -> (c, b)\nf = lu.wrap_init(core.jaxpr_as_fun(jaxpr))\n@@ -601,7 +612,7 @@ def partial_eval_jaxpr(jaxpr, second_components):\n# fun :: d1 -> c1 -> a1 -> (c1, (b1, res))\ndef fun(*vals):\npvals = map(as_pval, jaxpr.in_avals, second_components, vals)\n- jaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals)\n+ jaxpr_2, out_pval, consts_2 = trace_to_jaxpr(f, pvals, instantiate=instantiate)\n(out_pv_c, out_pv_b), out_const = out_pval\nif out_const is core.unit:\nout_const_c, out_const_b = core.unit, core.unit\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -437,6 +437,15 @@ core.pytype_aval_mappings[DeviceTuple] = core.pytype_aval_mappings[JaxTuple]\npytype_aval_mappings[DeviceTuple] = op.attrgetter('aval')\ncanonicalize_dtype_handlers[DeviceTuple] = identity\n+def _device_tuple_constant_handler(c, val, canonicalize_types=True):\n+ py_val = pack(c.Constant(elt, canonicalize_types=canonicalize_types)\n+ for elt in val)\n+ return c.Constant(py_val)\n+xb.register_constant_handler(DeviceTuple, _device_tuple_constant_handler)\n+\n+# TODO(mattjj): could jit-compile a computation here\n+ad_util.jaxval_adders[DeviceTuple] = ad_util.add_jaxtuples\n+\ndef forward_method(attrname, self, fun, *args):\nreturn fun(getattr(self, attrname), *args)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -585,7 +585,8 @@ def _scan_jvp(primals, tangents, forward, length, jaxpr):\ncarry_nonzeros = init_nonzeros\nfor _ in range(1000):\nnonzeros = (consts_nonzeros, carry_nonzeros, xs_nonzeros)\n- jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(jaxpr, nonzeros)\n+ jaxpr_jvp, nonzeros_out = ad.jvp_jaxpr(jaxpr, nonzeros,\n+ instantiate=(carry_nonzeros, False))\ncarry_nonzeros_out, ys_nonzeros = nonzeros_out\nif carry_nonzeros_out == carry_nonzeros:\nbreak\n@@ -641,7 +642,8 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\nsc_carry = sc_init\nfor i in range(1000):\nsecond_components = (sc_consts, sc_carry, sc_xs)\n- jaxpr_1, jaxpr_2, sc_out = pe.partial_eval_jaxpr(jaxpr, second_components)\n+ jaxpr_1, jaxpr_2, sc_out = pe.partial_eval_jaxpr(jaxpr, second_components,\n+ instantiate=(sc_carry, False))\nsc_carry_out, sc_ys = sc_out\nif sc_carry_out == sc_carry:\nbreak\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -542,19 +542,17 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- # TODO(mattjj): scan fix\n- # ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f, c, as_))[0].sum())(c, as_)\n- # expected = api.grad(lambda c, as_: list( scan_reference(f, c, as_))[0].sum())(c, as_)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n+ ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f, c, as_))[0].sum())(c, as_)\n+ expected = api.grad(lambda c, as_: list( scan_reference(f, c, as_))[0].sum())(c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\nans = api.grad(lambda c, as_: list( lax.scan(f_jit, c, as_))[0].sum())(c, as_)\nexpected = api.grad(lambda c, as_: list(scan_reference(f_jit, c, as_))[0].sum())(c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- # TODO(mattjj): scan fix\n- # ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f_jit, c, as_))[0].sum())(c, as_)\n- # expected = api.grad(lambda c, as_: list( scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n- # self.assertAllClose(ans, expected, check_dtypes=False)\n+ ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f_jit, c, as_))[0].sum())(c, as_)\n+ expected = api.grad(lambda c, as_: list( scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\nif __name__ == '__main__':\n"
}
] | Python | Apache License 2.0 | google/jax | add lattice instantiate-at logic, scan works!
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.05.2019 09:48:51 | 25,200 | 48811fbb0e567cbde94c1eab0fe48c1eee0bac6b | fix join_pvals bug from cond | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -360,8 +360,17 @@ def join_pvals(pval1, pval2):\nelse:\n# the pvals are tuples with some mixtures of known/unknown\nassert isinstance(pv1, JaxprTracerTuple) or isinstance(pv2, JaxprTracerTuple)\n- pv1 = [None] * len(pv2) if pv1 is None else pv1\n- pv2 = [None] * len(pv1) if pv2 is None else pv2\n+ def explode(pv, const):\n+ if isinstance(pv, AbstractValue):\n+ assert const == core.unit\n+ const = [core.unit] * len(pv)\n+ elif pv is None:\n+ pv = [None] * len(const)\n+ else:\n+ assert isinstance(pv, JaxprTracerTuple)\n+ return pv, const\n+ pv1, const1 = explode(pv1, const1)\n+ pv2, const2 = explode(pv2, const2)\npvals1, pvals2 = zip(pv1, const1), zip(pv2, const2)\njoin_pvs, join_consts = unzip2(map(join_pvals, pvals1, pvals2))\nif all(isinstance(pv, AbstractValue) for pv in join_pvs):\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -409,12 +409,11 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nself.assertEqual(fun(4), cfun(4))\nself.assertEqual(cfun(4), (4, 2., 4.))\n- # TODO(mattjj): post-scan fix\n- # def testIssue514(self):\n- # # just check this doesn't crash\n- # lax.cond(True,\n- # (0, 0), lambda x: (x[0], 0),\n- # (1, 1), lambda x: x)\n+ def testIssue514(self):\n+ # just check this doesn't crash\n+ lax.cond(True,\n+ (0, 0), lambda x: (x[0], 0),\n+ (1, 1), lambda x: x)\ndef testIssue649(self):\nfrom jax import lax\n"
}
] | Python | Apache License 2.0 | google/jax | fix join_pvals bug from cond
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 10.05.2019 10:18:37 | 25,200 | 840c6607732e47540efeb7325c61c065ce4df19d | refine scan test granularity | [
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -19,6 +19,7 @@ from __future__ import print_function\nfrom functools import partial\nfrom absl.testing import absltest\n+from absl.testing import parameterized\nimport numpy as onp\nimport numpy.random as npr\n@@ -429,7 +430,12 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nout = lax.while_loop(cond, body, (33, 4))\nself.assertEqual(out, (7, 10))\n- def testScanImpl(self):\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"jit_scan={}_jit_f={}\".format(jit_scan, jit_f),\n+ \"jit_scan\": jit_scan, \"jit_f\": jit_f}\n+ for jit_scan in [False, True]\n+ for jit_f in [False, True])\n+ def testScanImpl(self, jit_scan, jit_f):\nd = np.zeros(2)\ndef f(c, a):\nassert a.shape == (3,)\n@@ -439,28 +445,26 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n- f_jit = api.jit(f)\n+ if jit_f:\n+ f = api.jit(f)\n+ if jit_scan:\n+ scan = api.jit(lax.scan, (0,))\n+ else:\n+ scan = lax.scan\nas_ = np.ones((5, 3))\nc = np.ones(4)\n- ans = lax.scan(f, c, as_)\n- expected = scan_reference(f, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.jit(lax.scan, (0,))(f, c, as_)\n+ ans = scan(f, c, as_)\nexpected = scan_reference(f, c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- ans = lax.scan(f_jit, c, as_)\n- expected = scan_reference(f_jit, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.jit(lax.scan, (0,))(f_jit, c, as_)\n- expected = scan_reference(f_jit, c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- def testScanJVP(self):\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"jit_scan={}_jit_f={}\".format(jit_scan, jit_f),\n+ \"jit_scan\": jit_scan, \"jit_f\": jit_f}\n+ for jit_scan in [False, True]\n+ for jit_f in [False, True])\n+ def testScanJVP(self, jit_scan, jit_f):\nd = np.zeros(2)\ndef f(c, a):\nassert a.shape == (3,)\n@@ -470,28 +474,26 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n- f_jit = api.jit(f)\n+ if jit_f:\n+ f = api.jit(f)\n+ if jit_scan:\n+ scan = api.jit(lax.scan, (0,))\n+ else:\n+ scan = lax.scan\nas_ = np.ones((5, 3))\nc = np.ones(4)\n- ans = api.jvp(lambda c, as_: lax.scan(f, c, as_), (c, as_), (c, as_))[1]\n- expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), (c, as_), (c, as_))[1]\n+ ans = api.jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))[1]\nexpected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))[1]\nself.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.jvp(lambda c, as_: lax.scan(f_jit, c, as_), (c, as_), (c, as_))[1]\n- expected = api.jvp(lambda c, as_: scan_reference(f_jit, c, as_), (c, as_), (c, as_))[1]\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.jvp(lambda c, as_: api.jit(lax.scan, (0,))(f_jit, c, as_), (c, as_), (c, as_))[1]\n- expected = api.jvp(lambda c, as_: scan_reference(f_jit, c, as_), (c, as_), (c, as_))[1]\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- def testScanLinearize(self):\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"jit_scan={}_jit_f={}\".format(jit_scan, jit_f),\n+ \"jit_scan\": jit_scan, \"jit_f\": jit_f}\n+ for jit_scan in [False, True]\n+ for jit_f in [False, True])\n+ def testScanLinearize(self, jit_scan, jit_f):\nd = np.zeros(2)\ndef f(c, a):\nassert a.shape == (3,)\n@@ -501,28 +503,26 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n- f_jit = api.jit(f)\n+ if jit_f:\n+ f = api.jit(f)\n+ if jit_scan:\n+ scan = api.jit(lax.scan, (0,))\n+ else:\n+ scan = lax.scan\nas_ = np.ones((5, 3))\nc = np.ones(4)\n- ans = api.linearize(lambda c, as_: lax.scan(f, c, as_), c, as_)[1](c, as_)\n- expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.linearize(lambda c, as_: api.jit(lax.scan, (0,))(f, c, as_), c, as_)[1](c, as_)\n+ ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)\nexpected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.linearize(lambda c, as_: lax.scan(f_jit, c, as_), c, as_)[1](c, as_)\n- expected = api.linearize(lambda c, as_: scan_reference(f_jit, c, as_), c, as_)[1](c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.linearize(lambda c, as_: api.jit(lax.scan, (0,))(f_jit, c, as_), c, as_)[1](c, as_)\n- expected = api.linearize(lambda c, as_: scan_reference(f_jit, c, as_), c, as_)[1](c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- def testScanGrad(self):\n+ @parameterized.named_parameters(\n+ {\"testcase_name\": \"jit_scan={}_jit_f={}\".format(jit_scan, jit_f),\n+ \"jit_scan\": jit_scan, \"jit_f\": jit_f}\n+ for jit_scan in [False, True]\n+ for jit_f in [False, True])\n+ def testScanGrad(self, jit_scan, jit_f):\nd = np.zeros(2)\ndef f(c, a):\nassert a.shape == (3,)\n@@ -532,27 +532,20 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nassert b.shape == ()\nreturn core.pack((c, b))\n- f_jit = api.jit(f)\n+ if jit_f:\n+ f = api.jit(f)\n+ if jit_scan:\n+ scan = api.jit(lax.scan, (0,))\n+ else:\n+ scan = lax.scan\nas_ = np.ones((5, 3))\nc = np.ones(4)\n- ans = api.grad(lambda c, as_: list( lax.scan(f, c, as_))[0].sum())(c, as_)\n- expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f, c, as_))[0].sum())(c, as_)\n+ ans = api.grad(lambda c, as_: list( scan(f, c, as_))[0].sum())(c, as_)\nexpected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n- ans = api.grad(lambda c, as_: list( lax.scan(f_jit, c, as_))[0].sum())(c, as_)\n- expected = api.grad(lambda c, as_: list(scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\n- ans = api.grad(lambda c, as_: list(api.jit(lax.scan, (0,))(f_jit, c, as_))[0].sum())(c, as_)\n- expected = api.grad(lambda c, as_: list( scan_reference(f_jit, c, as_))[0].sum())(c, as_)\n- self.assertAllClose(ans, expected, check_dtypes=False)\n-\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | refine scan test granularity |
260,335 | 10.05.2019 12:27:14 | 25,200 | c1f01222b7c8fa9877999baf895c714e2f55ee6b | generalize pmap collective-permute tests | [
{
"change_type": "MODIFY",
"old_path": "tests/pmap_test.py",
"new_path": "tests/pmap_test.py",
"diff": "@@ -283,25 +283,32 @@ class PmapTest(jtu.JaxTestCase):\n@jtu.skip_on_devices(\"cpu\", \"gpu\")\ndef testCollectivePermute(self):\ndevice_count = xla_bridge.device_count()\n- if device_count != 2:\n- raise SkipTest(\"skipping because device_count != 2\")\n-\n- f = lambda x: lax.ppermute(x, perm=[(0, 1), (1, 0)], axis_name='i')\n+ rotation = [(i, i + 1 % device_count) for i in range(device_count)]\n+ f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')\nf = pmap(f, 'i')\nx = np.arange(4 * device_count).reshape((device_count, 4))\nans = f(x)\n- expected = x[::-1]\n+ expected = onp.roll(x, shift=1, axis=0)\nself.assertAllClose(ans, expected, check_dtypes=False)\n@jtu.skip_on_devices(\"cpu\", \"gpu\")\ndef testRule30(self):\n+ # This is a test of collective_permute implementing a simple halo exchange\n+ # to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30\n+ # Halo exchange should be useful in spatially-sharded convolutions and in\n+ # other simulations.\ndevice_count = xla_bridge.device_count()\n- if device_count != 2:\n- raise SkipTest(\"skipping because device_count != 2\")\n+\n+ def send_right(x, axis_name):\n+ left_perm = [(i, (i + 1) % device_count) for i in range(device_count)]\n+ return lax.ppermute(x, perm=left_perm, axis_name=axis_name)\n+\n+ def send_left(x, axis_name):\n+ left_perm = [((i + 1) % device_count, i) for i in range(device_count)]\n+ return lax.ppermute(x, perm=left_perm, axis_name=axis_name)\ndef update_board(board):\n- # rule 30: https://en.wikipedia.org/wiki/Rule_30\nleft = board[:-2]\nright = board[2:]\ncenter = board[1:-1]\n@@ -310,10 +317,7 @@ class PmapTest(jtu.JaxTestCase):\n@partial(pmap, axis_name='i')\ndef step(board_slice):\nleft, right = board_slice[:1], board_slice[-1:]\n- left = lax.ppermute(left, perm=[(0, 1), (1, 0)], axis_name='i')\n- right = lax.ppermute(right, perm=[(0, 1), (1, 0)], axis_name='i')\n- left, right = right, left\n-\n+ right, left = send_left(left, 'i'), send_right(right, 'i')\nenlarged_board_slice = np.concatenate([left, board_slice, right])\nreturn update_board(enlarged_board_slice)\n"
}
] | Python | Apache License 2.0 | google/jax | generalize pmap collective-permute tests |
260,335 | 10.05.2019 12:27:15 | 25,200 | 07bf50967b46909023d58eed14737d570920a569 | make jtu.skip_on_devices read jax_platform_name
fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/lib/xla_bridge.py",
"new_path": "jax/lib/xla_bridge.py",
"diff": "@@ -65,7 +65,7 @@ except ImportError:\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('jax_enable_x64',\n- strtobool(os.getenv('JAX_ENABLE_X64', \"False\")),\n+ strtobool(os.getenv('JAX_ENABLE_X64', 'False')),\n'Enable 64-bit types to be used.')\nflags.DEFINE_string(\n'jax_xla_backend', 'xla',\n@@ -74,12 +74,11 @@ flags.DEFINE_string(\n'jax_backend_target', 'local',\n'Either \"local\" or \"rpc:address\" to connect to a remote service target.')\nflags.DEFINE_string(\n- 'jax_platform_name', '',\n- 'Platform name for XLA. The default is to attempt to use a '\n- 'GPU if available, but fall back to CPU otherwise. To set '\n- 'the platform manually, pass \"cpu\" for CPU or \"gpu\" for '\n- 'GPU.')\n-\n+ 'jax_platform_name',\n+ os.getenv('JAX_PLATFORM_NAME', ''),\n+ 'Platform name for XLA. The default is to attempt to use a GPU if '\n+ 'available, but fall back to CPU otherwise. To set the platform manually, '\n+ 'pass \"cpu\" for CPU or \"gpu\" for GPU.')\ndef get_compile_options(num_replicas=None):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/test_util.py",
"new_path": "jax/test_util.py",
"diff": "@@ -20,6 +20,7 @@ import functools\nimport re\nimport itertools as it\nimport os\n+from unittest import SkipTest\nfrom absl.testing import absltest\nfrom absl.testing import parameterized\n@@ -33,15 +34,15 @@ from . import api\nfrom .config import flags\nfrom .util import partial\nfrom .tree_util import tree_multimap, tree_all, tree_map, tree_reduce\n+from .lib import xla_bridge\n# lbr tests placeholder\nFLAGS = flags.FLAGS\nflags.DEFINE_enum(\n- 'jax_test_dut',\n- 'cpu',\n- enum_values=['cpu', 'gpu', 'tpu'],\n+ 'jax_test_dut', '',\n+ enum_values=['', 'cpu', 'gpu', 'tpu'],\nhelp=\n'Describes the device under test in case special consideration is required.'\n)\n@@ -173,11 +174,11 @@ def skip_on_devices(*disabled_devices):\ndef skip(test_method):\n@functools.wraps(test_method)\ndef test_method_wrapper(self, *args, **kwargs):\n- device = FLAGS.jax_test_dut\n+ device = FLAGS.jax_test_dut or xla_bridge.get_backend().platform\nif device in disabled_devices:\ntest_name = getattr(test_method, '__name__', '[unknown test]')\n- return absltest.unittest.skip(\n- '{} not supported on {}.'.format(test_name, device.upper()))\n+ raise SkipTest('{} not supported on {}.'\n+ .format(test_name, device.upper()))\nreturn test_method(self, *args, **kwargs)\nreturn test_method_wrapper\nreturn skip\n@@ -191,9 +192,8 @@ def skip_on_flag(flag_name, skip_value):\nflag_value = getattr(FLAGS, flag_name)\nif flag_value == skip_value:\ntest_name = getattr(test_method, '__name__', '[unknown test]')\n- return absltest.unittest.skip(\n- '{} not supported when FLAGS.{} is {}'.format(\n- test_name, flag_name, flag_value))\n+ raise SkipTest('{} not supported when FLAGS.{} is {}'\n+ .format(test_name, flag_name, flag_value))\nreturn test_method(self, *args, **kwargs)\nreturn test_method_wrapper\nreturn skip\n"
}
] | Python | Apache License 2.0 | google/jax | make jtu.skip_on_devices read jax_platform_name
fixes #696 |
260,335 | 10.05.2019 13:26:51 | 25,200 | 134ce27fe9a696ab4fa954073670fc49b22d27b6 | sketch out ppermute transpose rule, untested | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_parallel.py",
"new_path": "jax/lax/lax_parallel.py",
"diff": "@@ -105,7 +105,6 @@ parallel.serial_pmap_primitive_rules[psum_p] = \\\n# partial(_allreduce_translation_rule, lax.add_p)\npxla.parallel_translation_rules[psum_p] = \\\nlambda c, val, device_groups: c.CrossReplicaSum(val, device_groups)\n-\nad.deflinear(psum_p, lambda t, axis_name: [t])\n@@ -126,11 +125,7 @@ pxla.parallel_translation_rules[pmin_p] = \\\ndef _ppermute_translation_rule(c, x, device_groups, perm):\n- group_size = len(perm)\n- if not all(len(grp) == group_size for grp in device_groups):\n- msg = (\"ppermute permutation must match device group size, got permutation \"\n- \"{} for device_groups {}.\".format(perm, device_groups))\n- raise ValueError(msg)\n+ group_size = len(device_groups[0])\nif not all(0 <= i < group_size and 0 <= j < group_size for i, j in perm):\nmsg = (\"ppermute permutation elements must take on values between 0 and \"\n\"the group size {}, but got {}.\")\n@@ -146,7 +141,13 @@ def _ppermute_translation_rule(c, x, device_groups, perm):\nfull_perm.extend((grp[src], grp[dst]) for src, dst in perm)\nreturn c.CollectivePermute(x, full_perm)\n+def _ppermute_transpose_rule(t, perm, axis_name):\n+ sources, dests = unzip2(perm)\n+ inverse_perm = zip(dests, srcs)\n+ return ppermute(t, axis_name=axis_name, perm=inverse_perm)\n+\nppermute_p = PmapPrimitive('ppermute')\n+# ad.deflinear(ppermute_p, _ppermute_transpose_rule) # TODO(mattjj): test this\npxla.parallel_translation_rules[ppermute_p] = _ppermute_translation_rule\n"
}
] | Python | Apache License 2.0 | google/jax | sketch out ppermute transpose rule, untested |
260,335 | 10.05.2019 14:00:21 | 25,200 | 5cfa18015c148e31bf25ce35fda7d11b3a9441bb | fix things we broke on the path to scan | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -56,13 +56,20 @@ class Jaxpr(object):\nreturn Jaxpr(self.constvars[:], self.freevars[:], self.invars[:],\nself.outvar, self.eqns[:])\n-class TypedJaxpr(namedtuple('TypedJaxpr', ['jaxpr', 'literals', 'in_avals', 'out_aval'])):\n+class TypedJaxpr(object):\ndef __init__(self, jaxpr, literals, in_avals, out_aval):\nassert type(jaxpr) is Jaxpr\nassert len(literals) == len(jaxpr.constvars)\nassert len(in_avals) == len(jaxpr.invars)\nassert not jaxpr.freevars\n- super(TypedJaxpr, self).__init__(jaxpr, literals, in_avals, out_aval)\n+\n+ self.jaxpr = jaxpr\n+ self.literals = literals\n+ self.in_avals = in_avals\n+ self.out_aval = out_aval\n+\n+ def __iter__(self):\n+ return iter((self.jaxpr, self.literals, self.in_avals, self.out_aval))\n@curry\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -252,6 +252,8 @@ def jaxpr_replicas(jaxpr):\ndef _max(itr): return max(list(itr) or [1])\ndef replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\n+ # TODO(mattjj): support argument pattern-matching\n+ assert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)\nc = xb.make_computation_builder(\"replicated_computation\")\ndef read(v):\n@@ -266,7 +268,6 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\nenv = {}\nwrite(core.unitvar, c.Tuple())\n- assert False # TODO update with pat_fmap\nif const_vals:\n_map(write, jaxpr.constvars, map(c.Constant, const_vals))\n_map(write, jaxpr.freevars, map(c.ParameterWithShape, freevar_shapes))\n@@ -275,8 +276,11 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\n_map(write, all_freevars, map(c.ParameterWithShape, freevar_shapes))\n_map(write, jaxpr.invars, map(c.ParameterWithShape, arg_shapes))\nfor eqn in jaxpr.eqns:\n- assert False # TODO udpate with eqn.restructure\n+ if not eqn.restructure:\nin_nodes = list(map(read, eqn.invars))\n+ else:\n+ in_nodes = [xla.xla_pack(c, map(read, invars)) if type(invars) is tuple\n+ else read(invars) for invars in eqn.invars]\nif eqn.primitive in parallel_translation_rules:\nname = eqn.params['axis_name']\nparams = {k: eqn.params[k] for k in eqn.params if k != 'axis_name'}\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -38,8 +38,6 @@ from ..lib import xla_bridge as xb\nfrom . import partial_eval as pe\nfrom . import ad\n-_map = safe_map # TODO remove\n-\nFLAGS = flags.FLAGS\nflags.DEFINE_bool('jax_device_values',\nstrtobool(os.getenv('JAX_DEVICE_VALUES', \"True\")),\n@@ -259,6 +257,8 @@ def build_jaxpr(jaxpr, const_vals, *abstract_args):\nreturn built_c\ndef jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\n+ # TODO(mattjj): support argument pattern-matching\n+ assert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)\nc = xb.make_computation_builder(\"jaxpr_computation\")\ndef read(v):\n"
},
{
"change_type": "DELETE",
"old_path": "tests/call_initial_test.py",
"new_path": null,
"diff": "-from functools import partial\n-\n-from jax.core import pack\n-import jax.core as core\n-import jax.numpy as np\n-from jax import jvp, linearize\n-from jax.initial_style import call_initial\n-\n-\n-def f1(x, y, z):\n- return core.pack((np.sin(x * y), y, 1.0))\n-\n-# def f1(x, y, z):\n-# return core.pack((np.sin(x * y), y))\n-\n-f2 = partial(call_initial, f1)\n-\n-xs = (1., 2., 3.)\n-xst = (4., 5., 6.)\n-\n-print \"\\neval\"\n-print f1(*xs)\n-print f2(*xs)\n-\n-\n-print \"\\njvp\"\n-print jvp(f1, xs, xst)\n-print jvp(f2, xs, xst)\n-\n-print \"\\nlinearize\"\n-print linearize(f1, *xs)[1](*xst)\n-print linearize(f2, *xs)[1](*xst)\n"
}
] | Python | Apache License 2.0 | google/jax | fix things we broke on the path to scan |
260,335 | 10.05.2019 14:24:15 | 25,200 | 78c804772e47f98721acd31a7e0953504aa504a8 | fix typo in pmap test | [
{
"change_type": "MODIFY",
"old_path": "tests/pmap_test.py",
"new_path": "tests/pmap_test.py",
"diff": "@@ -283,7 +283,7 @@ class PmapTest(jtu.JaxTestCase):\n@jtu.skip_on_devices(\"cpu\", \"gpu\")\ndef testCollectivePermute(self):\ndevice_count = xla_bridge.device_count()\n- rotation = [(i, i + 1 % device_count) for i in range(device_count)]\n+ rotation = [(i, (i + 1) % device_count) for i in range(device_count)]\nf = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')\nf = pmap(f, 'i')\n"
}
] | Python | Apache License 2.0 | google/jax | fix typo in pmap test |
260,335 | 10.05.2019 15:52:12 | 25,200 | 29e67f011905adf97660167fef69ec80fa6d9a67 | scan bug fixed, other cleanup | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -30,7 +30,7 @@ from .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\ncheck_leaks = False\n# TODO(dougalm): put this behind a flag that's enabled during testing\n-skip_checks = True # not __debug__ # google doesn't use -O\n+skip_checks = False # not __debug__ # google doesn't use -O\nzip = safe_zip\nmap = safe_map\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -302,11 +302,11 @@ class JVPTracer(Tracer):\n__slots__ = ['primal', 'tangent']\ndef __init__(self, trace, primal, tangent):\n+ if not core.skip_checks:\n+ _primal_tangent_shapes_match(primal, tangent)\nself.trace = trace\nself.primal = primal\nself.tangent = tangent\n- # TODO(mattjj,dougalm): behind skip_checks, check primal/tangent shapes and\n- # dtypes agree (up to jax_enable_x64 flag)\n@property\ndef aval(self):\n@@ -325,6 +325,15 @@ class JVPTracer(Tracer):\nelse:\nreturn self\n+def _primal_tangent_shapes_match(primal, tangent):\n+ if type(tangent) is TangentTuple:\n+ for p, t in zip(primal, tangent):\n+ _primal_tangent_shapes_match(p, t)\n+ elif tangent is not zero:\n+ primal_aval = raise_to_shaped(get_aval(primal))\n+ tangent_aval = raise_to_shaped(get_aval(tangent))\n+ assert primal_aval == tangent_aval\n+\n# -------------------- Primitives --------------------\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -374,7 +374,8 @@ def join_pvals(pval1, pval2):\npvals1, pvals2 = zip(pv1, const1), zip(pv2, const2)\njoin_pvs, join_consts = unzip2(map(join_pvals, pvals1, pvals2))\nif all(isinstance(pv, AbstractValue) for pv in join_pvs):\n- return PartialVal((AbstractTuple(join_pvs), pack(join_consts)))\n+ assert all(const == core.unit for const in join_consts)\n+ return PartialVal((AbstractTuple(join_pvs), core.unit))\nelse:\nreturn PartialVal((JaxprTracerTuple(join_pvs), pack(join_consts)))\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -533,13 +533,17 @@ def scan(f, init, xs):\nloop carry value and the second element represents the stacked outputs of\nthe second output of ``f`` when scanned over the leading axis of the inputs.\n\"\"\"\n+ (init, xs), in_trees = unzip2(map(pytree_to_jaxtupletree, (init, xs)))\n+ f, out_tree = pytree_fun_to_jaxtupletree_fun(lu.wrap_init(f), in_trees)\ncarry_pval = carry_aval, _ = _abstractify(init)\nxs_aval, _ = _abstractify(xs)\nx_aval = _demote_aval_rank(xs_aval)\nx_pval = pe.PartialVal((x_aval, core.unit))\njaxpr, pval_out, consts = pe.trace_to_jaxpr(\n- lu.wrap_init(f), (carry_pval, x_pval), instantiate=True)\n- (carry_aval_out, y_aval), _ = pval_out\n+ f, (carry_pval, x_pval), instantiate=True)\n+ pv_out, const_out = pval_out\n+ assert isinstance(pv_out, core.AbstractTuple) and const_out == core.unit\n+ carry_aval_out, y_aval = pv_out\nif carry_aval != carry_aval_out:\nmsg = (\"scanned function carry output does not match carry input: \"\n\"input carry is {} and output carry is {}\")\n@@ -550,8 +554,9 @@ def scan(f, init, xs):\nout_aval = core.AbstractTuple((carry_aval, y_aval))\njaxpr = core.TypedJaxpr(lifted_jaxpr, (), in_avals, out_aval)\nlength = _leading_dim_size(xs)\n- return scan_p.bind(core.pack(consts), init, xs,\n+ out = scan_p.bind(core.pack(consts), init, xs,\nforward=True, length=length, jaxpr=jaxpr)\n+ return build_tree(out_tree(), out)\ndef _scan_impl(consts, init, xs, forward, length, jaxpr):\n@@ -631,7 +636,6 @@ def _binary_lattice_join(a, b):\ndef _scan_partial_eval(trace, *tracers, **kwargs):\n- # Implements the Rumsfeld Transform: turn uknown unknowns into known unknowns\njaxpr = kwargs.pop('jaxpr')\nlength = kwargs.pop('length')\nforward = kwargs.pop('forward')\n@@ -657,7 +661,10 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\nlifted_tracers = consts_tracer, lifted_init_tracer, xs_tracer\nin_pvs, in_consts = unzip2([t.pval for t in lifted_tracers])\n- out_pv = _put_known_pvs(sc_out, jaxpr.out_aval)\n+ carry_aval, y_aval = jaxpr.out_aval\n+ ys_aval = _promote_aval_rank(length, y_aval)\n+ out_aval = core.AbstractTuple((carry_aval, ys_aval))\n+ out_pv = _put_known_pvs(sc_out, out_aval)\nout_carry, (ys, residuals) = scan_p.bind(\n*in_consts, forward=forward, length=length, jaxpr=jaxpr_1)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -546,6 +546,52 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def testScanRnn(self):\n+ r = npr.RandomState(0)\n+\n+ n_in = 4\n+ n_hid = 3\n+ n_out = 2\n+ length = 5\n+\n+ W_trans = r.randn(n_hid, n_hid + n_in)\n+ W_out = r.randn(n_out, n_hid + n_in)\n+ params = W_trans, W_out\n+\n+ inputs = r.randn(length, n_in)\n+ targets = r.randn(length, n_out)\n+\n+ def step(params, state, input):\n+ W_trans, W_out = params\n+ stacked = np.concatenate([state, input])\n+ output = np.tanh(np.dot(W_out, stacked))\n+ next_state = np.tanh(np.dot(W_trans, stacked))\n+ return core.pack((next_state, output))\n+\n+ def rnn(params, inputs):\n+ init_state = np.zeros(n_hid)\n+ _, outputs = lax.scan(partial(step, params), init_state, inputs)\n+ return outputs\n+\n+ def loss(params, inputs, targets):\n+ predictions = rnn(params, inputs)\n+ return np.sum((predictions - targets)**2)\n+\n+ # evaluation doesn't crash\n+ loss(params, inputs, targets)\n+\n+ # jvp evaluation doesn't crash\n+ api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))\n+\n+ # gradient evaluation doesn't crash\n+ api.grad(loss)(params, inputs, targets)\n+\n+ # gradient is zero in the right place\n+ predictions = rnn(params, inputs)\n+ ans = api.grad(loss)(params, inputs, predictions)\n+ expected = (onp.zeros_like(W_trans), onp.zeros_like(W_out))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | scan bug fixed, other cleanup |
260,335 | 10.05.2019 22:07:54 | 25,200 | 4fcd96f926b079f9bab2b0d1105f88a81067dd55 | make tests pass with skip_checks = False | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -30,7 +30,7 @@ from .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\ncheck_leaks = False\n# TODO(dougalm): put this behind a flag that's enabled during testing\n-skip_checks = False # not __debug__ # google doesn't use -O\n+skip_checks = True # not __debug__ # google doesn't use -O\nzip = safe_zip\nmap = safe_map\n@@ -523,6 +523,9 @@ class JaxTuple(six.with_metaclass(_TupleMeta)):\nelse:\nreturn 'JaxTuple({})'.format(','.join(map(repr, self)))\n+ def __eq__(self, other):\n+ return isinstance(other, JaxTuple) and tuple(self) == tuple(other)\n+\nclass AbstractTuple(AbstractValue, tuple):\ndef __new__(cls, xs=()):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax_linalg.py",
"new_path": "jax/lax_linalg.py",
"diff": "@@ -472,6 +472,7 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nif full_matrices:\n#TODO: implement full matrices case, documented here: https://people.maths.ox.ac.uk/gilesm/files/NA-08-01.pdf\nraise NotImplementedError(\"Singular value decomposition JVP not implemented for full matrices\")\n+\nA, = primals\ndA, = tangents\ns, U, Vt = svd_p.bind(A, full_matrices=False, compute_uv=True)\n@@ -480,7 +481,7 @@ def svd_jvp_rule(primals, tangents, full_matrices, compute_uv):\nUt, V = np.conj(U).T, np.conj(Vt).T\ns_dim = s[..., None, :]\ndS = np.dot(np.dot(Ut, dA), V)\n- ds = np.diag(dS)\n+ ds = np.real(np.diag(dS))\nF = 1 / (np.square(s_dim) - np.square(s_dim.T) + np.eye(k)) - np.eye(k)\ndSS = s_dim * dS\nSdS = s_dim.T * dS\n"
}
] | Python | Apache License 2.0 | google/jax | make tests pass with skip_checks = False |
260,335 | 11.05.2019 09:29:12 | 25,200 | 98ed8731a5e7fa7d2a8b0f518d54817cdc3fb76a | remove unneeded packs from scan tests | [
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -37,7 +37,7 @@ def scan_reference(f, init, xs):\n(carry, y) = f(carry, x)\nys.append(lax.reshape(y, (1,) + onp.shape(y)))\nys = lax.concatenate(ys, 0)\n- return core.pack((carry, ys))\n+ return carry, ys\nclass LaxControlFlowTest(jtu.JaxTestCase):\n@@ -443,7 +443,7 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nb = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\nc = np.sin(c * b)\nassert b.shape == ()\n- return core.pack((c, b))\n+ return c, b\nif jit_f:\nf = api.jit(f)\n@@ -472,7 +472,7 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nb = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\nc = np.sin(c * b)\nassert b.shape == ()\n- return core.pack((c, b))\n+ return c, b\nif jit_f:\nf = api.jit(f)\n@@ -501,7 +501,7 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nb = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\nc = np.sin(c * b)\nassert b.shape == ()\n- return core.pack((c, b))\n+ return c, b\nif jit_f:\nf = api.jit(f)\n@@ -530,7 +530,7 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nb = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))\nc = np.sin(c * b)\nassert b.shape == ()\n- return core.pack((c, b))\n+ return c, b\nif jit_f:\nf = api.jit(f)\n@@ -566,7 +566,7 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nstacked = np.concatenate([state, input])\noutput = np.tanh(np.dot(W_out, stacked))\nnext_state = np.tanh(np.dot(W_trans, stacked))\n- return core.pack((next_state, output))\n+ return next_state, output\ndef rnn(params, inputs):\ninit_state = np.zeros(n_hid)\n"
}
] | Python | Apache License 2.0 | google/jax | remove unneeded packs from scan tests |
260,335 | 11.05.2019 10:45:14 | 25,200 | 65202821df7dc73a5fd7c6191c473854cf658fe9 | improve core.typed_jaxpr arg typechecks | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -901,7 +901,8 @@ def jarrett(fun):\nreturn new_fun\n-def make_graphviz(fun):\n+# This function mostly exists for making slides about JAX.\n+def _make_graphviz(fun):\n\"\"\"Adapts `fun` to return a graphviz dot string of its program representation.\nArgs:\n@@ -914,6 +915,8 @@ def make_graphviz(fun):\nSee make_jaxpr for a related function.\n\"\"\"\n+ # TODO(mattjj): handle eqn.restructure\n+ # TODO(mattjj): handle subjaxprs\ndef pv_like(x):\naval = xla.abstractify(x)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -74,16 +74,17 @@ class TypedJaxpr(object):\n@curry\ndef jaxpr_as_fun(typed_jaxpr, *args):\n- from jax.lax import _abstractify # TODO\ninvars = typed_jaxpr.jaxpr.invars\n+ if not skip_checks:\nfor arg, in_aval, varname in zip(args, typed_jaxpr.in_avals, invars):\n- arg_aval, _ = _abstractify(arg)\n- if arg_aval != in_aval:\n+ arg_aval = get_aval(arg)\n+ if lattice_join(arg_aval, in_aval) != in_aval:\nmsg = \"input type mismatch for arg {}: arg {} for parameter {}.\"\nraise TypeError(msg.format(varname, arg_aval, in_aval))\nout = eval_jaxpr(typed_jaxpr.jaxpr, typed_jaxpr.literals, (), *args)\n- out_aval, _ = _abstractify(out)\n- if out_aval != typed_jaxpr.out_aval:\n+ if not skip_checks:\n+ out_aval = get_aval(out)\n+ if lattice_join(out_aval, typed_jaxpr.out_aval) != typed_jaxpr.out_aval:\nmsg = \"output type mismatch: output value {} for output type {}.\"\nraise TypeError(msg.format(out_aval, typed_jaxpr.out_aval))\nreturn out\n"
}
] | Python | Apache License 2.0 | google/jax | improve core.typed_jaxpr arg typechecks |
260,335 | 11.05.2019 13:28:47 | 25,200 | 6e9718a229d6c73e8102f5c731db7230affe0985 | add pretty-printing to TypedJaxpr | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -71,6 +71,13 @@ class TypedJaxpr(object):\ndef __iter__(self):\nreturn iter((self.jaxpr, self.literals, self.in_avals, self.out_aval))\n+ def __str__(self):\n+ # TODO(mattjj): improve this with type annotations?\n+ return str(pp_jaxpr(self.jaxpr))\n+\n+ def __repr__(self):\n+ return self.__str__()\n+\n@curry\ndef jaxpr_as_fun(typed_jaxpr, *args):\n"
}
] | Python | Apache License 2.0 | google/jax | add pretty-printing to TypedJaxpr |
260,335 | 13.05.2019 08:48:13 | 25,200 | d27bc0a12975c222cce81dc2f5dedd54db106cbf | add literals to jaxprs | [
{
"change_type": "MODIFY",
"old_path": "jax/core.py",
"new_path": "jax/core.py",
"diff": "@@ -100,6 +100,7 @@ def jaxpr_as_fun(typed_jaxpr, *args):\nJaxprEqn = namedtuple('JaxprEqn', ['invars', 'outvars', 'primitive',\n'bound_subjaxprs', 'restructure',\n'destructure', 'params'])\n+Literal = namedtuple('Literal', ['val'])\nclass Primitive(object):\ndef __init__(self, name):\n@@ -145,6 +146,9 @@ class Primitive(object):\ndef eval_jaxpr(jaxpr, consts, freevar_vals, *args):\ndef read(v):\n+ if type(v) is Literal:\n+ return v.val\n+ else:\nreturn env[v]\ndef write(v, val):\n@@ -653,7 +657,7 @@ def check_jaxpr(jaxpr):\nreturn \"\\njaxpr:\\n{}\\n\".format(jaxpr)\ndef read_env(env, v):\n- if v not in env:\n+ if v not in env and type(v) is not Literal:\nraise Exception(\"Variable '{}' not defined\".format(v) + context())\ndef write_env(env, v):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/ad.py",
"new_path": "jax/interpreters/ad.py",
"diff": "@@ -20,7 +20,7 @@ import itertools as it\nfrom . import partial_eval as pe\nfrom .. import core as core\n-from ..core import JaxTuple, Trace, Tracer, new_master, get_aval, pack, call_p, Primitive\n+from ..core import JaxTuple, Trace, Tracer, new_master, get_aval, pack, call_p, Primitive, Literal\nfrom ..ad_util import (add_jaxvals, add_jaxvals_p, zeros_like_jaxval, zeros_like_aval,\nzeros_like_p, zero, Zero)\nfrom ..abstract_arrays import raise_to_shaped\n@@ -147,6 +147,9 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangent_in):\nreturn ct_env.get(v, zero)\ndef read_primal(v):\n+ if type(v) is Literal:\n+ return v.val\n+ else:\nreturn primal_env.get(v)\ndef write_primal(v, val):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -24,9 +24,9 @@ from .. import linear_util as lu\nfrom ..abstract_arrays import ShapedArray, ConcreteArray\nfrom ..linear_util import thunk, transformation, transformation_with_aux\nfrom ..util import unzip2, safe_zip, safe_map, toposort, partial\n-from ..core import (Trace, Tracer, new_master, Jaxpr, JaxprEqn, get_aval, pack,\n- AbstractValue, AbstractTuple, unit, unitvar, Primitive,\n- call_p, TypedJaxpr)\n+from ..core import (Trace, Tracer, new_master, Jaxpr, JaxprEqn, Literal,\n+ get_aval, pack, AbstractValue, AbstractTuple, unit, unitvar,\n+ Primitive, call_p, TypedJaxpr)\nmap = safe_map\nzip = safe_zip\n@@ -47,6 +47,9 @@ def identity(x): return x\nclass JaxprTrace(Trace):\ndef pure(self, val):\n+ if type(val) in (int, float):\n+ return JaxprTracer(self, PartialVal((None, val)), Literal(val))\n+ else:\nreturn self.new_const(val)\ndef lift(self, val):\n@@ -60,6 +63,9 @@ class JaxprTrace(Trace):\nraise Exception\nreturn JaxprTracer(self, PartialVal((None, val)), unit)\n+ def new_instantiated_literal(self, val):\n+ return JaxprTracer(self, PartialVal((get_aval(val), unit)), Literal(val))\n+\ndef new_instantiated_const(self, val):\nreturn JaxprTracer(self, PartialVal((get_aval(val), unit)), ConstVar(val))\n@@ -74,6 +80,9 @@ class JaxprTrace(Trace):\nelif isinstance(pv, JaxprTracerTuple):\nreturn pack(map(lambda t: self.instantiate_const(self.full_raise(t)), tracer))\nelif pv is None:\n+ if type(tracer.recipe) is Literal:\n+ return self.new_instantiated_literal(tracer.recipe.val)\n+ else:\nreturn self.new_instantiated_const(const)\nelse:\nraise TypeError(pv)\n@@ -491,6 +500,8 @@ def tracers_to_jaxpr(in_tracers, out_tracer):\nenv[var(t)] = recipe.val\nelif isinstance(recipe, ConstVar):\nconsts[var(t)] = recipe.val\n+ elif isinstance(recipe, Literal):\n+ t_to_var[id(t)] = recipe\nelif isinstance(recipe, Destructuring):\ni, eqn, key = recipe\nif key not in destructuring_vars:\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -257,6 +257,9 @@ def replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\nc = xb.make_computation_builder(\"replicated_computation\")\ndef read(v):\n+ if type(v) is core.Literal:\n+ return c.Constant(v.val)\n+ else:\nreturn env[v]\ndef write(v, node):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -32,7 +32,7 @@ from .. import ad_util\nfrom .. import tree_util\nfrom .. import linear_util as lu\nfrom ..abstract_arrays import ConcreteArray, ShapedArray, make_shaped_array, array_types\n-from ..core import AbstractTuple, JaxTuple, pack, valid_jaxtype\n+from ..core import AbstractTuple, JaxTuple, pack, valid_jaxtype, Literal\nfrom ..util import partial, partialmethod, memoize, unzip2, concatenate, safe_map, prod\nfrom ..lib import xla_bridge as xb\nfrom . import partial_eval as pe\n@@ -262,6 +262,9 @@ def jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\nc = xb.make_computation_builder(\"jaxpr_computation\")\ndef read(v):\n+ if type(v) is Literal:\n+ return c.Constant(canonicalize_pyval_dtype(v.val))\n+ else:\nreturn env[v]\ndef write(v, node):\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -361,7 +361,8 @@ def _revise_cond_jaxpr(new_pval, old_pval, jaxpr, consts):\nnew_jaxpr = jaxpr.copy()\nnew_jaxpr.constvars = tuple(jaxpr.constvars) + tuple(new_constvars)\nnewvars = iter(new_constvars)\n- new_invars = [next(newvars) if old is None and new is not None else v\n+ new_invars = [next(newvars) if old is None and new is not None else\n+ (core.unitvar if new is None and old is None else v)\nfor new, old, v in zip(new_pv, old_pv, eqn.invars)]\nnew_jaxpr.eqns = (list(jaxpr.eqns[:-1]) +\n[_pack_eqn(new_invars, jaxpr.outvar)])\n"
}
] | Python | Apache License 2.0 | google/jax | add literals to jaxprs
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 13.05.2019 09:10:11 | 25,200 | 3c04dedcad2d707e5f5c01c741617f63c75f80fc | scan cleanup | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/partial_eval.py",
"new_path": "jax/interpreters/partial_eval.py",
"diff": "@@ -155,47 +155,6 @@ class JaxprTrace(Trace):\nmap_primitives = set()\n-def unzip_scan_jaxpr(jaxpr, consts, init, xs, avals):\n- f = lu.wrap_init(partial(core.eval_jaxpr, jaxpr))\n-\n-\n- assert False\n-\n-\n-def scan_process_primitive(trace, consts, init, xs, avals, jaxpr):\n- jaxpr1, jaxpr2, avals1, avals2, ans_pv = unzip_scan_jaxpr(\n- jaxpr, consts, init, xs, avals)\n- const_pv , consts_const = consts\n- init_pv , inits_const = init\n- xs_pv , xs_const = xs\n-\n- ans = scan_p.bind(consts_const, inits_const, xs_const,\n- avals=avals1, jaxpr=jaxpr1)\n-\n- params_out = {'avals' : avals2, 'jaxpr' : jaxpr2}\n- eqn = JaxprEqn([consts, init, xs], None, scan_p, (), False, False, params_out)\n- return JaxprTracer(trace, PartialVal((ans, ans_pv)), )\n-\n- # in_pvs, in_consts = unzip2([t.pval for t in tracers])\n- # fun, aux = partial_eval(f, self, in_pvs)\n- # out_pv_const, consts = call_primitive.bind(fun, *in_consts, **params)\n- # out_pv, jaxpr, env = aux()\n- # const_tracers = map(self.new_instantiated_const, consts)\n- # env_tracers = map(self.full_raise, env)\n- # bound_subjaxpr = (jaxpr, const_tracers, env_tracers)\n- # eqn = JaxprEqn(tracers, None, call_primitive, (bound_subjaxpr,), False, params)\n- # return JaxprTracer(self, PartialVal((out_pv, out_pv_const)), eqn)\n-\n-\n-\n- # tracers = map(self.instantiate_const, tracers)\n- # avals = [t.aval for t in tracers]\n- # out_aval = primitive.abstract_eval(*avals, **params)\n- # eqn = JaxprEqn(tracers, None, primitive, (), False, params)\n- # return JaxprTracer(self, PartialVal((out_aval, unit)), eqn)\n- assert False\n-\n-\ndef remove_axis_from_pv(pv):\nif pv is None:\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -252,7 +252,6 @@ def jaxpr_replicas(jaxpr):\ndef _max(itr): return max(list(itr) or [1])\ndef replicated_comp(jaxpr, ax_env, const_vals, freevar_shapes, *arg_shapes):\n- # TODO(mattjj): support argument pattern-matching\nassert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)\nc = xb.make_computation_builder(\"replicated_computation\")\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/xla.py",
"new_path": "jax/interpreters/xla.py",
"diff": "@@ -257,7 +257,6 @@ def build_jaxpr(jaxpr, const_vals, *abstract_args):\nreturn built_c\ndef jaxpr_computation(jaxpr, const_vals, freevar_shapes, *arg_shapes):\n- # TODO(mattjj): support argument pattern-matching\nassert not any(type(invar) in (tuple, list) for invar in jaxpr.invars)\nc = xb.make_computation_builder(\"jaxpr_computation\")\n"
},
{
"change_type": "DELETE",
"old_path": "jax/scan.py",
"new_path": null,
"diff": "-# Copyright 2018 Google LLC\n-#\n-# Licensed under the Apache License, Version 2.0 (the \"License\");\n-# you may not use this file except in compliance with the License.\n-# You may obtain a copy of the License at\n-#\n-# https://www.apache.org/licenses/LICENSE-2.0\n-#\n-# Unless required by applicable law or agreed to in writing, software\n-# distributed under the License is distributed on an \"AS IS\" BASIS,\n-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n-# See the License for the specific language governing permissions and\n-# limitations under the License.\n-\n-from __future__ import absolute_import\n-from __future__ import division\n-from __future__ import print_function\n-\n-from functools import partial\n-\n-import jax.core as core\n-import jax.linear_util as lu\n-import jax.numpy as np\n-import jax.lax as lax\n-\n-from jax.lax import _abstractify\n-from jax.abstract_arrays import ShapedArray\n-from jax.interpreters import partial_eval as pe\n-from jax.interpreters import ad\n-\n-import jax.util as ju\n-\n-map = ju.safe_map\n-\n-# scan :: (a -> c -> c) -> c -> [a] -> [c]\n-# scan_cc :: (d ->a -> c -> c) -> d-> c -> [a] -> [c]\n-# pro: fewer types\n-# pro: all types are passed as args\n-\n-# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n-# scan_cc :: (d -> a -> c -> (b,c)) -> d -> c -> [a] -> ([b],c)\n-# pro: fold and for_i are special cases without needing DCE\n-# pro: feels cleaner for transposition\n-# pro: accumulation without saving intermediates\n-\n-# design indecision: store intermediates from within f vs *just inputs*\n-# design indecision: *jvp splits scan* or partial eval splits scan\n-\n-def scan_reference(f, init, xs):\n- carry = init\n- ys = []\n- for x in xs:\n- (carry, y) = f(x, carry)\n- ys.append(y)\n-\n- ys = core.pack(map(np.stack, zip(*ys)))\n- return ys, np.array(carry)\n-\n-def demote_aval_rank(xs):\n- if isinstance(xs, core.AbstractTuple):\n- return core.AbstractTuple(map(demote_aval_rank, xs))\n- else:\n- return ShapedArray(xs.shape[1:], xs.dtype)\n-\n-def promote_aval_rank(n, xs):\n- if isinstance(xs, core.AbstractTuple):\n- return core.AbstractTuple(map(partial(promote_aval_rank, n), xs))\n- else:\n- return ShapedArray((n,) + xs.shape, xs.dtype)\n-\n-def leading_dim_size(xs):\n- if isinstance(xs, core.JaxTuple):\n- return leading_dim_size(xs[0])\n- else:\n- return xs.shape[0]\n-\n-def empty_arrays(aval):\n- if isinstance(aval, core.AbstractTuple):\n- return core.pack(map(empty_arrays, aval))\n- else:\n- return lax.full(aval.shape, 0, aval.dtype)\n-\n-def index_arrays(i, aval, xs):\n- if isinstance(aval, core.AbstractTuple):\n- return core.pack(map(partial(index_arrays, i), aval, xs))\n- else:\n- return lax.dynamic_index_in_dim(xs, i, keepdims=False)\n-\n-def update_arrays(i, aval, xs, x):\n- if isinstance(aval, core.AbstractTuple):\n- return core.pack(map(partial(update_arrays, i), aval, xs, x))\n- else:\n- return lax.dynamic_update_index_in_dim(xs, x[None, ...], i, axis=0)\n-\n-# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n-def scan(f, init, xs):\n- consts, avals, jaxpr = trace_scan_fun(f, init, xs)\n- ys, carry = scan_p.bind(core.pack(consts), init, xs, avals=avals, jaxpr=jaxpr)\n- return ys, carry\n-\n-def trace_scan_fun(f, init, xs):\n- f = lu.wrap_init(f)\n- carry_pval = carry_aval, _ = _abstractify(init)\n- xs_aval, _ = _abstractify(xs)\n- x_aval = demote_aval_rank(xs_aval)\n- x_pval = pe.PartialVal((x_aval, core.unit))\n- jaxpr, pval_out, consts = pe.trace_to_jaxpr(f, (x_pval, carry_pval))\n- (y_aval, carry_aval_out), _ = pval_out\n- assert carry_aval == carry_aval_out\n- avals = (x_aval, y_aval, carry_aval)\n- return consts, avals, jaxpr\n-\n-def _scan_impl(consts, init, xs, avals, jaxpr):\n- length = leading_dim_size(xs)\n- (x_aval, y_aval, carry_aval) = avals\n- ys_aval = promote_aval_rank(length, y_aval)\n-\n- def body_fun(i, vals):\n- carry, ys = vals\n- x = index_arrays(i, x_aval, xs)\n- (y, carry_out) = core.eval_jaxpr(jaxpr, consts, (), x, carry)\n- ys_out = update_arrays(i, y_aval, ys, y)\n- return (carry_out, ys_out)\n-\n- ys_init = empty_arrays(ys_aval)\n- carry, out = lax.fori_loop(0, length, body_fun, (init, ys_init))\n- return core.pack((out, carry))\n-\n-# scan :: (a -> c -> (b,c)) -> c -> [a] -> ([b],c)\n-def _scan_jvp(primals, tangents, avals, jaxpr):\n- consts, init, xs = primals\n- consts_dot, init_dot, xs_dot = tangents\n- f = partial(core.eval_jaxpr, jaxpr)\n-\n- # TODO: plumb symbolic zeros in and out of jvp transformation so we can test\n- # that they're the same as the inputs and re-run if not\n- consts_dot = ad.instantiate_zeros(consts, consts_dot)\n- init_dot = ad.instantiate_zeros(init , init_dot)\n- xs_dot = ad.instantiate_zeros(xs , xs_dot)\n-\n- f_jvp = ad.jvp(lu.wrap_init(f)).call_wrapped\n-\n- def f_jvp_c(carry_dual, x_dual):\n- init, init_dot = carry_dual\n- x, x_dot = x_dual\n- ans = f_jvp(core.pack((consts , core.unit, init , x)),\n- core.pack((consts_dot, core.unit, init_dot, x_dot)))\n- (y, carry_out), (y_dot, carry_out_dot) = ans\n- return core.pack((core.pack((y, y_dot)),\n- core.pack((carry_out, carry_out_dot))))\n-\n- consts_dual = core.pack((consts, consts_dot))\n- init_dual = core.pack((init , init_dot))\n- xs_dual = core.pack((xs , xs_dot))\n- consts, avals, jvp_jaxpr = trace_scan_fun(f_jvp_c, init_dual, xs_dual)\n-\n- ans = scan_p.bind(core.pack(consts), init_dual, xs_dual,\n- avals=avals, jaxpr=jvp_jaxpr)\n- (y, y_dot), (carry_out, carry_out_dot) = ans\n- return core.pack((y, carry_out)), core.pack((y_dot, carry_out_dot))\n-\n-\n-scan_p = core.Primitive(\"scan\")\n-scan_p.def_impl(_scan_impl)\n-ad.primitive_jvps[scan_p] = _scan_jvp\n"
}
] | Python | Apache License 2.0 | google/jax | scan cleanup
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 13.05.2019 10:39:34 | 25,200 | dff8e60f5c78ef49cdf5430c14e55a902ebcd27c | fix typo unkown -> unknown | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -677,16 +677,16 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\ndict(forward=forward, length=length, jaxpr=jaxpr_2))\nreturn pe.JaxprTracer(trace, pe.PartialVal((out_pv, out_const)), eqn)\n-def _lift_tracer(trace, tracer, is_unkown):\n- t = type(is_unkown)\n+def _lift_tracer(trace, tracer, is_unknown):\n+ t = type(is_unknown)\nif t is bool:\n- if is_unkown:\n+ if is_unknown:\nreturn trace.instantiate_const(tracer)\nelse:\nreturn tracer\nelif t is tuple:\ntracers = map(trace.full_raise, tracer)\n- return core.pack(map(partial(_lift_tracer, trace), tracers, is_unkown))\n+ return core.pack(map(partial(_lift_tracer, trace), tracers, is_unknown))\nelse:\nraise TypeError(t)\n"
}
] | Python | Apache License 2.0 | google/jax | fix typo unkown -> unknown |
260,295 | 13.05.2019 20:36:45 | 25,200 | a08f7ad5fa64c726d3e4f19e038063a97d3c23dc | Adagrad optimizer | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -229,6 +229,44 @@ def momentum(step_size, mass):\nreturn x\nreturn init, update, get_params\n+\n+@optimizer\n+def adagrad(step_size, momentum=0.9):\n+ \"\"\"Construct optimizer triple for Adagrad.\n+\n+ Adaptive Subgradient Methods for Online Learning and Stochastic Optimization:\n+ http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf\n+\n+ Args:\n+ step_size: positive scalar, or a callable representing a step size schedule\n+ that maps the iteration index to positive scalar.\n+ momentum: optional, a positive scalar value for momentum\n+\n+ Returns:\n+ An (init_fun, update_fun, get_params) triple.\n+ \"\"\"\n+ step_size = make_schedule(step_size)\n+\n+ def init(x0):\n+ g_sq = np.zeros_like(x0)\n+ m = np.zeros_like(x0)\n+ return x0, g_sq, m\n+\n+ def update(i, g, state):\n+ x, g_sq, m = state\n+ g_sq += g**2\n+ g_sq_inv_sqrt = np.where(g_sq > 0, 1. / np.sqrt(g_sq), 0.0)\n+ m = (1. - momentum) * (g * g_sq_inv_sqrt) + momentum * m\n+ x = x - step_size(i) * m\n+ return x, g_sq, m\n+\n+ def get_params(state):\n+ x, _, _ = state\n+ return x\n+\n+ return init, update, get_params\n+\n+\n@optimizer\ndef rmsprop(step_size, gamma=0.9, eps=1e-8):\n\"\"\"Construct optimizer triple for RMSProp.\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/optimizers_test.py",
"new_path": "tests/optimizers_test.py",
"diff": "@@ -131,6 +131,17 @@ class OptimizerTests(jtu.JaxTestCase):\npartial_loss = functools.partial(loss, y)\nself._CheckRun(optimizers.sgd, partial_loss, x0, num_iters, step_size)\n+ def testAdagrad(self):\n+\n+ def loss(xs):\n+ x1, x2 = xs\n+ return np.sum(x1**2) + np.sum(x2**2)\n+\n+ num_iters = 100\n+ step_size = 0.1\n+ x0 = (np.ones(2), np.ones((2, 2)))\n+ self._CheckOptimizer(optimizers.adagrad, loss, x0, num_iters, step_size)\n+\ndef testSM3(self):\ndef loss(xs):\nx1, x2 = xs\n"
}
] | Python | Apache License 2.0 | google/jax | Adagrad optimizer |
260,335 | 15.05.2019 08:13:30 | 25,200 | 38c6a8e89929e546a0cc2785e70e282ddf1f5ea1 | add pmap docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -438,7 +438,7 @@ def _dtype(x):\ndef vmap(fun, in_axes=0, out_axes=0):\n- \"\"\"Vectorizing map. Creates a function which maps `fun` over additional axes.\n+ \"\"\"Vectorizing map. Creates a function which maps `fun` over argument axes.\nArgs:\nfun: Function to be mapped over additional axes.\n@@ -485,7 +485,89 @@ def vmap(fun, in_axes=0, out_axes=0):\ndef pmap(fun, axis_name=None):\n- \"\"\"Set up SPMD function for JIT compilation and parallel execution with XLA.\"\"\"\n+ \"\"\"Parallel map with support for collectives.\n+\n+ The purpose of ``pmap`` is to express single-program multiple-data (SPMD)\n+ programs and execute them in parallel on XLA devices, such as multiple GPUs or\n+ multiple TPU cores. Semantically it is comparable to ``vmap`` because both\n+ transformations map a function over array axes, but where ``vmap`` vectorizes\n+ functions by pushing the mapped axis down into primitive operations, ``pmap``\n+ instead replicates the function and executes each replica on its own XLA\n+ device in parallel.\n+\n+ Another key difference with ``vmap`` is that while ``vmap`` can only express\n+ pure maps, ``pmap`` enables the use of parallel SPMD collective operations,\n+ like all-reduce sum.\n+\n+ The mapped axis size must be less than or equal to the number of XLA devices\n+ available. For nested ``pmap`` calls, the product of the mapped axis sizes\n+ must be less than or equal to the number of XLA devices.\n+\n+ Args:\n+ fun: Function to be mapped over argument axes.\n+ axis_name: Optional, a hashable Python object used to identify the mapped\n+ axis so that parallel collectives, like all-reduce, can be applied.\n+\n+ Returns:\n+ A parallelized version of ``fun`` with arguments that correspond to those of\n+ ``fun`` but each with an additional leading array axis (with equal sizes)\n+ and with output that has an additional leading array axis (with the same\n+ size).\n+\n+ For example, assuming 8 XLA devices are available,``pmap`` can be used as a\n+ map along a leading array axes:\n+\n+ >>> out = pmap(lambda x: x ** 2)(np.arange(8))\n+ >>> print(out)\n+ [0, 1, 4, 9, 16, 25, 36, 49]\n+ >>> x = np.arange(3 * 2 * 2.).reshape((3, 2, 2))\n+ >>> y = np.arange(3 * 2 * 2.).reshape((3, 2, 2)) ** 2\n+ >>> out = pmap(np.dot)(x, y)\n+ >>> print(out)\n+ [[[ 4. 9.]\n+ [ 12. 29.]]\n+ [[ 244. 345.]\n+ [ 348. 493.]]\n+ [[ 1412. 1737.]\n+ [ 1740. 2141.]]]\n+\n+ In addition to expressing pure maps, ``pmap`` can also be used to express\n+ parallel single-program multiple-data (SPMD) programs that communicate via\n+ collectivce operations. For example:\n+\n+ >>> f = lambda x: x / jax.lax.psum(x, axis_name='i')\n+ >>> out = pmap(f, axis_name='i')(np.arange(4.))\n+ >>> print(out)\n+ [ 0. 0.16666667 0.33333334 0.5 ]\n+ >>> print(out.sum())\n+ 1.0\n+\n+ In this example, ``axis_name`` is a string, but it can be any Python object\n+ with ``__hash__`` and ``__eq__`` defined.\n+\n+ The argument ``axis_name`` to ``pmap`` names the mapped axis so that\n+ collective operations, like ``jax.lax.psum``, can refer to it. Axis names are\n+ important particularly in the case of nested ``pmap`` functions, where\n+ collectives can operate over distinct axes:\n+\n+ >>> from functools import partial\n+ >>> @partial(pmap, axis_name='rows')\n+ >>> @partial(pmap, axis_name='cols')\n+ >>> def normalize(x):\n+ >>> row_normed = x / jax.lax.psum(x, 'rows')\n+ >>> col_normed = x / jax.lax.psum(x, 'cols')\n+ >>> doubly_normed = x / jax.lax.psum(x, ('rows', 'cols'))\n+ >>> return row_normed, col_normed, doubly_normed\n+ >>>\n+ >>> x = np.arange(8.).reshape((4, 2))\n+ >>> row_normed, col_normed, doubly_normed = normalize(x)\n+ >>> print(row_normed.sum(0))\n+ [ 1. 1.]\n+ >>> print(col_normed.sum(1))\n+ [ 1. 1. 1. 1.]\n+ >>> print(doubly_normed.sum((0, 1)))\n+ 1.0\n+ \"\"\"\naxis_name = _TempAxisName() if axis_name is None else axis_name\n@wraps(fun)\n"
}
] | Python | Apache License 2.0 | google/jax | add pmap docstring
Co-authored-by: Peter Hawkins <phawkins@google.com> |
260,335 | 15.05.2019 08:21:12 | 25,200 | d19947169689dc7c1d1f77440a9feca863ef8de1 | add pmap to sphinx docs | [
{
"change_type": "MODIFY",
"old_path": "docs/jax.rst",
"new_path": "docs/jax.rst",
"diff": "@@ -19,6 +19,6 @@ Module contents\n---------------\n.. automodule:: jax\n- :members: jit, disable_jit, grad, value_and_grad, vmap, jacfwd, jacrev, hessian, jvp, linearize, vjp, make_jaxpr\n+ :members: jit, disable_jit, grad, value_and_grad, vmap, pmap, jacfwd, jacrev, hessian, jvp, linearize, vjp, make_jaxpr\n:undoc-members:\n:show-inheritance:\n"
}
] | Python | Apache License 2.0 | google/jax | add pmap to sphinx docs
Co-authored-by: Peter Hawkins <phawkins@google.com> |
260,335 | 15.05.2019 08:24:47 | 25,200 | 52a2fb3280dcf31c61804aa871e7d8e5f66fe2a7 | typo fixes in pmap docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -506,7 +506,7 @@ def pmap(fun, axis_name=None):\nArgs:\nfun: Function to be mapped over argument axes.\naxis_name: Optional, a hashable Python object used to identify the mapped\n- axis so that parallel collectives, like all-reduce, can be applied.\n+ axis so that parallel collectives can be applied.\nReturns:\nA parallelized version of ``fun`` with arguments that correspond to those of\n@@ -533,7 +533,7 @@ def pmap(fun, axis_name=None):\nIn addition to expressing pure maps, ``pmap`` can also be used to express\nparallel single-program multiple-data (SPMD) programs that communicate via\n- collectivce operations. For example:\n+ collective operations. For example:\n>>> f = lambda x: x / jax.lax.psum(x, axis_name='i')\n>>> out = pmap(f, axis_name='i')(np.arange(4.))\n"
}
] | Python | Apache License 2.0 | google/jax | typo fixes in pmap docstring
Co-authored-by: Peter Hawkins <phawkins@google.com> |
260,335 | 15.05.2019 21:30:39 | 25,200 | c779e495c718f47f7d1f8cebd3561bc0052e4311 | fix typo in build_jaxlib_wheels_macos.sh | [
{
"change_type": "MODIFY",
"old_path": "build/build_jaxlib_wheels_macos.sh",
"new_path": "build/build_jaxlib_wheels_macos.sh",
"diff": "# Script that builds wheels for a JAX release on Mac OS X.\n# Builds wheels for multiple Python versions, using pyenv instead of Docker.\n# Usage: run from root of JAX source tree as:\n-# build/build_wheels_macos.sh\n+# build/build_jaxlib_wheels_macos.sh\n# The wheels will end up in build/dist.\n#\n# Requires pyenv, pyenv-virtualenv (e.g., from Homebrew). If you have Homebrew\n"
}
] | Python | Apache License 2.0 | google/jax | fix typo in build_jaxlib_wheels_macos.sh |
260,335 | 16.05.2019 10:20:37 | 25,200 | fc226dab406643a6e26df51891388eed0ff3b154 | update jaxlib references to 0.1.15 | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -164,7 +164,7 @@ PYTHON_VERSION=cp27 # alternatives: cp27, cp35, cp36, cp37\nCUDA_VERSION=cuda92 # alternatives: cuda90, cuda92, cuda100\nPLATFORM=linux_x86_64 # alternatives: linux_x86_64\nBASE_URL='https://storage.googleapis.com/jax-wheels'\n-pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.13-$PYTHON_VERSION-none-$PLATFORM.whl\n+pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.15-$PYTHON_VERSION-none-$PLATFORM.whl\npip install --upgrade jax # install jax\n```\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lib/xla_bridge.py",
"new_path": "jax/lib/xla_bridge.py",
"diff": "@@ -164,12 +164,6 @@ def device_put(pyval, device_num=0):\nbackend=get_backend())\ndef device_put_many(pyvals_and_devices):\n- # TODO(phawkins): remove the fallback path after dropping dependencies on\n- # Jaxlib older than 0.1.13.\n- if hasattr(xla_client.LocalBuffer, \"from_pyvals\"):\n- return xla_client.LocalBuffer.from_pyvals(pyvals_and_devices,\n- backend=get_backend())\n- else:\nreturn [device_put(pyval, device) for (pyval, device) in pyvals_and_devices]\ndef make_tuple(bufs, device_num=0):\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/XLA_in_Python.ipynb",
"new_path": "notebooks/XLA_in_Python.ipynb",
"diff": "},\n\"outputs\": [],\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n]\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/gufuncs.ipynb",
"new_path": "notebooks/gufuncs.ipynb",
"diff": "},\n\"cell_type\": \"code\",\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n],\n\"execution_count\": 0,\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/maml.ipynb",
"new_path": "notebooks/maml.ipynb",
"diff": "},\n\"outputs\": [],\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n]\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/neural_network_and_data_loading.ipynb",
"new_path": "notebooks/neural_network_and_data_loading.ipynb",
"diff": "},\n\"cell_type\": \"code\",\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n],\n\"execution_count\": 0,\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/neural_network_with_tfds_data.ipynb",
"new_path": "notebooks/neural_network_with_tfds_data.ipynb",
"diff": "}\n],\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n]\n},\n"
},
{
"change_type": "MODIFY",
"old_path": "notebooks/quickstart.ipynb",
"new_path": "notebooks/quickstart.ipynb",
"diff": "},\n\"cell_type\": \"code\",\n\"source\": [\n- \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.13-cp36-none-linux_x86_64.whl\\n\",\n+ \"!pip install --upgrade -q https://storage.googleapis.com/jax-wheels/cuda$(echo $CUDA_VERSION | sed -e 's/\\\\.//' -e 's/\\\\..*//')/jaxlib-0.1.15-cp36-none-linux_x86_64.whl\\n\",\n\"!pip install --upgrade -q jax\"\n],\n\"execution_count\": 0,\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -1453,8 +1453,6 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\ndef testLongLong(self):\n- # TODO(phawkins): enable after a Jaxlib update.\n- return SkipTest(\"Test disabled until jaxlib 0.1.13 is released.\")\nself.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),\ncheck_dtypes=True)\n"
}
] | Python | Apache License 2.0 | google/jax | update jaxlib references to 0.1.15 |
260,335 | 16.05.2019 10:32:28 | 25,200 | b68ed2787ab3838b64ff8ebdc57bf97528a7c344 | fix randint docstring | [
{
"change_type": "MODIFY",
"old_path": "jax/random.py",
"new_path": "jax/random.py",
"diff": "@@ -277,8 +277,10 @@ def randint(key, shape, minval, maxval, dtype=onp.int32):\nArgs:\nkey: a PRNGKey used as the random key.\nshape: a tuple of nonnegative integers representing the shape.\n- minval: optional, a minimum (inclusive) value for the range (default 0).\n- maxval: optional, a maximum (exclusive) value for the range (default 1).\n+ minval: int or array of ints broadcast-compatible with ``shape``, a minimum\n+ (inclusive) value for the range.\n+ maxval: int or array of ints broadcast-compatible with ``shape``, a maximum\n+ (exclusive) value for the range.\ndtype: optional, an int dtype for the returned values (default int32).\nReturns:\n"
}
] | Python | Apache License 2.0 | google/jax | fix randint docstring |
260,335 | 17.05.2019 07:36:52 | 25,200 | b6031ffdd752f6ae6b08b47018a04906a4abd9c8 | avoid packing leaf outputs for jit/pmap funs | [
{
"change_type": "MODIFY",
"old_path": "jax/api.py",
"new_path": "jax/api.py",
"diff": "@@ -41,7 +41,7 @@ from . import linear_util as lu\nfrom .core import pack, eval_jaxpr\nfrom .api_util import (pytree_fun_to_jaxtupletree_fun, pytree_to_jaxtupletree,\npytree_fun_to_flatjaxtuple_fun, apply_jaxtree_fun, wraps,\n- pytree_fun_to_jaxtupletree_fun2, flatten_fun)\n+ pytree_fun_to_jaxtupletree_fun2, flatten_fun_leafout)\nfrom .tree_util import (process_pytree, node_types, build_tree, PyTreeDef,\ntree_map, tree_flatten, tree_unflatten, tree_structure,\ntree_transpose, leaf)\n@@ -117,9 +117,9 @@ def _jit(fun, static_argnums, device_values=True):\nf, dyn_args = _argnums_partial(f, dyn_argnums, args)\nargs_flat, in_tree = tree_flatten((dyn_args, kwargs))\n_check_args(args_flat)\n- flat_fun, out_tree = flatten_fun(f, in_tree)\n+ flat_fun, out_tree = flatten_fun_leafout(f, in_tree)\nout = xla.xla_call(flat_fun, *args_flat, device_values=device_values)\n- return tree_unflatten(out_tree(), out)\n+ return out if out_tree() is leaf else tree_unflatten(out_tree(), out)\njitted_name = \"jit({}, static_argnums={})\"\nf_jitted.__name__ = jitted_name.format(f_jitted.__name__, static_argnums)\n@@ -571,19 +571,19 @@ def pmap(fun, axis_name=None):\naxis_name = _TempAxisName() if axis_name is None else axis_name\n@wraps(fun)\n- def f_jitted(*args, **kwargs):\n+ def f_pmapped(*args, **kwargs):\naxis_size = _pmap_axis_size(args)\nf = lu.wrap_init(fun)\nargs_flat, in_tree = tree_flatten((args, kwargs))\n_check_args(args_flat)\n- flat_fun, out_tree = flatten_fun(f, in_tree)\n+ flat_fun, out_tree = flatten_fun_leafout(f, in_tree)\nout = pxla.xla_pmap(flat_fun, *args_flat,\naxis_name=axis_name, axis_size=axis_size)\n- return tree_unflatten(out_tree(), out)\n+ return out if out_tree() is leaf else tree_unflatten(out_tree(), out)\nnamestr = \"pmap({}, axis_name={})\".format\n- f_jitted.__name__ = namestr(f_jitted.__name__, axis_name)\n- return f_jitted\n+ f_pmapped.__name__ = namestr(f_pmapped.__name__, axis_name)\n+ return f_pmapped\ndef _pmap_axis_size(args):\nleaves, _ = tree_flatten(args)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/api_util.py",
"new_path": "jax/api_util.py",
"diff": "@@ -71,13 +71,24 @@ def pytree_fun_to_flatjaxtuple_fun(in_trees, *args):\nans = yield py_args, {}\nyield pytree_to_flatjaxtuple(ans)\n+@transformation_with_aux\n+def flatten_fun(in_tree, *args_flat):\n+ py_args, py_kwargs = tree_unflatten(in_tree, args_flat)\n+ ans = yield py_args, py_kwargs\n+ yield pytree_to_flatjaxtuple(ans)\n+\ndef pytree_to_flatjaxtuple(pytree):\n- flat_ans, out_tree = tree_flatten(pytree)\n- return pack(flat_ans), out_tree\n+ flat, out_tree = tree_flatten(pytree)\n+ return pack(flat), out_tree\n@transformation_with_aux\n-def flatten_fun(in_tree, *args_flat):\n+def flatten_fun_leafout(in_tree, *args_flat):\n+ # like flatten_fun but doesn't pack output leaves\npy_args, py_kwargs = tree_unflatten(in_tree, args_flat)\nans = yield py_args, py_kwargs\n- yield pytree_to_flatjaxtuple(ans)\n+ flat_ans, out_tree = tree_flatten(ans)\n+ if out_tree is leaf:\n+ yield ans, out_tree\n+ else:\n+ yield pack(flat_ans), out_tree\n"
}
] | Python | Apache License 2.0 | google/jax | avoid packing leaf outputs for jit/pmap funs |
260,335 | 17.05.2019 09:08:08 | 25,200 | dad9193397e326cfeced8e141172e7a8077d4019 | make pmap catch device-count errors (+ tests) | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -479,6 +479,10 @@ def merged_aval(pval):\ndef execute_replicated(compiled, pval, nrep, handle_in,\nhandle_replica_result, handle_full_result, *args):\n+ if not nrep < xb.device_count():\n+ msg = (\"executing pmap computation that requires {} replicas, but only {} \"\n+ \"XLA devices are available\")\n+ raise ValueError(msg.format(nrep, xb.device_count()))\ninput_bufs = zip(*map(handle_in, args)) if args else [[]] * nrep\nout_bufs = compiled.ExecutePerReplica(list(input_bufs))\nresults = [merge_pvals(handle_replica_result(buf), pval) for buf in out_bufs]\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/pmap_test.py",
"new_path": "tests/pmap_test.py",
"diff": "@@ -384,6 +384,30 @@ class PmapTest(jtu.JaxTestCase):\nans = f(x)\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def testDeviceCountError(self):\n+ device_count = xla_bridge.device_count()\n+\n+ f = pmap(lambda x: x)\n+ x = np.arange(device_count + 1)\n+ self.assertRaisesRegexp(\n+ ValueError,\n+ \".*requires.*replicas\",\n+ lambda: f(x))\n+\n+ f = pmap(lambda x: x)\n+ x = onp.ones((device_count + 1, 10))\n+ self.assertRaisesRegexp(\n+ ValueError,\n+ \".*requires.*replicas\",\n+ lambda: f(x))\n+\n+ f = pmap(lambda x: pmap(lambda x: x)(x))\n+ x = onp.ones((device_count, 2, 10))\n+ self.assertRaisesRegexp(\n+ ValueError,\n+ \".*requires.*replicas\",\n+ lambda: f(x))\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | make pmap catch device-count errors (+ tests) |
260,335 | 17.05.2019 10:25:26 | 25,200 | f864edf777aee195156d2e7c5d9024009c17e52e | revise nested pmap gather to CRS-based impl | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -182,6 +182,7 @@ def replica_groups(nrep, mesh_spec, mesh_axes):\ndef xla_shard(c, sizes, x):\n\"\"\"Analog of shard_arg that performs sharding within an XLA computation.\"\"\"\n+\ndef _xla_shard(shape, x):\nif shape.is_tuple():\nelts = map(_xla_shard, shape.tuple_shapes(), xla_destructure(c, x))\n@@ -198,6 +199,7 @@ def xla_shard(c, sizes, x):\nreturn _xla_shard(c.GetShape(x), x)\n+# TODO(mattjj): plumb more ergonimic form of DynamicSlice / DynamicUpdateSlice\ndef _xla_shard_start_indices(c, axis_size, ndim):\nidx = c.Rem(c.ReplicaId(), c.Constant(onp.array(axis_size, onp.uint32)))\nzero = onp.zeros(ndim - 1, onp.uint32)\n@@ -206,6 +208,7 @@ def _xla_shard_start_indices(c, axis_size, ndim):\n# TODO(b/110096942): more efficient gather\ndef xla_unshard(c, device_groups, x):\n\"\"\"Analog of unshard_output that un-shards within an XLA computation.\"\"\"\n+\ndef _xla_unshard(shape, x):\nif shape.is_tuple():\nelts = map(_xla_unshard, shape.tuple_shapes(), xla_destructure(c, x))\n@@ -214,9 +217,14 @@ def xla_unshard(c, device_groups, x):\nreturn unshard_array(shape, x)\ndef unshard_array(shape, x):\n- group_size = len(device_groups[0])\n- broadcasted = c.Broadcast(x, (group_size,))\n- return c.AllToAll(broadcasted, 0, 0, device_groups)\n+ axis_size = len(device_groups[0])\n+ dims = list(shape.dimensions())\n+ start_indices = _xla_shard_start_indices(c, axis_size, len(dims) + 1)\n+ padded = c.Broadcast(c.Constant(onp.array(0, shape.numpy_dtype())),\n+ [axis_size] + dims)\n+ padded = c.DynamicUpdateSlice(padded, c.Reshape(x, None, [1] + dims),\n+ start_indices)\n+ return c.CrossReplicaSum(padded, device_groups)\nreturn _xla_unshard(c.GetShape(x), x)\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lib/xla_bridge.py",
"new_path": "jax/lib/xla_bridge.py",
"diff": "@@ -294,13 +294,14 @@ class _JaxComputationBuilder(xla_client.ComputationBuilder):\nelse:\nraise TypeError(\"No constant handler for type: {}\".format(py_type))\n- def AllToAll(self, operand, split_dimension, concat_dimension, replica_groups):\n- \"\"\"Workaround for AllToAll not being implemented on some backends.\"\"\"\n- if split_dimension == concat_dimension and len(replica_groups[0]) == 1:\n+ # TODO(mattjj): remove when CRS is added to XLA:CPU\n+ def CrossReplicaSum(self, operand, replica_groups):\n+ \"\"\"Workaround for CrossReplicaSum not being implemented on some backends.\"\"\"\n+ if len(replica_groups[0]) == 1:\nreturn operand\nelse:\n- return super(_JaxComputationBuilder, self).AllToAll(\n- operand, split_dimension, concat_dimension, replica_groups)\n+ return super(_JaxComputationBuilder, self).CrossReplicaSum(operand,\n+ replica_groups)\ndef make_computation_builder(name):\n"
}
] | Python | Apache License 2.0 | google/jax | revise nested pmap gather to CRS-based impl |
260,335 | 17.05.2019 12:27:09 | 25,200 | 9b1af47a590f3d3b7ece9ada88fdad8bab4b753e | improve documetnation of lax parallel operators | [
{
"change_type": "MODIFY",
"old_path": "docs/jax.lax.rst",
"new_path": "docs/jax.lax.rst",
"diff": "@@ -139,9 +139,8 @@ Parallelism support is experimental.\n.. autosummary::\n:toctree: _autosummary\n- pcollect\n- pmax\n- psplit\n- psplit_like\npsum\n+ pmax\n+ pmin\npswapaxes\n+ ppermute\n"
},
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_parallel.py",
"new_path": "jax/lax/lax_parallel.py",
"diff": "@@ -29,44 +29,125 @@ from jax.lib import xla_bridge\n### parallel traceables\ndef psum(x, axis_name):\n+ \"\"\"Compute an all-reduce sum on ``x`` over the pmapped axis ``axis_name``.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+\n+ Returns:\n+ An array with the same shape as ``x`` representing the result of an\n+ all-reduce sum along the axis ``axis_name``.\n+\n+ For example, with 4 XLA devices available:\n+\n+ >>> x = np.arange(4)\n+ >>> y = jax.pmap(lambda x: jax.lax.psum(x, 'i'), axis_name='i')(x)\n+ >>> print(y)\n+ [6 6 6 6]\n+ >>> y = jax.pmap(lambda x: x / jax.lax.psum(x, 'i'), axis_name='i')(x)\n+ >>> print(y)\n+ [ 0. 0.16666667 0.33333334 0.5 ]\n+ \"\"\"\nreturn psum_p.bind(x, axis_name=axis_name)\ndef pmax(x, axis_name):\n+ \"\"\"Compute an all-reduce max on ``x`` over the pmapped axis ``axis_name``.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+\n+ Returns:\n+ An array with the same shape as ``x`` representing the result of an\n+ all-reduce max along the axis ``axis_name``.\n+ \"\"\"\nreturn pmax_p.bind(x, axis_name=axis_name)\ndef pmin(x, axis_name):\n+ \"\"\"Compute an all-reduce min on ``x`` over the pmapped axis ``axis_name``.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+\n+ Returns:\n+ An array with the same shape as ``x`` representing the result of an\n+ all-reduce min along the axis ``axis_name``.\n+ \"\"\"\nreturn pmin_p.bind(x, axis_name=axis_name)\ndef ppermute(x, axis_name, perm):\n+ \"\"\"Perform a collective permutation according to the permutation ``perm``.\n+\n+ This function is an analog of the CollectivePermute XLA HLO.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+ perm: list of pairs of ints, representing (source_index, destination_index)\n+ pairs that encode how the mapped axis named ``axis_name`` should be\n+ shuffled. The integer values are treated as indices into the mapped axis\n+ ``axis_name``. Any two pairs should not have the same source index or the\n+ same destination index. For each index of the axis ``axis_name`` that does\n+ not correspond to a destination index in ``perm``, the corresponding\n+ values in ``x`` are filled with zeros of the appropriate type.\n+\n+ Returns:\n+ An array with the same shape as ``x`` representing the result of an\n+ all-reduce min along the axis ``axis_name``.\n+ \"\"\"\nreturn ppermute_p.bind(x, axis_name=axis_name, perm=perm)\ndef pswapaxes(x, axis_name, axis):\n- \"\"\"Analogue to `np.swapaxes` involving a hidden axis.\n-\n- Specifically, transposes the operand along the axis that's currently hidden\n- and the given concrete axis. The implicit position of the hidden axis remains\n- unchanged.\n+ \"\"\"Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``.\n+\n+ This function is similar to ``psplit`` except the pmapped axis of the input is\n+ placed at the position ``axis`` in the output.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+ axis: int indicating the unmapped axis of ``x`` to map with the name\n+ ``axis_name``.\n+\n+ Returns:\n+ An array with shape ``np.insert(np.delete(x.shape, axis), axis, axis_size)``\n+ where ``axis_size`` is the size of the mapped axis named ``axis_name`` in\n+ the input ``x``.\n\"\"\"\nreturn pswapaxes_p.bind(x, axis_name=axis_name, axis=axis)\ndef psplit(x, axis_name, axis):\n- \"\"\"Merge operand along the hidden axis and split it along `axis`.\n-\n- The newly split axis becomes the hidden axis for the output, and in particular\n- the implicit position of the hidden axis changes.\n+ \"\"\"Unmap the pmapped axis ``axis_name`` and map ``axis`` with the same name.\n+\n+ This function is similar to ``pswapaxes`` except the pmapped axis of the input\n+ is placed as the leading logical axis of the output.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+ axis: int indicating the unmapped axis of ``x`` to map with the name\n+ ``axis_name``.\n+\n+ Returns:\n+ An array with shape ``(axis_size,) + tuple(np.delete(x.shape, axis))`` where\n+ ``axis_size`` is the size of the mapped axis named ``axis_name`` in the\n+ input ``x``.\n\"\"\"\n- # lowering should be:\n- # return xla_all_to_all(x, hidden axis, axis)\nreturn psplit_p.bind(x, axis_name=axis_name, axis=axis)\ndef psplit_like(x, y, axis_name):\n- \"\"\"Split `x` along any axis on which `y` is split, if it is.\"\"\"\n+ \"\"\"Ensure the named mapped axis of ``x`` aligns with that of ``y``.\"\"\"\nreturn psplit_like_p.bind(x, y, axis_name=axis_name)\ndef pcollect(x, axis_name):\n- # lowering should be:\n- # x = xla_broadcast(x, (xb.get_replica_count(),))\n- # return xla_all_to_all(x, 0, dim(axis_name), **params)\nreturn pcollect_p.bind(x, axis_name=axis_name)\n@@ -126,12 +207,8 @@ pxla.parallel_translation_rules[pmin_p] = \\\ndef _ppermute_translation_rule(c, x, device_groups, perm):\ngroup_size = len(device_groups[0])\n- if not all(0 <= i < group_size and 0 <= j < group_size for i, j in perm):\n- msg = (\"ppermute permutation elements must take on values between 0 and \"\n- \"the group size {}, but got {}.\")\n- raise ValueError(msg.format(group_size, perm))\n- sources, dests = unzip2(perm)\n- if not (len(sources) == len(set(sources)) and len(dests) == len(set(dests))):\n+ srcs, dsts = unzip2((src % group_size, dst % group_size) for src, dst in perm)\n+ if not (len(srcs) == len(set(srcs)) and len(dsts) == len(set(dsts))):\nmsg = \"ppermute sources and destinations must be unique, got {}.\"\nraise ValueError(msg.format(perm))\n"
}
] | Python | Apache License 2.0 | google/jax | improve documetnation of lax parallel operators |
260,335 | 17.05.2019 12:38:45 | 25,200 | b1fd8e6eb60ba6f5cf219242ec6172167cadc33e | add test for DeviceConstant repr | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax.py",
"new_path": "jax/lax/lax.py",
"diff": "@@ -3646,7 +3646,7 @@ class _IotaConstant(xla.DeviceConstant):\ndef __init__(self, dtype, shape, axis):\nself.shape = shape\n- self.dtype = dtype\n+ self.dtype = onp.dtype(dtype)\nself.ndim = len(shape)\nself.size = prod(shape)\nself._npy_value = None\n@@ -3675,7 +3675,7 @@ class _EyeConstant(xla.DeviceConstant):\ndef __init__(self, shape, axes, dtype):\nself.shape = shape\n- self.dtype = dtype\n+ self.dtype = onp.dtype(dtype)\nself.ndim = len(shape)\nself.size = prod(shape)\nself._npy_value = None\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_test.py",
"new_path": "tests/lax_test.py",
"diff": "@@ -1343,6 +1343,9 @@ class DeviceConstantTest(jtu.JaxTestCase):\nself.assertAllClose(argument_result, expected, check_dtypes=True)\nself.assertAllClose(jit_result, expected, check_dtypes=True)\n+ # ensure repr doesn't crash\n+ repr(make_const())\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_{}_fill={}\".format(\njtu.format_shape_dtype_string(shape, dtype) if dtype else shape,\n"
}
] | Python | Apache License 2.0 | google/jax | add test for DeviceConstant repr |
260,335 | 17.05.2019 12:46:11 | 25,200 | 8f9e4b12606abd89c5b44125dc75bafdad375721 | BroadcastedIota needs integer type (fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax.py",
"new_path": "jax/lax/lax.py",
"diff": "@@ -3700,7 +3700,7 @@ class _EyeConstant(xla.DeviceConstant):\nelse:\netype = xla_bridge.dtype_to_etype_exact(diag_const.dtype)\netype = xla_bridge.dtype_to_etype(diag_const.dtype)\n- iotas = [c.BroadcastedIota(onp.bool_, diag_const.shape, axis)\n+ iotas = [c.BroadcastedIota(onp.uint32, diag_const.shape, axis)\nfor axis in diag_const.axes]\neyes = [c.Eq(i1, i2) for i1, i2 in zip(iotas[:-1], iotas[1:])]\nreturn c.ConvertElementType(_reduce(c.And, eyes), etype)\n"
}
] | Python | Apache License 2.0 | google/jax | BroadcastedIota needs integer type (fixes #728) |
260,335 | 17.05.2019 13:01:45 | 25,200 | 3b66c7eae79cdb2a2e1d8893cebfd32b827168e4 | add more tests that would have caught | [
{
"change_type": "MODIFY",
"old_path": "tests/lax_test.py",
"new_path": "tests/lax_test.py",
"diff": "@@ -1352,7 +1352,7 @@ class DeviceConstantTest(jtu.JaxTestCase):\nfill_value),\n\"shape\": shape, \"dtype\": dtype, \"fill_value\": fill_value}\nfor dtype in itertools.chain(default_dtypes, [None])\n- for shape in [(), (3,), (2, 3), (2, 3, 4)]\n+ for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001)]\nfor fill_value in [0, 1, onp.pi]))\ndef testFilledConstant(self, shape, fill_value, dtype):\nmake_const = lambda: lax.full(shape, fill_value, dtype)\n@@ -1364,7 +1364,7 @@ class DeviceConstantTest(jtu.JaxTestCase):\njtu.format_shape_dtype_string(shape, dtype), dimension),\n\"shape\": shape, \"dtype\": dtype, \"dimension\": dimension}\nfor dtype in default_dtypes\n- for shape in [(), (3,), (2, 3), (2, 3, 4)]\n+ for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001), (101, 101, 101)]\nfor dimension in range(len(shape))))\ndef testIotaConstant(self, dtype, shape, dimension):\nmake_const = lambda: lax.broadcasted_iota(dtype, shape, dimension)\n@@ -1389,6 +1389,7 @@ class DeviceConstantTest(jtu.JaxTestCase):\n[(2, 3, 4), (0, 1, 2)],\n[(2, 3, 4, 2), (0, 1, 2)],\n[(2, 3, 4, 2), (0, 2, 3)],\n+ [(1001, 1001), (0, 1)],\n]))\ndef testEyeConstant(self, dtype, shape, axes):\nmake_const = lambda: lax.broadcasted_eye(dtype, shape, axes)\n"
}
] | Python | Apache License 2.0 | google/jax | add more tests that would have caught #728 |
260,474 | 17.05.2019 16:54:27 | 14,400 | 7c348588d0baaabc2fd61a4aa05403422026e471 | Fix typo in BatchTrace.post_process_call | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/batching.py",
"new_path": "jax/interpreters/batching.py",
"diff": "@@ -136,7 +136,7 @@ class BatchTrace(Trace):\nmaster = self.master\ndef todo(x):\ntrace = BatchTrace(master, core.cur_sublevel())\n- return BatchTracer(trace, val, dim)\n+ return BatchTracer(trace, x, dim)\nreturn val, todo\n"
}
] | Python | Apache License 2.0 | google/jax | Fix typo in BatchTrace.post_process_call |
260,335 | 17.05.2019 13:56:39 | 25,200 | bb5cbaabaab033a78107987309161041cfa1780b | catch pmap num_devices errors at compile time too | [
{
"change_type": "MODIFY",
"old_path": "jax/interpreters/pxla.py",
"new_path": "jax/interpreters/pxla.py",
"diff": "@@ -246,6 +246,10 @@ def axis_groups(axis_env, name):\ndef compile_replicated(jaxpr, axis_name, axis_size, consts, *abstract_args):\nnum_replicas = axis_size * jaxpr_replicas(jaxpr)\n+ if num_replicas > xb.device_count():\n+ msg = (\"compiling pmap computation that requires {} replicas, but only {} \"\n+ \"XLA devices are available\")\n+ raise ValueError(msg.format(num_replicas, xb.device_count()))\naxis_env = AxisEnv(num_replicas, [axis_name], [axis_size])\narg_shapes = list(map(xla_shape, abstract_args))\nbuilt_c = replicated_comp(jaxpr, axis_env, consts, (), *arg_shapes)\n"
}
] | Python | Apache License 2.0 | google/jax | catch pmap num_devices errors at compile time too |
260,335 | 20.05.2019 06:59:20 | 25,200 | 560b0ba97bc5bdc8ecba09d81172afce74eab408 | mention numpy promotion differences in README | [
{
"change_type": "MODIFY",
"old_path": "README.md",
"new_path": "README.md",
"diff": "@@ -726,14 +726,24 @@ code to compile and end-to-end optimize much bigger functions.\n## Current gotchas\n-For a survey of current gotchas, with examples and explanations, we highly recommend reading the [Gotchas Notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/Common_Gotchas_in_JAX.ipynb).\n+For a survey of current gotchas, with examples and explanations, we highly\n+recommend reading the [Gotchas Notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/Common_Gotchas_in_JAX.ipynb).\nSome stand-out gotchas that might surprise NumPy users:\n-1. [`np.isnan` doesn't yet work](https://github.com/google/jax/issues/276), and in general nan semantics aren't preserved on some backends.\n-2. In-place mutation of arrays isn't supported, though [there is an alternative](https://jax.readthedocs.io/en/latest/jax.ops.html). Generally JAX requires functional code.\n-3. JAX enforces single-precision numbers (32-bit or `float32`) by default and to use double-precision (64-bit or\n-`float64`), one needs to set the `jax_enable_x64` variable **at startup** (set environment variable `JAX_ENABLE_x64 = True` or for other ways, see [here](https://colab.research.google.com/github/google/jax/blob/master/notebooks/Common_Gotchas_in_JAX.ipynb#scrollTo=YTktlwTTMgFl))\n-4. PRNGs are different and can be awkward, though for [good reasons](https://github.com/google/jax/blob/master/design_notes/prng.md), and non-reuse (linearity) is not yet checked.\n+1. JAX enforces single-precision (32-bit, e.g. `float32`) values by default, and\n+ to enable double-precision (64-bit, e.g. `float64`) one needs to set the\n+ `jax_enable_x64` variable **at startup** (or set the environment variable\n+ `JAX_ENABLE_x64=True`, see [the Gotchas Notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/Common_Gotchas_in_JAX.ipynb#scrollTo=YTktlwTTMgFl))\n+2. Some of NumPy's dtype promotion semantics involving a mix of Python scalars\n+ and NumPy types aren't preserved, namely `np.add(1, np.array([2],\n+ np.float32)).dtype` is `float64` rather than `float32`.\n+3. In-place mutation of arrays isn't supported, though [there is an\n+ alternative](https://jax.readthedocs.io/en/latest/jax.ops.html). Generally\n+ JAX requires functional code.\n+4. PRNGs are different and can be awkward, though for [good\n+ reasons](https://github.com/google/jax/blob/master/design_notes/prng.md), and\n+ non-reuse (linearity) is not yet checked.\n+5. NumPy's nan semantics aren't preserved on some backends\nSee [the notebook](https://colab.research.google.com/github/google/jax/blob/master/notebooks/Common_Gotchas_in_JAX.ipynb) for much more information.\n"
}
] | Python | Apache License 2.0 | google/jax | mention numpy promotion differences in README |
260,335 | 20.05.2019 07:06:43 | 25,200 | 0ac4e4b0ac3c8d4f2d2ee530ac23777cfc3778f9 | test cleanup per review | [
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -219,13 +219,9 @@ JAX_OPERATOR_OVERLOADS = [\nop_record(\"__invert__\", 1, int_dtypes, all_shapes, jtu.rand_default(), []),\n# TODO(mattjj): investigate these failures\n# op_record(\"__or__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n- # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n# op_record(\"__and__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n- # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n# op_record(\"__xor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n- # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n# op_record(\"__divmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n- # # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n# TODO(mattjj): lshift, rshift\n]\n@@ -237,6 +233,10 @@ JAX_RIGHT_OPERATOR_OVERLOADS = [\nop_record(\"__rmod__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\nop_record(\"__rfloordiv__\", 2, default_dtypes, all_shapes, jtu.rand_nonzero(), []),\nop_record(\"__rtruediv__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n+ # op_record(\"__ror__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__rand__\", 2, number_dtypes, all_shapes, jtu.rand_default(), []),\n+ # op_record(\"__rxor__\", 2, number_dtypes, all_shapes, jtu.rand_bool(), []),\n+ # op_record(\"__rdivmod__\", 2, number_dtypes, all_shapes, jtu.rand_nonzero(), []),\n]\nnumpy_version = tuple(map(int, onp.version.version.split('.')))\n"
}
] | Python | Apache License 2.0 | google/jax | test cleanup per review |
260,335 | 20.05.2019 09:09:32 | 25,200 | 4ab26774369deba4ac9b95ce54e627a1f55a0648 | fix bug in scan utility _convert_zeros
also add some additional checks to catch errors sooner
fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax_control_flow.py",
"new_path": "jax/lax/lax_control_flow.py",
"diff": "@@ -428,16 +428,25 @@ def _maybe_tracer_tuple_to_abstract_tuple(tup):\n### scan\n-def _convert_zeros(convert_symbolic, example, tangent):\n- if tangent is ad.zero:\n- if not convert_symbolic:\n+def _convert_zeros(instantiate, example, tangent):\n+ t = type(instantiate)\n+ if t is bool:\n+ if instantiate:\n+ return ad.instantiate_zeros(example, tangent)\n+ elif tangent is ad_util.zero:\nreturn core.unit\nelse:\n- return ad.zeros_like_jaxval(example)\n- elif type(tangent) is ad.TangentTuple:\n- return core.pack(map(_convert_zeros, convert_symbolic, example, tangent))\n+ raise TypeError(tangent) # not clear if ever reachable\n+ elif t is tuple:\n+ if type(tangent) is ad.TangentTuple:\n+ return core.pack(map(_convert_zeros, instantiate, example, tangent))\n+ elif tangent is ad_util.zero:\n+ zeros = [ad_util.zero] * len(instantiate)\n+ return core.pack(map(_convert_zeros, instantiate, example, zeros))\n+ else:\n+ raise TypeError(tangent)\nelse:\n- return tangent\n+ raise TypeError(t)\ndef _demote_aval_rank(xs):\nassert isinstance(xs, core.AbstractValue)\n@@ -641,7 +650,7 @@ def _scan_partial_eval(trace, *tracers, **kwargs):\nlength = kwargs.pop('length')\nforward = kwargs.pop('forward')\nassert not kwargs\n- in_pvs, in_consts = unzip2([t.pval for t in tracers])\n+ in_pvs, _ = unzip2([t.pval for t in tracers])\nsc_consts, sc_init, sc_xs = map(pe.unknown, in_pvs)\nsc_carry = sc_init\n@@ -819,7 +828,19 @@ def _make_typed_jaxpr(traceable, in_avals):\nclass FixedPointError(Exception): pass\n+# We use a custom bind for scan just to add some error checks\n+def scan_bind(consts, init, xs, forward, length, jaxpr):\n+ if not core.skip_checks:\n+ assert type(jaxpr.in_avals) is tuple\n+ consts_aval, init_aval, xs_aval = jaxpr.in_avals\n+ assert type(jaxpr.out_aval) is core.AbstractTuple\n+ carry_aval, y_aval = jaxpr.out_aval\n+ assert init_aval == carry_aval\n+ return core.Primitive.bind(scan_p, consts, init, xs,\n+ forward=forward, length=length, jaxpr=jaxpr)\n+\nscan_p = core.Primitive(\"scan\")\n+scan_p.def_custom_bind(scan_bind)\nscan_p.def_impl(_scan_impl)\nad.primitive_jvps[scan_p] = _scan_jvp\nad.primitive_transposes[scan_p] = _scan_transpose\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_control_flow_test.py",
"new_path": "tests/lax_control_flow_test.py",
"diff": "@@ -592,6 +592,37 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = (onp.zeros_like(W_trans), onp.zeros_like(W_out))\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ def testIssue711(self):\n+ # Tests reverse-mode differentiation through a scan for which the scanned\n+ # function also involves reverse-mode differentiation.\n+ # See https://github.com/google/jax/issues/711\n+ def harmonic_bond(conf, params):\n+ return np.sum(conf * params)\n+\n+ def minimize_structure(test_params):\n+ energy_fn = partial(harmonic_bond, params=test_params)\n+ grad_fn = api.grad(energy_fn)\n+\n+ def apply_carry(carry, _):\n+ i, x = carry\n+ new_x = x - 0.1 * api.grad(energy_fn)(x)\n+ new_carry = (i+1, new_x)\n+ return new_carry, _\n+\n+ x0 = np.array([1., 2., 3.])\n+ carry_final, _ = lax.scan(apply_carry, (0, x0), np.zeros((75, 0)))\n+ _, x_final = carry_final\n+ return x_final\n+\n+ initial_params = 0.5\n+ minimize_structure(initial_params) # doesn't crash\n+\n+ def loss(test_params):\n+ x_final = minimize_structure(test_params)\n+ return np.sum(np.sin(1.0 - x_final))\n+\n+ api.grad(loss)(0.25) # doesn't crash\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | fix bug in scan utility _convert_zeros
also add some additional checks to catch errors sooner
fixes #711
Co-authored-by: Dougal Maclaurin <dougalm@google.com> |
260,335 | 20.05.2019 10:08:33 | 25,200 | 88f691f89663236427483db3707521de7e893b62 | make namedtuples transparent (act as pytree nodes) | [
{
"change_type": "MODIFY",
"old_path": "jax/tree_util.py",
"new_path": "jax/tree_util.py",
"diff": "@@ -56,7 +56,7 @@ def tree_map(f, tree):\nleaf given by `f(x)` where `x` is the value at the corresponding leaf in\n`tree`.\n\"\"\"\n- node_type = node_types.get(type(tree))\n+ node_type = _get_node_type(tree)\nif node_type:\nchildren, node_spec = node_type.to_iterable(tree)\nnew_children = [tree_map(f, child) for child in children]\n@@ -79,12 +79,12 @@ def tree_multimap(f, tree, *rest):\nleaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf\nin `tree` and `xs` is the tuple of values at corresponding leaves in `rest`.\n\"\"\"\n- node_type = node_types.get(type(tree))\n+ node_type = _get_node_type(tree)\nif node_type:\nchildren, aux_data = node_type.to_iterable(tree)\nall_children = [children]\nfor other_tree in rest:\n- other_node_type = node_types.get(type(other_tree))\n+ other_node_type = _get_node_type(other_tree)\nif node_type != other_node_type:\nraise TypeError('Mismatch: {} != {}'.format(other_node_type, node_type))\nother_children, other_aux_data = node_type.to_iterable(other_tree)\n@@ -113,7 +113,7 @@ def process_pytree(process_node, tree):\ndef walk_pytree(f_node, f_leaf, tree):\n- node_type = node_types.get(type(tree))\n+ node_type = _get_node_type(tree)\nif node_type:\nchildren, node_spec = node_type.to_iterable(tree)\nproc_children, child_specs = unzip2([walk_pytree(f_node, f_leaf, child)\n@@ -236,3 +236,20 @@ register_pytree_node(tuple, lambda xs: (xs, None), lambda _, xs: tuple(xs))\nregister_pytree_node(list, lambda xs: (tuple(xs), None), lambda _, xs: list(xs))\nregister_pytree_node(dict, dict_to_iterable, lambda keys, xs: dict(zip(keys, xs)))\nregister_pytree_node(type(None), lambda z: ((), None), lambda _, xs: None)\n+\n+\n+# To handle namedtuples, we can't just use the standard table of node_types\n+# because every namedtuple creates its own type and thus would require its own\n+# entry in the table. Instead we use a heuristic check on the type itself to\n+# decide whether it's a namedtuple type, and if so treat it as a pytree node.\n+def _get_node_type(maybe_tree):\n+ t = type(maybe_tree)\n+ return node_types.get(t) or _namedtuple_node(t)\n+\n+def _namedtuple_node(t):\n+ if t.__bases__ == (tuple,) and hasattr(t, '_fields'):\n+ return NamedtupleNode\n+\n+NamedtupleNode = NodeType('namedtuple',\n+ lambda xs: (tuple(xs), type(xs)),\n+ lambda t, xs: t(*xs))\n"
}
] | Python | Apache License 2.0 | google/jax | make namedtuples transparent (act as pytree nodes) |
260,335 | 20.05.2019 10:15:20 | 25,200 | ca66c7693e74aa9f78e514fed3c40cbeef2694bf | add test for namedtuple transparency | [
{
"change_type": "MODIFY",
"old_path": "tests/api_test.py",
"new_path": "tests/api_test.py",
"diff": "@@ -16,11 +16,11 @@ from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n-import six\n+import collections\n-import numpy as onp\nfrom absl.testing import absltest\n-from jax import test_util as jtu\n+import numpy as onp\n+import six\nimport jax.numpy as np\nfrom jax import jit, grad, device_get, device_put, jacfwd, jacrev, hessian\n@@ -29,6 +29,7 @@ from jax.core import Primitive, pack, JaxTuple\nfrom jax.interpreters.ad import defjvp, defvjp, defvjp2, defvjp_all\nfrom jax.interpreters.xla import DeviceArray, DeviceTuple\nfrom jax.abstract_arrays import concretization_err_msg\n+from jax import test_util as jtu\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n@@ -568,6 +569,22 @@ class APITest(jtu.JaxTestCase):\nself.assertIsInstance(x, DeviceArray)\nrepr(x) # doesn't crash\n+ def test_namedtuple_transparency(self):\n+ # See https://github.com/google/jax/issues/446\n+ Point = collections.namedtuple(\"Point\", [\"x\", \"y\"])\n+\n+ def f(pt):\n+ return np.sqrt(pt.x ** 2 + pt.y ** 2)\n+\n+ pt = Point(1., 2.)\n+\n+ f(pt) # doesn't crash\n+ g = api.grad(f)(pt)\n+ self.assertIsInstance(g, Point)\n+\n+ f_jit = api.jit(f)\n+ self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)\n+\nif __name__ == '__main__':\nabsltest.main()\n"
}
] | Python | Apache License 2.0 | google/jax | add test for namedtuple transparency |
260,335 | 20.05.2019 11:13:42 | 25,200 | adb15b7f4f53f7977d42f81514475328b8d8d46b | revise optimizers.py docstrings (no JaxTuples) | [
{
"change_type": "MODIFY",
"old_path": "jax/experimental/optimizers.py",
"new_path": "jax/experimental/optimizers.py",
"diff": "@@ -91,14 +91,9 @@ zip = safe_zip\n# as defining an \"outer pytree\", and a pytree produced by applying init_fun to\n# each leaf of the params pytree, which we can think of as the \"inner pytrees\".\n# Since pytrees can be flattened, that structure is isomorphic to a list of\n-# lists (with no further nesting). This implementation represents that structure\n-# as a JaxTuple-of-JaxTuples so that we can maintain the entire optimizer state\n-# as a single DeviceTuple, and thus pay no pytree traversal overhead when we\n-# dispatch to a `jit`-compiled `update_fun`. That JaxTuple-of-JaxTuples is\n-# stored together with the tree structure data in an OptimizerState instance.\n-\n-pack = tuple # TODO(mattjj): replace with core.pack\n+# lists (with no further nesting).\n+pack = tuple\nOptimizerState = namedtuple(\"OptimizerState\",\n[\"packed_state\", \"tree_def\", \"subtree_defs\"])\nregister_pytree_node(\n@@ -135,8 +130,8 @@ def optimizer(opt_maker):\nget_params :: OptimizerState -> ParameterPytree ndarray\nThe OptimizerState pytree type used by the returned functions is isomorphic\n- to ``ParameterPytree (OptStatePytree ndarray)`` but has an implementation\n- based on JaxTuples to avoid pytree structuring/destructuring overheads.\n+ to ``ParameterPytree (OptStatePytree ndarray)``, but may store the state\n+ instead as e.g. a partially-flattened data structure for performance.\n\"\"\"\n@functools.wraps(opt_maker)\n"
}
] | Python | Apache License 2.0 | google/jax | revise optimizers.py docstrings (no JaxTuples) |
260,335 | 20.05.2019 11:49:09 | 25,200 | f8aa563db1fe91fecea0cbfcba4ca49690d10141 | make jax.numpy.array(3) give 0D array, not scalar
the mechanism is to use lax.reshape (which was already there) and avoid
the optimization that skipped actually calling reshape_p.bind
fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/lax/lax.py",
"new_path": "jax/lax/lax.py",
"diff": "@@ -514,7 +514,7 @@ def reshape(operand, new_sizes, dimensions=None):\n\"\"\"\nsame_shape = onp.shape(operand) == tuple(new_sizes)\nsame_dims = dimensions is None or tuple(dimensions) == tuple(range(onp.ndim(operand)))\n- if same_shape and same_dims:\n+ if onp.shape(operand) and same_shape and same_dims:\nreturn operand\nelse:\nreturn reshape_p.bind(\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -1051,7 +1051,7 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_arg{}\".format(i), \"arg\": arg}\nfor i, arg in enumerate([\n- [1, 2, 3], [1., 2., 3.],\n+ 3., [1, 2, 3], [1., 2., 3.],\n[[1, 2], [3, 4], [5, 6]], [[1, 2.], [3, 4], [5, 6]],\n[[3, onp.array(2), 1], onp.arange(3.)],\n])))\n@@ -1060,6 +1060,9 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself._CheckAgainstNumpy(onp.array, lnp.array, args_maker, check_dtypes=True)\nself._CompileAndCheck(lnp.array, args_maker, check_dtypes=True)\n+ def testIssue121(self):\n+ assert not onp.isscalar(lnp.array(3))\n+\ndef testArrayMethod(self):\nclass arraylike(object):\ndtype = onp.float32\n"
}
] | Python | Apache License 2.0 | google/jax | make jax.numpy.array(3) give 0D array, not scalar
the mechanism is to use lax.reshape (which was already there) and avoid
the optimization that skipped actually calling reshape_p.bind
fixes #121 |
260,335 | 20.05.2019 17:11:18 | 25,200 | d0e1b7be350bc4d705dd5a7193eea2b1b2984197 | wrap np.trace axes (fixes | [
{
"change_type": "MODIFY",
"old_path": "jax/numpy/lax_numpy.py",
"new_path": "jax/numpy/lax_numpy.py",
"diff": "@@ -1444,6 +1444,9 @@ def trace(a, offset=0, axis1=0, axis2=1, dtype=None, out=None):\nif out:\nraise NotImplementedError(\"The 'out' argument to trace is not supported.\")\n+ axis1 = axis1 % ndim(a)\n+ axis2 = axis2 % ndim(a)\n+\na_shape = shape(a)\nif dtype is None:\ndtype = _dtype(a)\n"
},
{
"change_type": "MODIFY",
"old_path": "tests/lax_numpy_test.py",
"new_path": "tests/lax_numpy_test.py",
"diff": "@@ -820,7 +820,9 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nfor dtype in default_dtypes\nfor out_dtype in [None] + number_dtypes\nfor shape in [shape for shape in all_shapes if len(shape) >= 2]\n- for (axis1, axis2) in itertools.combinations(range(len(shape)), 2)\n+ for axis1 in range(-len(shape), len(shape))\n+ for axis2 in range(-len(shape), len(shape))\n+ if (axis1 % len(shape)) != (axis2 % len(shape))\nfor offset in list(range(-4, 4))))\ndef testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng):\nonp_fun = lambda arg: onp.trace(arg, offset, axis1, axis2, out_dtype)\n"
}
] | Python | Apache License 2.0 | google/jax | wrap np.trace axes (fixes #738) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.