Dataset schema (column: type, value range):

author: int64, 658 to 755k
date: stringlengths, 19 to 19
timezone: int64, -46,800 to 43.2k
hash: stringlengths, 40 to 40
message: stringlengths, 5 to 490
mods: list
language: stringclasses, 20 values
license: stringclasses, 3 values
repo: stringlengths, 5 to 68
original_message: stringlengths, 12 to 491

author: 260,335
date: 02.12.2019 18:39:59
timezone: 28,800
hash: ac2af106ed35556841a6fc3bf643b0e200ca0fa4
message: adjust scan docstring (thanks
mods:
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -453,10 +453,12 @@ def scan(f, init, xs, length=None):\nrepresents the type with the same pytree structure and corresponding leaves\neach with an additional leading axis.\n- When both ``a`` and ``b`` are array types, the semantics of ``scan`` are given\n- by this Python implementation::\n+ When ``a`` is an array type or None, and ``b`` is an array type, the semantics\n+ of ``scan`` are given roughly by this Python implementation::\n- def scan(f, init, xs):\n+ def scan(f, init, xs, length=None):\n+ if xs is None:\n+ xs = [None] * length\ncarry = init\nys = []\nfor x in xs:\n@@ -466,7 +468,7 @@ def scan(f, init, xs, length=None):\nUnlike that Python version, both ``a`` and ``b`` may be arbitrary pytree\ntypes, and so multiple arrays can be scanned over at once and produce multiple\n- output arrays.\n+ output arrays. (None is actually an empty pytree.)\nAlso unlike that Python version, ``scan`` is a JAX primitive and is lowered to\na single XLA While HLO. That makes it useful for reducing compilation times\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: adjust scan docstring (thanks @shoyer)

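To make the documented semantics concrete, here is a minimal hedged usage sketch of `jax.lax.scan`, including the newly documented `xs=None` case; the cumulative-sum step function is a hypothetical example, not from the commit:

```python
import jax.numpy as jnp
from jax import lax

def step(carry, x):
    # carry the running sum; also emit it as this step's output y
    carry = carry + x
    return carry, carry

final, ys = lax.scan(step, 0.0, jnp.arange(4.0))
# final == 6.0, ys == [0., 1., 3., 6.]

# With xs=None, scan iterates `length` times, passing x=None each step,
# as the revised docstring's reference implementation shows.
count, _ = lax.scan(lambda c, _: (c + 1, None), 0, None, length=5)
# count == 5
```
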
author: 260,335
date: 04.12.2019 09:50:29
timezone: 28,800
hash: c1aeaf511cb38c9d7a3174446d0525877256e6c9
message: xla_computation option to instantiate const output
mods:
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -198,7 +198,7 @@ def disable_jit():\ndef xla_computation(fun, static_argnums=(), axis_env=None, backend=None,\n- tuple_args=False):\n+ tuple_args=False, instantiate_const_outputs=False):\n\"\"\"Creates a function that produces its XLA computation given example args.\nArgs:\n@@ -212,9 +212,16 @@ def xla_computation(fun, static_argnums=(), axis_env=None, backend=None,\napplications of ``jax.pmap``. See the examples below.\nbackend: This is an experimental feature and the API is likely to change.\nOptional, a string representing the xla backend. 'cpu','gpu', or 'tpu'.\n- tuple_args: Optional, defaults to False. If True, the resulting XLA\n+ tuple_args: Optional bool, defaults to False. If True, the resulting XLA\ncomputation will have a single tuple argument that is unpacked into the\nspecified function arguments.\n+ instantiate_const_outputs: Optional bool, defaults to False. If False, then\n+ ``xla_computation`` does not instantiate constant-valued outputs in the\n+ XLA computation, and so the result is closer to the computation that\n+ ``jax.jit`` produces and may be more useful for studying ``jit`` behavior.\n+ If True, then constant-valued outputs are instantiated in the XLA\n+ computation, which may be more useful for staging computations out of JAX\n+ entirely.\nReturns:\nA wrapped version of ``fun`` that when applied to example arguments returns a\n@@ -294,7 +301,8 @@ def xla_computation(fun, static_argnums=(), axis_env=None, backend=None,\njaxtree_fun, out_tree = flatten_fun(wrapped, in_tree)\navals = map(xla.abstractify, jax_args)\npvals = [pe.PartialVal((aval, core.unit)) for aval in avals]\n- jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals)\n+ jaxpr, _, consts = pe.trace_to_jaxpr(jaxtree_fun, pvals,\n+ instantiate=instantiate_const_outputs)\naxis_env_ = make_axis_env(xla.jaxpr_replicas(jaxpr))\nc = xb.make_computation_builder('xla_computation_{}'.format(fun_name))\nxla_consts = map(c.Constant, consts)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1016,6 +1016,14 @@ class APITest(jtu.JaxTestCase):\nxla_comp = api.xla_computation(f)\nxla_comp(np.arange(8)).GetHloText() # doesn't crash\n+ def test_xla_computation_instantiate_constant_outputs(self):\n+ def f():\n+ return np.zeros((3, 4))\n+\n+ xla_comp = api.xla_computation(f, instantiate_const_outputs=True)()\n+ out_shape, = xla_comp.GetReturnValueShape().tuple_shapes()\n+ self.assertEqual(out_shape.dimensions(), (3, 4))\n+\ndef test_jit_device(self):\ndevice = xb.devices()[-1]\nx = api.jit(lambda x: x, device=device)(3.)\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: xla_computation option to instantiate const output

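A hedged sketch of the new flag, following the test added in this commit; the method names (`GetReturnValueShape`, `tuple_shapes`) match the jaxlib API of this era and differ in later releases:

```python
import jax.numpy as jnp
from jax import api

def f():
    return jnp.zeros((3, 4))

# With instantiate_const_outputs=True, the constant output is materialized
# in the XLA computation rather than elided as jit would do.
xla_comp = api.xla_computation(f, instantiate_const_outputs=True)()
out_shape, = xla_comp.GetReturnValueShape().tuple_shapes()
assert out_shape.dimensions() == (3, 4)
```
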
author: 260,335
date: 04.12.2019 09:55:05
timezone: 28,800
hash: 0899673363e4189dc670792bd3f0317795196b75
message: switch xla_computation instantiate outputs default
mods:
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -198,7 +198,7 @@ def disable_jit():\ndef xla_computation(fun, static_argnums=(), axis_env=None, backend=None,\n- tuple_args=False, instantiate_const_outputs=False):\n+ tuple_args=False, instantiate_const_outputs=True):\n\"\"\"Creates a function that produces its XLA computation given example args.\nArgs:\n@@ -215,7 +215,7 @@ def xla_computation(fun, static_argnums=(), axis_env=None, backend=None,\ntuple_args: Optional bool, defaults to False. If True, the resulting XLA\ncomputation will have a single tuple argument that is unpacked into the\nspecified function arguments.\n- instantiate_const_outputs: Optional bool, defaults to False. If False, then\n+ instantiate_const_outputs: Optional bool, defaults to True. If False, then\n``xla_computation`` does not instantiate constant-valued outputs in the\nXLA computation, and so the result is closer to the computation that\n``jax.jit`` produces and may be more useful for studying ``jit`` behavior.\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: switch xla_computation instantiate outputs default

author: 260,335
date: 04.12.2019 19:34:21
timezone: 28,800
hash: 0c0137d787830d8ebd584c4610f7932f3787cab6
message: avoid compiling trivial programs from partial_eval also minor clean up in api_test.py
mods:
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -37,7 +37,8 @@ from ..abstract_arrays import (ConcreteArray, ShapedArray, AbstractToken,\nmake_shaped_array, array_types, raise_to_shaped,\nabstract_token, make_abstract_python_scalar)\nfrom ..core import valid_jaxtype, Literal\n-from ..util import partial, partialmethod, cache, safe_map, prod, unzip2\n+from ..util import (partial, partialmethod, cache, safe_map, prod, unzip2,\n+ memoize)\nfrom ..lib import xla_bridge as xb\nfrom ..lib import xla_client as xc\nfrom . import partial_eval as pe\n@@ -382,16 +383,24 @@ def _xla_call_impl(fun, *args, **params):\n@lu.cache\ndef _xla_callable(fun, device, backend, *abstract_args):\n- log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG\n- logging.log(log_priority,\n- \"Compiling {} for args {}.\".format(fun.__name__, abstract_args))\n-\npvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]\nwith core.new_master(pe.JaxprTrace, True) as master:\njaxpr, (pvals, consts, env) = pe.trace_to_subjaxpr(fun, master, False).call_wrapped(pvals)\nassert not env # no subtraces here\ndel master, env\n_map(prefetch, it.chain(consts, jaxpr_literals(jaxpr)))\n+ result_handlers = tuple(map(_pval_to_result_handler, pvals))\n+\n+ # Computations that only produce constants and/or only rearrange their inputs,\n+ # which are often produced from partial evaluation, don't need compilation,\n+ # and don't need to force their (potentially lazy) arguments.\n+ if not jaxpr.eqns:\n+ device = _get_device(device, backend)\n+ return partial(_execute_trivial, jaxpr, device, consts, result_handlers)\n+\n+ log_priority = logging.WARNING if FLAGS.jax_log_compiles else logging.DEBUG\n+ logging.log(log_priority,\n+ \"Compiling {} for args {}.\".format(fun.__name__, abstract_args))\nnreps = jaxpr_replicas(jaxpr)\nif nreps > xb.device_count(backend):\n@@ -419,7 +428,6 @@ def _xla_callable(fun, device, backend, *abstract_args):\nnum_replicas=nreps, device_assignment=(device.id,) if device else None)\ncompiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))\n- result_handlers = tuple(map(_pval_to_result_handler, pvals))\nif nreps == 1:\nreturn partial(_execute_compiled, compiled, backend, result_handlers, tuple_args)\nelse:\n@@ -466,9 +474,28 @@ def _execute_replicated(compiled, backend, handlers, tuple_args, *args):\nif FLAGS.jax_debug_nans: check_nans(xla_call_p, out_bufs)\nreturn [handler(out_buf) for handler, out_buf in zip(handlers, out_bufs)]\n+def _execute_trivial(jaxpr, device, consts, handlers, *args):\n+ env = {core.unitvar : core.unit}\n+ _map(env.setdefault, jaxpr.invars, args)\n+ _map(env.setdefault, jaxpr.constvars, consts)\n+ outs = [canonicalize_dtype(v.val) if type(v) is Literal else env[v]\n+ for v in jaxpr.outvars]\n+ return [x if type(x) is DeviceArray else handler(device_put(x, device))\n+ for handler, x in zip(handlers, outs)]\n+\ndef make_tuple(bufs, device, backend):\nreturn xb.get_backend(backend).make_tuple(bufs, device)\n+@memoize\n+def _get_device(device, backend):\n+ # TODO(mattjj): after jaxlib update, avoid compile here, just to get device\n+ c = xb.make_computation_builder(\"get_device\")\n+ built = c.Build(c.Tuple())\n+ options = xb.get_compile_options(\n+ num_replicas=1, device_assignment=(device.id,) if device else None)\n+ compiled = built.Compile(compile_options=options, backend=xb.get_backend(backend))\n+ out, = compiled.local_devices()\n+ return out\nxla_call_p = 
core.Primitive('xla_call')\nxla_call_p.multiple_results = True\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -152,7 +152,6 @@ class APITest(jtu.JaxTestCase):\nassert g(2.0) == 4.0\nassert len(side) == 1\n-\ndef test_bad_input(self):\ndef f(x):\nreturn x\n@@ -165,15 +164,6 @@ class APITest(jtu.JaxTestCase):\nTypeError, \".* 'foo' of type <.*'str'> is not a valid JAX type\",\nlambda: jit(f)(\"foo\"))\n- # TODO(dougalm): enable when we remove 'None' from pytree nodes\n- # def test_bad_output(self):\n- # def f(x):\n- # pass\n-\n- # grad(f)(onp.zeros(3))\n- # jit(f)(onp.zeros(3))\n- # assert False\n-\ndef test_grad_tuple_output(self):\njtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,\n\"Gradient only defined for scalar-output functions. \")\n@@ -506,7 +496,6 @@ class APITest(jtu.JaxTestCase):\n\"primal and tangent arguments to jax.jvp must have equal types\",\nlambda: api.jvp(lambda x: -x, (onp.float16(2),), (onp.float32(4),)))\n-\ndef test_jvp_non_tuple_arguments(self):\ndef f(x, y): return x + y\nself.assertRaisesRegex(\n@@ -1310,49 +1299,6 @@ class APITest(jtu.JaxTestCase):\n\"positional arguments.\",\nlambda: partial(df, x=0.)(y=1.))\n-\n-class JaxprTest(jtu.JaxTestCase):\n-\n- def test_scalar_literals(self):\n- jaxpr = api.make_jaxpr(lambda x: x + 2)(42)\n- self.assertLen(jaxpr.jaxpr.constvars, 0)\n-\n- def test_const(self):\n- def fun(x):\n- return (x, 1., np.zeros(1))\n-\n- jaxpr = api.make_jaxpr(fun)(0.)\n- self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n- { lambda b ; ; a.\n- let\n- in [a, 1.0, b] }\n- \"\"\")\n-\n- def test_cond(self):\n- def f(x):\n- return lax.cond(x >= 0.,\n- x + 1.,\n- lambda xt: xt + x,\n- x + 2.,\n- lambda xf: xf - x)\n- jaxpr = api.make_jaxpr(f)(3.)\n- self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n- { lambda ; ; a.\n- let b = ge a 0.0\n- c = add a 1.0\n- d = add a 2.0\n- e = cond[ false_jaxpr={ lambda ; ; b a.\n- let c = sub a b\n- in [c] }\n- false_nconsts=1\n- true_jaxpr={ lambda ; ; b a.\n- let c = add a b\n- in [c] }\n- true_nconsts=1 ] b a c a d\n- in [e] }\n- \"\"\")\n-\n-\ndef test_grad_of_jit_compilation_caching(self):\nif not hasattr(self, \"assertLogs\"):\nraise unittest.SkipTest(\"test requires assertLogs (python 3)\")\n@@ -1561,6 +1507,62 @@ class JaxprTest(jtu.JaxTestCase):\nself.assertAllClose(f1(x), f2(x), check_dtypes=False)\nself.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)\n+ def test_trivial_computations(self):\n+ x = np.array([1, 2, 3])\n+ y = api.jit(lambda x: x)(x)\n+ self.assertIs(x, y)\n+\n+ z1, z2 = api.jit(lambda x: (x, x))(x)\n+ self.assertIs(z1, z2)\n+\n+ x1, x2 = np.array([1, 2]), np.array([2, 3])\n+ z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)\n+ self.assertIs(z1, x2)\n+ self.assertIs(z3, x1)\n+ self.assertEqual(z2, 1)\n+\n+\n+class JaxprTest(jtu.JaxTestCase):\n+\n+ def test_scalar_literals(self):\n+ jaxpr = api.make_jaxpr(lambda x: x + 2)(42)\n+ self.assertLen(jaxpr.jaxpr.constvars, 0)\n+\n+ def test_const(self):\n+ def fun(x):\n+ return (x, 1., np.zeros(1))\n+\n+ jaxpr = api.make_jaxpr(fun)(0.)\n+ self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n+ { lambda b ; ; a.\n+ let\n+ in [a, 1.0, b] }\n+ \"\"\")\n+\n+ def test_cond(self):\n+ def f(x):\n+ return lax.cond(x >= 0.,\n+ x + 1.,\n+ lambda xt: xt + x,\n+ x + 2.,\n+ lambda xf: xf - x)\n+ jaxpr = api.make_jaxpr(f)(3.)\n+ self.assertMultiLineStrippedEqual(str(jaxpr), \"\"\"\n+ { lambda ; ; a.\n+ let b = ge a 0.0\n+ c = add a 1.0\n+ d = add a 2.0\n+ e = 
cond[ false_jaxpr={ lambda ; ; b a.\n+ let c = sub a b\n+ in [c] }\n+ false_nconsts=1\n+ true_jaxpr={ lambda ; ; b a.\n+ let c = add a b\n+ in [c] }\n+ true_nconsts=1 ] b a c a d\n+ in [e] }\n+ \"\"\")\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: avoid compiling trivial programs from partial_eval also minor clean up in api_test.py

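Per the `test_trivial_computations` test added here, a sketch of the user-visible effect (behavior as of this commit; later JAX versions may copy outputs rather than alias them):

```python
import jax.numpy as jnp
from jax import jit

x = jnp.array([1, 2, 3])
# The jaxpr has no equations, so the new fast path skips XLA compilation
# and hands back the existing device buffer.
y = jit(lambda x: x)(x)
assert y is x

z1, z2 = jit(lambda x: (x, x))(x)
assert z1 is z2
```
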
author: 260,335
date: 06.12.2019 10:23:17
timezone: 28,800
hash: d17e69ee2dc3e973deb1392215e8a9541b669827
message: add whitelist for multi-host collectives
mods:
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -460,6 +460,14 @@ def parallel_callable(fun, backend, axis_name, axis_size, devices, *avals):\ndel master\nout_pvs, out_consts = unzip2(out_pvals)\n+ # TODO(skye,mattjj): allow more collectives on multi-host as we test them, but\n+ # for now raise an error\n+ if xb.host_count() > 1:\n+ used_collectives = set(xla.jaxpr_collectives(jaxpr))\n+ if not used_collectives.issubset(multi_host_supported_collectives):\n+ msg = \"using collectives that aren't supported for multi-host: {}\"\n+ raise TypeError(msg.format(\", \".join(map(str, used_collectives))))\n+\nif all(pv is None for pv in out_pvs):\n# When the output doesn't depend on the input we don't need to compile an\n# XLA computation at all; we handle this as a special case so we can stage\n@@ -514,6 +522,8 @@ def parallel_callable(fun, backend, axis_name, axis_size, devices, *avals):\nhandle_outs = _pvals_to_results_handler(axis_size, num_local_replicas, out_pvals)\nreturn partial(execute_replicated, compiled, backend, num_local_replicas, handle_args, handle_outs)\n+multi_host_supported_collectives = set()\n+\nclass ResultToPopulate(object): pass\nresult_to_populate = ResultToPopulate()\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -353,6 +353,9 @@ def eqn_replicas(eqn):\nelse:\nreturn 1\n+# TODO(mattjj,skyewm): the functions here are utilities for checking if\n+# not-yet-supported features are used with multi-host programming\n+\ndef jaxpr_has_pmap(jaxpr):\nreturn any(eqn_has_pmap(eqn) for eqn in jaxpr.eqns)\n@@ -368,6 +371,27 @@ def eqn_has_pmap(eqn):\nreturn 'pmap' in eqn.primitive.name\n+def jaxpr_collectives(jaxpr):\n+ return it.chain.from_iterable(eqn_collectives(eqn) for eqn in jaxpr.eqns)\n+\n+def eqn_collectives(eqn):\n+ if eqn.bound_subjaxprs:\n+ (subjaxpr, _, _), = eqn.bound_subjaxprs\n+ for c in jaxpr_collectives(subjaxpr):\n+ yield c\n+ elif eqn.primitive in initial_style_translations:\n+ for param in eqn.params.values():\n+ if type(param) is core.Jaxpr:\n+ for c in jaxpr_collectives(param):\n+ yield c\n+ elif type(param) is core.TypedJaxpr:\n+ for c in jaxpr_collectives(param.jaxpr):\n+ yield c\n+ else:\n+ if eqn.primitive in parallel_translations:\n+ yield eqn.primitive\n+\n+\n### xla_call underlying jit\ndef _xla_call_impl(fun, *args, **params):\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_parallel.py", "new_path": "jax/lax/lax_parallel.py", "diff": "@@ -210,6 +210,7 @@ pxla.split_axis_rules[psum_p] = \\\nxla.parallel_translations[psum_p] = _psum_translation_rule\npxla.parallel_pure_rules[psum_p] = lambda x, shape: x * prod(shape)\nad.deflinear(psum_p, lambda t, axis_name: [psum(t, axis_name)])\n+pxla.multi_host_supported_collectives.add(psum_p)\npmax_p = standard_pmap_primitive('pmax')\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: add whitelist for multi-host collectives

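`psum` is the first entry in the new whitelist; a hedged sketch of the check's effect (single-host code runs unchanged, while on multi-host pmaps any non-whitelisted collective now raises a TypeError):

```python
import jax
import jax.numpy as jnp
from jax import lax

# psum is in pxla.multi_host_supported_collectives, so this works even
# when the pmap spans hosts; other collectives would raise
# "using collectives that aren't supported for multi-host: ...".
n = jax.local_device_count()
out = jax.pmap(lambda x: lax.psum(x, 'i'), axis_name='i')(jnp.ones(n))
```
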
author: 260,335
date: 06.12.2019 10:29:54
timezone: 28,800
hash: 5eafc7baba966e868fba1093e8a50c445130b873
message: improve condition for multi-host pmap checking
mods:
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -462,7 +462,11 @@ def parallel_callable(fun, backend, axis_name, axis_size, devices, *avals):\n# TODO(skye,mattjj): allow more collectives on multi-host as we test them, but\n# for now raise an error\n- if xb.host_count() > 1:\n+ if devices is not None:\n+ is_multi_host_pmap = any(d.host_id != xb.host_id() for d in devices)\n+ else:\n+ is_multi_host_pmap = xb.host_count() > 1\n+ if is_multi_host_pmap:\nused_collectives = set(xla.jaxpr_collectives(jaxpr))\nif not used_collectives.issubset(multi_host_supported_collectives):\nmsg = \"using collectives that aren't supported for multi-host: {}\"\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: improve condition for multi-host pmap checking

author: 260,335
date: 06.12.2019 20:32:46
timezone: 28,800
hash: 80f455d3f0688734efc384c304e68f8c0d060501
message: make eval_jaxpr get jit cache hits
mods:
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -27,7 +27,8 @@ import types\nimport six\nfrom . import linear_util as lu\n-from .util import safe_zip, safe_map, partial, curry\n+from .linear_util import transformation, transformation_with_aux, wrap_init\n+from .util import safe_zip, safe_map, partial, curry, WrapHashably\nfrom .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\n@@ -197,18 +198,28 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nmap(write, jaxpr.freevars, freevar_vals)\nfor eqn in jaxpr.eqns:\nin_vals = map(read, eqn.invars)\n- subfuns = [partial(eval_jaxpr, subjaxpr, map(read, const_bindings),\n- map(read, freevar_bindings))\n- for subjaxpr, const_bindings, freevar_bindings\n- in eqn.bound_subjaxprs]\n- subfuns = map(lu.wrap_init, subfuns)\n- ans = eqn.primitive.bind(*(subfuns + in_vals), **eqn.params)\n+ if not eqn.bound_subjaxprs:\n+ ans = eqn.primitive.bind(*in_vals, **eqn.params)\n+ else:\n+ (subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\n+ sub_consts = tuple(WrapHashably(read(v)) for v in const_bindings)\n+ sub_freevar_vals = tuple(WrapHashably(read(v)) for v in freevar_bindings)\n+ fun = hashable_partial(wrap_init(eval_jaxpr), subjaxpr,\n+ sub_consts, sub_freevar_vals)\n+ ans = eqn.primitive.bind(fun, *in_vals, **eqn.params)\nif eqn.primitive.multiple_results:\nmap(write, eqn.outvars, ans)\nelse:\nwrite(eqn.outvars[0], ans)\nreturn map(read, jaxpr.outvars)\n+@transformation\n+def hashable_partial(jaxpr, consts_hashable, freevar_vals_hashable, *args):\n+ consts = [x.val for x in consts_hashable]\n+ freevar_vals = [x.val for x in freevar_vals_hashable]\n+ ans = yield (jaxpr, consts, freevar_vals) + args, {}\n+ yield ans\n+\ndef full_lower(val):\nif isinstance(val, Tracer):\n@@ -565,7 +576,7 @@ def apply_todos(todos, outs):\nouts = map(full_lower, todos.pop()(outs))\nreturn outs\n-@lu.transformation_with_aux\n+@transformation_with_aux\ndef process_env_traces(primitive, level, params_tuple, *args):\nouts = yield args, {}\nparams = dict(params_tuple)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -34,9 +34,9 @@ import jax\nimport jax.numpy as np\nfrom jax import jit, grad, device_put, jacfwd, jacrev, hessian\nfrom jax import api, lax\n-from jax.core import Primitive\n+from jax import core\nfrom jax.interpreters import ad\n-from jax.interpreters.xla import DeviceArray\n+from jax.interpreters import xla\nfrom jax.abstract_arrays import concretization_err_msg\nfrom jax.lib import xla_bridge as xb\nfrom jax import test_util as jtu\n@@ -241,7 +241,7 @@ class APITest(jtu.JaxTestCase):\n\"|Abstract value passed to .*)\", lambda: jit(f)(0))\ndef test_unimplemented_interpreter_rules(self):\n- foo_p = Primitive('foo')\n+ foo_p = core.Primitive('foo')\ndef foo(x):\nreturn foo_p.bind(x)\n@@ -268,7 +268,7 @@ class APITest(jtu.JaxTestCase):\ndef test_device_put_and_get(self):\nx = onp.arange(12.).reshape((3, 4)).astype(\"float32\")\ndx = api.device_put(x)\n- self.assertIsInstance(dx, DeviceArray)\n+ self.assertIsInstance(dx, xla.DeviceArray)\nx2 = api.device_get(dx)\nself.assertIsInstance(x2, onp.ndarray)\nassert onp.all(x == x2)\n@@ -593,7 +593,7 @@ class APITest(jtu.JaxTestCase):\nself.assertRaises(TypeError, lambda: jacfwd(lambda x: np.sin(x))(1 + 2j))\ndef test_defvjp_all(self):\n- foo_p = Primitive('foo')\n+ foo_p = core.Primitive('foo')\ndef foo(x): return 2. 
* foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (4 * g * np.sin(x),)))\n@@ -602,7 +602,7 @@ class APITest(jtu.JaxTestCase):\nself.assertAllClose(grad_ans, 4 * 2 * onp.sin(3.), check_dtypes=False)\ndef test_defvjp_all_const(self):\n- foo_p = Primitive('foo')\n+ foo_p = core.Primitive('foo')\ndef foo(x): return foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (12.,)))\n@@ -611,7 +611,7 @@ class APITest(jtu.JaxTestCase):\nself.assertAllClose(grad_ans, 12., check_dtypes=True)\ndef test_defvjp_all_higher_order_revmode(self):\n- foo_p = Primitive('foo')\n+ foo_p = core.Primitive('foo')\ndef foo(x): return 2. * foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (g * x ** 2,)))\n@@ -622,7 +622,7 @@ class APITest(jtu.JaxTestCase):\n# also tests passing in symbolic zero tangents b/c we differentiate wrt only\n# the first argument in one case\n- foo_p = Primitive('foo')\n+ foo_p = core.Primitive('foo')\ndef foo(x, y): return foo_p.bind(x, y)\ndef vjpfun(x, y):\n@@ -791,11 +791,11 @@ class APITest(jtu.JaxTestCase):\ndef test_devicearray_repr(self):\nx = device_put(np.zeros(3))\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nrepr(x) # doesn't crash\nx = device_put(np.ones(3) + 1j * np.ones(3))\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nrepr(x) # doesn't crash\ndef test_devicearray_delete(self):\n@@ -1016,7 +1016,7 @@ class APITest(jtu.JaxTestCase):\ndef test_jit_device(self):\ndevice = xb.devices()[-1]\nx = api.jit(lambda x: x, device=device)(3.)\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nself.assertEqual(x.device_buffer.device(), device)\ndef test_jit_of_noncallable(self):\n@@ -1563,6 +1563,26 @@ class JaxprTest(jtu.JaxTestCase):\nin [e] }\n\"\"\")\n+ def test_eval_jaxpr_caching(self):\n+ eval_jaxpr = core.eval_jaxpr\n+\n+ count = [0]\n+ def eval_jaxpr_and_count(*args):\n+ count[0] += 1\n+ return eval_jaxpr(*args)\n+\n+ f = jit(lambda x: 2 * x)\n+ jaxpr = api.make_jaxpr(f)(3)\n+\n+ try:\n+ core.eval_jaxpr = eval_jaxpr_and_count\n+ core.jaxpr_as_fun(jaxpr)(3) # once outer and once inner\n+ core.jaxpr_as_fun(jaxpr)(4) # once outer but inner is cached\n+ finally:\n+ core.eval_jaxpr = eval_jaxpr\n+\n+ self.assertEqual(count[0], 3)\n+\nif __name__ == '__main__':\nabsltest.main()\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: make eval_jaxpr get jit cache hits

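The new test illustrates the intended caching (note the very next record rolls this commit back); a sketch of that test's logic:

```python
from jax import api, core, jit

f = jit(lambda x: 2 * x)
jaxpr = api.make_jaxpr(f)(3)

# With this change, re-running the jaxpr hits the jit trace cache:
core.jaxpr_as_fun(jaxpr)(3)  # evaluates outer and inner jaxprs
core.jaxpr_as_fun(jaxpr)(4)  # outer runs again, inner eval is cached
```
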
author: 260,335
date: 06.12.2019 22:28:41
timezone: 28,800
hash: 7083b0a78edd8f2e88abe3f395ee0f51ac915082
message: roll back previous commit There was a mysterious failure on an internal test, and that mysteriousness means I didn't fully understand the attempted fix, so best to roll back for now.
mods:
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -27,8 +27,7 @@ import types\nimport six\nfrom . import linear_util as lu\n-from .linear_util import transformation, transformation_with_aux, wrap_init\n-from .util import safe_zip, safe_map, partial, curry, WrapHashably\n+from .util import safe_zip, safe_map, partial, curry\nfrom .pprint_util import pp, vcat, hcat, pp_kv_pairs\n# TODO(dougalm): the trace cache breaks the leak detector. Consisder solving.\n@@ -198,28 +197,18 @@ def eval_jaxpr(jaxpr, consts, freevar_vals, *args):\nmap(write, jaxpr.freevars, freevar_vals)\nfor eqn in jaxpr.eqns:\nin_vals = map(read, eqn.invars)\n- if not eqn.bound_subjaxprs:\n- ans = eqn.primitive.bind(*in_vals, **eqn.params)\n- else:\n- (subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\n- sub_consts = tuple(WrapHashably(read(v)) for v in const_bindings)\n- sub_freevar_vals = tuple(WrapHashably(read(v)) for v in freevar_bindings)\n- fun = hashable_partial(wrap_init(eval_jaxpr), subjaxpr,\n- sub_consts, sub_freevar_vals)\n- ans = eqn.primitive.bind(fun, *in_vals, **eqn.params)\n+ subfuns = [partial(eval_jaxpr, subjaxpr, map(read, const_bindings),\n+ map(read, freevar_bindings))\n+ for subjaxpr, const_bindings, freevar_bindings\n+ in eqn.bound_subjaxprs]\n+ subfuns = map(lu.wrap_init, subfuns)\n+ ans = eqn.primitive.bind(*(subfuns + in_vals), **eqn.params)\nif eqn.primitive.multiple_results:\nmap(write, eqn.outvars, ans)\nelse:\nwrite(eqn.outvars[0], ans)\nreturn map(read, jaxpr.outvars)\n-@transformation\n-def hashable_partial(jaxpr, consts_hashable, freevar_vals_hashable, *args):\n- consts = [x.val for x in consts_hashable]\n- freevar_vals = [x.val for x in freevar_vals_hashable]\n- ans = yield (jaxpr, consts, freevar_vals) + args, {}\n- yield ans\n-\ndef full_lower(val):\nif isinstance(val, Tracer):\n@@ -576,7 +565,7 @@ def apply_todos(todos, outs):\nouts = map(full_lower, todos.pop()(outs))\nreturn outs\n-@transformation_with_aux\n+@lu.transformation_with_aux\ndef process_env_traces(primitive, level, params_tuple, *args):\nouts = yield args, {}\nparams = dict(params_tuple)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -34,9 +34,9 @@ import jax\nimport jax.numpy as np\nfrom jax import jit, grad, device_put, jacfwd, jacrev, hessian\nfrom jax import api, lax\n-from jax import core\n+from jax.core import Primitive\nfrom jax.interpreters import ad\n-from jax.interpreters import xla\n+from jax.interpreters.xla import DeviceArray\nfrom jax.abstract_arrays import concretization_err_msg\nfrom jax.lib import xla_bridge as xb\nfrom jax import test_util as jtu\n@@ -241,7 +241,7 @@ class APITest(jtu.JaxTestCase):\n\"|Abstract value passed to .*)\", lambda: jit(f)(0))\ndef test_unimplemented_interpreter_rules(self):\n- foo_p = core.Primitive('foo')\n+ foo_p = Primitive('foo')\ndef foo(x):\nreturn foo_p.bind(x)\n@@ -268,7 +268,7 @@ class APITest(jtu.JaxTestCase):\ndef test_device_put_and_get(self):\nx = onp.arange(12.).reshape((3, 4)).astype(\"float32\")\ndx = api.device_put(x)\n- self.assertIsInstance(dx, xla.DeviceArray)\n+ self.assertIsInstance(dx, DeviceArray)\nx2 = api.device_get(dx)\nself.assertIsInstance(x2, onp.ndarray)\nassert onp.all(x == x2)\n@@ -593,7 +593,7 @@ class APITest(jtu.JaxTestCase):\nself.assertRaises(TypeError, lambda: jacfwd(lambda x: np.sin(x))(1 + 2j))\ndef test_defvjp_all(self):\n- foo_p = core.Primitive('foo')\n+ foo_p = Primitive('foo')\ndef foo(x): return 2. 
* foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (4 * g * np.sin(x),)))\n@@ -602,7 +602,7 @@ class APITest(jtu.JaxTestCase):\nself.assertAllClose(grad_ans, 4 * 2 * onp.sin(3.), check_dtypes=False)\ndef test_defvjp_all_const(self):\n- foo_p = core.Primitive('foo')\n+ foo_p = Primitive('foo')\ndef foo(x): return foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (12.,)))\n@@ -611,7 +611,7 @@ class APITest(jtu.JaxTestCase):\nself.assertAllClose(grad_ans, 12., check_dtypes=True)\ndef test_defvjp_all_higher_order_revmode(self):\n- foo_p = core.Primitive('foo')\n+ foo_p = Primitive('foo')\ndef foo(x): return 2. * foo_p.bind(x)\nad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (g * x ** 2,)))\n@@ -622,7 +622,7 @@ class APITest(jtu.JaxTestCase):\n# also tests passing in symbolic zero tangents b/c we differentiate wrt only\n# the first argument in one case\n- foo_p = core.Primitive('foo')\n+ foo_p = Primitive('foo')\ndef foo(x, y): return foo_p.bind(x, y)\ndef vjpfun(x, y):\n@@ -791,11 +791,11 @@ class APITest(jtu.JaxTestCase):\ndef test_devicearray_repr(self):\nx = device_put(np.zeros(3))\n- self.assertIsInstance(x, xla.DeviceArray)\n+ self.assertIsInstance(x, DeviceArray)\nrepr(x) # doesn't crash\nx = device_put(np.ones(3) + 1j * np.ones(3))\n- self.assertIsInstance(x, xla.DeviceArray)\n+ self.assertIsInstance(x, DeviceArray)\nrepr(x) # doesn't crash\ndef test_devicearray_delete(self):\n@@ -1016,7 +1016,7 @@ class APITest(jtu.JaxTestCase):\ndef test_jit_device(self):\ndevice = xb.devices()[-1]\nx = api.jit(lambda x: x, device=device)(3.)\n- self.assertIsInstance(x, xla.DeviceArray)\n+ self.assertIsInstance(x, DeviceArray)\nself.assertEqual(x.device_buffer.device(), device)\ndef test_jit_of_noncallable(self):\n@@ -1563,26 +1563,6 @@ class JaxprTest(jtu.JaxTestCase):\nin [e] }\n\"\"\")\n- def test_eval_jaxpr_caching(self):\n- eval_jaxpr = core.eval_jaxpr\n-\n- count = [0]\n- def eval_jaxpr_and_count(*args):\n- count[0] += 1\n- return eval_jaxpr(*args)\n-\n- f = jit(lambda x: 2 * x)\n- jaxpr = api.make_jaxpr(f)(3)\n-\n- try:\n- core.eval_jaxpr = eval_jaxpr_and_count\n- core.jaxpr_as_fun(jaxpr)(3) # once outer and once inner\n- core.jaxpr_as_fun(jaxpr)(4) # once outer but inner is cached\n- finally:\n- core.eval_jaxpr = eval_jaxpr\n-\n- self.assertEqual(count[0], 3)\n-\nif __name__ == '__main__':\nabsltest.main()\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: roll back previous commit #1829 There was a mysterious failure on an internal test, and that mysteriousness means I didn't fully understand the attempted fix, so best to roll back for now.

author: 260,270
date: 09.12.2019 16:06:59
timezone: 0
hash: 26e863923af0b1fbbbae5ef9396e33eb376ad22a
message: Support atrous conv in same padded convolution and add warning if use transposed convolution with same or valid padding.
mods:
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "new_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "diff": "\"out = lax.conv_general_dilated(img, # lhs = image tensor\\n\",\n\" kernel, # rhs = conv kernel tensor\\n\",\n\" (1,1), # window strides\\n\",\n- \" 'SAME', # padding mode\\n\",\n+ \" ((0, 0), (0, 0)), # padding mode\\n\",\n\" (2,2), # lhs/image dilation\\n\",\n\" (1,1), # rhs/kernel dilation\\n\",\n\" dn) # dimension_numbers = lhs, rhs, out dimension permutation\\n\",\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -484,15 +484,22 @@ def conv_general_dilated(lhs, rhs, window_strides, padding, lhs_dilation=None,\nif type(dimension_numbers) is not ConvDimensionNumbers:\ndimension_numbers = conv_dimension_numbers(\nlhs.shape, rhs.shape, dimension_numbers)\n- if isinstance(padding, str):\n- lhs_perm, rhs_perm, _ = dimension_numbers\n- padding = padtype_to_pads(\n- onp.take(lhs.shape, lhs_perm)[2:], onp.take(rhs.shape, rhs_perm)[2:],\n- window_strides, padding)\nif lhs_dilation is None:\nlhs_dilation = (1,) * (lhs.ndim - 2)\n+ elif isinstance(padding, str) and not len(lhs_dilation) == lhs_dilation.count(1):\n+ raise ValueError(\n+ \"String padding is not implemented for transposed convolution \"\n+ \"using this op. Please either exactly specify the required padding or \"\n+ \"use conv_transpose.\")\nif rhs_dilation is None:\nrhs_dilation = (1,) * (rhs.ndim - 2)\n+ if isinstance(padding, str):\n+ lhs_perm, rhs_perm, _ = dimension_numbers\n+ rhs_shape = onp.take(rhs.shape, rhs_perm)[2:]\n+ effective_rhs_shape = [(k-1) * r + 1 for k, r in zip(rhs_shape, rhs_dilation)]\n+ padding = padtype_to_pads(\n+ onp.take(lhs.shape, lhs_perm)[2:], effective_rhs_shape,\n+ window_strides, padding)\nreturn conv_general_dilated_p.bind(\nlhs, rhs, window_strides=tuple(window_strides), padding=tuple(padding),\nlhs_dilation=tuple(lhs_dilation), rhs_dilation=tuple(rhs_dilation),\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: Support atrous conv in same padded convolution and add warning if use transposed convolution with same or valid padding. (#1806) PiperOrigin-RevId: 283517237

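A hedged sketch of the fix: with 'SAME' padding the pads are now computed from the effective (dilated) kernel size, so an atrous convolution preserves spatial shape; the shapes here are illustrative:

```python
import numpy as onp
from jax import lax

img = onp.zeros((1, 1, 10, 10), onp.float32)   # NCHW
kernel = onp.zeros((1, 1, 3, 3), onp.float32)  # OIHW
# Effective kernel size is (3-1)*2 + 1 = 5 per spatial dim, and 'SAME'
# now pads for that, keeping the 10x10 spatial shape.
out = lax.conv_general_dilated(img, kernel, window_strides=(1, 1),
                               padding='SAME', rhs_dilation=(2, 2))
assert out.shape == (1, 1, 10, 10)
```
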
author: 260,335
date: 10.12.2019 14:10:57
timezone: 28,800
hash: 3167b3ddcdfb92f9636935ba004d29979c471d3e
message: test tupling of args
mods:
[ { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -36,7 +36,7 @@ from jax import jit, grad, device_put, jacfwd, jacrev, hessian\nfrom jax import api, lax\nfrom jax.core import Primitive\nfrom jax.interpreters import ad\n-from jax.interpreters.xla import DeviceArray\n+from jax.interpreters import xla\nfrom jax.abstract_arrays import concretization_err_msg\nfrom jax.lib import xla_bridge as xb\nfrom jax import test_util as jtu\n@@ -118,12 +118,27 @@ class APITest(jtu.JaxTestCase):\nf(1, 2, z=onp.zeros(3)) # doesn't crash\n- def test_jit_many_args(self):\n+ def test_jit_many_args_tuples(self):\n@jit\ndef f(args_list):\nreturn sum(args_list)\n- self.assertEqual(f(list(range(500))), sum(range(500)))\n+ make_tuple = xla.make_tuple\n+\n+ counts = [0]\n+ def make_tuple_and_count(*args, **kwargs):\n+ counts[0] += 1\n+ return make_tuple(*args, **kwargs)\n+\n+ try:\n+ xla.make_tuple = make_tuple_and_count\n+ ans = f(list(range(500)))\n+ finally:\n+ xla.make_tuple = make_tuple\n+\n+ expected = sum(range(500))\n+ self.assertEqual(counts[0], 1) # formed a tuple on dispatch\n+ self.assertEqual(ans, expected) # computed the correct result\ndef test_grad_of_jit(self):\nside = []\n@@ -268,7 +283,7 @@ class APITest(jtu.JaxTestCase):\ndef test_device_put_and_get(self):\nx = onp.arange(12.).reshape((3, 4)).astype(\"float32\")\ndx = api.device_put(x)\n- self.assertIsInstance(dx, DeviceArray)\n+ self.assertIsInstance(dx, xla.DeviceArray)\nx2 = api.device_get(dx)\nself.assertIsInstance(x2, onp.ndarray)\nassert onp.all(x == x2)\n@@ -791,11 +806,11 @@ class APITest(jtu.JaxTestCase):\ndef test_devicearray_repr(self):\nx = device_put(np.zeros(3))\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nrepr(x) # doesn't crash\nx = device_put(np.ones(3) + 1j * np.ones(3))\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nrepr(x) # doesn't crash\ndef test_devicearray_delete(self):\n@@ -1016,7 +1031,7 @@ class APITest(jtu.JaxTestCase):\ndef test_jit_device(self):\ndevice = xb.devices()[-1]\nx = api.jit(lambda x: x, device=device)(3.)\n- self.assertIsInstance(x, DeviceArray)\n+ self.assertIsInstance(x, xla.DeviceArray)\nself.assertEqual(x.device_buffer.device(), device)\ndef test_jit_of_noncallable(self):\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: test tupling of args

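The updated test pins down the dispatch behavior; a minimal sketch:

```python
from jax import jit

@jit
def f(args_list):
    return sum(args_list)

# With several hundred flat arguments, the operands are packed into a
# single XLA tuple parameter on dispatch (the test counts exactly one
# xla.make_tuple call) instead of 500 separate parameters.
assert f(list(range(500))) == sum(range(500))
```
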
author: 260,483
date: 11.12.2019 02:48:51
timezone: 0
hash: a73106b37cf2f0a6a0148e0f9ed1d27c7821449a
message: Avoid stack overflow when JITting a function that uses copy.copy or copy.deepcopy.
mods:
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -372,6 +372,12 @@ class Tracer(object):\ndef __repr__(self):\nreturn 'Traced<{}>with<{}>'.format(self.aval, self.trace)\n+ def __copy__(self):\n+ return self\n+\n+ def __deepcopy__(self, unused_memo):\n+ return self\n+\n# these can be used to set up forwarding of properties and instance methods from\n# Tracer instances to the underlying avals\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -17,6 +17,7 @@ from __future__ import division\nfrom __future__ import print_function\nimport collections\n+import copy\nfrom functools import partial\nimport unittest\nimport warnings\n@@ -1282,6 +1283,16 @@ class APITest(jtu.JaxTestCase):\npython_should_be_executing = False\napi.jit(f)(3)\n+ def test_jit_shallow_copy(self):\n+ def f(x):\n+ return copy.copy(x)\n+ api.jit(f)(1)\n+\n+ def test_jit_deep_copy(self):\n+ def f(x):\n+ return copy.deepcopy(x)\n+ api.jit(f)(1)\n+\ndef test_pmap_global_cache(self):\ndef f(x):\nassert python_should_be_executing\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: Avoid stack overflow when JITting a function that uses copy.copy or copy.deepcopy. (#1834)

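A minimal sketch of the two regression tests added here:

```python
import copy
from jax import jit

# Tracer.__copy__ / __deepcopy__ now return the tracer itself, so these
# no longer recurse into a stack overflow under jit.
jit(lambda x: copy.copy(x))(1)
jit(lambda x: copy.deepcopy(x))(1)
```
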
author: 260,335
date: 12.12.2019 05:14:57
timezone: 28,800
hash: fbde09f5677351431b91b66debc5f6d37a03f915
message: add tuple_args logic to xla primitive application
mods:
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -162,18 +162,20 @@ def xla_primitive_callable(prim, *abstract_args, **params):\nhandle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs.destructure()))\nelse:\nhandle_result = aval_to_result_handler(aval_out)\n- built_c = primitive_computation(prim, *abstract_args, **params)\n+ tuple_args = len(abstract_args) > 100\n+ built_c = primitive_computation(prim, tuple_args, *abstract_args, **params)\ncompiled = built_c.Compile(compile_options=xb.get_compile_options(),\nbackend=xb.get_backend(backend))\n- return partial(_execute_compiled_primitive, prim, compiled, backend, handle_result)\n+ return partial(_execute_compiled_primitive, prim, compiled, backend,\n+ tuple_args, handle_result)\n@cache()\n-def primitive_computation(prim, *avals, **params):\n+def primitive_computation(prim, tuple_args, *avals, **params):\nc = xb.make_computation_builder(\"primitive_computation_{}\".format(prim.name))\nc.SetOpMetadata(xc.OpMetadata(op_type=prim.name, op_name=str(params)))\nbackend = params.pop(\"backend\", None)\nplatform = xb.get_backend(backend).platform\n- xla_args = _xla_callable_args(c, avals, False)\n+ xla_args = _xla_callable_args(c, avals, tuple_args)\nif prim in backend_specific_translations[platform]:\nrule = backend_specific_translations[platform][prim]\nrule(c, *xla_args, **params) # return val set as a side-effect on c\n@@ -197,9 +199,15 @@ def primitive_computation(prim, *avals, **params):\n\"https://github.com/google/jax/issues\\n\")\nraise RuntimeError(msg)\n-def _execute_compiled_primitive(prim, compiled, backend, result_handler, *args):\n+def primitive_subcomputation(prim, *avals, **params):\n+ return primitive_computation(prim, False, *avals, **params)\n+\n+def _execute_compiled_primitive(prim, compiled, backend, tuple_args,\n+ result_handler, *args):\ndevice, = compiled.local_devices()\ninput_bufs = [device_put(x, device) for x in args if x is not token]\n+ if tuple_args:\n+ input_bufs = [make_tuple(input_bufs, device, backend)]\nout_buf = compiled.Execute(input_bufs)\nif FLAGS.jax_debug_nans:\ncheck_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -3393,7 +3393,7 @@ def _reduce_sum_translation_rule(c, operand, axes, input_shape):\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.Reduce(operand, c.Constant(onp.array(0, dtype)),\n- xla.primitive_computation(add_p, scalar, scalar),\n+ xla.primitive_subcomputation(add_p, scalar, scalar),\naxes)\ndef _reduce_sum_transpose_rule(cotangent, input_shape, axes):\n@@ -3417,7 +3417,7 @@ def _reduce_prod_translation_rule(c, operand, axes):\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.Reduce(operand, c.Constant(onp.array(1, dtype)),\n- xla.primitive_computation(mul_p, scalar, scalar),\n+ xla.primitive_subcomputation(mul_p, scalar, scalar),\naxes)\ndef _reduce_prod_jvp_rule(tangent, operand, axes):\n@@ -3463,7 +3463,7 @@ def _reduce_chooser_translation_rule(prim, identity, c, operand, axes):\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.Reduce(operand, c.Constant(identity(dtype)),\n- xla.primitive_computation(prim, scalar, scalar), axes)\n+ xla.primitive_subcomputation(prim, scalar, scalar), axes)\ndef _reduce_chooser_jvp_rule(g, ans, operand, axes):\n# TODO(mattjj): an alternative 
is to use variadic reduce to compute the chosen\n@@ -3500,7 +3500,7 @@ def _reduce_logical_shape_rule(operand, axes):\ndef _reduce_logical_translation_rule(prim, identity, c, operand, axes):\nscalar = ShapedArray((), onp.bool_)\nreturn c.Reduce(operand, c.Constant(identity(onp.bool_)),\n- xla.primitive_computation(prim, scalar, scalar), axes)\n+ xla.primitive_subcomputation(prim, scalar, scalar), axes)\n_reduce_or_translation_rule = partial(_reduce_logical_translation_rule,\nor_p, _get_max_identity)\n@@ -3563,7 +3563,7 @@ def _reduce_window_sum_translation_rule(c, operand, window_dimensions,\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.ReduceWindow(operand, c.Constant(onp.array(0, dtype)),\n- xla.primitive_computation(add_p, scalar, scalar),\n+ xla.primitive_subcomputation(add_p, scalar, scalar),\nwindow_dimensions, window_strides, padding)\ndef _reduce_window_sum_transpose_rule(cotangent, window_dimensions,\n@@ -3610,7 +3610,7 @@ def _reduce_window_chooser_translation_rule(\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.ReduceWindow(operand, c.Constant(identity(dtype)),\n- xla.primitive_computation(prim, scalar, scalar),\n+ xla.primitive_subcomputation(prim, scalar, scalar),\nwindow_dimensions, window_strides, padding)\ndef _reduce_window_chooser_jvp_rule(prim, g, operand, window_dimensions,\n@@ -3700,8 +3700,8 @@ def _select_and_scatter_add_translation(\npadding):\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\n- select = xla.primitive_computation(select_prim, scalar, scalar)\n- scatter = xla.primitive_computation(add_p, scalar, scalar)\n+ select = xla.primitive_subcomputation(select_prim, scalar, scalar)\n+ scatter = xla.primitive_subcomputation(add_p, scalar, scalar)\nzero = c.Constant(onp.array(0, dtype))\nreturn c.SelectAndScatter(operand, select, window_dimensions, window_strides,\npadding, source, zero, scatter)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -235,7 +235,7 @@ def _while_loop_translation_rule(c, axis_env, *args, **kwargs):\n_map(cond_c.Constant, cond_jaxpr.literals), (), *(x + z))\nif batched:\nscalar = ShapedArray((), onp.bool_)\n- or_ = xla.primitive_computation(lax.or_p, scalar, scalar)\n+ or_ = xla.primitive_subcomputation(lax.or_p, scalar, scalar)\npred = cond_c.Reduce(pred, cond_c.Constant(onp.array(False)), or_,\nlist(range(cond_jaxpr.out_avals[0].ndim)))\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_parallel.py", "new_path": "jax/lax/lax_parallel.py", "diff": "@@ -191,7 +191,7 @@ def _allreduce_split_axis_rule(prim, reducer, vals, which_mapped, axis_name):\ndef _allreduce_translation_rule(prim, c, val, replica_groups, backend=None):\ndtype = c.GetShape(val).numpy_dtype()\nscalar = ShapedArray((), dtype)\n- computation = xla.primitive_computation(prim, scalar, scalar, backend=backend)\n+ computation = xla.primitive_subcomputation(prim, scalar, scalar, backend=backend)\nreturn c.AllReduce(val, computation, replica_groups=replica_groups)\n# psum translation rule has special handling for complex dtypes\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: add tuple_args logic to xla primitive application

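This mirrors the earlier jit-dispatch tupling at the single-primitive level: once a primitive is applied to more than 100 abstract arguments (the threshold in the diff above), its operands go through one XLA tuple parameter. A hedged illustration; the wide concatenate is a hypothetical way to hit that path:

```python
import jax.numpy as jnp

# A single concatenate primitive with 150 operands, executed op-by-op,
# now routes its arguments through one tuple parameter when compiled.
xs = [jnp.ones(3) for _ in range(150)]
out = jnp.concatenate(xs)
assert out.shape == (450,)
```
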
author: 260,285
date: 13.12.2019 11:46:08
timezone: 0
hash: 9d12a24b63ac10943006d588f601718c135c12ef
message: Add categorical sampler
mods:
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -946,6 +946,24 @@ def _gumbel(key, shape, dtype):\nreturn -np.log(-np.log(\nuniform(key, shape, dtype, minval=np.finfo(dtype).eps, maxval=1.)))\n+def categorical(key, logits, axis=-1, shape=()):\n+ \"\"\"Sample random values from categorical distributions.\n+\n+ Args:\n+ key: a PRNGKey used as the random key.\n+ logits: Unnormalized log probabilities of the categorical distribution(s) to sample from.\n+ axis: Specifies the axis along which logits belong to the same categorical distribution.\n+ shape: Specifies how many samples to take per categorical distribution and in what shape to arrange them.\n+\n+ Returns:\n+ Samples of shape `shape + tuple(onp.delete(logits.shape, axis))`.\n+ The first len(shape) dimensions specify which sample,\n+ while the last len(logits.shape) - 1 dimensions make up the shape per sample.\n+ \"\"\"\n+ if axis >= 0:\n+ axis += len(shape)\n+\n+ return np.argmax(gumbel(key, shape + logits.shape, logits.dtype) + logits, axis=axis)\ndef laplace(key, shape=(), dtype=onp.float64):\n\"\"\"Sample Laplace random values with given shape and float dtype.\n" }, { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -26,7 +26,7 @@ import numpy as onp\nimport scipy.special\nimport scipy.stats\n-from jax import api\n+from jax import api, safe_zip, safe_map\nfrom jax import lax\nfrom jax import numpy as np\nfrom jax import random\n@@ -37,6 +37,8 @@ from jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n+zip = safe_zip\n+map = safe_map\nclass LaxRandomTest(jtu.JaxTestCase):\n@@ -187,6 +189,34 @@ class LaxRandomTest(jtu.JaxTestCase):\nfor samples in [uncompiled_samples, compiled_samples]:\nself._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"_p={}_{}\".format(p, dtype),\n+ \"p\": p, \"axis\": axis, \"dtype\": onp.dtype(dtype).name}\n+ for (p, axis) in [([.25] * 4, -1), ([[.25, .25], [.1, .9]], 1), ([[.25, .1], [.25, .9]], 0)]\n+ for dtype in [onp.float32, onp.float64]))\n+ def testCategorical(self, p, axis, dtype):\n+ key = random.PRNGKey(0)\n+ p = onp.array(p, dtype=dtype)\n+ logits = onp.log(p) - 42 # test unnormalized\n+ shape = (10000,)\n+ rand = lambda key, p: random.categorical(key, logits, shape=shape, axis=axis)\n+ crand = api.jit(rand)\n+\n+ uncompiled_samples = rand(key, p)\n+ compiled_samples = crand(key, p)\n+\n+ for samples in [uncompiled_samples, compiled_samples]:\n+ if axis < 0:\n+ axis += len(logits.shape)\n+\n+ assert samples.shape == shape + tuple(onp.delete(p.shape, axis))\n+\n+ if len(p.shape[:-1]) > 0:\n+ for cat_index, p_ in enumerate(p):\n+ self._CheckChiSquared(samples[:, cat_index], pmf=lambda x: p_[x])\n+ else:\n+ self._CheckChiSquared(samples, pmf=lambda x: p[x])\n+\ndef testBernoulliShape(self):\nkey = random.PRNGKey(0)\nx = random.bernoulli(key, onp.array([0.2, 0.3]), shape=(3, 2))\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: Add categorical sampler

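A hedged usage sketch of the new sampler (per the diff it implements the Gumbel-max trick over the given axis, and accepts unnormalized log probabilities):

```python
import jax.numpy as jnp
from jax import random

key = random.PRNGKey(0)
logits = jnp.log(jnp.array([0.1, 0.2, 0.3, 0.4])) - 42.0  # unnormalized is fine
# 10000 draws from one 4-way categorical; result shape is (10000,).
samples = random.categorical(key, logits, axis=-1, shape=(10000,))
```
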
author: 260,445
date: 13.12.2019 05:41:51
timezone: 28,800
hash: cc92bb64115b817f18a3ebf016170d9158845688
message: Improve the VJP structure mismatch errors.
mods:
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -1703,11 +1703,27 @@ def defvjp_all(fun, custom_vjp):\nargs = tree_unflatten(params['in_tree'], args_flat)\nout, vjp = custom_vjp(*args)\nout_flat, out_tree = tree_flatten(out)\n- assert out_tree == params['out_tree'] # TODO(mattjj): better error message\n+ if out_tree != params['out_tree']:\n+ msg = (\n+ \"First output of `custom_vjp`: {} doesn't match the structure of \"\n+ \"the output of `fun`: {}\\n\"\n+ \"{}\\n\"\n+ \"vs\\n\"\n+ \"{}\\n\".format(custom_vjp, fun, out_tree, params['out_tree'])\n+ )\n+ raise TypeError(msg)\ndef vjp_flat(*cts_flat):\ncts = tree_unflatten(out_tree, cts_flat)\nargs_cts_flat, in_tree2 = tree_flatten(vjp(cts))\n- assert in_tree == in_tree2 # TODO(mattjj): better error message\n+ if in_tree != in_tree2:\n+ msg = (\n+ \"Output of the `vjp`: {} doesn't match the structure of args of \"\n+ \"`fun`: {}\\n\"\n+ \"{}\\n\"\n+ \"vs\\n\"\n+ \"{}\\n\".format(vjp, fun, in_tree2, in_tree)\n+ )\n+ raise TypeError(msg)\nreturn [core.unit] * num_consts + list(args_cts_flat)\nreturn out_flat, vjp_flat\nad.defvjp_all(fun.prim, custom_transforms_vjp)\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: Improve the VJP structure mismatch errors. (#1854)

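A hedged sketch of the error this improves, using the `custom_transforms`/`defvjp_all` API of this era (since removed from JAX); the function here is hypothetical:

```python
from jax import custom_transforms, defvjp_all, grad

@custom_transforms
def f(x):
    return x ** 2

# The vjp below returns a bare value instead of a tuple of cotangents,
# so its pytree structure doesn't match f's arguments; this now raises
# a descriptive TypeError naming both structures instead of a bare assert.
defvjp_all(f, lambda x: (x ** 2, lambda g: 2 * x * g))  # note: not a tuple
grad(f)(3.0)  # TypeError: Output of the `vjp`: ... doesn't match ...
```
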
author: 260,285
date: 13.12.2019 15:00:32
timezone: 0
hash: 6178755281cd2ff7c48d26c584ac4e1e1f474d1c
message: Remove safe zip/map
mods:
[ { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -26,7 +26,7 @@ import numpy as onp\nimport scipy.special\nimport scipy.stats\n-from jax import api, safe_zip, safe_map\n+from jax import api\nfrom jax import lax\nfrom jax import numpy as np\nfrom jax import random\n@@ -37,8 +37,6 @@ from jax.config import config\nconfig.parse_flags_with_absl()\nFLAGS = config.FLAGS\n-zip = safe_zip\n-map = safe_map\nclass LaxRandomTest(jtu.JaxTestCase):\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: Remove safe zip/map

author: 260,335
date: 14.12.2019 08:16:01
timezone: 28,800
hash: 0ad8837f2fc69fd944c9106342675ae24ac5d1d3
message: tweak wording around installing jaxlib
mods:
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -354,9 +354,9 @@ Some standouts:\n## Installation\n-JAX is written in pure Python, but it depends on XLA, which needs to be compiled\n-and installed as the `jaxlib` package. Use the following instructions to\n-install a binary package with `pip`, or to build JAX from source.\n+JAX is written in pure Python, but it depends on XLA, which needs to be\n+installed as the `jaxlib` package. Use the following instructions to install a\n+binary package with `pip`, or to build JAX from source.\nWe support installing or building `jaxlib` on Linux (Ubuntu 16.04 or later) and\nmacOS (10.12 or later) platforms, but not yet Windows. We're not currently\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: tweak wording around installing jaxlib

author: 260,335
date: 14.12.2019 08:23:27
timezone: 28,800
hash: 764f007f9a921d0c774d789176f4d62b0c0a6cef
message: point pmap links to cloud tpu colabs
mods:
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -282,7 +282,7 @@ print(normalize(np.arange(4.)))\n# prints [0. 0.16666667 0.33333334 0.5 ]\n```\n-You can even [nest `pmap` functions](https://github.com/google/jax) for more\n+You can even [nest `pmap` functions](https://colab.sandbox.google.com/github/google/jax/blob/master/cloud_tpu_colabs/Pmap_Cookbook.ipynb#scrollTo=MdRscR5MONuN) for more\nsophisticated communication patterns.\nIt all composes, so you're free to differentiate through parallel computations:\n@@ -314,8 +314,9 @@ print(grad(lambda x: np.sum(f(x)))(x))\nWhen reverse-mode differentiating a `pmap` function (e.g. with `grad`), the\nbackward pass of the computation is parallelized just like the forward pass.\n-See the [SPMD Cookbook](https://github.com/google/jax) and the [SPMD MNIST\n-classifier from scratch\n+See the [SPMD\n+Cookbook](https://colab.sandbox.google.com/github/google/jax/blob/master/cloud_tpu_colabs/Pmap_Cookbook.ipynb)\n+and the [SPMD MNIST classifier from scratch\nexample](https://github.com/google/jax/blob/master/examples/spmd_mnist_classifier_fromscratch.py)\nfor more.\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: point pmap links to cloud tpu colabs

author: 260,335
date: 14.12.2019 08:34:01
timezone: 28,800
hash: 5c800367d11be981857caff29a81fe3053b87f51
message: mention cloud tpus in readme
mods:
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -29,7 +29,9 @@ executed. But JAX also lets you just-in-time compile your own Python functions\ninto XLA-optimized kernels using a one-function API,\n[`jit`](#compilation-with-jit). Compilation and automatic differentiation can be\ncomposed arbitrarily, so you can express sophisticated algorithms and get\n-maximal performance without leaving Python.\n+maximal performance without leaving Python. You can even program multiple GPUs\n+or TPU cores at once using [`pmap`](#spmd-programming-with-pmap), and\n+differentiate through the whole thing.\nDig a little deeper, and you'll see that JAX is really an extensible system for\n[composable function transformations](#transformations). Both\n@@ -37,7 +39,7 @@ Dig a little deeper, and you'll see that JAX is really an extensible system for\nare instances of such transformations. Others are\n[`vmap`](#auto-vectorization-with-vmap) for automatic vectorization and\n[`pmap`](#spmd-programming-with-pmap) for single-program multiple-data (SPMD)\n-parallel programming, with more to come.\n+parallel programming of multiple accelerators, with more to come.\nThis is a research project, not an official Google product. Expect bugs and\n[sharp edges](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html).\n@@ -72,11 +74,15 @@ perex_grads = jit(vmap(grad_fun, in_axes=(None, 0, 0))) # fast per-example grad\n* [Reference documentation](#reference-documentation)\n## Quickstart: Colab in the Cloud\n-Jump right in using a notebook in your browser, connected to a Google Cloud GPU. Here are some starter notebooks:\n+Jump right in using a notebook in your browser, connected to a Google Cloud GPU.\n+Here are some starter notebooks:\n- [The basics: NumPy on accelerators, `grad` for differentiation, `jit` for compilation, and `vmap` for vectorization](https://jax.readthedocs.io/en/latest/notebooks/quickstart.html)\n- [Training a Simple Neural Network, with TensorFlow Dataset Data Loading](https://colab.research.google.com/github/google/jax/blob/master/docs/notebooks/neural_network_with_tfds_data.ipynb)\n-And for a deeper dive into JAX:\n+**JAX now runs on Cloud TPUs.** To try out the preview, see the [Cloud TPU\n+Colabs](https://github.com/google/jax/tree/master/cloud_tpu_colabs).\n+\n+For a deeper dive into JAX:\n- [The Autodiff Cookbook, Part 1: easy and powerful automatic differentiation in JAX](https://jax.readthedocs.io/en/latest/notebooks/autodiff_cookbook.html)\n- [Common gotchas and sharp edges](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html)\n- See the [full list of\n" }, { "change_type": "MODIFY", "old_path": "cloud_tpu_colabs/README.md", "new_path": "cloud_tpu_colabs/README.md", "diff": "@@ -23,7 +23,7 @@ Solve the wave equation with `pmap`, and make cool movies! The spatial domain is\n![](https://raw.githubusercontent.com/google/jax/master/cloud_tpu_colabs/images/wave_movie.gif)\n### [JAX Demo](https://colab.research.google.com/github/google/jax/blob/master/cloud_tpu_colabs/NeurIPS_2019_JAX_demo.ipynb)\n-An overview of JAX presented at the Program Transformations for ML workshop at NeurIPS 2019. Covers basic numpy usage, grad, jit, vmap, and pmap.\n+An overview of JAX presented at the [Program Transformations for ML workshop at NeurIPS 2019](https://program-transformations.github.io/). Covers basic numpy usage, `grad`, `jit`, `vmap`, and `pmap`.\n## Performance notes\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: mention cloud tpus in readme

author: 260,335
date: 16.12.2019 09:32:55
timezone: 28,800
hash: c952ed4f5fb7c8df00fba4eefea060962fc277ad
message: update readme to clarify Cloud TPU preview status
mods:
[ { "change_type": "MODIFY", "old_path": "cloud_tpu_colabs/README.md", "new_path": "cloud_tpu_colabs/README.md", "diff": "-# Example Cloud TPU notebooks\n+# JAX Cloud TPU Preview\n-JAX now runs on Cloud TPUs!\n+JAX now runs on Cloud TPUs! **This is a preview**, and we're still working on it. Help us out by kicking the tires, and letting us know on [the issue tracker](https://github.com/google/jax/issues) if you run into any problems.\n+\n+## Example Cloud TPU notebooks\nThe following notebooks showcase how to use and what you can do with Cloud TPUs on Colab:\n" } ]
language: Python
license: Apache License 2.0
repo: google/jax
original_message: update readme to clarify Cloud TPU preview status

author: 260,270
date: 17.12.2019 02:03:17
timezone: 0
hash: 4af04cefa98dac7fc99ec6b5162580aa6750f5f2
message: Support dilated transposed convolutions in the conv_transpose op.
mods:
[ { "change_type": "MODIFY", "old_path": "jax/experimental/stax.py", "new_path": "jax/experimental/stax.py", "diff": "@@ -87,7 +87,7 @@ def GeneralConv(dimension_numbers, out_chan, filter_shape,\ndef apply_fun(params, inputs, **kwargs):\nW, b = params\nreturn lax.conv_general_dilated(inputs, W, strides, padding, one, one,\n- dimension_numbers) + b\n+ dimension_numbers=dimension_numbers) + b\nreturn init_fun, apply_fun\nConv = functools.partial(GeneralConv, ('NHWC', 'HWIO', 'NHWC'))\n@@ -115,7 +115,7 @@ def GeneralConvTranspose(dimension_numbers, out_chan, filter_shape,\ndef apply_fun(params, inputs, **kwargs):\nW, b = params\nreturn lax.conv_transpose(inputs, W, strides, padding,\n- dimension_numbers) + b\n+ dimension_numbers=dimension_numbers) + b\nreturn init_fun, apply_fun\nConv1DTranspose = functools.partial(GeneralConvTranspose, ('NHC', 'HIO', 'NHC'))\nConvTranspose = functools.partial(GeneralConvTranspose,\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -1214,8 +1214,8 @@ def _flip_axes(x, axes):\nreturn x\n-def conv_transpose(lhs, rhs, strides, padding, dimension_numbers=None,\n- transpose_kernel=False, precision=None):\n+def conv_transpose(lhs, rhs, strides, padding, rhs_dilation=None,\n+ dimension_numbers=None, transpose_kernel=False, precision=None):\n\"\"\"Convenience wrapper for calculating the N-d convolution \"transpose\".\nThis function directly calculates a fractionally strided conv rather than\n@@ -1228,6 +1228,9 @@ def conv_transpose(lhs, rhs, strides, padding, dimension_numbers=None,\npadding: 'SAME', 'VALID' will set as transpose of corresponding forward\nconv, or a sequence of `n` integer 2-tuples describing before-and-after\npadding for each `n` spatial dimension.\n+ rhs_dilation: `None`, or a sequence of `n` integers, giving the\n+ dilation factor to apply in each spatial dimension of `rhs`. RHS dilation\n+ is also known as atrous convolution.\ndimension_numbers: tuple of dimension descriptors as in\nlax.conv_general_dilated. 
Defaults to tensorflow convention.\ntranspose_kernel: if True flips spatial axes and swaps the input/output\n@@ -1260,15 +1263,18 @@ def conv_transpose(lhs, rhs, strides, padding, dimension_numbers=None,\nk_sdims = k_shape[2:]\n# Calculate correct output shape given padding and strides.\nif padding in {'SAME', 'VALID'}:\n+ if rhs_dilation is None:\n+ rhs_dilation = (1,) * (rhs.ndim - 2)\n+ effective_k_size = map(lambda k, r: (k-1) * r + 1, k_sdims, rhs_dilation)\npads = [_conv_transpose_padding(k, s, padding)\n- for k,s in zip(k_sdims.tolist(), strides)]\n+ for k,s in zip(effective_k_size, strides)]\nelse:\npads = padding\nif transpose_kernel:\n# flip spatial dims and swap input / output channel axes\nrhs = _flip_axes(rhs, onp.array(dn.rhs_spec)[2:])\nrhs = onp.swapaxes(rhs, dn.rhs_spec[0], dn.rhs_spec[1])\n- return conv_general_dilated(lhs, rhs, one, pads, strides, one, dn,\n+ return conv_general_dilated(lhs, rhs, one, pads, strides, rhs_dilation, dn,\nprecision=precision)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -490,19 +490,21 @@ class LaxTest(jtu.JaxTestCase):\n@staticmethod\ndef _conv_transpose_via_grad(data, kernel, strides, padding,\n- dimension_numbers=None):\n+ rhs_dilation=None, dimension_numbers=None):\n\"\"\"Helper method: calculates conv transpose via grad for testing.\"\"\"\nassert len(data.shape) == len(kernel.shape)\nnspatial = len(data.shape) - 2\none = (1,) * nspatial\n+ rhs_dilation = rhs_dilation or one\ndn = lax.conv_dimension_numbers(data.shape, kernel.shape,\ndimension_numbers)\nin_shape = onp.take(data.shape, dn.lhs_spec)\nin_sdims = in_shape[2:]\nk_shape = onp.take(kernel.shape, dn.rhs_spec)\nk_sdims = k_shape[2:]\n+ e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]\nif padding == 'VALID':\n- o_sdims = [in_sdims[i]*strides[i] + max(k_sdims[i]-strides[i],0)\n+ o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)\nfor i in range(nspatial)]\nelif padding == 'SAME':\no_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]\n@@ -512,7 +514,7 @@ class LaxTest(jtu.JaxTestCase):\no_layout = onp.take(onp.array(o_shape), out_spec_inv)\nplaceholder = onp.ones(o_layout, data.dtype)\nconv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,\n- one, one, dn)\n+ one, rhs_dilation, dn)\n_, g = api.vjp(conv, placeholder)\nreturn g(data)[0]\n@@ -528,11 +530,12 @@ class LaxTest(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n- \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n+ \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\njtu.format_shape_dtype_string(lhs_shape, dtype),\n- jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n+ jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n\"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n- \"strides\": strides, \"padding\": padding, \"rng_factory\": rng_factory, 'dspec': dspec}\n+ \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n+ \"rng_factory\": rng_factory, 'dspec': dspec}\nfor lhs_shape, rhs_shape in [\n((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose\nfor b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n@@ -540,9 +543,10 @@ class LaxTest(jtu.JaxTestCase):\nfor strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\nfor padding in [\"VALID\", \"SAME\"]\nfor dspec in [('NHWC', 'HWIO', 'NHWC'),]\n+ for rhs_dilation 
in [None, (2, 2)]\nfor rng_factory in [jtu.rand_small]))\ndef testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,\n- padding, dspec, rng_factory):\n+ padding, dspec, rhs_dilation, rng_factory):\nrng = rng_factory()\nargs_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\n@@ -550,11 +554,13 @@ class LaxTest(jtu.JaxTestCase):\n# lhs-grad of conv.\ndef fun(lhs, rhs):\nreturn lax.conv_transpose(lhs, rhs, strides, padding,\n+ rhs_dilation=rhs_dilation,\ndimension_numbers=dspec,\ntranspose_kernel=True)\ndef fun_via_grad(lhs, rhs):\nreturn self._conv_transpose_via_grad(lhs, rhs, strides, padding,\n+ rhs_dilation=rhs_dilation,\ndimension_numbers=dspec)\n# NB: below just checks for agreement, we're not calling numpy.\n@@ -562,11 +568,12 @@ class LaxTest(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n- \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n+ \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\njtu.format_shape_dtype_string(lhs_shape, dtype),\n- jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n+ jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n\"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n- \"strides\": strides, \"padding\": padding, \"rng_factory\": rng_factory, 'dspec': dspec}\n+ \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n+ \"rng_factory\": rng_factory, 'dspec': dspec}\nfor lhs_shape, rhs_shape in [\n((b, 9, 10, i), (k, k, i, j))\nfor b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n@@ -574,20 +581,23 @@ class LaxTest(jtu.JaxTestCase):\nfor strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]\nfor padding in [\"VALID\", \"SAME\"]\nfor dspec in [('NHWC', 'HWIO', 'NHWC'),]\n+ for rhs_dilation in [None, (2, 2)]\nfor rng_factory in [jtu.rand_small]))\ndef testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,\n- padding, dspec, rng_factory):\n+ padding, dspec, rhs_dilation, rng_factory):\nrng = rng_factory()\nargs_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\ndef fun(lhs, rhs):\nreturn lax.conv_transpose(lhs, rhs, strides, padding,\n+ rhs_dilation=rhs_dilation,\ndimension_numbers=dspec,\ntranspose_kernel=False)\ndef fun_via_grad(lhs, rhs):\nrhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\nreturn self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n+ rhs_dilation=rhs_dilation,\ndimension_numbers=dspec)\n# NB: below just checks for agreement, we're not calling numpy.\n@@ -595,11 +605,12 @@ class LaxTest(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\":\n- \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}\".format(\n+ \"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}\".format(\njtu.format_shape_dtype_string(lhs_shape, dtype),\n- jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),\n+ jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),\n\"lhs_shape\": lhs_shape, \"rhs_shape\": rhs_shape, \"dtype\": dtype,\n- \"strides\": strides, \"padding\": padding, \"rng_factory\": rng_factory, 'dspec': dspec}\n+ \"strides\": strides, \"padding\": padding, \"rhs_dilation\": rhs_dilation,\n+ \"rng_factory\": rng_factory, 'dspec': dspec}\nfor lhs_shape, rhs_shape in [\n((b, 10, i), (k, i, j))\nfor b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]\n@@ -607,20 +618,23 @@ class LaxTest(jtu.JaxTestCase):\nfor 
strides in [(1,), (2,), (3,)]\nfor padding in [\"VALID\", \"SAME\"]\nfor dspec in [('NHC', 'HIO', 'NHC'),]\n+ for rhs_dilation in [None, (2,)]\nfor rng_factory in [jtu.rand_small]))\ndef testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,\n- padding, dspec, rng_factory):\n+ padding, dspec, rhs_dilation, rng_factory):\nrng = rng_factory()\nargs_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]\ndef fun(lhs, rhs):\nreturn lax.conv_transpose(lhs, rhs, strides, padding,\ndimension_numbers=dspec,\n+ rhs_dilation=rhs_dilation,\ntranspose_kernel=False)\ndef fun_via_grad(lhs, rhs):\nrhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)\nreturn self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,\n+ rhs_dilation=rhs_dilation,\ndimension_numbers=dspec)\n# NB: below just checks for agreement, we're not calling numpy.\n" } ]
Python
Apache License 2.0
google/jax
Support dilated transposed convolutions in the conv_transpose op. (#1823) PiperOrigin-RevId: 284155973
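A minimal usage sketch of the rhs_dilation option this commit adds to lax.conv_transpose; the shapes and values below are illustrative assumptions, not taken from the commit:

    from jax import lax, random

    # NHWC input and HWIO kernel, matching the dimension numbers used in the tests
    lhs = random.normal(random.PRNGKey(0), (2, 9, 10, 3))  # (batch, H, W, in_chan)
    rhs = random.normal(random.PRNGKey(1), (3, 3, 3, 4))   # (kH, kW, in_chan, out_chan)
    out = lax.conv_transpose(lhs, rhs, strides=(2, 2), padding='SAME',
                             rhs_dilation=(2, 2),  # new in this commit: atrous kernel
                             dimension_numbers=('NHWC', 'HWIO', 'NHWC'))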
260,335
17.12.2019 13:14:10
28,800
7175c1dfe1617ccdf26f408eb01cc0d9fa9dc298
fix transpose bug in multivariate normal, add test fixes
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -39,8 +39,8 @@ from jax.lib import xla_bridge\nfrom jax.lib import cuda_prng\nfrom jax import core\nfrom jax import abstract_arrays\n+from jax.numpy.linalg import cholesky\nfrom jax.scipy.special import logit\n-from jax.scipy.linalg import cholesky\nfrom jax.interpreters import batching\nfrom jax.interpreters import xla\n" }, { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -409,6 +409,30 @@ class LaxRandomTest(jtu.JaxTestCase):\n# eigenvectors follow a standard normal distribution.\nself._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)\n+ def testMultivariateNormalCovariance(self):\n+ # test code based on https://github.com/google/jax/issues/1869\n+ N = 100000\n+ cov = np.array([[ 0.19, 0.00, -0.13, 0.00],\n+ [ 0.00, 0.29, 0.00, -0.23],\n+ [ -0.13, 0.00, 0.39, 0.00],\n+ [ 0.00, -0.23, 0.00, 0.49]])\n+ mean = np.zeros(4)\n+\n+ out_onp = onp.random.RandomState(0).multivariate_normal(mean, cov, N)\n+\n+ key = random.PRNGKey(0)\n+ out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,))\n+\n+ var_onp = out_onp.var(axis=0)\n+ var_jnp = out_jnp.var(axis=0)\n+ self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2,\n+ check_dtypes=False)\n+\n+ var_onp = onp.cov(out_onp, rowvar=False)\n+ var_jnp = onp.cov(out_jnp, rowvar=False)\n+ self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2,\n+ check_dtypes=False)\n+\ndef testIssue222(self):\nx = random.randint(random.PRNGKey(10003), (), 0, 0)\nassert x == 0\n" } ]
Python
Apache License 2.0
google/jax
fix transpose bug in multivariate normal, add test fixes #1869
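A sketch mirroring the regression test added above: after switching to jax.numpy.linalg.cholesky, the sample covariance should approximate the requested cov (the 2x2 matrix below is an illustrative assumption):

    import numpy as onp
    from jax import random

    cov = onp.array([[0.19, -0.13], [-0.13, 0.39]])
    mean = onp.zeros(2)
    samples = random.multivariate_normal(random.PRNGKey(0), mean=mean, cov=cov,
                                         shape=(100000,))
    print(onp.cov(samples, rowvar=False))  # now close to cov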
260,386
17.12.2019 19:38:32
18,000
9a0ed06647731abf8178df2c0cf1a040ef763b92
Add Delta orthogonal initialization
[ { "change_type": "MODIFY", "old_path": "jax/nn/initializers.py", "new_path": "jax/nn/initializers.py", "diff": "@@ -24,9 +24,10 @@ from functools import partial\nimport numpy as onp\n+import jax.numpy as np\nfrom jax import lax\n+from jax import ops\nfrom jax import random\n-import jax.numpy as np\ndef zeros(key, shape, dtype=np.float32): return np.zeros(shape, dtype)\ndef ones(key, shape, dtype=np.float32): return np.ones(shape, dtype)\n@@ -96,3 +97,31 @@ def orthogonal(scale=1.0, column_axis=-1):\nQ = np.moveaxis(Q, -1, column_axis)\nreturn scale * Q\nreturn init\n+\n+\n+def delta_orthogonal(scale=1.0, column_axis=-1):\n+ \"\"\"\n+ Construct an initializer for delta orthogonal kernels; see arXiv:1806.05393.\n+\n+ The shape must be 3D, 4D or 5D.\n+ \"\"\"\n+ def init(key, shape, dtype=np.float32):\n+ if len(shape) not in [3, 4, 5]:\n+ raise ValueError(\"Delta orthogonal initializer requires a 3D, 4D or 5D \"\n+ \"shape.\")\n+ if shape[-1] < shape[-2]:\n+ raise ValueError(\"`fan_in` must be less or equal than `fan_out`. \")\n+ ortho_init = orthogonal(scale=scale, column_axis=column_axis)\n+ ortho_matrix = ortho_init(key, shape[-2:])\n+ W = np.zeros(shape)\n+ if len(shape) == 3:\n+ k = shape[0]\n+ return ops.index_update(W, ops.index[(k-1)//2, ...], ortho_matrix)\n+ elif len(shape) == 4:\n+ k1, k2 = shape[:2]\n+ return ops.index_update(W, ops.index[(k1-1)//2, (k2-1)//2, ...], ortho_matrix)\n+ else:\n+ k1, k2, k3 = shape[:3]\n+ return ops.index_update(W, ops.index[(k1-1)//2, (k2-1)//2, (k3-1)//2, ...],\n+ ortho_matrix)\n+ return init\n" }, { "change_type": "MODIFY", "old_path": "tests/nn_test.py", "new_path": "tests/nn_test.py", "diff": "@@ -71,7 +71,8 @@ INITIALIZER_RECS = [\ninitializer_record(\"glorot_uniform\", nn.initializers.glorot_uniform()),\ninitializer_record(\"lecun_normal\", nn.initializers.lecun_normal()),\ninitializer_record(\"lecun_uniform\", nn.initializers.lecun_uniform()),\n- initializer_record(\"orthogonal\", nn.initializers.orthogonal(), 2, 2)\n+ initializer_record(\"orthogonal\", nn.initializers.orthogonal(), 2, 2),\n+ initializer_record(\"orthogonal\", nn.initializers.delta_orthogonal(), 4, 4)\n]\nclass NNInitializersTest(jtu.JaxTestCase):\n" } ]
Python
Apache License 2.0
google/jax
Add Delta orthogonal initialization (#1838)
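A short sketch of the new initializer; the 4D kernel shape is an illustrative assumption (the diff requires a 3D, 4D or 5D shape with fan_in <= fan_out):

    from jax import nn, random

    init = nn.initializers.delta_orthogonal()
    W = init(random.PRNGKey(0), (3, 3, 4, 8))  # (kH, kW, fan_in, fan_out)
    # W is zero except for an orthogonal block at the spatial center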
260,335
17.12.2019 17:49:06
28,800
286ec51f6152d8972176bdf0f6fd2b38f1aa87ff
make op-by-op computation follow arg placement
[ { "change_type": "MODIFY", "old_path": ".travis.yml", "new_path": ".travis.yml", "diff": "@@ -42,7 +42,9 @@ install:\npip install sklearn;\nfi\nscript:\n- - if [[ \"$JAX_ONLY_DOCUMENTATION\" == \"\" ]]; then\n+ - if [[ \"$MULTI_DEVICE_TEST\" != \"\" ]]; then\n+ XLA_FLAGS=--xla_force_host_platform_device_count=2 python tests/api_test.py MultiDeviceTest;\n+ elif [[ \"$JAX_ONLY_DOCUMENTATION\" == \"\" ]]; then\npytest -n 1 tests examples -W ignore ;\nelse\nsphinx-build -b html -D nbsphinx_execute=always docs docs/build/html;\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -149,32 +149,48 @@ for _t in dtypes.python_scalar_dtypes.keys():\ndef apply_primitive(prim, *args, **params):\n\"\"\"Impl rule that compiles and runs a single primitive 'prim' using XLA.\"\"\"\n- abstract_args = map(abstractify, args)\n- compiled_fun = xla_primitive_callable(prim, *abstract_args, **params)\n+ compiled_fun = xla_primitive_callable(prim, *map(arg_spec, args), **params)\nreturn compiled_fun(*args)\n+def arg_spec(x):\n+ aval = abstractify(x)\n+ try:\n+ return aval, x._device\n+ except:\n+ return aval, None\n+\n@cache()\n-def xla_primitive_callable(prim, *abstract_args, **params):\n- backend = params.get('backend', None)\n- aval_out = prim.abstract_eval(*abstract_args, **params)\n+def xla_primitive_callable(prim, *arg_specs, **params):\n+ avals, devices = unzip2(arg_specs)\n+ # TODO(mattjj): make Device hashable instead of handling pairs here\n+ try:\n+ device, = set(d for d in devices if d is not None) or (None,)\n+ except ValueError:\n+ msg = \"primitive arguments must be colocated on the same device, got {}\"\n+ names = (\"{}({})\".format(d[0].__name__, d[1]) for d in devices if d is not None)\n+ raise ValueError(msg.format(\", \".join(names)))\n+ else:\n+ all_devices = it.chain(xb.devices(), xb.devices('cpu'))\n+ device = device and next(d for d in all_devices if (type(d), d.id) == device)\n+ aval_out = prim.abstract_eval(*avals, **params)\nif prim.multiple_results:\nhandlers = tuple(map(aval_to_result_handler, aval_out))\nhandle_result = lambda xs: tuple(h(x) for h, x in zip(handlers, xs.destructure()))\nelse:\nhandle_result = aval_to_result_handler(aval_out)\n- tuple_args = len(abstract_args) > 100\n- built_c = primitive_computation(prim, tuple_args, *abstract_args, **params)\n- compiled = built_c.Compile(compile_options=xb.get_compile_options(),\n- backend=xb.get_backend(backend))\n- return partial(_execute_compiled_primitive, prim, compiled, backend,\n- tuple_args, handle_result)\n+ tuple_args = len(avals) > 100\n+ built_c = primitive_computation(prim, tuple_args, *avals, **params)\n+ options = xb.get_compile_options(device_assignment=(device.id,) if device else None)\n+ compiled = built_c.Compile(compile_options=options,\n+ backend=xb.get_device_backend(device))\n+ return partial(_execute_compiled_primitive, prim, compiled, tuple_args,\n+ handle_result)\n@cache()\ndef primitive_computation(prim, tuple_args, *avals, **params):\nc = xb.make_computation_builder(\"primitive_computation_{}\".format(prim.name))\nc.SetOpMetadata(xc.OpMetadata(op_type=prim.name, op_name=str(params)))\n- backend = params.pop(\"backend\", None)\n- platform = xb.get_backend(backend).platform\n+ platform = xb.get_backend(None).platform\nxla_args = _xla_callable_args(c, avals, tuple_args)\nif prim in backend_specific_translations[platform]:\nrule = backend_specific_translations[platform][prim]\n@@ -184,10 +200,10 @@ def primitive_computation(prim, 
tuple_args, *avals, **params):\nrule(c, *xla_args, **params) # return val set as a side-effect on c\nelif prim in reduction_translations:\nrule = reduction_translations[prim]\n- rule(c, *xla_args, backend=backend, **params) # return val set as a side-effect on c\n+ rule(c, *xla_args, **params) # return val set as a side-effect on c\nelif prim in initial_style_translations:\nrule = initial_style_translations[prim]\n- rule(c, AxisEnv(), *xla_args, backend=backend, **params) # side-effect on c\n+ rule(c, AxisEnv(), *xla_args, **params) # side-effect on c\nelse:\nraise NotImplementedError(\"XLA translation rule for {} not found\".format(prim))\nc.ClearOpMetadata()\n@@ -202,12 +218,12 @@ def primitive_computation(prim, tuple_args, *avals, **params):\ndef primitive_subcomputation(prim, *avals, **params):\nreturn primitive_computation(prim, False, *avals, **params)\n-def _execute_compiled_primitive(prim, compiled, backend, tuple_args,\n+def _execute_compiled_primitive(prim, compiled, tuple_args,\nresult_handler, *args):\ndevice, = compiled.local_devices()\ninput_bufs = [device_put(x, device) for x in args if x is not token]\nif tuple_args:\n- input_bufs = [make_tuple(input_bufs, device, backend)]\n+ input_bufs = [make_tuple(input_bufs, device, None)]\nout_buf = compiled.Execute(input_bufs)\nif FLAGS.jax_debug_nans:\ncheck_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf)\n@@ -292,7 +308,7 @@ def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args):\nreplica_groups = axis_groups(axis_env, new_params['axis_name'])\nnew_params = {k: new_params[k] for k in new_params if k != 'axis_name'}\nrule = parallel_translations[eqn.primitive]\n- ans = rule(c, *in_nodes, replica_groups=replica_groups, backend=backend, **new_params)\n+ ans = rule(c, *in_nodes, replica_groups=replica_groups, **new_params)\nelif eqn.primitive in call_translations:\nnew_params = check_backend_params(eqn.params, backend)\n(subjaxpr, const_bindings, freevar_bindings), = eqn.bound_subjaxprs\n@@ -660,12 +676,16 @@ class DeviceArray(DeviceValue):\n\"\"\"A DeviceArray is an ndarray backed by a single device memory buffer.\"\"\"\n# We don't subclass ndarray because that would open up a host of issues,\n# but lax_numpy.py overrides isinstance behavior and attaches ndarray methods.\n- __slots__ = [\"_npy_value\"]\n+ __slots__ = [\"_npy_value\", \"_device\"]\n__array_priority__ = 100\ndef __init__(self, aval, device_buffer):\nself.aval = aval\nself.device_buffer = device_buffer\n+ # TODO(mattjj): make Device hashable\n+ device = device_buffer.device()\n+ self._device = device and (type(device), device.id)\n+\nself._npy_value = None\nif not core.skip_checks:\nassert type(aval) is ShapedArray\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_parallel.py", "new_path": "jax/lax/lax_parallel.py", "diff": "@@ -188,16 +188,16 @@ def _allreduce_split_axis_rule(prim, reducer, vals, which_mapped, axis_name):\nx, = vals\nreturn prim.bind(reducer(x, [0]), axis_name=axis_name), False\n-def _allreduce_translation_rule(prim, c, val, replica_groups, backend=None):\n+def _allreduce_translation_rule(prim, c, val, replica_groups):\ndtype = c.GetShape(val).numpy_dtype()\nscalar = ShapedArray((), dtype)\n- computation = xla.primitive_subcomputation(prim, scalar, scalar, backend=backend)\n+ computation = xla.primitive_subcomputation(prim, scalar, scalar)\nreturn c.AllReduce(val, computation, replica_groups=replica_groups)\n# psum translation rule has special handling for complex dtypes\n-def _psum_translation_rule(c, 
val, replica_groups, backend=None):\n+def _psum_translation_rule(c, val, replica_groups):\npsum = partial(_allreduce_translation_rule, lax.add_p, c,\n- replica_groups=replica_groups, backend=backend)\n+ replica_groups=replica_groups)\ndtype = c.GetShape(val).numpy_dtype()\nif dtypes.issubdtype(dtype, onp.complexfloating):\nreturn c.Complex(psum(c.Real(val)), psum(c.Imag(val)))\n@@ -227,8 +227,7 @@ pxla.split_axis_rules[pmin_p] = \\\npartial(_allreduce_split_axis_rule, pmin_p, lax._reduce_min)\n-def _ppermute_translation_rule(c, x, replica_groups, perm, backend=None):\n- del backend\n+def _ppermute_translation_rule(c, x, replica_groups, perm):\ngroup_size = len(replica_groups[0])\nsrcs, dsts = unzip2((src % group_size, dst % group_size) for src, dst in perm)\nif not (len(srcs) == len(set(srcs)) and len(dsts) == len(set(dsts))):\n@@ -251,8 +250,7 @@ ad.deflinear(ppermute_p, _ppermute_transpose_rule)\nxla.parallel_translations[ppermute_p] = _ppermute_translation_rule\n-def _all_to_all_translation_rule(c, x, split_axis, concat_axis, replica_groups, backend=None):\n- del backend\n+def _all_to_all_translation_rule(c, x, split_axis, concat_axis, replica_groups):\nreturn c.AllToAll(x, split_axis, concat_axis, replica_groups)\ndef _all_to_all_split_axis_rule(vals, which_mapped, split_axis, concat_axis,\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1582,6 +1582,25 @@ class APITest(jtu.JaxTestCase):\nself.assertEqual(inner_jaxpr.eqns[0].primitive.name, 'mul')\nself.assertEqual(inner_jaxpr.eqns[1].primitive.name, 'add')\n+ def test_primitive_compilation_cache(self):\n+ primitive_computation = xla.primitive_computation\n+ xla.xla_primitive_callable.cache_clear() # clear op-by-op cache\n+\n+ count = [0]\n+ def primitive_computation_and_count(*args, **kwargs):\n+ count[0] += 1\n+ return primitive_computation(*args, **kwargs)\n+\n+\n+ try:\n+ xla.primitive_computation = primitive_computation_and_count\n+ lax.add(1, 2)\n+ lax.add(2, 3)\n+ finally:\n+ xla.primitive_computation = primitive_computation\n+\n+ self.assertEqual(count[0], 1)\n+\nclass JaxprTest(jtu.JaxTestCase):\n@@ -1625,5 +1644,65 @@ class JaxprTest(jtu.JaxTestCase):\n\"\"\")\n+class MultiDeviceTest(jtu.JaxTestCase):\n+\n+ def test_computation_follows_data(self):\n+ if len(jax.devices()) < 2:\n+ raise unittest.SkipTest(\"test requires multiple devices\")\n+\n+ x = jax.device_put(1, jax.devices()[0])\n+ y = jax.device_put(2, jax.devices()[0])\n+ z = x + y\n+ self.assertEqual(z, 3)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[0])\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = jax.device_put(2, jax.devices()[1])\n+ z = x + y\n+ self.assertEqual(z, 3)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = 4\n+ z = x + y\n+ self.assertEqual(z, 5)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\n+ x = jax.device_put(1, jax.devices()[0])\n+ y = jax.device_put(2, jax.devices()[1])\n+ self.assertRaisesRegex(\n+ ValueError,\n+ \"primitive arguments must be colocated on the same device\",\n+ lambda: x + y)\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = x.reshape((1, 1))\n+ self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n+\n+ def test_primitive_compilation_cache(self):\n+ if len(jax.devices()) < 2:\n+ raise unittest.SkipTest(\"test requires multiple devices\")\n+\n+ primitive_computation = xla.primitive_computation\n+ xla.xla_primitive_callable.cache_clear() # clear 
op-by-op cache\n+\n+ count = [0]\n+ def primitive_computation_and_count(*args, **kwargs):\n+ count[0] += 1\n+ return primitive_computation(*args, **kwargs)\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+\n+ try:\n+ xla.primitive_computation = primitive_computation_and_count\n+ y = lax.add(x, x)\n+ z = lax.add(y, y)\n+ finally:\n+ xla.primitive_computation = primitive_computation\n+\n+ self.assertEqual(count[0], 1)\n+ self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\nif __name__ == '__main__':\nabsltest.main()\n" }, { "change_type": "MODIFY", "old_path": "tests/multibackend_test.py", "new_path": "tests/multibackend_test.py", "diff": "@@ -121,7 +121,7 @@ class MultiBackendTest(jtu.JaxTestCase):\nz = fun(x, y)\nw = np.sin(z)\nself.assertEqual(z.device_buffer.platform(), backend)\n- self.assertEqual(w.device_buffer.platform(), jtu.device_under_test())\n+ self.assertEqual(w.device_buffer.platform(), backend)\nif __name__ == \"__main__\":\n" } ]
Python
Apache License 2.0
google/jax
make op-by-op computation follow arg placement
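A sketch of the behavior this commit enables, based on the tests it adds (assumes at least two devices, e.g. the forced multi-device CPU setup used in CI):

    import jax

    x = jax.device_put(1, jax.devices()[1])
    y = jax.device_put(2, jax.devices()[1])
    z = x + y  # op-by-op result now stays on devices()[1]
    assert z.device_buffer.device() == jax.devices()[1]
    # mixing devices now raises:
    # ValueError: primitive arguments must be colocated on the same device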
260,335
18.12.2019 11:18:33
28,800
8bd1a46ce7c7cc8e4827611531926ba200ad756e
revise handling of 'backend' values
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -172,6 +172,7 @@ def xla_primitive_callable(prim, *arg_specs, **params):\nelse:\nall_devices = it.chain(xb.devices(), xb.devices('cpu'))\ndevice = device and next(d for d in all_devices if (type(d), d.id) == device)\n+ backend = xb.get_device_backend(device)\naval_out = prim.abstract_eval(*avals, **params)\nif prim.multiple_results:\nhandlers = tuple(map(aval_to_result_handler, aval_out))\n@@ -179,18 +180,17 @@ def xla_primitive_callable(prim, *arg_specs, **params):\nelse:\nhandle_result = aval_to_result_handler(aval_out)\ntuple_args = len(avals) > 100\n- built_c = primitive_computation(prim, tuple_args, *avals, **params)\n+ built_c = primitive_computation(prim, backend, tuple_args, *avals, **params)\noptions = xb.get_compile_options(device_assignment=(device.id,) if device else None)\n- compiled = built_c.Compile(compile_options=options,\n- backend=xb.get_device_backend(device))\n- return partial(_execute_compiled_primitive, prim, compiled, tuple_args,\n- handle_result)\n+ compiled = built_c.Compile(compile_options=options, backend=backend)\n+ return partial(_execute_compiled_primitive, prim, compiled, backend,\n+ tuple_args, handle_result)\n@cache()\n-def primitive_computation(prim, tuple_args, *avals, **params):\n+def primitive_computation(prim, backend, tuple_args, *avals, **params):\nc = xb.make_computation_builder(\"primitive_computation_{}\".format(prim.name))\nc.SetOpMetadata(xc.OpMetadata(op_type=prim.name, op_name=str(params)))\n- platform = xb.get_backend(None).platform\n+ platform = xb.get_backend(backend).platform\nxla_args = _xla_callable_args(c, avals, tuple_args)\nif prim in backend_specific_translations[platform]:\nrule = backend_specific_translations[platform][prim]\n@@ -198,12 +198,9 @@ def primitive_computation(prim, tuple_args, *avals, **params):\nelif prim in translations:\nrule = translations[prim]\nrule(c, *xla_args, **params) # return val set as a side-effect on c\n- elif prim in reduction_translations:\n- rule = reduction_translations[prim]\n- rule(c, *xla_args, **params) # return val set as a side-effect on c\nelif prim in initial_style_translations:\nrule = initial_style_translations[prim]\n- rule(c, AxisEnv(), *xla_args, **params) # side-effect on c\n+ rule(c, AxisEnv(), *xla_args, backend=backend, **params) # side-effect on c\nelse:\nraise NotImplementedError(\"XLA translation rule for {} not found\".format(prim))\nc.ClearOpMetadata()\n@@ -216,14 +213,14 @@ def primitive_computation(prim, tuple_args, *avals, **params):\nraise RuntimeError(msg)\ndef primitive_subcomputation(prim, *avals, **params):\n- return primitive_computation(prim, False, *avals, **params)\n+ return primitive_computation(prim, None, False, *avals, **params)\n-def _execute_compiled_primitive(prim, compiled, tuple_args,\n+def _execute_compiled_primitive(prim, compiled, backend, tuple_args,\nresult_handler, *args):\ndevice, = compiled.local_devices()\ninput_bufs = [device_put(x, device) for x in args if x is not token]\nif tuple_args:\n- input_bufs = [make_tuple(input_bufs, device, None)]\n+ input_bufs = [make_tuple(input_bufs, device, backend)]\nout_buf = compiled.Execute(input_bufs)\nif FLAGS.jax_debug_nans:\ncheck_nans(prim, out_buf.destructure() if prim.multiple_results else out_buf)\n@@ -296,17 +293,13 @@ def jaxpr_subcomp(c, jaxpr, backend, axis_env, consts, freevars, *args):\nans = rule(c, *in_nodes, **eqn.params)\nelif eqn.primitive in translations:\nans = 
translations[eqn.primitive](c, *in_nodes, **eqn.params)\n- elif eqn.primitive in reduction_translations:\n- new_params = check_backend_params(eqn.params, backend)\n- ans = reduction_translations[eqn.primitive](c, *in_nodes, backend=backend, **new_params)\nelif eqn.primitive in initial_style_translations:\nnew_params = check_backend_params(eqn.params, backend)\nrule = initial_style_translations[eqn.primitive]\nans = rule(c, axis_env, *in_nodes, backend=backend, **new_params)\nelif eqn.primitive in parallel_translations:\n- new_params = check_backend_params(eqn.params, backend)\n- replica_groups = axis_groups(axis_env, new_params['axis_name'])\n- new_params = {k: new_params[k] for k in new_params if k != 'axis_name'}\n+ replica_groups = axis_groups(axis_env, eqn.params['axis_name'])\n+ new_params = {k: v for k, v in eqn.params.items() if k != 'axis_name'}\nrule = parallel_translations[eqn.primitive]\nans = rule(c, *in_nodes, replica_groups=replica_groups, **new_params)\nelif eqn.primitive in call_translations:\n@@ -431,7 +424,7 @@ def eqn_collectives(eqn):\ndef _xla_call_impl(fun, *args, **params):\ndevice = params['device']\n- backend = params.get('backend', None)\n+ backend = params['backend']\ncompiled_fun = _xla_callable(fun, device, backend, *map(abstractify, args))\ntry:\nreturn compiled_fun(*args)\n@@ -563,7 +556,7 @@ xla_call_p.def_custom_bind(xla_call)\nxla_call_p.def_impl(_xla_call_impl)\ndef _xla_call_translation_rule(c, jaxpr, axis_env, const_nodes, freevar_nodes,\n- in_nodes, device=None, backend=None):\n+ in_nodes, backend, device=None):\ndel device # Ignored.\nsubc = xb.make_computation_builder(\"jaxpr_subcomputation\") # TODO(mattjj): name\nconsts = [subc.ParameterWithShape(c.GetShape(n)) for n in const_nodes]\n@@ -578,7 +571,6 @@ ad.primitive_transposes[xla_call_p] = partial(ad.call_transpose, xla_call_p)\n### translation tables\ntranslations = {}\n-reduction_translations = {}\nparallel_translations = {}\ninitial_style_translations = {}\ncall_translations = {}\n@@ -853,7 +845,7 @@ ad.deflinear(device_put_p, lambda cotangent, **kwargs: [cotangent])\ndef _remat_translation_rule(c, jaxpr, axis_env, const_nodes, freevar_nodes, in_nodes,\n- backend=None, device=None, concrete=None):\n+ backend, device=None, concrete=None):\n# This looks a lot like _xla_call_translation_rule, except for a widget we use\n# to foil CSE.\ndel device, concrete # Unused.\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -1488,14 +1488,6 @@ def standard_primitive(shape_rule, dtype_rule, name, translation_rule=None):\nreturn prim\n-def standard_reduction_primitive(shape_rule, dtype_rule, name, translation_rule=None):\n- prim = Primitive(name)\n- prim.def_impl(partial(xla.apply_primitive, prim))\n- prim.def_abstract_eval(partial(standard_abstract_eval, prim, shape_rule, dtype_rule))\n- xla.reduction_translations[prim] = translation_rule or partial(standard_translate, name)\n- return prim\n-\n-\ndef standard_abstract_eval(prim, shape_rule, dtype_rule, *args, **kwargs):\nassert all(isinstance(arg, UnshapedArray) for arg in args), args\nleast_specialized = _max(\n@@ -3100,11 +3092,11 @@ def _scatter_shape_rule(operand, scatter_indices, updates, **kwargs):\ndef _scatter_translation_rule(c, operand, scatter_indices, updates,\nupdate_jaxpr, update_consts, dimension_numbers,\n- updates_shape, backend=None):\n+ updates_shape):\ndtype = c.GetShape(operand).numpy_dtype()\ninit_value = c.Constant(onp.array(0, dtype))\nupdate_computation = 
_reduction_computation(\n- c, update_jaxpr, backend, update_consts, init_value)\n+ c, update_jaxpr, update_consts, init_value)\nindices_shape = c.GetShape(scatter_indices)\nreturn c.Scatter(operand, scatter_indices, updates, update_computation,\n_scatter_dimensions_proto(indices_shape, dimension_numbers))\n@@ -3203,7 +3195,7 @@ def _scatter_batching_rule(\nscatter_dims_to_operand_dims=scatter_dims_to_operand_dims)\nreturn scatter_op(operand, scatter_indices, updates, dnums), 0\n-scatter_add_p = standard_reduction_primitive(\n+scatter_add_p = standard_primitive(\n_scatter_shape_rule, _scatter_dtype_rule, 'scatter-add',\n_scatter_translation_rule)\nad.primitive_jvps[scatter_add_p] = _scatter_add_jvp\n@@ -3212,14 +3204,14 @@ batching.primitive_batchers[scatter_add_p] = (\npartial(_scatter_batching_rule, scatter_add))\n# TODO(jlebar): Add derivatives.\n-scatter_min_p = standard_reduction_primitive(\n+scatter_min_p = standard_primitive(\n_scatter_shape_rule, _scatter_dtype_rule, 'scatter-min',\n_scatter_translation_rule)\nbatching.primitive_batchers[scatter_min_p] = (\npartial(_scatter_batching_rule, scatter_min))\n# TODO(jlebar): Add derivatives.\n-scatter_max_p = standard_reduction_primitive(\n+scatter_max_p = standard_primitive(\n_scatter_shape_rule, _scatter_dtype_rule, 'scatter-max',\n_scatter_translation_rule)\nbatching.primitive_batchers[scatter_max_p] = (\n@@ -3319,7 +3311,7 @@ def _scatter_jvp(primals, tangents, update_jaxpr, update_consts,\nreturn val_out, tangent_out\n-scatter_p = standard_reduction_primitive(\n+scatter_p = standard_primitive(\n_scatter_shape_rule, _scatter_dtype_rule, 'scatter',\n_scatter_translation_rule)\nad.primitive_jvps[scatter_p] = _scatter_jvp\n@@ -3330,9 +3322,8 @@ batching.primitive_batchers[scatter_p] = (\ndef _reduce_shape_rule(operand, init_value, computation, jaxpr, consts, dimensions):\nreturn tuple(onp.delete(operand.shape, dimensions))\n-def _reduce_translation_rule(c, operand, init_value, computation, jaxpr, consts, dimensions,\n- backend=None):\n- xla_computation = _reduction_computation(c, jaxpr, backend, consts, init_value)\n+def _reduce_translation_rule(c, operand, init_value, computation, jaxpr, consts, dimensions):\n+ xla_computation = _reduction_computation(c, jaxpr, consts, init_value)\nreturn c.Reduce(operand, init_value, xla_computation, dimensions)\ndef _reduce_batch_rule(batched_args, batch_dims, computation, jaxpr, consts, dimensions):\n@@ -3346,13 +3337,13 @@ def _reduce_batch_rule(batched_args, batch_dims, computation, jaxpr, consts, dim\nelse:\nraise NotImplementedError # loop and stack\n-def _reduction_computation(c, jaxpr, backend, consts, init_value):\n+def _reduction_computation(c, jaxpr, consts, init_value):\nshape = c.GetShape(init_value)\naxis_env = xla.AxisEnv() # no parallel primitives inside reductions\nsubc = xla_bridge.make_computation_builder(\"reduction_computation\")\nconsts = [subc.ParameterWithShape(const) for const in consts]\nargs = [subc.ParameterWithShape(shape), subc.ParameterWithShape(shape)]\n- out, = xla.jaxpr_subcomp(subc, jaxpr, backend, axis_env, consts, (), *args)\n+ out, = xla.jaxpr_subcomp(subc, jaxpr, None, axis_env, consts, (), *args)\nreturn subc.Build(out)\ndef _masking_defreducer(prim, identity):\n@@ -3374,7 +3365,7 @@ def _reducer_masking_rule(prim, identity, padded_vals, logical_shapes,\nmasked_val = select(mask, padded_val, identity(padded_shape, padded_val.dtype))\nreturn prim.bind(masked_val, axes=axes, input_shape=padded_shape)\n-reduce_p = standard_reduction_primitive(_reduce_shape_rule, 
_input_dtype, 'reduce',\n+reduce_p = standard_primitive(_reduce_shape_rule, _input_dtype, 'reduce',\n_reduce_translation_rule)\nbatching.primitive_batchers[reduce_p] = _reduce_batch_rule\n@@ -3419,8 +3410,7 @@ def _reduce_prod_translation_rule(c, operand, axes):\ndtype = c.GetShape(operand).numpy_dtype()\nscalar = ShapedArray((), dtype)\nreturn c.Reduce(operand, c.Constant(onp.array(1, dtype)),\n- xla.primitive_subcomputation(mul_p, scalar, scalar),\n- axes)\n+ xla.primitive_subcomputation(mul_p, scalar, scalar), axes)\ndef _reduce_prod_jvp_rule(tangent, operand, axes):\ninput_shape = onp.array(operand.shape)\n@@ -3528,8 +3518,8 @@ def _reduce_window_shape_rule(operand, init_value, jaxpr, consts,\nwindow_strides, padding)\ndef _reduce_window_translation_rule(c, operand, init_value, jaxpr, consts,\n- window_dimensions, window_strides, padding, backend=None):\n- xla_computation = _reduction_computation(c, jaxpr, backend, consts, init_value)\n+ window_dimensions, window_strides, padding):\n+ xla_computation = _reduction_computation(c, jaxpr, consts, init_value)\nreturn c.ReduceWindow(operand, init_value, xla_computation, window_dimensions,\nwindow_strides, padding)\n@@ -3550,7 +3540,7 @@ def _generic_reduce_window_batch_rule(\nwindow_dimensions, window_strides, padding)\n-reduce_window_p = standard_reduction_primitive(\n+reduce_window_p = standard_primitive(\n_reduce_window_shape_rule, _input_dtype, 'reduce_window',\n_reduce_window_translation_rule)\nbatching.primitive_batchers[reduce_window_p] = _generic_reduce_window_batch_rule\n@@ -3683,13 +3673,13 @@ def _select_and_scatter_shape_rule(\ndef _select_and_scatter_translation(\nc, operand, source, init_value, select_jaxpr, select_consts, scatter_jaxpr,\n- scatter_consts, window_dimensions, window_strides, padding, backend=None):\n- select = _reduction_computation(c, select_jaxpr, backend, select_consts, init_value)\n- scatter = _reduction_computation(c, scatter_jaxpr, backend, scatter_consts, init_value)\n+ scatter_consts, window_dimensions, window_strides, padding):\n+ select = _reduction_computation(c, select_jaxpr, select_consts, init_value)\n+ scatter = _reduction_computation(c, scatter_jaxpr, scatter_consts, init_value)\nreturn c.SelectAndScatter(operand, select, window_dimensions, window_strides,\npadding, source, init_value, scatter)\n-select_and_scatter_p = standard_reduction_primitive(\n+select_and_scatter_p = standard_primitive(\n_select_and_scatter_shape_rule, _input_dtype, 'select_and_scatter',\n_select_and_scatter_translation)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -213,7 +213,7 @@ def _while_loop_abstract_eval(*args, **kwargs):\nreturn kwargs[\"body_jaxpr\"].out_avals\ndef _while_loop_translation_rule(c, axis_env, *args, **kwargs):\n- backend = kwargs.pop('backend', None)\n+ backend = kwargs.pop('backend')\ncond_jaxpr, body_jaxpr, cond_nconsts, body_nconsts = split_dict(\nkwargs, [\"cond_jaxpr\", \"body_jaxpr\", \"cond_nconsts\", \"body_nconsts\"])\ncond_consts, body_consts, init_vals = split_list(args, [cond_nconsts, body_nconsts])\n" }, { "change_type": "MODIFY", "old_path": "jax/lib/xla_bridge.py", "new_path": "jax/lib/xla_bridge.py", "diff": "@@ -147,6 +147,11 @@ _backend_lock = threading.Lock()\n@util.memoize\ndef get_backend(platform=None):\n+ # TODO(mattjj,skyewm): remove this input polymorphism after we clean up how\n+ # 'backend' values are handled\n+ if isinstance(platform, xla_client.Backend):\n+ return platform\n+\nwith 
_backend_lock:\nbackend = _backends.get(FLAGS.jax_xla_backend)\nif backend is None:\n" } ]
Python
Apache License 2.0
google/jax
revise handling of 'backend' values
260,335
18.12.2019 11:20:42
28,800
00e621df757c1746bf8065ce19ec90b61b80337c
add multi_device_test in travis
[ { "change_type": "MODIFY", "old_path": ".travis.yml", "new_path": ".travis.yml", "diff": "@@ -13,6 +13,9 @@ matrix:\ninclude:\n- python: \"3.7\"\nenv: JAX_ENABLE_X64=1 JAX_ONLY_DOCUMENTATION=true\n+ include:\n+ - python: \"3.7\"\n+ env: MULTI_DEVICE_TEST=1\nbefore_install:\n- if [[ \"$TRAVIS_PYTHON_VERSION\" == \"2.7\" ]]; then\n" } ]
Python
Apache License 2.0
google/jax
add multi_device_test in travis
260,335
18.12.2019 11:26:58
28,800
2a394ce31ba61b587a9813cf72c7b320db202b81
move multi-device test into its own file
[ { "change_type": "MODIFY", "old_path": ".travis.yml", "new_path": ".travis.yml", "diff": "@@ -13,9 +13,6 @@ matrix:\ninclude:\n- python: \"3.7\"\nenv: JAX_ENABLE_X64=1 JAX_ONLY_DOCUMENTATION=true\n- include:\n- - python: \"3.7\"\n- env: MULTI_DEVICE_TEST=1\nbefore_install:\n- if [[ \"$TRAVIS_PYTHON_VERSION\" == \"2.7\" ]]; then\n@@ -45,9 +42,7 @@ install:\npip install sklearn;\nfi\nscript:\n- - if [[ \"$MULTI_DEVICE_TEST\" != \"\" ]]; then\n- XLA_FLAGS=--xla_force_host_platform_device_count=2 python tests/api_test.py MultiDeviceTest;\n- elif [[ \"$JAX_ONLY_DOCUMENTATION\" == \"\" ]]; then\n+ - if [[ \"$JAX_ONLY_DOCUMENTATION\" == \"\" ]]; then\npytest -n 1 tests examples -W ignore ;\nelse\nsphinx-build -b html -D nbsphinx_execute=always docs docs/build/html;\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1644,65 +1644,5 @@ class JaxprTest(jtu.JaxTestCase):\n\"\"\")\n-class MultiDeviceTest(jtu.JaxTestCase):\n-\n- def test_computation_follows_data(self):\n- if len(jax.devices()) < 2:\n- raise unittest.SkipTest(\"test requires multiple devices\")\n-\n- x = jax.device_put(1, jax.devices()[0])\n- y = jax.device_put(2, jax.devices()[0])\n- z = x + y\n- self.assertEqual(z, 3)\n- self.assertEqual(z.device_buffer.device(), jax.devices()[0])\n-\n- x = jax.device_put(1, jax.devices()[1])\n- y = jax.device_put(2, jax.devices()[1])\n- z = x + y\n- self.assertEqual(z, 3)\n- self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n-\n- x = jax.device_put(1, jax.devices()[1])\n- y = 4\n- z = x + y\n- self.assertEqual(z, 5)\n- self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n-\n- x = jax.device_put(1, jax.devices()[0])\n- y = jax.device_put(2, jax.devices()[1])\n- self.assertRaisesRegex(\n- ValueError,\n- \"primitive arguments must be colocated on the same device\",\n- lambda: x + y)\n-\n- x = jax.device_put(1, jax.devices()[1])\n- y = x.reshape((1, 1))\n- self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n-\n- def test_primitive_compilation_cache(self):\n- if len(jax.devices()) < 2:\n- raise unittest.SkipTest(\"test requires multiple devices\")\n-\n- primitive_computation = xla.primitive_computation\n- xla.xla_primitive_callable.cache_clear() # clear op-by-op cache\n-\n- count = [0]\n- def primitive_computation_and_count(*args, **kwargs):\n- count[0] += 1\n- return primitive_computation(*args, **kwargs)\n-\n- x = jax.device_put(1, jax.devices()[1])\n-\n- try:\n- xla.primitive_computation = primitive_computation_and_count\n- y = lax.add(x, x)\n- z = lax.add(y, y)\n- finally:\n- xla.primitive_computation = primitive_computation\n-\n- self.assertEqual(count[0], 1)\n- self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n- self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n-\nif __name__ == '__main__':\nabsltest.main()\n" }, { "change_type": "ADD", "old_path": null, "new_path": "tests/multi_device_test.py", "diff": "+# Copyright 2019 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# https://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the 
License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import os\n+from unittest import SkipTest\n+\n+from absl.testing import absltest\n+\n+import jax\n+from jax import lax\n+from jax import test_util as jtu\n+from jax.lib import xla_bridge\n+from jax.interpreters import xla\n+\n+from jax.config import config\n+config.parse_flags_with_absl()\n+\n+prev_xla_flags = None\n+\n+\n+# Run all tests with 8 CPU devices.\n+def setUpModule():\n+ global prev_xla_flags\n+ prev_xla_flags = os.getenv(\"XLA_FLAGS\")\n+ flags_str = prev_xla_flags or \"\"\n+ # Don't override user-specified device count, or other XLA flags.\n+ if \"xla_force_host_platform_device_count\" not in flags_str:\n+ os.environ[\"XLA_FLAGS\"] = (flags_str +\n+ \" --xla_force_host_platform_device_count=8\")\n+ # Clear any cached backends so new CPU backend will pick up the env var.\n+ xla_bridge.get_backend.cache_clear()\n+\n+# Reset to previous configuration in case other test modules will be run.\n+def tearDownModule():\n+ if prev_xla_flags is None:\n+ del os.environ[\"XLA_FLAGS\"]\n+ else:\n+ os.environ[\"XLA_FLAGS\"] = prev_xla_flags\n+ xla_bridge.get_backend.cache_clear()\n+\n+\n+class MultiDeviceTest(jtu.JaxTestCase):\n+\n+ def test_computation_follows_data(self):\n+ if len(jax.devices()) < 2:\n+ raise SkipTest(\"test requires multiple devices\")\n+\n+ x = jax.device_put(1, jax.devices()[0])\n+ y = jax.device_put(2, jax.devices()[0])\n+ z = x + y\n+ self.assertEqual(z, 3)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[0])\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = jax.device_put(2, jax.devices()[1])\n+ z = x + y\n+ self.assertEqual(z, 3)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = 4\n+ z = x + y\n+ self.assertEqual(z, 5)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\n+ x = jax.device_put(1, jax.devices()[0])\n+ y = jax.device_put(2, jax.devices()[1])\n+ self.assertRaisesRegex(\n+ ValueError,\n+ \"primitive arguments must be colocated on the same device\",\n+ lambda: x + y)\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = x.reshape((1, 1))\n+ self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n+\n+ def test_primitive_compilation_cache(self):\n+ if len(jax.devices()) < 2:\n+ raise SkipTest(\"test requires multiple devices\")\n+\n+ primitive_computation = xla.primitive_computation\n+ xla.xla_primitive_callable.cache_clear() # clear op-by-op cache\n+\n+ count = [0]\n+ def primitive_computation_and_count(*args, **kwargs):\n+ count[0] += 1\n+ return primitive_computation(*args, **kwargs)\n+\n+ x = jax.device_put(1, jax.devices()[1])\n+\n+ try:\n+ xla.primitive_computation = primitive_computation_and_count\n+ y = lax.add(x, x)\n+ z = lax.add(y, y)\n+ finally:\n+ xla.primitive_computation = primitive_computation\n+\n+ self.assertEqual(count[0], 1)\n+ self.assertEqual(y.device_buffer.device(), jax.devices()[1])\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\n+\n+if __name__ == '__main__':\n+ absltest.main()\n" } ]
Python
Apache License 2.0
google/jax
move multi-device test into its own file
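The new test module forces a multi-device CPU backend through an XLA flag; a sketch of the same trick in a standalone script (must run before the backend is first used, so a fresh process is assumed):

    import os
    os.environ["XLA_FLAGS"] = (os.environ.get("XLA_FLAGS", "") +
                               " --xla_force_host_platform_device_count=8")
    import jax
    print(len(jax.devices()))  # 8 host CPU devices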
260,296
18.12.2019 15:27:46
28,800
ec4b5445081b9ecd5d31c0b0e6e71a115c87125d
Stax: allow arbitrary dimension numbers in pooling layers.
[ { "change_type": "MODIFY", "old_path": "jax/experimental/stax.py", "new_path": "jax/experimental/stax.py", "diff": "@@ -167,18 +167,24 @@ Gelu = elementwise(gelu)\ndef _pooling_layer(reducer, init_val, rescaler=None):\n- def PoolingLayer(window_shape, strides=None, padding='VALID'):\n+ def PoolingLayer(window_shape, strides=None, padding='VALID', spec='NHWC'):\n\"\"\"Layer construction function for a pooling layer.\"\"\"\nstrides = strides or (1,) * len(window_shape)\nrescale = rescaler(window_shape, strides, padding) if rescaler else None\n- dims = (1,) + window_shape + (1,) # NHWC\n- strides = (1,) + strides + (1,)\n+\n+ non_spatial_axes = spec.index('N'), spec.index('C')\n+ for i in sorted(non_spatial_axes):\n+ window_shape = window_shape[:i] + (1,) + window_shape[i:]\n+ strides = strides[:i] + (1,) + strides[i:]\n+\ndef init_fun(rng, input_shape):\n- out_shape = lax.reduce_window_shape_tuple(input_shape, dims, strides, padding)\n+ out_shape = lax.reduce_window_shape_tuple(input_shape, window_shape,\n+ strides, padding)\nreturn out_shape, ()\ndef apply_fun(params, inputs, **kwargs):\n- out = lax.reduce_window(inputs, init_val, reducer, dims, strides, padding)\n- return rescale(out, inputs) if rescale else out\n+ out = lax.reduce_window(inputs, init_val, reducer, window_shape,\n+ strides, padding)\n+ return rescale(out, inputs, spec) if rescale else out\nreturn init_fun, apply_fun\nreturn PoolingLayer\nMaxPool = _pooling_layer(lax.max, -np.inf)\n@@ -186,10 +192,17 @@ SumPool = _pooling_layer(lax.add, 0.)\ndef _normalize_by_window_size(dims, strides, padding):\n- def rescale(outputs, inputs):\n- one = np.ones(inputs.shape[1:-1], dtype=inputs.dtype)\n+ def rescale(outputs, inputs, spec):\n+ non_spatial_axes = spec.index('N'), spec.index('C')\n+ spatial_shape = tuple(inputs.shape[i]\n+ for i in range(inputs.ndim)\n+ if i not in non_spatial_axes)\n+ one = np.ones(spatial_shape, dtype=inputs.dtype)\nwindow_sizes = lax.reduce_window(one, 0., lax.add, dims, strides, padding)\n- return outputs / window_sizes[..., np.newaxis]\n+ for i in sorted(non_spatial_axes):\n+ window_sizes = np.expand_dims(window_sizes, i)\n+\n+ return outputs / window_sizes\nreturn rescale\nAvgPool = _pooling_layer(lax.add, 0., _normalize_by_window_size)\n" }, { "change_type": "MODIFY", "old_path": "tests/stax_test.py", "new_path": "tests/stax_test.py", "diff": "@@ -138,20 +138,22 @@ class StaxTest(jtu.JaxTestCase):\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_window_shape={}_padding={}_strides={}_input_shape={}\"\n- \"_maxpool={}\"\n+ \"_maxpool={}_spec={}\"\n.format(window_shape, padding, strides, input_shape,\n- max_pool),\n+ max_pool, spec),\n\"window_shape\": window_shape, \"padding\": padding, \"strides\": strides,\n- \"input_shape\": input_shape, \"max_pool\": max_pool}\n+ \"input_shape\": input_shape, \"max_pool\": max_pool, \"spec\": spec}\nfor window_shape in [(1, 1), (2, 3)]\nfor padding in [\"VALID\"]\nfor strides in [None, (2, 1)]\n- for input_shape in [(2, 5, 6, 1)]\n- for max_pool in [False, True]))\n+ for input_shape in [(2, 5, 6, 4)]\n+ for max_pool in [False, True]\n+ for spec in [\"NHWC\", \"NCHW\", \"WHNC\", \"WHCN\"]))\ndef testPoolingShape(self, window_shape, padding, strides, input_shape,\n- max_pool):\n+ max_pool, spec):\nlayer = stax.MaxPool if max_pool else stax.AvgPool\n- init_fun, apply_fun = layer(window_shape, padding=padding, strides=strides)\n+ init_fun, apply_fun = layer(window_shape, padding=padding, strides=strides,\n+ 
spec=spec)\n_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)\n@parameterized.named_parameters(jtu.cases_from_list(\n" } ]
Python
Apache License 2.0
google/jax
Stax: allow arbitrary dimension numbers in pooling layers. (#1892)
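A sketch of the new spec argument on the stax pooling constructors; the input shape below is an illustrative assumption:

    from jax import random
    from jax.experimental import stax

    init_fun, apply_fun = stax.MaxPool((2, 3), padding='VALID', spec='NCHW')
    out_shape, _ = init_fun(random.PRNGKey(0), (2, 4, 5, 6))  # NCHW input shape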
260,335
18.12.2019 15:55:39
28,800
0ed842ed118ef361fb4396118d42cfa60da7279d
add another multi-device test
[ { "change_type": "MODIFY", "old_path": "tests/multi_device_test.py", "new_path": "tests/multi_device_test.py", "diff": "@@ -20,8 +20,10 @@ import os\nfrom unittest import SkipTest\nfrom absl.testing import absltest\n+import numpy as onp\nimport jax\n+import jax.numpy as np\nfrom jax import lax\nfrom jax import test_util as jtu\nfrom jax.lib import xla_bridge\n@@ -78,6 +80,12 @@ class MultiDeviceTest(jtu.JaxTestCase):\nself.assertEqual(z, 5)\nself.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+ x = jax.device_put(1, jax.devices()[1])\n+ y = np.ones(3)\n+ z = x + y\n+ self.assertAllClose(z, 1 + onp.ones(3), check_dtypes=False)\n+ self.assertEqual(z.device_buffer.device(), jax.devices()[1])\n+\nx = jax.device_put(1, jax.devices()[0])\ny = jax.device_put(2, jax.devices()[1])\nself.assertRaisesRegex(\n" } ]
Python
Apache License 2.0
google/jax
add another multi-device test
260,314
23.12.2019 23:02:08
18,000
c75bf4ab72503314a092c707f264dfb2552d530b
make beta sampler faster
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -618,7 +618,6 @@ def beta(key, a, b, shape=None, dtype=onp.float64):\ndtype = dtypes.canonicalize_dtype(dtype)\nreturn _beta(key, a, b, shape, dtype)\n-@partial(jit, static_argnums=(3, 4))\ndef _beta(key, a, b, shape, dtype):\nif shape is None:\nshape = lax.broadcast_shapes(onp.shape(a), onp.shape(b))\n@@ -628,6 +627,8 @@ def _beta(key, a, b, shape, dtype):\na = lax.convert_element_type(a, dtype)\nb = lax.convert_element_type(b, dtype)\nkey_a, key_b = split(key)\n+ a = np.broadcast_to(a, shape)\n+ b = np.broadcast_to(b, shape)\ngamma_a = gamma(key_a, a, shape, dtype)\ngamma_b = gamma(key_b, b, shape, dtype)\nreturn gamma_a / (gamma_a + gamma_b)\n" } ]
Python
Apache License 2.0
google/jax
make beta sampler faster
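With this change the a and b parameters are broadcast to shape before the two underlying gamma draws; a usage sketch with illustrative values:

    import jax.numpy as np
    from jax import random

    samples = random.beta(random.PRNGKey(0), a=0.5, b=np.ones(4), shape=(3, 4))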
260,335
24.12.2019 11:16:42
28,800
3bf12f6ecde817f8e8f2c8ec6c6d7bc9e1a2a0b0
update readme jaxlib version
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -395,7 +395,7 @@ PYTHON_VERSION=cp37 # alternatives: cp27, cp35, cp36, cp37\nCUDA_VERSION=cuda92 # alternatives: cuda90, cuda92, cuda100, cuda101\nPLATFORM=linux_x86_64 # alternatives: linux_x86_64\nBASE_URL='https://storage.googleapis.com/jax-releases'\n-pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.36-$PYTHON_VERSION-none-$PLATFORM.whl\n+pip install --upgrade $BASE_URL/$CUDA_VERSION/jaxlib-0.1.37-$PYTHON_VERSION-none-$PLATFORM.whl\npip install --upgrade jax # install jax\n```\n" } ]
Python
Apache License 2.0
google/jax
update readme jaxlib version
260,314
26.12.2019 22:43:06
18,000
edf0e61bc913d0978ca1a7248b9dfe2d05193450
support nested vmap for gamma sampler
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -45,6 +45,7 @@ from jax.interpreters import ad\nfrom jax.interpreters import batching\nfrom jax.interpreters import partial_eval as pe\nfrom jax.interpreters import xla\n+from jax.util import prod\ndef PRNGKey(seed):\n@@ -888,18 +889,18 @@ def _gamma_grad(sample, a):\nreturn grads.reshape(onp.shape(a))\ndef _gamma_impl(key, a):\n- if key.ndim == 2: # batch of keys and alphas\n- size = np.size(a[0])\n- key = vmap(split, in_axes=(0, None))(key, size)\n- else:\n- key = split(key, np.size(a))\n- alphas = np.reshape(a, -1)\n+ a_shape = np.shape(a)\n+ # split key to match the shape of a\n+ key_ndim = np.ndim(key) - 1\n+ key = np.reshape(key, (-1, 2))\n+ key = vmap(split, in_axes=(0, None))(key, prod(a_shape[key_ndim:]))\nkeys = np.reshape(key, (-1, 2))\n+ alphas = np.reshape(a, -1)\nif xla_bridge.get_backend().platform == 'cpu':\nsamples = lax.map(lambda args: _gamma_one(*args), (keys, alphas))\nelse:\nsamples = vmap(_gamma_one)(keys, alphas)\n- return np.reshape(samples, np.shape(a)),\n+ return np.reshape(samples, a_shape),\ndef _gamma_batching_rule(batched_args, batch_dims):\nk, a = batched_args\n" } ]
Python
Apache License 2.0
google/jax
support nested vmap for gamma sampler
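A sketch of the nested-vmap use this commit supports; the shapes are illustrative:

    import jax.numpy as np
    from jax import random, vmap

    keys = random.split(random.PRNGKey(0), 6).reshape(2, 3, 2)  # a (2, 3) grid of keys
    alphas = np.ones((2, 3))
    samples = vmap(vmap(random.gamma))(keys, alphas)  # one draw per (key, alpha) pair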
260,335
30.12.2019 07:36:36
28,800
f5723848d349488edd27f1a8ad663f9d999ddab0
fix error in autodiff cookbook: 3x not 2x
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/autodiff_cookbook.ipynb", "new_path": "docs/notebooks/autodiff_cookbook.ipynb", "diff": "\"source\": [\n\"The `jvp`-transformed function is evaluated much like the original function, but paired up with each primal value of type `a` it pushes along tangent values of type `T a`. For each primitive numerical operation that the original function would have applied, the `jvp`-transformed function executes a \\\"JVP rule\\\" for that primitive that both evaluates the primitive on the primals and applies the primitive's JVP at those primal values.\\n\",\n\"\\n\",\n- \"That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 2x the cost of just evaluating the function. Put another way, for a fixed primal point $x$, we can evaluate $v \\\\mapsto \\\\partial f(x) \\\\cdot v$ for about the same cost as evaluating $f$.\\n\",\n+ \"That evaluation strategy has some immediate implications about computational complexity: since we evaluate JVPs as we go, we don't need to store anything for later, and so the memory cost is independent of the depth of the computation. In addition, the FLOP cost of the `jvp`-transformed function is about 3x the cost of just evaluating the function (one unit of work for evaluating the original function, for example `sin(x)`; one unit for linearizing, like `cos(x)`; and one unit for applying the linearized function to a vector, like `cos_x * v`). Put another way, for a fixed primal point $x$, we can evaluate $v \\\\mapsto \\\\partial f(x) \\\\cdot v$ for about the same marginal cost as evaluating $f$.\\n\",\n\"\\n\",\n\"That memory complexity sounds pretty compelling! So why don't we see forward-mode very often in machine learning?\\n\",\n\"\\n\",\n\"\\n\",\n\"where we use `CT a` to denote the type for the cotangent space for `a`. In words, `vjp` takes as arguments a function of type `a -> b` and a point of type `a`, and gives back a pair consisting of a value of type `b` and a linear map of type `CT b -> CT a`.\\n\",\n\"\\n\",\n- \"This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \\\\mapsto (f(x), v^\\\\mathsf{T} \\\\partial f(x))$ is only about twice the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \\\\mathbb{R}^n \\\\to \\\\mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.\\n\",\n+ \"This is great because it lets us build Jacobian matrices one row at a time, and the FLOP cost for evaluating $(x, v) \\\\mapsto (f(x), v^\\\\mathsf{T} \\\\partial f(x))$ is only about three times the cost of evaluating $f$. In particular, if we want the gradient of a function $f : \\\\mathbb{R}^n \\\\to \\\\mathbb{R}$, we can do it in just one call. That's how `grad` is efficient for gradient-based optimization, even for objectives like neural network training loss functions on millions or billions of parameters.\\n\",\n\"\\n\",\n\"There's a cost, though: though the FLOPs are friendly, memory scales with the depth of the computation. 
Also, the implementation is traditionally more complex than that of forward-mode, though JAX has some tricks up its sleeve (that's a story for a future notebook!).\\n\",\n\"\\n\",\n" } ]
Python
Apache License 2.0
google/jax
fix error in autodiff cookbook: 3x not 2x
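Editor's note: an illustrative check of the corrected 3x accounting (this snippet is not from the notebook; `jnp.sin` is just a stand-in primitive). A JVP does roughly one unit of primal work, one unit of linearization, and one unit of applying the linear map:

    import jax
    import jax.numpy as jnp

    # For sin at x: compute sin(x) (primal), cos(x) (linearize), and
    # cos(x) * v (apply the linear map), about 3x one plain evaluation.
    y, t = jax.jvp(jnp.sin, (1.0,), (2.0,))   # y = sin(1), t = cos(1) * 2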
260,335
31.12.2019 10:38:45
28,800
82dbf9131105a0b3a22c191930bc42b15d420794
add tests for adapt make_jaxpr staging
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -1337,8 +1337,8 @@ def make_jaxpr(fun):\njax_args, in_tree = tree_flatten((args, kwargs))\njaxtree_fun, out_tree = flatten_fun(wrapped, in_tree)\nin_pvals = map(pv_like, jax_args)\n- jaxpr, out_pvals, consts = pe.trace_to_jaxpr(jaxtree_fun, in_pvals,\n- instantiate=True)\n+ jaxpr, out_pvals, consts = pe.trace_to_jaxpr(\n+ jaxtree_fun, in_pvals, instantiate=True, stage_out_calls=True)\nout_avals = map(raise_to_shaped, unzip2(out_pvals)[0])\nin_avals = tuple(raise_to_shaped(in_aval) for in_aval, _ in in_pvals)\ntyped_jaxpr = core.TypedJaxpr(jaxpr, consts, in_avals, out_avals)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -325,10 +325,10 @@ def partial_val_aval(pv, const):\nelse:\nraise TypeError(pv)\n-def trace_to_jaxpr(fun, pvals, **kwargs):\n+def trace_to_jaxpr(fun, pvals, instantiate=False, stage_out_calls=False):\n\"\"\"Traces a function, given abstract inputs, to a jaxpr.\"\"\"\n- instantiate = kwargs.pop('instantiate', False)\n- with new_master(JaxprTrace) as master:\n+ trace_type = StagingJaxprTrace if stage_out_calls else JaxprTrace\n+ with new_master(trace_type) as master:\nfun = trace_to_subjaxpr(fun, master, instantiate)\njaxpr, (out_pvals, consts, env) = fun.call_wrapped(pvals)\nassert not env\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -57,7 +57,8 @@ _reduce = six.moves.reduce\ndef _initial_style_jaxpr(fun, in_tree, in_avals):\nin_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\nfun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)\n- jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True)\n+ jaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True,\n+ stage_out_calls=True)\nout_avals = _map(raise_to_shaped, unzip2(out_pvals)[0])\nconst_avals = tuple(raise_to_shaped(core.get_aval(c)) for c in consts)\ntyped_jaxpr = core.TypedJaxpr(pe.closure_convert_jaxpr(jaxpr),\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -953,6 +953,14 @@ class APITest(jtu.JaxTestCase):\ndef test_partial_eval_lower(self):\n# this is a simplified model of a bug that arose when we first used @jit in\n# a jvp rule. it's in this file because we want to use make_jaxpr.\n+\n+ # NOTE(mattjj): I no longer understand what this was meant to test. My guess\n+ # is it was related to staging out the broadcast into a jaxpr to be\n+ # transposed, but after #1749 that's no longer a problem. 
After changing\n+ # make_jaxpr (and jit) to stage out sub-calls fully, this test started to\n+ # fail; I left it in as skipped because deleting tests feels wrong.\n+ raise unittest.SkipTest(\"obsolete test\")\n+\n@api.jit\ndef f(a, b, c):\na = lax.broadcast(a, (2,))\n" }, { "change_type": "MODIFY", "old_path": "tests/nn_test.py", "new_path": "tests/nn_test.py", "diff": "@@ -30,6 +30,7 @@ from jax.test_util import check_grads\nfrom jax import nn\nfrom jax import random\nimport jax\n+import jax.numpy as np\nfrom jax.config import config\nconfig.parse_flags_with_absl()\n@@ -51,6 +52,16 @@ class NNFunctionsTest(jtu.JaxTestCase):\nval = nn.elu(1e4)\nself.assertAllClose(val, 1e4, check_dtypes=False)\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\n+ def testEluMemory(self):\n+ # see https://github.com/google/jax/pull/1640\n+ jax.make_jaxpr(nn.elu)(np.ones((10 ** 12,))) # don't oom\n+\n+ @jtu.skip_on_devices(\"gpu\", \"tpu\")\n+ def testHardTanhMemory(self):\n+ # see https://github.com/google/jax/pull/1640\n+ jax.make_jaxpr(nn.hard_tanh)(np.ones((10 ** 12,))) # don't oom\n+\nInitializerRecord = collections.namedtuple(\n\"InitializerRecord\",\n[\"name\", \"initializer\", \"shapes\"])\n" } ]
Python
Apache License 2.0
google/jax
add tests for #1640, adapt make_jaxpr staging
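Editor's note: a hedged sketch of what staging out sub-calls buys (the small shape here is illustrative; the commit's tests use a huge one precisely to check that tracing no longer allocates):

    import jax
    import jax.numpy as jnp
    from jax import nn

    # Building the jaxpr is now purely abstract, so no large buffers are
    # materialized during tracing:
    print(jax.make_jaxpr(nn.elu)(jnp.ones((10,))))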
260,335
01.01.2020 11:43:44
28,800
b380ac1f7f7efd6a07edf102d9b1e2ee69535038
add faster reshape utility function
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -885,10 +885,18 @@ def reshape(a, newshape, order=\"C\"):\nexcept AttributeError:\nreturn _reshape(a, newshape, order=order)\n-def _reshape(a, newshape, order=\"C\"):\n- dummy_val = onp.broadcast_to(0, shape(a)) # zero strides\n- computed_newshape = onp.reshape(dummy_val, newshape).shape\n+def _compute_newshape(a, newshape):\n+ \"\"\"Fixes a -1 value in newshape, if present.\"\"\"\n+ # other errors, like having more than one -1, are caught downstream\n+ newsize = _prod(newshape)\n+ if newsize < 0:\n+ fix = a.size // -newsize\n+ return [d if d != -1 else fix for d in newshape]\n+ else:\n+ return newshape\n+def _reshape(a, newshape, order=\"C\"):\n+ computed_newshape = _compute_newshape(a, newshape)\nif order == \"C\":\nreturn lax.reshape(a, computed_newshape, None)\nelif order == \"F\":\n" } ]
Python
Apache License 2.0
google/jax
add faster reshape utility function
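Editor's note: a minimal usage sketch of the -1 handling in the new `_compute_newshape` helper (values are illustrative):

    import jax.numpy as jnp

    a = jnp.arange(12.)
    # A single -1 is resolved to a.size // (product of the known dims),
    # here 12 // 3 = 4:
    print(jnp.reshape(a, (3, -1)).shape)   # (3, 4)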
260,388
02.01.2020 12:57:24
0
fd6067471e08862547e76fd0bd80eaa3dbf69b53
Fix minor typo in Common_Gotchas_in_JAX.ipynb Moved misplaced backtick
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "new_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "diff": "\"\\n\",\n\"If you want to trace where NaNs are occurring in your functions or gradients, you can turn on the NaN-checker by:\\n\",\n\"- setting the `JAX_DEBUG_NANS=True` environment variable.\\n\",\n- \"- adding from jax.config `import config` and `config.update(\\\"jax_debug_nans\\\", True)` near the top of your main file\\n\",\n+ \"- adding `from jax.config import config` and `config.update(\\\"jax_debug_nans\\\", True)` near the top of your main file\\n\",\n\"- adding `from jax.config import config` and `config.parse_flags_with_absl()` to your main file, then set the option using a command-line flag like `--jax_debug_nans=True`.\\n\",\n\"\\n\",\n\"This will cause computations to error-out immediately on production of a NaN.\\n\",\n" } ]
Python
Apache License 2.0
google/jax
Fix minor typo in Common_Gotchas_in_JAX.ipynb Moved misplaced backtick
260,312
04.01.2020 14:26:35
-3,600
592f167e5bfd7f4d6ff67bdaba94008895c5b5fa
Implement numpy.gradient
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -873,6 +873,49 @@ def diff(a, n=1, axis=-1,):\nreturn a\n+@partial(jit, static_argnums=1)\n+def _gradient(a, axis):\n+ def gradient_along_axis(a, axis):\n+ a_swap = swapaxes(a, 0, axis)\n+ a_grad = concatenate((\n+ (a_swap[1] - a_swap[0])[newaxis],\n+ (a_swap[2:] - a_swap[:-2]) * 0.5,\n+ (a_swap[-1] - a_swap[-2])[newaxis]\n+ ), axis=0)\n+ return swapaxes(a_grad, 0, axis)\n+\n+ if axis is None:\n+ axis = range(a.ndim)\n+ else:\n+ if isinstance(axis, int):\n+ axis = (axis,)\n+ if not (isinstance(axis, tuple) or isinstance(axis, list)):\n+ raise ValueError(\"Give `axis` either as int or iterable\")\n+ axis = [_canonicalize_axis(i, a.ndim) for i in axis]\n+\n+ if min([s for i, s in enumerate(a.shape) if i in axis]) < 2:\n+ raise ValueError(\n+ \"Shape of array too small to calculate a numerical gradient\")\n+\n+ # TODO: use jax.lax loop tools if possible\n+ a_grad = [gradient_along_axis(a, ax) for ax in axis]\n+\n+ if len(axis) == 1:\n+ a_grad = a_grad[0]\n+\n+ return a_grad\n+\n+\n+@_wraps(onp.gradient)\n+def gradient(a, *args, **kwargs):\n+ axis = kwargs.pop(\"axis\", None)\n+ if not len(args) == 0:\n+ raise ValueError(\"*varargs not implemented\")\n+ if not len(kwargs) == 0:\n+ raise ValueError(\"Only `axis` keyword is implemented\")\n+ return _gradient(a, axis)\n+\n+\n@_wraps(onp.isrealobj)\ndef isrealobj(x):\nreturn not iscomplexobj(x)\n" } ]
Python
Apache License 2.0
google/jax
Implement numpy.gradient
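Editor's note: a small usage sketch of the new `jax.numpy.gradient` (values illustrative): central differences in the interior, one-sided differences at the edges.

    import jax.numpy as jnp

    y = jnp.array([1., 2., 4., 7., 11.])
    print(jnp.gradient(y))   # [1.  1.5 2.5 3.5 4. ]
    # Note: sample-distance *args and keywords other than `axis` raise
    # ValueError in this implementation.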
260,378
04.01.2020 18:21:30
28,800
ca155129322cc9fdb4e483c14d66d9be58e96152
added fft2 and ifft2, corresponding tests, and documentation links.
[ { "change_type": "MODIFY", "old_path": "docs/jax.numpy.rst", "new_path": "docs/jax.numpy.rst", "diff": "@@ -260,6 +260,8 @@ jax.numpy.fft\nifftn\nfft\nifft\n+ fft2\n+ ifft2\njax.numpy.linalg\n----------------\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/fft.py", "new_path": "jax/numpy/fft.py", "diff": "@@ -122,6 +122,28 @@ def ifft(a, n=None, axis=-1, norm=None):\nreturn _fft_core('ifft', xla_client.FftType.IFFT, a, s=n, axes=axis, norm=norm)\n+@_wraps(onp.fft.fft2)\n+def fft2(a, s=None, axes=(-2,-1), norm=None):\n+ if len(axes) != 2:\n+ raise ValueError(\n+ \"jax.np.fft.fft2 only supports 2 axes. \"\n+ \"Got axes = %s.\" % (list(axes))\n+ )\n+\n+ return _fft_core('fft', xla_client.FftType.FFT, a, s=s, axes=axes, norm=norm)\n+\n+\n+@_wraps(onp.fft.ifft2)\n+def ifft2(a, s=None, axes=(-2,-1), norm=None):\n+ if len(axes) != 2:\n+ raise ValueError(\n+ \"jax.np.fft.ifft2 only supports 2 axes. \"\n+ \"Got axes = %s.\" % (list(axes))\n+ )\n+\n+ return _fft_core('ifft', xla_client.FftType.IFFT, a, s=s, axes=axes, norm=norm)\n+\n+\nfor func in get_module_functions(onp.fft):\nif func.__name__ not in globals():\nglobals()[func.__name__] = _not_implemented(func)\n" }, { "change_type": "MODIFY", "old_path": "tests/fft_test.py", "new_path": "tests/fft_test.py", "diff": "@@ -159,7 +159,59 @@ class FftTest(jtu.JaxTestCase):\nValueError, lambda: func(rng([2, 3], dtype=onp.float64), axis=[2]))\nself.assertRaises(\nValueError, lambda: func(rng([2, 3], dtype=onp.float64), axis=[-3]))\n- pass\n+\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"_inverse={}_shape={}_axis={}\".format(\n+ inverse, jtu.format_shape_dtype_string(shape, dtype), axes),\n+ \"axes\": axes, \"shape\": shape, \"dtype\": dtype, \"rng_factory\": rng_factory,\n+ \"inverse\": inverse}\n+ for inverse in [False, True]\n+ for rng_factory in [jtu.rand_default]\n+ for dtype in all_dtypes\n+ for shape in [(16, 8, 4, 8), (16, 8, 4, 8, 4)]\n+ for axes in [(-2, -1), (0, 1), (1, 3), (-1, 2)]))\n+ def testFft2(self, inverse, shape, dtype, axes, rng_factory):\n+ rng = rng_factory()\n+ args_maker = lambda: (rng(shape, dtype),)\n+ np_op = np.fft.ifft2 if inverse else np.fft.fft2\n+ onp_op = onp.fft.ifft2 if inverse else onp.fft.fft2\n+ np_fn = lambda a: np_op(a, axes=axes)\n+ onp_fn = lambda a: onp_op(a, axes=axes)\n+ # Numpy promotes to complex128 aggressively.\n+ self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=False,\n+ tol=1e-4)\n+ self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)\n+ # Test gradient for differentiable types.\n+ if dtype in inexact_dtypes:\n+ tol = 0.15 # TODO(skye): can we be more precise?\n+ jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol)\n+ jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol)\n+\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"_inverse={}\".format(inverse),\n+ \"inverse\": inverse}\n+ for inverse in [False, True]))\n+ def testFft2Errors(self, inverse):\n+ rng = jtu.rand_default()\n+ name = 'ifft2' if inverse else 'fft2'\n+ func = np.fft.ifft2 if inverse else np.fft.fft2\n+\n+ self.assertRaisesRegex(\n+ ValueError,\n+ \"jax.np.fft.{} only supports 2 axes. \"\n+ \"Got axes = \\\\[0\\\\].\".format(name, name),\n+ lambda: func(rng([2, 3], dtype=onp.float64), axes=[0])\n+ )\n+ self.assertRaisesRegex(\n+ ValueError,\n+ \"jax.np.fft.{} only supports 2 axes. 
\"\n+ \"Got axes = \\\\[0, 1, 2\\\\].\".format(name, name),\n+ lambda: func(rng([2, 3, 3], dtype=onp.float64), axes=(0, 1, 2))\n+ )\n+ self.assertRaises(\n+ ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[2, 3]))\n+ self.assertRaises(\n+ ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[-3, -4]))\nif __name__ == \"__main__\":\n" } ]
Python
Apache License 2.0
google/jax
added fft2 and ifft2, corresponding tests, and documentation links. (#1939)
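Editor's note: a brief usage sketch of the new wrappers (array values illustrative):

    import jax.numpy as jnp

    x = jnp.ones((8, 8))
    X = jnp.fft.fft2(x)         # transforms the last two axes by default
    x_back = jnp.fft.ifft2(X)   # inverse; round-trips up to float error
    # Passing anything other than exactly two axes raises ValueError.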
260,335
06.01.2020 20:57:19
28,800
bb9cd233683565c5127f88bab5a51a504752cae2
tweak shape error message, add test
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -94,7 +94,11 @@ def _canonicalize_shape(shape):\nexcept TypeError:\npass\nmsg = (\"Shapes must be 1D sequences of concrete values of integer type, \"\n- \"got {}\")\n+ \"got {}.\")\n+ if any(isinstance(x, core.Tracer) and isinstance(core.get_aval(x), ShapedArray)\n+ and not isinstance(core.get_aval(x), ConcreteArray) for x in shape):\n+ msg += (\"\\nIf using `jit`, try using `static_argnums` or applying `jit` to \"\n+ \"smaller subfunctions.\")\nraise TypeError(msg.format(shape))\ndef _identity(x): return x\n@@ -1047,18 +1051,12 @@ def full(shape, fill_value, dtype=None):\n\"\"\"Returns an array of `shape` filled with `fill_value`.\nArguments:\n- shape: sequence of integers, describing the shape of the output array\n- fill_value: the value to fill the new array with\n+ shape: sequence of integers, describing the shape of the output array.\n+ fill_value: the value to fill the new array with.\ndtype: the type of the output array, or `None`. If not `None`, `fill_value`\nwill be cast to `dtype`.\n\"\"\"\n- try:\nshape = _canonicalize_shape(shape)\n- except TypeError as e:\n- msg = (\"Note: `full` requires shapes to be concrete. If using `jit`, try \"\n- \"using `static_argnums` or applying `jit` to smaller subfunctions.\")\n- raise TypeError(\"{}. {}\".format(e.message, msg))\n-\nif onp.shape(fill_value):\nmsg = \"full must be called with scalar fill_value, got fill_value.shape {}.\"\nraise TypeError(msg.format(onp.shape(fill_value)))\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -2565,6 +2565,19 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\npartial(lnp.inner, precision=HIGHEST),\nones_1d, ones_1d)\n+ def testZerosShapeErrors(self):\n+ # see https://github.com/google/jax/issues/1822\n+ self.assertRaisesRegex(\n+ TypeError,\n+ \"Shapes must be 1D sequences of concrete values of integer type.*\",\n+ lambda: lnp.zeros(1.))\n+ self.assertRaisesRegex(\n+ TypeError,\n+ \"Shapes must be 1D sequences of concrete values of integer type.*\\n\"\n+ \"If using `jit`, try using `static_argnums` or applying `jit` to smaller subfunctions.\",\n+ lambda: api.jit(lnp.zeros)(2))\n+\n+\n# Most grad tests are at the lax level (see lax_test.py), but we add some here\n# as needed for e.g. particular compound ops of interest.\n" } ]
Python
Apache License 2.0
google/jax
tweak shape error message, add test
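Editor's note: a sketch of the error path this commit improves, mirroring the new test; the workaround shown is the one the message itself suggests:

    import jax
    import jax.numpy as jnp

    # jax.jit(jnp.zeros)(2) now raises:
    #   TypeError: Shapes must be 1D sequences of concrete values of
    #   integer type ... If using `jit`, try using `static_argnums` ...
    jax.jit(jnp.zeros, static_argnums=0)(2)   # concrete shape: works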
260,335
06.01.2020 18:08:00
28,800
80d8ee4e3c6f1ed8f33c4a5dccb26285fa3aab0d
lower away grad_and_aux pure jaxpr tracers fixes
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -79,6 +79,7 @@ def jvp_subtrace_aux(master, primals, tangents):\naux_tracers = map(trace.full_raise, aux)\nout_primals, out_tangents = unzip2((t.primal, t.tangent) for t in ans_tracers)\naux_primals, _ = unzip2((t.primal, t.tangent) for t in aux_tracers)\n+ aux_primals = map(core.full_lower, aux_primals)\nyield (out_primals, out_tangents), aux_primals\ndef linearize(traceable, *primals, **kwargs):\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -34,7 +34,7 @@ if six.PY3:\nimport jax\nimport jax.numpy as np\nfrom jax import jit, grad, device_put, jacfwd, jacrev, hessian\n-from jax import api, lax\n+from jax import api, core, lax\nfrom jax.core import Primitive\nfrom jax.interpreters import ad\nfrom jax.interpreters import xla\n@@ -495,6 +495,17 @@ class APITest(jtu.JaxTestCase):\nself.assertEqual(g, grad(lambda x: x**3)(4.))\nself.assertEqual(aux, [4.**2, 4.])\n+ def test_grad_and_aux_no_tracers(self):\n+ # see https://github.com/google/jax/issues/1950\n+ def f(x):\n+ aux = dict(identity=x, p1=x+1)\n+ return x ** 2, aux\n+\n+ _, aux = jax.grad(f, has_aux=True)(3.)\n+ self.assertIsInstance(aux, dict)\n+ for val in aux.values():\n+ self.assertNotIsInstance(val, core.Tracer)\n+\ndef test_jvp_mismatched_arguments(self):\nself.assertRaisesRegex(\nTypeError,\n" } ]
Python
Apache License 2.0
google/jax
lower away grad_and_aux pure jaxpr tracers fixes #1950
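Editor's note: a minimal reproduction of the fixed behavior, adapted from the new test:

    import jax

    def f(x):
        aux = dict(identity=x, p1=x + 1)
        return x ** 2, aux

    g, aux = jax.grad(f, has_aux=True)(3.)
    # g == 6.0, and the aux values are now concrete, not leftover tracers.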
260,312
30.12.2019 23:20:08
-3,600
58ee0a8ea4a2699f723dc4fee3afe8887b91f9ec
Add np.iterable
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -391,6 +391,14 @@ def issubdtype(arg1, arg2): return dtypes.issubdtype(arg1, arg2)\n@_wraps(onp.isscalar)\ndef isscalar(num): return dtypes.is_python_scalar(num) or onp.isscalar(num)\n+@_wraps(onp.iterable)\n+def iterable(y):\n+ try:\n+ iter(y)\n+ except TypeError:\n+ return False\n+ return True\n+\n@_wraps(onp.result_type)\ndef result_type(*args):\nreturn dtypes.result_type(*args)\n" } ]
Python
Apache License 2.0
google/jax
Add np.iterable
260,312
04.01.2020 13:38:56
-3,600
0c9aacf1dac3804cad8044d517f009a15cf49654
Use numpy function directly instead of copying source code
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -391,13 +391,7 @@ def issubdtype(arg1, arg2): return dtypes.issubdtype(arg1, arg2)\n@_wraps(onp.isscalar)\ndef isscalar(num): return dtypes.is_python_scalar(num) or onp.isscalar(num)\n-@_wraps(onp.iterable)\n-def iterable(y):\n- try:\n- iter(y)\n- except TypeError:\n- return False\n- return True\n+iterable = onp.iterable\n@_wraps(onp.result_type)\ndef result_type(*args):\n" } ]
Python
Apache License 2.0
google/jax
Use numpy function directly instead of copying source code
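Editor's note: behavior sketch, unchanged by this refactor (`numpy.iterable` simply checks whether `iter()` succeeds):

    import jax.numpy as jnp

    jnp.iterable([1, 2, 3])   # True
    jnp.iterable(0)           # False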
260,335
06.01.2020 22:45:44
28,800
7da75587b5ccc708d7267aef16f182ed39e098d0
make control flow abstract eval to shaped level fixes
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -211,7 +211,7 @@ def while_loop(cond_fun, body_fun, init_val):\nreturn tree_unflatten(body_tree, outs)\ndef _while_loop_abstract_eval(*args, **kwargs):\n- return kwargs[\"body_jaxpr\"].out_avals\n+ return _map(raise_to_shaped, kwargs[\"body_jaxpr\"].out_avals)\ndef _while_loop_translation_rule(c, axis_env, *args, **kwargs):\nbackend = kwargs.pop('backend')\n@@ -363,7 +363,7 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun):\nreturn tree_unflatten(true_out_tree, out)\ndef _cond_abstract_eval(*args, **kwargs):\n- return kwargs[\"true_jaxpr\"].out_avals\n+ return _map(raise_to_shaped, kwargs[\"true_jaxpr\"].out_avals)\ndef _cond_translation_rule(c, axis_env, pred, *args, **kwargs):\nbackend = kwargs.pop(\"backend\", None)\n" } ]
Python
Apache License 2.0
google/jax
make control flow abstract eval to shaped level fixes #1919
260,312
07.01.2020 12:34:34
-3,600
b15a27a7fc4b8dce0056c53e5ec45d97ab40dee8
Tests for jax.numpy.gradient and minor tweaks
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -889,7 +889,7 @@ def _gradient(a, axis):\nelse:\nif isinstance(axis, int):\naxis = (axis,)\n- if not (isinstance(axis, tuple) or isinstance(axis, list)):\n+ if not isinstance(axis, tuple) and not isinstance(axis, list):\nraise ValueError(\"Give `axis` either as int or iterable\")\naxis = [_canonicalize_axis(i, a.ndim) for i in axis]\n@@ -910,7 +910,7 @@ def _gradient(a, axis):\ndef gradient(a, *args, **kwargs):\naxis = kwargs.pop(\"axis\", None)\nif not len(args) == 0:\n- raise ValueError(\"*varargs not implemented\")\n+ raise ValueError(\"*args (sample distances) not implemented\")\nif not len(kwargs) == 0:\nraise ValueError(\"Only `axis` keyword is implemented\")\nreturn _gradient(a, axis)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -2565,6 +2565,26 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\npartial(lnp.inner, precision=HIGHEST),\nones_1d, ones_1d)\n+ @parameterized.named_parameters(\n+ jtu.cases_from_list(\n+ {\"testcase_name\": (\"_shape={}_dtype={}\").format(shape, dtype),\n+ \"shape\": shape,\n+ \"dtype\": dtype, \"rng_factory\": rng_factory}\n+ for shape in [(10,), (10, 15), (10, 15, 20)]\n+ for dtype in inexact_dtypes\n+ for rng_factory in [jtu.rand_default]))\n+ def testGradient(self, shape, dtype, rng_factory):\n+ rng = rng_factory()\n+ args_maker = self._GetArgsMaker(rng, [shape], [dtype])\n+ ndim = len(shape)\n+ for num_axes in range(ndim):\n+ for axis in itertools.combinations(range(ndim), num_axes):\n+ lnp_fun = lambda y: lnp.gradient(y, axis=axis)\n+ onp_fun = lambda y: onp.gradient(y, axis=axis)\n+ self._CheckAgainstNumpy(\n+ onp_fun, lnp_fun, args_maker, check_dtypes=True)\n+ self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\n+\n# Most grad tests are at the lax level (see lax_test.py), but we add some here\n# as needed for e.g. particular compound ops of interest.\n" } ]
Python
Apache License 2.0
google/jax
Tests for jax.numpy.gradient and minor tweaks
260,312
08.01.2020 12:22:12
-3,600
48cb6af6b4125b5874c6ffc2586dbe9c5c1565f0
Support None and negative indices in slice_in_dim
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -1309,6 +1309,17 @@ def slice_in_dim(operand, start_index, limit_index, stride=1, axis=0):\nlimit_indices = list(operand.shape)\nstrides = [1] * operand.ndim\n+ # translate `None`\n+ len_axis = operand.shape[axis]\n+ start_index = start_index if start_index is not None else 0\n+ limit_index = limit_index if limit_index is not None else len_axis\n+\n+ # translate negative indices\n+ if start_index < 0:\n+ start_index = start_index + len_axis\n+ if limit_index < 0:\n+ limit_index = limit_index + len_axis\n+\naxis = int(axis)\nstart_indices[axis] = int(start_index)\nlimit_indices[axis] = int(limit_index)\n" } ]
Python
Apache License 2.0
google/jax
Support None and negative indices in slice_in_dim
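Editor's note: a usage sketch of the new index handling (array values illustrative):

    from jax import lax
    import jax.numpy as jnp

    x = jnp.arange(6.)
    lax.slice_in_dim(x, None, 3)    # like x[:3]
    lax.slice_in_dim(x, -2, None)   # like x[-2:]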
260,312
08.01.2020 12:31:45
-3,600
ac1aaedc4f2ec6acf352e7ccb718a9a4fb59ae06
Change from swapaxes to slice_in_dim in numpy.gradient
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -876,13 +876,13 @@ def diff(a, n=1, axis=-1,):\n@partial(jit, static_argnums=1)\ndef _gradient(a, axis):\ndef gradient_along_axis(a, axis):\n- a_swap = swapaxes(a, 0, axis)\n+ sliced = partial(lax.slice_in_dim, a, axis=axis)\na_grad = concatenate((\n- (a_swap[1] - a_swap[0])[newaxis],\n- (a_swap[2:] - a_swap[:-2]) * 0.5,\n- (a_swap[-1] - a_swap[-2])[newaxis]\n- ), axis=0)\n- return swapaxes(a_grad, 0, axis)\n+ sliced(1, 2) - sliced(0, 1),\n+ (sliced(2, None) - sliced(0, -2)) * 0.5,\n+ sliced(-1, None) - sliced(-2, -1),\n+ ), axis)\n+ return a_grad\nif axis is None:\naxis = range(a.ndim)\n" } ]
Python
Apache License 2.0
google/jax
Change from swapaxes to slice_in_dim in numpy.gradient
260,312
09.01.2020 08:46:36
-3,600
9ef9b38b4e2274608d7449269af9c29780482ed1
Put axis in named_parameters for numpy.gradient test
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -2560,22 +2560,22 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\n@parameterized.named_parameters(\njtu.cases_from_list(\n- {\"testcase_name\": (\"_shape={}_dtype={}\").format(shape, dtype),\n+ {\"testcase_name\": (\"_shape={}_axis={}_dtype={}\").format(shape, axis, dtype),\n\"shape\": shape,\n+ \"axis\": axis,\n\"dtype\": dtype, \"rng_factory\": rng_factory}\nfor shape in [(10,), (10, 15), (10, 15, 20)]\n+ for _num_axes in range(len(shape))\n+ for axis in itertools.combinations(range(len(shape)), _num_axes)\nfor dtype in inexact_dtypes\nfor rng_factory in [jtu.rand_default]))\n- def testGradient(self, shape, dtype, rng_factory):\n+ def testGradient(self, shape, axis, dtype, rng_factory):\nrng = rng_factory()\nargs_maker = self._GetArgsMaker(rng, [shape], [dtype])\n- ndim = len(shape)\n- for num_axes in range(ndim):\n- for axis in itertools.combinations(range(ndim), num_axes):\nlnp_fun = lambda y: lnp.gradient(y, axis=axis)\nonp_fun = lambda y: onp.gradient(y, axis=axis)\nself._CheckAgainstNumpy(\n- onp_fun, lnp_fun, args_maker, check_dtypes=True)\n+ onp_fun, lnp_fun, args_maker, check_dtypes=False)\nself._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)\ndef testZerosShapeErrors(self):\n" } ]
Python
Apache License 2.0
google/jax
Put axis in named_parameters for numpy.gradient test
260,285
10.01.2020 13:28:03
0
f36d858c4ef62f5ea2f9616701f33fb38b74b464
Require shape = sample_shape + batch_shape in random.categorical
[ { "change_type": "MODIFY", "old_path": "jax/random.py", "new_path": "jax/random.py", "diff": "@@ -946,25 +946,34 @@ def _gumbel(key, shape, dtype):\nreturn -np.log(-np.log(\nuniform(key, shape, dtype, minval=np.finfo(dtype).eps, maxval=1.)))\n-def categorical(key, logits, axis=-1, shape=()):\n+def categorical(key, logits, axis=-1, shape=None):\n\"\"\"Sample random values from categorical distributions.\nArgs:\nkey: a PRNGKey used as the random key.\n- logits: Unnormalized log probabilities of the categorical distribution(s) to sample from, so that\n- `softmax(logits, axis)` gives the corresponding probabilities.\n- axis: Specifies the axis along which logits belong to the same categorical distribution.\n- shape: Specifies how many samples to take per categorical distribution and in what shape to arrange them.\n+ logits: Unnormalized log probabilities of the categorical distribution(s) to sample from,\n+ so that `softmax(logits, axis)` gives the corresponding probabilities.\n+ axis: Axis along which logits belong to the same categorical distribution.\n+ shape: Optional, a tuple of nonnegative integers representing the result shape.\n+ Must be broadcast-compatible with ``onp.delete(logits.shape, axis)``.\n+ The default (None) produces a result shape equal to ``onp.delete(logits.shape, axis)``.\nReturns:\n- Samples of shape `shape + tuple(onp.delete(logits.shape, axis))`.\n- The first len(shape) dimensions specify which sample,\n- while the last len(logits.shape) - 1 dimensions make up the shape per sample.\n+ A random array with int dtype and shape given by ``shape`` if ``shape``\n+ is not None, or else ``onp.delete(logits.shape, axis)``.\n\"\"\"\n+\nif axis >= 0:\n- axis += len(shape)\n+ axis -= len(logits.shape)\n+\n+ batch_shape = tuple(onp.delete(logits.shape, axis))\n+ if shape is None:\n+ shape = batch_shape\n+ else:\n+ _check_shape(\"categorical\", shape, batch_shape)\n- return np.argmax(gumbel(key, shape + logits.shape, logits.dtype) + logits, axis=axis)\n+ sample_shape = shape[:len(shape)-len(batch_shape)]\n+ return np.argmax(gumbel(key, sample_shape + logits.shape, logits.dtype) + logits, axis=axis)\ndef laplace(key, shape=(), dtype=onp.float64):\n\"\"\"Sample Laplace random values with given shape and float dtype.\n" }, { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -188,15 +188,16 @@ class LaxRandomTest(jtu.JaxTestCase):\nself._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)\n@parameterized.named_parameters(jtu.cases_from_list(\n- {\"testcase_name\": \"_p={}_{}\".format(p, dtype),\n- \"p\": p, \"axis\": axis, \"dtype\": onp.dtype(dtype).name}\n+ {\"testcase_name\": \"_p={}_{}_{}\".format(p, dtype, sample_shape),\n+ \"p\": p, \"axis\": axis, \"dtype\": onp.dtype(dtype).name, 'sample_shape': sample_shape}\nfor (p, axis) in [([.25] * 4, -1), ([[.25, .25], [.1, .9]], 1), ([[.25, .1], [.25, .9]], 0)]\n+ for sample_shape in [(10000,), (5000, 2)]\nfor dtype in [onp.float32, onp.float64]))\n- def testCategorical(self, p, axis, dtype):\n+ def testCategorical(self, p, axis, dtype, sample_shape):\nkey = random.PRNGKey(0)\np = onp.array(p, dtype=dtype)\nlogits = onp.log(p) - 42 # test unnormalized\n- shape = (10000,)\n+ shape = sample_shape + tuple(onp.delete(logits.shape, axis))\nrand = lambda key, p: random.categorical(key, logits, shape=shape, axis=axis)\ncrand = api.jit(rand)\n@@ -207,7 +208,7 @@ class LaxRandomTest(jtu.JaxTestCase):\nif axis < 0:\naxis += len(logits.shape)\n- assert samples.shape == shape + 
tuple(onp.delete(p.shape, axis))\n+ assert samples.shape == shape\nif len(p.shape[:-1]) > 0:\nfor cat_index, p_ in enumerate(p):\n" } ]
Python
Apache License 2.0
google/jax
Require shape = sample_shape + batch_shape in random.categorical
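Editor's note: a sketch of the new shape contract (logits values illustrative): `shape` must now be `sample_shape + batch_shape`, where `batch_shape = onp.delete(logits.shape, axis)`.

    import jax.numpy as jnp
    from jax import random

    key = random.PRNGKey(0)
    logits = jnp.log(jnp.array([[.25, .75], [.9, .1]]))   # batch_shape (2,)
    samples = random.categorical(key, logits, shape=(1000, 2))
    samples.shape   # (1000, 2): 1000 draws per batch element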
260,393
10.01.2020 16:49:08
28,800
34ede6b72ec9a49386b55a55e40d7efbc46956c2
Added pshuffle
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax_parallel.py", "new_path": "jax/lax/lax_parallel.py", "diff": "@@ -113,6 +113,30 @@ def ppermute(x, axis_name, perm):\n\"\"\"\nreturn ppermute_p.bind(x, axis_name=axis_name, perm=tuple(perm))\n+def pshuffle(x, axis_name, perm):\n+ \"\"\"Perform a collective shuffle according to the permutation ``perm``.\n+\n+ This function is a simple wrapper around jax.lax.ppermute.\n+\n+ Args:\n+ x: array with a mapped axis named ``axis_name``.\n+ axis_name: hashable Python object used to name a pmapped axis (see the\n+ ``pmap`` docstring for more details).\n+ perm: list of of ints, representing the new order of the source indicies\n+ that encode how the mapped axis named ``axis_name`` should be\n+ shuffled. The integer values are treated as indices into the mapped axis\n+ ``axis_name``. Every int between 0 and ``len(perm)-1`` should be included.\n+\n+ Returns:\n+ An array with the same shape as ``x`` with slices along the axis\n+ ``axis_name`` gathered from ``x`` according to the permutation ``perm``.\n+ \"\"\"\n+ if set(perm) != set(range(len(perm))):\n+ raise AssertionError(\n+ \"Given `perm` does not represent a real permutation: {}\".format(perm))\n+ return ppermute(x, axis_name, list(zip(perm, range(len(perm)))))\n+\n+\ndef pswapaxes(x, axis_name, axis):\n\"\"\"Swap the pmapped axis ``axis_name`` with the unmapped axis ``axis``.\n" }, { "change_type": "MODIFY", "old_path": "tests/pmap_test.py", "new_path": "tests/pmap_test.py", "diff": "@@ -354,6 +354,28 @@ class PmapTest(jtu.JaxTestCase):\nexpected = onp.roll(onp.pi + onp.arange(device_count), 1)\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ @jtu.skip_on_devices(\"cpu\")\n+ def testCollectivePermuteCyclicWithPShuffle(self):\n+ device_count = xla_bridge.device_count()\n+ values = onp.arange(device_count)\n+ shift_right = [(i - 1) % device_count for i in range(device_count)]\n+ f = lambda x: lax.pshuffle(x, perm=shift_right, axis_name='i')\n+ expected = onp.transpose(values, shift_right)\n+ ans = onp.asarray(pmap(f, \"i\")(values))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\n+ @jtu.skip_on_devices(\"cpu\")\n+ def testPShuffleWithBadPerm(self):\n+ device_count = xla_bridge.device_count()\n+ bad_perm = list(range(device_count))\n+ bad_perm[0] = 1\n+ f = lambda x: lax.pshuffle(x, perm=bad_perm, axis_name='i')\n+ g = lambda: pmap(f, \"i\")(onp.arange(device_count))\n+ self.assertRaisesRegex(\n+ AssertionError,\n+ \"Given `perm` does not represent a real permutation: {}\".format(bad_perm),\n+ g)\n+\n@jtu.skip_on_devices(\"cpu\", \"gpu\")\ndef testPpermuteWithZipObject(self):\n# https://github.com/google/jax/issues/1703\n" } ]
Python
Apache License 2.0
google/jax
Added pshuffle (#1975)
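Editor's note: a usage sketch (assumes at least two non-CPU devices, as in the new test; the `perm` entries are source indices):

    import numpy as onp
    from jax import pmap, lax

    n = 2                                          # device count (assumed)
    shift_right = [(i - 1) % n for i in range(n)]  # [1, 0] for n == 2
    pmap(lambda x: lax.pshuffle(x, 'i', shift_right), 'i')(onp.arange(n))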
260,335
10.01.2020 20:44:24
28,800
9afa2c6b69695a015b1af7d1ccbb3546ddc002ae
fix broken link to trax, fixes
[ { "change_type": "MODIFY", "old_path": "cloud_tpu_colabs/Pmap_Cookbook.ipynb", "new_path": "cloud_tpu_colabs/Pmap_Cookbook.ipynb", "diff": "\"\\n\",\n\"To run this notebook with any parallelism, you'll need multiple XLA devices available, e.g. with a multi-GPU machine or a Cloud TPU.\\n\",\n\"\\n\",\n- \"The code in this notebook is simple. For an example of how to use these tools to do data-parallel neural network training, check out [the SPMD MNIST example](https://github.com/google/jax/blob/master/examples/spmd_mnist_classifier_fromscratch.py) or the much more capable [Trax library](https://github.com/tensorflow/trax/).\"\n+ \"The code in this notebook is simple. For an example of how to use these tools to do data-parallel neural network training, check out [the SPMD MNIST example](https://github.com/google/jax/blob/master/examples/spmd_mnist_classifier_fromscratch.py) or the much more capable [Trax library](https://github.com/google/trax/).\"\n]\n},\n{\n" } ]
Python
Apache License 2.0
google/jax
fix broken link to trax, fixes #1974
260,335
11.01.2020 16:31:59
28,800
a7eb5897d31f5f0fd2b346cfd26e51faa11e9c97
add mini-libraries readme
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -88,6 +88,14 @@ For a deeper dive into JAX:\n- See the [full list of\nnotebooks](https://github.com/google/jax/tree/master/docs/notebooks).\n+You can also take a look at [the mini-libraries in\n+`jax.experimental`](https://github.com/google/jax/tree/master/jax/experimental/README.md),\n+like [`stax` for building neural\n+networks](https://github.com/google/jax/tree/master/jax/experimental/README.md#neural-net-building-with-stax)\n+and [`optimizers` for first-order stochastic\n+optimization](https://github.com/google/jax/tree/master/jax/experimental/README.md#first-order-optimization),\n+or the [examples](https://github.com/google/jax/tree/master/examples).\n+\n## Transformations\nAt its core, JAX is an extensible system for transforming numerical functions.\n" } ]
Python
Apache License 2.0
google/jax
add mini-libraries readme
260,292
17.01.2020 07:05:15
-19,080
71323b5d023a33bf8c06d435c4a6e406dea3c0a8
changes loop_mjp(f, x, M) a minor change: we iterate over M and not S
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/autodiff_cookbook.ipynb", "new_path": "docs/notebooks/autodiff_cookbook.ipynb", "diff": "\"def loop_jmp(f, x, M):\\n\",\n\" # jvp immediately returns the primal and tangent values as a tuple,\\n\",\n\" # so we'll compute and select the tangents in a list comprehension\\n\",\n- \" return np.vstack([jvp(f, (W,), (si,))[1] for si in S])\\n\",\n+ \" return np.vstack([jvp(f, (W,), (si,))[1] for si in M])\\n\",\n\"\\n\",\n\"def vmap_jmp(f, x, M):\\n\",\n\" _jvp = lambda s: jvp(f, (W,), (s,))[1]\\n\",\n" } ]
Python
Apache License 2.0
google/jax
changes loop_mjp(f, x, M) (#2013) a minor change: we iterate over M and not S
260,299
17.01.2020 17:48:27
0
3974df0aeeeb89d74ee6832894ab153406626266
[docs] Pmap compiles functions with XLA
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -261,7 +261,9 @@ differentiation for fast Jacobian and Hessian matrix calculations in\nFor parallel programming of multiple accelerators, like multiple GPUs, use\n[`pmap`](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap).\nWith `pmap` you write single-program multiple-data (SPMD) programs, including\n-fast parallel collective communication operations.\n+fast parallel collective communication operations. Applying `pmap` will mean\n+that the function you write is compiled by XLA (similarly to `jit`), then\n+replicated and executed in parallel accross devices.\nHere's an example on an 8-GPU machine:\n" }, { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -721,12 +721,13 @@ def pmap(fun, axis_name=None, devices=None, backend=None, axis_size=None):\n\"\"\"Parallel map with support for collectives.\nThe purpose of ``pmap`` is to express single-program multiple-data (SPMD)\n- programs and execute them in parallel on XLA devices, such as multiple GPUs or\n- multiple TPU cores. Semantically it is comparable to ``vmap`` because both\n- transformations map a function over array axes, but where ``vmap`` vectorizes\n- functions by pushing the mapped axis down into primitive operations, ``pmap``\n- instead replicates the function and executes each replica on its own XLA\n- device in parallel.\n+ programs. Applying ``pmap`` to a function will compile the function with XLA\n+ (similarly to ``jit``), then execute it in parallel on XLA devices, such as\n+ multiple GPUs or multiple TPU cores. Semantically it is comparable to\n+ ``vmap`` because both transformations map a function over array axes, but\n+ where ``vmap`` vectorizes functions by pushing the mapped axis down into\n+ primitive operations, ``pmap`` instead replicates the function and executes\n+ each replica on its own XLA device in parallel.\nAnother key difference with ``vmap`` is that while ``vmap`` can only express\npure maps, ``pmap`` enables the use of parallel SPMD collective operations,\n" } ]
Python
Apache License 2.0
google/jax
[docs] Pmap compiles functions with XLA (#2021)
260,375
21.01.2020 13:26:36
28,800
2c80cd3d88c7393a9bfaa80da626be1a868c0049
Fix Sysml paper link in README The original URL was broken as sysml updated their links.
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -454,7 +454,7 @@ the year corresponds to the project's open-source release.\nA nascent version of JAX, supporting only automatic differentiation and\ncompilation to XLA, was described in a [paper that appeared at SysML\n-2018](https://www.sysml.cc/doc/2018/146.pdf). We're currently working on\n+2018](https://mlsys.org/Conferences/2019/doc/2018/146.pdf). We're currently working on\ncovering JAX's ideas and capabilities in a more comprehensive and up-to-date\npaper.\n" } ]
Python
Apache License 2.0
google/jax
Fix Sysml paper link in README (#2036) The original URL was broken as sysml updated their links.
260,335
22.01.2020 17:19:14
28,800
07260f6572ac436317558a4b78b4b0931e0b30ad
remove hashing methods from core.Literal
[ { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -129,10 +129,10 @@ class Literal(object):\nself.hash = None\ndef __hash__(self):\n- return id(self.val) if self.hash is None else self.hash\n+ assert False\ndef __eq__(self, other):\n- return self.val is other.val if self.hash is None else self.val == other.val\n+ assert False\ndef __repr__(self):\nif self.hash is None:\n@@ -627,7 +627,7 @@ def check_jaxpr(jaxpr):\nreturn \"\\njaxpr:\\n{}\\n\".format(jaxpr)\ndef read_env(env, v):\n- if v not in env and type(v) is not Literal:\n+ if type(v) is not Literal and v not in env:\nraise Exception(\"Variable '{}' not defined\".format(v) + context())\ndef write_env(env, v):\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -146,7 +146,7 @@ def backward_pass(jaxpr, consts, freevar_vals, args, cotangents_in):\ndef write_cotangent(v, ct):\n# assert v not in primal_env\n- if ct is not None:\n+ if ct is not None and type(v) is not Literal:\nct_env[v] = add_tangents(ct_env[v], ct) if v in ct_env else ct\ndef read_cotangent(v):\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -571,12 +571,12 @@ def _dce_jaxpr(typed_jaxpr, outputs):\nfor var, aval, output in zip(outvars, out_avals, outputs)]\nnew_outvars, new_out_avals = unzip2(out_pairs)\n- needed_vars = set(new_outvars)\n+ needed_vars = {v for v in new_outvars if type(v) is not Literal}\nnew_eqns = []\nfor eqn in jaxpr.eqns[::-1]:\nif set(eqn.outvars) & needed_vars:\nnew_eqns.append(eqn)\n- needed_vars.update(eqn.invars)\n+ needed_vars.update(v for v in eqn.invars if type(v) is not Literal)\nnew_eqns = new_eqns[::-1]\nnew_jaxpr = core.Jaxpr(jaxpr.constvars, jaxpr.freevars, jaxpr.invars,\n" } ]
Python
Apache License 2.0
google/jax
remove hashing methods from core.Literal (#2038)
260,335
23.01.2020 10:21:55
28,800
6b5ef898dc7eaa3295ef3556a523507620d05793
fix autodiff cookbook np.allclose tuple bug
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/autodiff_cookbook.ipynb", "new_path": "docs/notebooks/autodiff_cookbook.ipynb", "diff": "\"# multiply, rather than an outer loop over vector-matrix multiplies.\\n\",\n\"def vmap_mjp(f, x, M):\\n\",\n\" y, vjp_fun = vjp(f, x)\\n\",\n- \" return vmap(vjp_fun)(M)\\n\",\n+ \" outs, = vmap(vjp_fun)(M)\\n\",\n+ \" return outs\\n\",\n\"\\n\",\n\"key = random.PRNGKey(0)\\n\",\n\"num_covecs = 128\\n\",\n\"def loop_jmp(f, x, M):\\n\",\n\" # jvp immediately returns the primal and tangent values as a tuple,\\n\",\n\" # so we'll compute and select the tangents in a list comprehension\\n\",\n- \" return np.vstack([jvp(f, (W,), (si,))[1] for si in M])\\n\",\n+ \" return np.vstack([jvp(f, (W,), (mi,))[1] for mi in M])\\n\",\n\"\\n\",\n\"def vmap_jmp(f, x, M):\\n\",\n\" _jvp = lambda s: jvp(f, (W,), (s,))[1]\\n\",\n" } ]
Python
Apache License 2.0
google/jax
fix autodiff cookbook np.allclose tuple bug (#2055)
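Editor's note: a minimal illustration of the unpacking the fix relies on: `vjp` returns a tuple of cotangents, one per primal argument (function and values illustrative):

    import jax
    import jax.numpy as jnp

    y, vjp_fun = jax.vjp(jnp.sin, jnp.arange(3.))
    (ct,) = vjp_fun(jnp.ones(3))   # one-element tuple, hence `outs, = ...`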
260,335
23.01.2020 10:25:49
28,800
a61bcff54d3167ac22d098341de309bf22c128b4
update readme gotchas about pure functions
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -343,22 +343,23 @@ we highly recommend reading the [Gotchas\nNotebook](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html).\nSome standouts:\n+1. JAX transformations only work on [pure functions](https://en.wikipedia.org/wiki/Pure_function), which don't have side-effects and respect [referential transparency](https://en.wikipedia.org/wiki/Referential_transparency) (i.e. object identity testing with `is` isn't preserved). If you use a JAX transformation on an impure Python function, you might see an error like `Exception: Can't lift Traced...` or `Exception: Different traces at same level`.\n1. [In-place mutating updates of\narrays](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-In-Place-Updates), like `x[i] += y`, aren't supported, but [there are functional alternatives](https://jax.readthedocs.io/en/latest/jax.ops.html). Under a `jit`, those functional alternatives will reuse buffers in-place automatically.\n-2. [Random numbers are\n+1. [Random numbers are\ndifferent](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Random-Numbers), but for [good reasons](https://github.com/google/jax/blob/master/design_notes/prng.md).\n-3. If you're looking for [convolution\n+1. If you're looking for [convolution\noperators](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Convolutions),\nthey're in the `jax.lax` package.\n-4. JAX enforces single-precision (32-bit, e.g. `float32`) values by default, and\n+1. JAX enforces single-precision (32-bit, e.g. `float32`) values by default, and\n[to enable\ndouble-precision](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#Double-(64bit)-precision)\n(64-bit, e.g. `float64`) one needs to set the `jax_enable_x64` variable at\nstartup (or set the environment variable `JAX_ENABLE_X64=True`).\n-5. Some of NumPy's dtype promotion semantics involving a mix of Python scalars\n+1. Some of NumPy's dtype promotion semantics involving a mix of Python scalars\nand NumPy types aren't preserved, namely `np.add(1, np.array([2],\nnp.float32)).dtype` is `float64` rather than `float32`.\n-6. Some transformations, like `jit`, [constrain how you can use Python control\n+1. Some transformations, like `jit`, [constrain how you can use Python control\nflow](https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#%F0%9F%94%AA-Control-Flow).\nYou'll always get loud errors if something goes wrong. You might have to use\n[`jit`'s `static_argnums`\n" } ]
Python
Apache License 2.0
google/jax
update readme gotchas about pure functions
260,393
27.01.2020 15:44:33
28,800
82d6c6ce518d4d6925c42f3cd2cc1fd1a2d15146
Added better error messages. Added better error messages for when a user accidentally uses a Python cast instead of the `jax.numpy` casting.
[ { "change_type": "MODIFY", "old_path": "jax/abstract_arrays.py", "new_path": "jax/abstract_arrays.py", "diff": "@@ -24,17 +24,18 @@ from . import dtypes\nfrom . util import prod, partialmethod\n-def concretization_err_msg(fun):\n+def concretization_err_msg(fun, context=None):\nfname = getattr(fun, \"__name__\", fun)\n- msg = (\"Abstract value passed to `{}`, which requires a concrete value. \"\n- \"The function to be transformed can't be traced at the required level \"\n+ if context is None:\n+ context = (\"The function to be transformed can't be traced at the required level \"\n\"of abstraction. If using `jit`, try using `static_argnums` or \"\n\"applying `jit` to smaller subfunctions instead.\")\n- return msg.format(fname)\n+ msg = \"Abstract value passed to `{}`, which requires a concrete value. {}\"\n+ return msg.format(fname, context)\n-def concretization_function_error(fun):\n+def concretization_function_error(fun, context=None):\ndef error(self, *args):\n- raise TypeError(concretization_err_msg(fun))\n+ raise TypeError(concretization_err_msg(fun, context))\nreturn error\n@@ -64,9 +65,12 @@ class UnshapedArray(core.AbstractValue):\n\", weak_type=True\" if self.weak_type else \"\")\n_bool = _nonzero = concretization_function_error(bool)\n- _float = concretization_function_error(float)\n- _int = concretization_function_error(int)\n- _complex = concretization_function_error(complex)\n+ _float = concretization_function_error(\n+ float, \"Try using `value.astype(float)` instead.\")\n+ _int = concretization_function_error(\n+ int, \"Try using `value.astype(int)` instead.\")\n+ _complex = concretization_function_error(\n+ complex, \"Try using `value.astype(complex)` instead.\")\n_hex = concretization_function_error(hex)\n_oct = concretization_function_error(oct)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -222,6 +222,16 @@ class APITest(jtu.JaxTestCase):\nTypeError, \"Incompatible shapes for dot: got \\\\(3L?,\\\\) and \\\\(4L?,\\\\).\",\nlambda: grad(f)(onp.zeros(3), onp.zeros(4)))\n+ def test_abstract_error_message(self):\n+ for castfun in [float, complex, int]:\n+ def f(x):\n+ return castfun(x)\n+\n+ self.assertRaisesRegex(\n+ TypeError,\n+ \"Try using `value.astype\\({}\\)` instead\".format(castfun.__name__),\n+ lambda: jit(f)(1.0))\n+\ndef test_switch_value_jit(self):\ndef f(x):\ny = x > 0\n" } ]
Python
Apache License 2.0
google/jax
Added better error messages. (#2058) #2057 Added better error messages for when a user accidentally uses a Python cast instead of the `jax.numpy` casting.
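Editor's note: a sketch of the improved error, mirroring the new test:

    import jax

    @jax.jit
    def f(x):
        return float(x)   # x is abstract under jit

    # f(1.0) now raises TypeError:
    #   "... Try using `value.astype(float)` instead."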
260,296
27.01.2020 16:14:28
28,800
95ccaae8058f8fb49c81680f1f9061bf96d8d95e
Add test for empty dimension list for reversion
[ { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -913,6 +913,10 @@ class LaxTest(jtu.JaxTestCase):\ndef testReverse(self):\nrev = api.jit(lambda operand: lax.rev(operand, dimensions))\n+ dimensions = []\n+ self.assertAllClose(onp.array([0, 1, 2, 3]), rev(onp.array([0, 1, 2, 3])),\n+ check_dtypes=False)\n+\ndimensions = [0]\nself.assertAllClose(onp.array([3, 2, 1]), rev(onp.array([1, 2, 3])),\ncheck_dtypes=False)\n" } ]
Python
Apache License 2.0
google/jax
Add test for empty dimension list for reversion
260,384
28.01.2020 15:48:37
18,000
b68d8b5c4fead01ba85da9a9574a686493a5b7ba
Clarify instructions for building from source. Adds additional subsections of the `Building from source` documentation page to make it more obvious that you can install `jaxlib` from pip when doing Python-only development.
[ { "change_type": "MODIFY", "old_path": "docs/developer.rst", "new_path": "docs/developer.rst", "diff": "@@ -8,14 +8,30 @@ First, obtain the JAX source code.\ngit clone https://github.com/google/jax\ncd jax\n+Building JAX involves two steps:\n-There are two steps to building JAX: building ``jaxlib`` and installing ``jax``.\n+1. Building or installing ``jaxlib``, the C++ support library for ``jax``.\n+2. Installing the ``jax`` Python package.\n+\n+Building or installing ``jaxlib``\n+---------------------------------\n+\n+Installing ``jaxlib`` with pip\n+..............................\nIf you're only modifying Python portions of JAX, you may be able to install\n-``jaxlib`` from pip or a prebuilt wheel and skip to installing ``jax`` from\n-source.\n+``jaxlib`` from pip (or a prebuilt wheel). You can install with pip by running:\n+\n+.. code-block:: shell\n+\n+ pip install jaxlib\n+\n+\n+Building ``jaxlib`` from source\n+...............................\n+\n+To build ``jaxlib`` from source, you must also install some prerequisites:\n-To build ``jaxlib``, you must also install some prerequisites:\n* a C++ compiler (g++ or clang)\n* Numpy\n* Scipy\n@@ -58,6 +74,9 @@ To build ``jaxlib`` without CUDA GPU support (CPU only), drop the ``--enable_cud\npython build/build.py\npip install -e build # installs jaxlib (includes XLA)\n+Installing ``jax``\n+------------------\n+\nOnce ``jaxlib`` has been installed, you can install ``jax`` by running\n.. code-block:: shell\n@@ -66,8 +85,8 @@ Once ``jaxlib`` has been installed, you can install ``jax`` by running\nTo upgrade to the latest version from GitHub, just run ``git pull`` from the JAX\nrepository root, and rebuild by running ``build.py`` or upgrading ``jaxlib`` if\n-necessary. You shouldn't have to reinstall because ``pip install -e`` sets up\n-symbolic links from site-packages into the repository.\n+necessary. You shouldn't have to reinstall ``jax`` because ``pip install -e``\n+sets up symbolic links from site-packages into the repository.\nRunning the tests\n=================\n" } ]
Python
Apache License 2.0
google/jax
Clarify instructions for building from source. (#2093) Adds additional subsections of the `Building from source` documentation page to make it more obvious that you can install `jaxlib` from pip when doing Python-only development.
260,335
28.01.2020 16:41:21
28,800
1afcac70dfeaa4ffc89d79dc64f72361e18c4a91
tweak readme not to have bad line wrap
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "| [**Install guide**](#installation)\n| [**Reference docs**](https://jax.readthedocs.io/en/latest/)\n-## Announcements\n-\n-* `jax` 0.1.58 has been released. As of `jax` 0.1.58, JAX has dropped Python 2\n- support. Please update to Python 3.5 or newer.\n+As of `jax` 0.1.58, JAX has dropped Python 2 support. Please update to Python 3.5 or newer.\n## What is JAX?\n" } ]
Python
Apache License 2.0
google/jax
tweak readme not to have bad line wrap
260,335
28.01.2020 18:15:16
28,800
71811be3b9257ab1fc48fce8fb0512c0a384d901
tweak top-line announcement text in readme
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "| [**Install guide**](#installation)\n| [**Reference docs**](https://jax.readthedocs.io/en/latest/)\n-As of `jax` 0.1.58, JAX has dropped Python 2 support. Please update to Python 3.5 or newer.\n+**Announcement:** As of version 0.1.58, JAX has dropped Python 2 support, and requires Python 3.5 or newer. See [CHANGELOG.md](https://github.com/google/jax/blob/master/CHANGELOG.md).\n## What is JAX?\n" } ]
Python
Apache License 2.0
google/jax
tweak top-line announcement text in readme
260,335
28.01.2020 18:16:04
28,800
d46e82d0abbb4e0d009d5b201a178871d5e2c672
tweak readme announcement text again
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "| [**Install guide**](#installation)\n| [**Reference docs**](https://jax.readthedocs.io/en/latest/)\n-**Announcement:** As of version 0.1.58, JAX has dropped Python 2 support, and requires Python 3.5 or newer. See [CHANGELOG.md](https://github.com/google/jax/blob/master/CHANGELOG.md).\n+**Announcement:** JAX 0.1.58 has dropped Python 2 support, and requires Python 3.5 or newer. See [CHANGELOG.md](https://github.com/google/jax/blob/master/CHANGELOG.md).\n## What is JAX?\n" } ]
Python
Apache License 2.0
google/jax
tweak readme announcement text again
260,304
30.01.2020 15:03:00
28,800
664a4e123d83fb1d17cd31451fbebc6f1568707a
VJP of cond, via partial eval + transpose VJP (grad) of lax.cond, via partial eval + transpose
[ { "change_type": "MODIFY", "old_path": "CHANGELOG.md", "new_path": "CHANGELOG.md", "diff": "@@ -8,6 +8,11 @@ These are the release notes for JAX.\n* The minimum jaxlib version is now 0.1.38.\n+### New features\n+\n+* Reverse-mode automatic differentiation (e.g. `grad`) of `lax.cond`, making it\n+ now differentiable in both modes (https://github.com/google/jax/pull/2091)\n+\n## jaxlib 0.1.38 (January 29, 2020)\n* CUDA 9.0 is no longer supported.\n" }, { "change_type": "MODIFY", "old_path": "jax/experimental/loops.py", "new_path": "jax/experimental/loops.py", "diff": "@@ -507,10 +507,13 @@ class _CondBuilder(_LoopBuilder):\nlax_control_flow._initial_style_jaxpr(lambda *args: args,\ncarried_tree,\ntuple(init_avals)))\n+ args = list(itertools.chain(body_const_vals, init_vals,\n+ false_body_const_vals, init_vals))\nreturn lax_control_flow.cond_p.bind(\n- *itertools.chain([self.pred], body_const_vals,\n- init_vals, false_body_const_vals, init_vals),\n- true_jaxpr=body_typed_jaxpr, false_jaxpr=false_body_typed_jaxpr)\n+ self.pred, *args,\n+ true_jaxpr=body_typed_jaxpr,\n+ false_jaxpr=false_body_typed_jaxpr,\n+ linear=(False,) * len(args))\nclass _WhileBuilder(_LoopBuilder):\n@@ -562,4 +565,3 @@ class _WhileBuilder(_LoopBuilder):\ncond_jaxpr=cond_jaxpr,\nbody_nconsts=len(body_const_vals),\nbody_jaxpr=body_typed_jaxpr)\n-\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -95,6 +95,9 @@ class JaxprTrace(Trace):\nif primitive in custom_partial_eval_rules:\nreturn custom_partial_eval_rules[primitive](self, *tracers, **params)\nelse:\n+ return self.default_process_primitive(primitive, tracers, params)\n+\n+ def default_process_primitive(self, primitive, tracers, params):\npvs, consts = unzip2(t.pval for t in tracers)\nif all(pv is None for pv in pvs):\nreturn primitive.bind(*consts, **params)\n@@ -628,4 +631,3 @@ def move_binders_to_front(typed_jaxpr, to_move):\ndef _move_to_front(lst, to_move):\nreturn ([elt for elt, move in zip(lst, to_move) if move] +\n[elt for elt, move in zip(lst, to_move) if not move])\n-\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -417,16 +417,19 @@ def cond(pred, true_operand, true_fun, false_operand, false_fun):\n_check_tree_and_avals(\"true_fun and false_fun output\",\ntrue_out_tree, true_jaxpr.out_avals,\nfalse_out_tree, false_jaxpr.out_avals)\n+ linear = (False,) * (len(true_consts) + len(true_ops) + len(false_consts) +\n+ len(false_ops))\nout = cond_p.bind(\n*itertools.chain([pred], true_consts, true_ops, false_consts, false_ops),\n- true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr)\n+ true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)\nreturn tree_unflatten(true_out_tree, out)\ndef _cond_abstract_eval(*args, **kwargs):\nreturn _map(raise_to_shaped, kwargs[\"true_jaxpr\"].out_avals)\ndef _cond_translation_rule(c, axis_env, name_stack, pred, *args,\n- true_jaxpr, false_jaxpr, backend=None):\n+ true_jaxpr, false_jaxpr, linear, backend=None):\n+ del linear # Unused.\ntrue_ops, false_ops = split_list(args, [len(true_jaxpr.in_avals)])\ndef make_computation(name, jaxpr, op_shape):\n@@ -450,7 +453,7 @@ def _cond_pred_bcast_select(pred, x, y):\nbcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred))))\nreturn lax.select(bcast_pred, x, y)\n-def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr):\n+def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, 
linear):\n# TODO: maybe avoid moving arg axes to front if we're promoting to select?\nargs = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0\nelse x for x, d in zip(args, dims)]\n@@ -479,10 +482,10 @@ def _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr):\nout_dims = [0 if b else batching.not_mapped for b in out_bat]\nout = cond_p.bind(\n*itertools.chain([pred], true_ops, false_ops),\n- true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched)\n+ true_jaxpr=true_jaxpr_batched, false_jaxpr=false_jaxpr_batched, linear=linear)\nreturn out, out_dims\n-def _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr):\n+def _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr, linear):\nnonzeros = [t is not ad_util.zero for t in tangents]\n(pred_nz,), t_nz, f_nz = split_list(nonzeros, [1, len(true_jaxpr.in_avals)])\n@@ -501,20 +504,186 @@ def _cond_jvp(primals, tangents, true_jaxpr, false_jaxpr):\ntops_dot = _prune_zeros(tops_dot)\nfops_dot = _prune_zeros(fops_dot)\n+ tops_lin, fops_lin = _map(tuple, split_list(linear, [len(tops)]))\n+ linear_jvp = (tops_lin + (True,) * len(tops_dot) +\n+ fops_lin + (True,) * len(fops_dot))\nout = cond_p.bind(\n*itertools.chain([pred], tops, tops_dot, fops, fops_dot),\n- true_jaxpr=true_jvp, false_jaxpr=false_jvp)\n+ true_jaxpr=true_jvp, false_jaxpr=false_jvp, linear=linear_jvp)\nout_primals, out_tangents = split_list(out, [len(out_nz)])\nout_tangents_iter = iter(out_tangents)\nout_tangents = [\nnext(out_tangents_iter) if nz else ad_util.zero for nz in out_nz]\nreturn out_primals, out_tangents\n+def _cond_partial_eval(trace, *tracers, true_jaxpr, false_jaxpr, linear):\n+ unknowns = [t.pval[0] is not None for t in tracers]\n+\n+ (pred_uk,), t_uk, f_uk = split_list(unknowns, [1, len(true_jaxpr.in_avals)])\n+\n+ if pred_uk:\n+ # When the predicate is unknown, we stage out the whole cond.\n+ params = dict(true_jaxpr=true_jaxpr, false_jaxpr=false_jaxpr, linear=linear)\n+ return trace.default_process_primitive(cond_p, tracers, params)\n+\n+ _, _, t_out_uks = pe.partial_eval_jaxpr(true_jaxpr, t_uk, instantiate=False)\n+ _, _, f_out_uks = pe.partial_eval_jaxpr(false_jaxpr, f_uk, instantiate=False)\n+ out_uks = [a or b for a, b in zip(t_out_uks, f_out_uks)]\n+\n+ true_jaxpr_1, true_jaxpr_2, _ = pe.partial_eval_jaxpr(true_jaxpr, t_uk,\n+ instantiate=out_uks)\n+ false_jaxpr_1, false_jaxpr_2, _ = pe.partial_eval_jaxpr(false_jaxpr, f_uk,\n+ instantiate=out_uks)\n+\n+ num_t_res = len(true_jaxpr_1.out_avals) - len(out_uks)\n+ num_f_res = len(false_jaxpr_1.out_avals) - len(out_uks)\n+\n+ move = [False] * len(true_jaxpr.in_avals) + [True] * num_t_res\n+ true_jaxpr_2 = pe.move_binders_to_front(true_jaxpr_2, move)\n+ move = [False] * len(false_jaxpr.in_avals) + [True] * num_f_res\n+ false_jaxpr_2 = pe.move_binders_to_front(false_jaxpr_2, move)\n+\n+ # TODO(frostig,mattjj): pe.partial_eval_jaxpr should raise to shaped avals\n+ t_res_avals = _map(raise_to_shaped, true_jaxpr_2.in_avals[:num_t_res])\n+ f_res_avals = _map(raise_to_shaped, false_jaxpr_2.in_avals[:num_f_res])\n+\n+ assert len(true_jaxpr_2.out_avals) == len(false_jaxpr_2.out_avals)\n+ num_outs = len(true_jaxpr_2.out_avals)\n+\n+ true_jaxpr_1 = _join_cond_outputs(\n+ true_jaxpr_1, num_outs, f_res_avals, zeros_on_left=False)\n+ false_jaxpr_1 = _join_cond_outputs(\n+ false_jaxpr_1, num_outs, t_res_avals, zeros_on_left=True)\n+\n+ # TODO(frostig,mattjj): reinstate this assertion once pe.partial_eval_jaxpr\n+ # raises to shaped avals\n+ # assert true_jaxpr_1.out_avals == false_jaxpr_1.out_avals\n+ 
num_res = num_t_res + num_f_res\n+\n+ _, in_consts = unzip2([t.pval for t in tracers])\n+ out_consts_res = cond_p.bind(\n+ *in_consts, true_jaxpr=true_jaxpr_1, false_jaxpr=false_jaxpr_1,\n+ linear=linear)\n+ out_consts, res = split_list(out_consts_res, [len(out_consts_res) - num_res])\n+\n+ # TODO(frostig,mattjj): remove raised_to_shaped of avals once\n+ # pe.partial_eval_jaxpr handles it\n+ out_avals = _map(raise_to_shaped, true_jaxpr_2.out_avals)\n+ out_pvs = [aval if uk else None for aval, uk in zip(out_avals, out_uks)]\n+\n+ pred_tracer = trace.instantiate_const(tracers[0])\n+\n+ ops_tracers = [trace.instantiate_const(t) if uk\n+ else trace.new_instantiated_literal(core.unit)\n+ for uk, t in zip(unknowns[1:], tracers[1:])]\n+ true_ops_tracers, false_ops_tracers = split_list(\n+ ops_tracers, [len(true_jaxpr.in_avals)])\n+\n+ res_tracers = _map(trace.new_instantiated_const, res)\n+ true_res_tracers, false_res_tracers = split_list(res_tracers, [num_t_res])\n+\n+ out_tracers = [pe.JaxprTracer(trace, pe.PartialVal((pv, const)), None)\n+ for pv, const in zip(out_pvs, out_consts)]\n+\n+ tops_lin, fops_lin = _map(tuple, split_list(linear, [len(true_jaxpr.in_avals)]))\n+ linear_2 = ((False,) * num_t_res + tops_lin + (False,) * num_f_res + fops_lin)\n+ params = dict(true_jaxpr=true_jaxpr_2, false_jaxpr=false_jaxpr_2,\n+ linear=linear_2)\n+ eqn = pe.new_eqn_recipe([pred_tracer] +\n+ true_res_tracers + true_ops_tracers +\n+ false_res_tracers + false_ops_tracers,\n+ out_tracers,\n+ cond_p, (), params)\n+ for t in out_tracers: t.recipe = eqn\n+ return out_tracers\n+\n+def _join_cond_outputs(jaxpr, num_prefix, zeros_avals, zeros_on_left):\n+ @lu.wrap_init\n+ def f_aug(*args):\n+ prefix_and_rest = core.jaxpr_as_fun(jaxpr)(*args)\n+ prefix, rest = split_list(prefix_and_rest, [num_prefix])\n+ zeros = [ad_util.zeros_like_aval(a) for a in zeros_avals]\n+ if zeros_on_left:\n+ return prefix + zeros + rest\n+ else:\n+ return prefix + rest + zeros\n+\n+ return _make_typed_jaxpr(f_aug, jaxpr.in_avals)\n+\n+def _transpose_cond_jaxpr(jaxpr, num_res):\n+ num_non_res = len(jaxpr.in_avals) - num_res\n+ res_avals, primal_avals = split_list(jaxpr.in_avals, [num_res])\n+ primal_avals = _map(raise_to_shaped, primal_avals)\n+\n+ @lu.wrap_init\n+ def transposed(*args):\n+ res, cts_out = split_list(args, [num_res])\n+ primals = res + [ad.undefined_primal] * num_non_res\n+ _, cts_in = ad.backward_pass(\n+ jaxpr.jaxpr, jaxpr.literals, (), primals, cts_out)\n+ _, cts_in = split_list(cts_in, [num_res])\n+ return _map(ad.instantiate_zeros_aval, primal_avals, cts_in)\n+\n+ return _make_typed_jaxpr(transposed, res_avals + jaxpr.out_avals)\n+\n+def _cond_transpose(cts, *args, true_jaxpr, false_jaxpr, linear):\n+ (pred,), tops, fops = split_list(args, [1, len(true_jaxpr.in_avals)])\n+ tops_lin, fops_lin = split_list(linear, [len(true_jaxpr.in_avals)])\n+ in_avals = _map(raise_to_shaped, true_jaxpr.in_avals + false_jaxpr.in_avals)\n+\n+ num_t_res = len(tops) - sum(tops_lin)\n+ num_f_res = len(fops) - sum(fops_lin)\n+\n+ t_jaxpr_trans = _transpose_cond_jaxpr(true_jaxpr, num_t_res)\n+ f_jaxpr_trans = _transpose_cond_jaxpr(false_jaxpr, num_f_res)\n+ lin_in_avals = _map(raise_to_shaped, [a for a, l in zip(in_avals, linear) if l])\n+ assert t_jaxpr_trans.out_avals + f_jaxpr_trans.out_avals == lin_in_avals\n+\n+ t_jaxpr_trans_ = _join_cond_outputs(\n+ t_jaxpr_trans, 0, f_jaxpr_trans.out_avals, zeros_on_left=False)\n+ f_jaxpr_trans_ = _join_cond_outputs(\n+ f_jaxpr_trans, 0, t_jaxpr_trans.out_avals, zeros_on_left=True)\n+ assert 
t_jaxpr_trans_.out_avals == f_jaxpr_trans_.out_avals == lin_in_avals\n+\n+ t_res, _ = split_list(tops, [num_t_res])\n+ f_res, _ = split_list(fops, [num_f_res])\n+\n+ linear_trans = ((False,) * num_t_res + (True,) * len(cts) +\n+ (False,) * num_f_res + (True,) * len(cts))\n+\n+ cts = _map(ad.instantiate_zeros_aval, true_jaxpr.out_avals, cts)\n+\n+ out = cond_p.bind(\n+ pred, *itertools.chain(t_res, cts, f_res, cts),\n+ true_jaxpr=t_jaxpr_trans_, false_jaxpr=f_jaxpr_trans_,\n+ linear=linear_trans)\n+ assert all(_map(typecheck, lin_in_avals, out))\n+\n+ out_iter = iter(out)\n+ out = [next(out_iter) if l else None for l in linear]\n+ assert next(out_iter, None) is None\n+ return [None] + out\n+\n+def cond_bind(*args, true_jaxpr, false_jaxpr, linear):\n+ if not core.skip_checks:\n+ assert len(linear) + 1 == len(args)\n+ assert len(args) == 1 + len(true_jaxpr.in_avals) + len(false_jaxpr.in_avals)\n+ (pred,), tops, fops = split_list(args, [1, len(true_jaxpr.in_avals)])\n+ assert all(_map(typecheck, true_jaxpr.in_avals, tops))\n+ assert all(_map(typecheck, false_jaxpr.in_avals, fops))\n+ core.check_jaxpr(true_jaxpr.jaxpr)\n+ core.check_jaxpr(false_jaxpr.jaxpr)\n+ return core.Primitive.bind(cond_p, *args, true_jaxpr=true_jaxpr,\n+ false_jaxpr=false_jaxpr, linear=linear)\n+\ncond_p = lax.Primitive('cond')\ncond_p.multiple_results = True\ncond_p.def_impl(partial(xla.apply_primitive, cond_p))\ncond_p.def_abstract_eval(_cond_abstract_eval)\n+cond_p.def_custom_bind(cond_bind)\nad.primitive_jvps[cond_p] = _cond_jvp\n+ad.primitive_transposes[cond_p] = _cond_transpose\n+pe.custom_partial_eval_rules[cond_p] = _cond_partial_eval\nbatching.primitive_batchers[cond_p] = _cond_batching_rule\nxla.initial_style_translations[cond_p] = _cond_translation_rule\n@@ -853,7 +1022,8 @@ def _scan_transpose(cts, *args, **kwargs):\n# jaxpr :: [ires, T d] -> [T c] -> [T a, eres] -> ([T c], [T b])\n# jaxpr_trans :: [ires] -> [CT d, CT c] -> [CT b, eres] -> ([CT d, CT c], [CT a])\n- jaxpr_trans = _transpose_jaxpr(num_ires, num_consts - num_ires, num_eres, jaxpr)\n+ jaxpr_trans = _transpose_scan_jaxpr(\n+ num_ires, num_consts - num_ires, num_eres, jaxpr)\nlinear_trans = ([False] * num_ires +\n[True] * (len(ct_consts) + len(ct_carry) + len(ct_ys)) +\n[False] * num_eres)\n@@ -865,9 +1035,9 @@ def _scan_transpose(cts, *args, **kwargs):\nct_consts, ct_init, ct_xs = split_list(outs, [num_consts - num_ires, num_carry])\nreturn [None] * num_ires + ct_consts + ct_init + ct_xs + [None] * num_eres\n-# transpose_jaxpr :: ([res1, c, a, res2] -> b)\n+# transpose_scan_jaxpr :: ([res1, c, a, res2] -> b)\n# -> ([res1, CT c, CT b, res2] -> [CT c, CT a])\n-def _transpose_jaxpr(num_res1, num_c, num_res2, jaxpr):\n+def _transpose_scan_jaxpr(num_res1, num_c, num_res2, jaxpr):\nnum_a = len(jaxpr.in_avals) - num_res1 - num_c - num_res2\nres1_avals, c_avals, a_avals, res2_avals = split_list(\njaxpr.in_avals, [num_res1, num_c, num_a])\n@@ -892,7 +1062,7 @@ def _make_typed_jaxpr(traceable, in_avals):\npvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\njaxpr, pvals_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True)\nout_avals, _ = unzip2(pvals_out)\n- return core.TypedJaxpr(jaxpr, consts, in_avals, out_avals)\n+ return core.TypedJaxpr(jaxpr, consts, in_avals, _map(raise_to_shaped, out_avals))\ndef _scan_batching_rule(args, dims, forward, length, jaxpr, num_consts,\n@@ -978,25 +1148,18 @@ def _masked_scan_jaxpr(jaxpr, num_consts, num_carry):\nconst_avals, carry_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, 
num_carry])\nreturn _make_typed_jaxpr(masked, [aval] + const_avals + [aval] + carry_avals + x_avals)\n-def scan_bind(*args, **kwargs):\n- forward, length, num_consts, num_carry, jaxpr, linear = split_dict(\n- kwargs, [\"forward\", \"length\", \"num_consts\", \"num_carry\", \"jaxpr\", \"linear\"])\n- consts, init, xs = split_list(args, [num_consts, num_carry])\n+def scan_bind(*args, forward, length, num_consts, num_carry, jaxpr, linear):\n+ if not core.skip_checks:\nassert len(linear) == len(args)\n-\n- # check that args match input types\n+ consts, init, xs = split_list(args, [num_consts, num_carry])\nconsts_avals, init_avals, x_avals = split_list(jaxpr.in_avals, [num_consts, num_carry])\nxs_avals = _map(partial(_promote_aval_rank, length), x_avals)\nassert all(_map(typecheck, consts_avals, consts)), (consts, consts_avals)\nassert all(_map(typecheck, init_avals, init))\n# assert all(_map(typecheck, xs_avals, xs))\n- # check that output carry type matches input carry type\ncarry_avals, _ = split_list(jaxpr.out_avals, [num_carry])\nassert all(_map(typematch, init_avals, carry_avals))\n-\n- # check that the data flow is sensible\ncore.check_jaxpr(jaxpr.jaxpr)\n-\nreturn core.Primitive.bind(scan_p, *args, forward=forward, length=length,\njaxpr=jaxpr, num_consts=num_consts,\nnum_carry=num_carry, linear=linear)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1697,6 +1697,7 @@ class JaxprTest(jtu.JaxTestCase):\ne = cond[ false_jaxpr={ lambda ; ; b a.\nlet c = sub a b\nin [c] }\n+ linear=(False, False, False, False)\ntrue_jaxpr={ lambda ; ; b a.\nlet c = add a b\nin [c] } ] b a c a d\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_control_flow_test.py", "new_path": "tests/lax_control_flow_test.py", "diff": "@@ -648,9 +648,178 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nans = api.jvp(fun, (x,), (x,))\nexpected = api.jvp(fun_ref, (x,), (x,))\nself.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x,), order=2, modes=[\"fwd\"])\n+\n+ x = 2.72\n+ ans = api.jvp(fun, (x,), (x,))\n+ expected = api.jvp(fun_ref, (x,), (x,))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x,), order=2, modes=[\"fwd\"])\n+\n+ def testCondJVP2(self):\n+ def fun_ref(x):\n+ if x < 3:\n+ return 2.\n+ else:\n+ return 2. * x\n+\n+ def fun(x):\n+ return lax.cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)\n+\n+ x = 3.14\n+ ans = api.jvp(fun, (x,), (x,))\n+ expected = api.jvp(fun_ref, (x,), (x,))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x,), order=2, modes=[\"fwd\"])\n+ x = 2.72\n+ ans = api.jvp(fun, (x,), (x,))\n+ expected = api.jvp(fun_ref, (x,), (x,))\n+ self.assertAllClose(ans, expected, check_dtypes=False)\njtu.check_grads(fun, (x,), order=2, modes=[\"fwd\"])\n+ def testCondGrad(self):\n+ def f_ref(x):\n+ return 3. * x if x < 2 else np.sin(x)\n+\n+ def f(x):\n+ return lax.cond(x < 2, x, lambda x: 3. 
* x, x, lambda x: np.sin(x))\n+\n+ x = 2.14\n+ ans = api.grad(f)(x)\n+ expected = api.grad(f_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(f, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ x = 1.72\n+ ans = api.grad(f)(x)\n+ expected = api.grad(f_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(f, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ def testCondGrad2(self):\n+ def f_ref(x):\n+ z = np.array([1., 2.]) * x if x[0] < 2 else np.sin(x)\n+ return z.sum()\n+\n+ def _f(x):\n+ return lax.cond(\n+ x[0] < 2,\n+ x, lambda x: np.array([1., 2.]) * x,\n+ x, lambda x: np.sin(x))\n+\n+ f = lambda x: api.jit(_f)(x).sum()\n+\n+ x = 2.14 * np.ones(2)\n+ ans = api.grad(f)(x)\n+ expected = api.grad(f_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(f, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ x = 1.72 * np.ones(2)\n+ ans = api.grad(f)(x)\n+ expected = api.grad(f_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(f, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ def testCondGrad3(self):\n+ def fun_ref(x):\n+ if x < 3:\n+ return 2.\n+ else:\n+ return 2. * x\n+\n+ def fun(x):\n+ return lax.cond(x < 3, (), lambda _: 2., x, lambda x: 2. * x)\n+\n+ x = 3.14\n+ ans = api.grad(fun)(x)\n+ expected = api.grad(fun_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ x = 2.72\n+ ans = api.grad(fun)(x)\n+ expected = api.grad(fun_ref)(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x,), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ def testCondGrad4(self):\n+ def fun_ref(x, y):\n+ if x < 3:\n+ return 2. * np.sin(y)\n+ else:\n+ return 2. * np.cos(x)\n+\n+ def fun(x, y):\n+ return lax.cond(\n+ x < 3,\n+ (), lambda _: 2. * np.sin(y),\n+ x, lambda x: 2. * x)\n+\n+ y = 5.8\n+ x = 3.14\n+ ans = api.grad(fun, 1)(x, y)\n+ expected = api.grad(fun_ref, 1)(x, y)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x, y), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ x = 2.72\n+ ans = api.grad(fun, 1)(x, y)\n+ expected = api.grad(fun_ref, 1)(x, y)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+ jtu.check_grads(fun, (x, y), order=2, modes=[\"fwd\", \"rev\"])\n+\n+ def testCondLinearize(self):\n+ def f(x):\n+ return lax.cond(x < 2, x, lambda x: 3. * x, x, lambda x: np.sin(x))\n+ y, f_lin = api.linearize(f, 1.)\n+ self.assertAllClose(y, 3., check_dtypes=False)\n+ self.assertAllClose(f_lin(2.), 6., check_dtypes=False)\n+ y, f_lin = api.linearize(f, 4.)\n+ self.assertAllClose(y, np.sin(4.), check_dtypes=False)\n+ self.assertAllClose(f_lin(2.), np.cos(4.) 
* 2., check_dtypes=False)\n+\n+ def testCondLinearize2(self):\n+ def f_ref(x):\n+ z = np.array([1., 2.]) * x if x[0] < 2 else np.cos(np.sin(x))\n+ return z.sum()\n+\n+ def f(x):\n+ return lax.cond(\n+ x[0] < 2,\n+ x, lambda x: np.array([1., 2.]) * x,\n+ x, lambda x: np.cos(np.sin(x))).sum()\n+\n+ x = 2.14 * np.ones(2)\n+ y, f_lin = api.linearize(f, x)\n+ y_ref, f_lin_ref = api.linearize(f_ref, x)\n+ self.assertAllClose(y, y_ref, check_dtypes=False)\n+ self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)\n+\n+ x = -2.14 * np.ones(2)\n+ y, f_lin = api.linearize(f, x)\n+ y_ref, f_lin_ref = api.linearize(f_ref, x)\n+ self.assertAllClose(y, y_ref, check_dtypes=False)\n+ self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)\n+\n+ f = api.jit(f)\n+ x = 2.14 * np.ones(2)\n+ y, f_lin = api.linearize(f, x)\n+ y_ref, f_lin_ref = api.linearize(f_ref, x)\n+ self.assertAllClose(y, y_ref, check_dtypes=False)\n+ self.assertAllClose(f_lin(x), f_lin_ref(x), check_dtypes=False)\n+\n+ def testCondJit(self):\n+ def f(x):\n+ return lax.cond(x < 2, x, lambda x: 3. * x, x, lambda x: np.sin(x))\n+ y = api.jit(f)(1.)\n+ expected = f(1.)\n+ self.assertAllClose(y, expected, check_dtypes=False)\n+ y = api.jit(f)(4.)\n+ expected = f(4.)\n+ self.assertAllClose(y, expected, check_dtypes=False)\n+\ndef testIssue1263(self):\ndef f(rng, x):\ncond = random.bernoulli(rng)\n" }, { "change_type": "MODIFY", "old_path": "tests/metadata_test.py", "new_path": "tests/metadata_test.py", "diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n+from unittest import SkipTest\nfrom absl.testing import absltest\nfrom jax import test_util as jtu\n@@ -43,6 +44,7 @@ class MetadataTest(jtu.JaxTestCase):\ndel xb._JaxComputationBuilder.SetOpMetadata\ndef test_primitive_metadata(self):\n+ raise SkipTest # TODO(jekbradbury)\n_ = jnp.sin(1.)\nassert self.op_types[-1] == 'sin'\nassert self.op_names[-1] == 'sin'\n@@ -63,6 +65,7 @@ class MetadataTest(jtu.JaxTestCase):\nassert self.op_names[-1] == 'jit(foo)/sin'\ndef test_nested_jit_metadata(self):\n+ raise SkipTest # TODO(jekbradbury)\n@jax.jit\ndef foo(x):\nreturn jnp.sin(x)\n@@ -102,7 +105,7 @@ class MetadataTest(jtu.JaxTestCase):\nreturn jnp.cos(x)\n_ = jax.lax.cond(True, 1., true_fun, 1., false_fun)\nassert self.op_types[-3] == 'cond'\n- assert self.op_names[-3] == 'cond'\n+ assert self.op_names[-3] == 'cond[ linear=(False, False) ]'\nassert self.op_types[-2] == 'sin'\nassert self.op_names[-2] == 'cond/true_fun/sin'\nassert self.op_types[-1] == 'cos'\n" } ]
Python
Apache License 2.0
google/jax
VJP of cond, via partial eval + transpose (#2091) VJP (grad) of lax.cond, via partial eval + transpose Co-authored-by: Matthew Johnson <mattjj@google.com>
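The commit above makes lax.cond reverse-mode differentiable. A minimal sketch of the behavior it enables, mirroring the testCondGrad case added in the diff and using the five-argument cond signature of this era (the jax.numpy-as-np alias follows the repo's own test convention):

```python
import jax
import jax.numpy as np
from jax import lax

def f(x):
  return lax.cond(x < 2, x, lambda x: 3. * x, x, lambda x: np.sin(x))

print(jax.grad(f)(1.))  # 3.0: gradient flows through the true branch
print(jax.grad(f)(4.))  # cos(4.): gradient flows through the false branch
```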
260,620
30.01.2020 17:19:01
28,800
4c30c0285c509764f3b1b937b8a6bc9f9b8dd061
Implement scipy.stats.logistic
[ { "change_type": "MODIFY", "old_path": "jax/scipy/stats/__init__.py", "new_path": "jax/scipy/stats/__init__.py", "diff": "@@ -25,3 +25,4 @@ from . import norm\nfrom . import pareto\nfrom . import t\nfrom . import uniform\n+from . import logistic\n" }, { "change_type": "ADD", "old_path": null, "new_path": "jax/scipy/stats/logistic.py", "diff": "+# Copyright 2020 Google LLC\n+#\n+# Licensed under the Apache License, Version 2.0 (the \"License\");\n+# you may not use this file except in compliance with the License.\n+# You may obtain a copy of the License at\n+#\n+# https://www.apache.org/licenses/LICENSE-2.0\n+#\n+# Unless required by applicable law or agreed to in writing, software\n+# distributed under the License is distributed on an \"AS IS\" BASIS,\n+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n+# See the License for the specific language governing permissions and\n+# limitations under the License.\n+\n+from __future__ import absolute_import\n+from __future__ import division\n+from __future__ import print_function\n+\n+import scipy.stats as osp_stats\n+from jax.scipy.special import expit, logit\n+\n+from ... import lax\n+from ...numpy.lax_numpy import _promote_args_inexact, _wraps\n+\n+\n+@_wraps(osp_stats.logistic.logpdf, update_doc=False)\n+def logpdf(x):\n+ return lax.neg(x) - 2. * lax.log1p(lax.exp(lax.neg(x)))\n+\n+@_wraps(osp_stats.logistic.pdf, update_doc=False)\n+def pdf(x):\n+ return lax.exp(logpdf(x))\n+\n+@_wraps(osp_stats.logistic.ppf, update_doc=False)\n+def ppf(x):\n+ return logit(x)\n+\n+@_wraps(osp_stats.logistic.sf, update_doc=False)\n+def sf(x):\n+ return expit(lax.neg(x))\n+\n+@_wraps(osp_stats.logistic.isf, update_doc=False)\n+def isf(x):\n+ return -logit(x)\n+\n+@_wraps(osp_stats.logistic.cdf, update_doc=False)\n+def cdf(x):\n+ return expit(x)\n" }, { "change_type": "MODIFY", "old_path": "tests/scipy_stats_test.py", "new_path": "tests/scipy_stats_test.py", "diff": "@@ -208,6 +208,58 @@ class LaxBackedScipyStatsTests(jtu.JaxTestCase):\ntol=1e-6)\nself._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n+ @genNamedParametersNArgs(1, jtu.rand_default)\n+ def testLogisticCdf(self, rng_factory, shapes, dtypes):\n+ rng = rng_factory()\n+ scipy_fun = osp_stats.logistic.cdf\n+ lax_fun = lsp_stats.logistic.cdf\n+\n+ def args_maker():\n+ return list(map(rng, shapes, dtypes))\n+\n+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,\n+ tol=1e-6)\n+ self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n+\n+ @genNamedParametersNArgs(1, jtu.rand_default)\n+ def testLogisticLogpdf(self, rng_factory, shapes, dtypes):\n+ rng = rng_factory()\n+ scipy_fun = osp_stats.logistic.logpdf\n+ lax_fun = lsp_stats.logistic.logpdf\n+\n+ def args_maker():\n+ return list(map(rng, shapes, dtypes))\n+\n+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,\n+ tol=1e-6)\n+ self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n+\n+ @genNamedParametersNArgs(1, jtu.rand_default)\n+ def testLogisticPpf(self, rng_factory, shapes, dtypes):\n+ rng = rng_factory()\n+ scipy_fun = osp_stats.logistic.ppf\n+ lax_fun = lsp_stats.logistic.ppf\n+\n+ def args_maker():\n+ return list(map(rng, shapes, dtypes))\n+\n+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,\n+ tol=1e-6)\n+ self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n+\n+ @genNamedParametersNArgs(1, jtu.rand_default)\n+ def testLogisticSf(self, rng_factory, shapes, dtypes):\n+ rng = rng_factory()\n+ scipy_fun = 
osp_stats.logistic.sf\n+ lax_fun = lsp_stats.logistic.sf\n+\n+ def args_maker():\n+ return list(map(rng, shapes, dtypes))\n+\n+ self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,\n+ tol=1e-6)\n+ self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)\n+\n# TODO: currently it ignores the argument \"shapes\" and only tests dim=4\n@genNamedParametersNArgs(3, jtu.rand_default)\ndef testMultivariateNormalLogPdf(self, rng_factory, shapes, dtypes):\n" } ]
Python
Apache License 2.0
google/jax
Implement scipy.stats.logistic (#1993)
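The new jax.scipy.stats.logistic module covers the standard logistic distribution only, with the single-argument logpdf, pdf, cdf, ppf, sf, and isf shown in the diff. A short usage sketch:

```python
import jax.numpy as np
from jax.scipy.stats import logistic

x = np.linspace(-3., 3., 5)
print(logistic.pdf(x))                 # exp(logpdf(x)); equals 0.25 at x == 0
print(logistic.cdf(x))                 # the sigmoid expit(x)
print(logistic.ppf(logistic.cdf(x)))   # ppf inverts cdf, recovering x
```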
260,335
31.01.2020 23:47:30
28,800
ae1d6b875fcc2b23909b360c73db00489f32068e
fix remat with nontrivial env, fixes a reported issue
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -2027,7 +2027,8 @@ def checkpoint(fun, concrete=False):\ndef fun_remat(*args, **kwargs):\nargs_flat, in_tree = tree_flatten((args, kwargs))\nflat_fun, out_tree = flatten_fun(lu.wrap_init(fun), in_tree)\n- out_flat = pe.remat_call(flat_fun, *args_flat, concrete=concrete)\n+ out_flat = pe.remat_call(flat_fun, *args_flat, name=flat_fun.__name__,\n+ concrete=concrete)\nreturn tree_unflatten(out_tree(), out_flat)\nreturn fun_remat\nremat = checkpoint\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -526,7 +526,7 @@ def _remat_partial_eval(trace, f, tracers, params):\n# Since we traced with everything marked as unknown, but we need to know which\n# outputs are known/unknown, we use partial_eval_jaxpr to get out_unknowns.\njaxpr_converted = convert_freevars_jaxpr(jaxpr)\n- in_avals = ([raise_to_shaped(t.pval[0]) for t in env]\n+ in_avals = ([raise_to_shaped(partial_val_aval(*t.pval)) for t in env]\n+ [raise_to_shaped(pv) for pv in in_pvs])\nout_avals = [raise_to_shaped(pv if pv is not None\nelse abstract_unit if var is unitvar\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1600,6 +1600,46 @@ class APITest(jtu.JaxTestCase):\napi.grad(func)(5.0) # doesn't crash\n+ def test_remat_jit2(self):\n+ @api.jit\n+ def f(x):\n+ y = 2 * x\n+\n+ @api.remat\n+ def g():\n+ return y\n+\n+ return g()\n+\n+ self.assertAllClose(f(3), 6, check_dtypes=False)\n+\n+ def test_remat_nontrivial_env(self):\n+ # simplified from https://github.com/google/jax/issues/2030\n+\n+ @api.remat\n+ def foo(state, dt=0.5, c=1):\n+ u, u_t = state\n+ u_tt = c**2 * u\n+ u_t = u_t + u_tt * dt\n+ return (u, u_t)\n+\n+ @partial(api.jit, static_argnums=(1,))\n+ def _multi_step(state, count, dt, c):\n+ f = lambda s, _: (foo(s, dt, c), _)\n+ return lax.scan(f, state, None, count)\n+\n+ def multi_step(state, count, dt=1/np.sqrt(2), c=1):\n+ return _multi_step(state, count, dt, c)\n+\n+ def loss(u0, target, steps, dt=1/np.sqrt(2), c=1):\n+ init = (u0, np.zeros_like(u0))\n+ (uf, _), _ = multi_step(init, steps, dt, c)\n+ return ((uf - target) ** 2).mean()\n+\n+ target = np.zeros((128, 128))\n+ u0 = np.ones_like(target)\n+ loss(u0, target, 10) # doesn't crash\n+\ndef test_trivial_computations(self):\nx = np.array([1, 2, 3])\ny = api.jit(lambda x: x)(x)\n" } ]
Python
Apache License 2.0
google/jax
fix remat with nontrivial env (#2136) fixes #2030
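A condensed sketch of the fixed pattern, lifted from the test_remat_jit2 regression test in the diff: jax.remat applied to a function that closes over a value from an enclosing jit trace, a nontrivial environment:

```python
import jax

@jax.jit
def f(x):
  y = 2 * x          # a tracer living in f's environment

  @jax.remat
  def g():
    return y         # g captures y instead of taking it as an argument

  return g()

print(f(3.))  # 6.0; rematerializing through the free variable used to crash
```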
260,425
03.02.2020 15:12:40
0
8c7fc3919d3e131da6a2121158084ed480dbec2a
Upgrade bazel from 0.29.1 to 1.2.1
[ { "change_type": "MODIFY", "old_path": "build/build.py", "new_path": "build/build.py", "diff": "@@ -57,19 +57,19 @@ def get_python_bin_path(python_bin_path_flag):\n# Bazel\n-BAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/0.29.1/\"\n+BAZEL_BASE_URI = \"https://github.com/bazelbuild/bazel/releases/download/1.2.1/\"\nBazelPackage = collections.namedtuple(\"BazelPackage\", [\"file\", \"sha256\"])\nbazel_packages = {\n\"Linux\":\nBazelPackage(\n- file=\"bazel-0.29.1-linux-x86_64\",\n+ file=\"bazel-1.2.1-linux-x86_64\",\nsha256=\n- \"da3031d811f42f6208d24a87984b5b07e1c75afede184cad86eb02bef6c3b9b0\"),\n+ \"f5e21d7448419d1596ad0c5bb71fb336a0af08c832587aec394970ea56701d88\"),\n\"Darwin\":\nBazelPackage(\n- file=\"bazel-0.29.1-darwin-x86_64\",\n+ file=\"bazel-1.2.1-darwin-x86_64\",\nsha256=\n- \"34daae4caafbdb0952415ed6f97f47f03df84df9af146e9eb910ba65c073efdd\"),\n+ \"6729be5a56e6eadf7a9112afd2d87ce348da8fca22077b882d9bb7a6f5d41d1c\"),\n}\n" } ]
Python
Apache License 2.0
google/jax
Upgrade bazel from 0.29.1 to 1.2.1 (#2137)
260,296
03.02.2020 07:31:12
28,800
1022573b26a1996db524229de10fb84dbe6e08b3
Make stax pooling layers accept `spec=None` Currently pooling layers have a default channel-last spec that is explicitly 2D. This change will make this default work for arbitrary input dimensionality.
[ { "change_type": "MODIFY", "old_path": "jax/experimental/stax.py", "new_path": "jax/experimental/stax.py", "diff": "@@ -163,12 +163,16 @@ Gelu = elementwise(gelu)\ndef _pooling_layer(reducer, init_val, rescaler=None):\n- def PoolingLayer(window_shape, strides=None, padding='VALID', spec='NHWC'):\n+ def PoolingLayer(window_shape, strides=None, padding='VALID', spec=None):\n\"\"\"Layer construction function for a pooling layer.\"\"\"\nstrides = strides or (1,) * len(window_shape)\nrescale = rescaler(window_shape, strides, padding) if rescaler else None\n+ if spec is None:\n+ non_spatial_axes = 0, len(window_shape) + 1\n+ else:\nnon_spatial_axes = spec.index('N'), spec.index('C')\n+\nfor i in sorted(non_spatial_axes):\nwindow_shape = window_shape[:i] + (1,) + window_shape[i:]\nstrides = strides[:i] + (1,) + strides[i:]\n@@ -189,7 +193,11 @@ SumPool = _pooling_layer(lax.add, 0.)\ndef _normalize_by_window_size(dims, strides, padding):\ndef rescale(outputs, inputs, spec):\n+ if spec is None:\n+ non_spatial_axes = 0, inputs.ndim - 1\n+ else:\nnon_spatial_axes = spec.index('N'), spec.index('C')\n+\nspatial_shape = tuple(inputs.shape[i]\nfor i in range(inputs.ndim)\nif i not in non_spatial_axes)\n" } ]
Python
Apache License 2.0
google/jax
Make stax pooling layers accept `spec=None` (#2145) Currently pooling layers have a default channel-last spec that is explicitly 2D. This change will make this default work for arbitrary input dimensionality.
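With spec=None the pooling layers now infer the batch and channel axes from the window rank, so windows of any spatial dimensionality work without a spec string. A sketch of a 3-D pool, assuming stax's usual (init_fun, apply_fun) layer convention of this era:

```python
from jax import random
from jax.experimental import stax

init_fun, apply_fun = stax.MaxPool((2, 2, 2))      # no 'NHWC'-style spec needed
out_shape, params = init_fun(random.PRNGKey(0), (1, 8, 8, 8, 3))
print(out_shape)  # (1, 7, 7, 7, 3) with default stride 1 and 'VALID' padding
```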
260,662
04.02.2020 15:24:10
0
4080a1c2ce95dc4a90f899fe4bf9ad5ac6a7b8b3
Add np.fft.fftshift/ifftshift
[ { "change_type": "MODIFY", "old_path": "jax/numpy/fft.py", "new_path": "jax/numpy/fft.py", "diff": "@@ -208,6 +208,34 @@ def rfftfreq(n, d=1.0):\nreturn k / (d * n)\n+@_wraps(onp.fft.fftshift)\n+def fftshift(x, axes=None):\n+ x = np.asarray(x)\n+ if axes is None:\n+ axes = tuple(range(x.ndim))\n+ shift = [dim // 2 for dim in x.shape]\n+ elif isinstance(axes, int):\n+ shift = x.shape[axes] // 2\n+ else:\n+ shift = [x.shape[ax] // 2 for ax in axes]\n+\n+ return np.roll(x, shift, axes)\n+\n+\n+@_wraps(onp.fft.ifftshift)\n+def ifftshift(x, axes=None):\n+ x = np.asarray(x)\n+ if axes is None:\n+ axes = tuple(range(x.ndim))\n+ shift = [-(dim // 2) for dim in x.shape]\n+ elif isinstance(axes, int):\n+ shift = -(x.shape[axes] // 2)\n+ else:\n+ shift = [-(x.shape[ax] // 2) for ax in axes]\n+\n+ return np.roll(x, shift, axes)\n+\n+\nfor func in get_module_functions(onp.fft):\nif func.__name__ not in globals():\nglobals()[func.__name__] = _not_implemented(func)\n" }, { "change_type": "MODIFY", "old_path": "tests/fft_test.py", "new_path": "tests/fft_test.py", "diff": "@@ -350,5 +350,35 @@ class FftTest(jtu.JaxTestCase):\nlambda: func(n=10, d=n)\n)\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"dtype={}_axes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), axes),\n+ \"dtype\": dtype, \"shape\": shape, \"rng_factory\": rng_factory, \"axes\": axes}\n+ for rng_factory in [jtu.rand_default]\n+ for dtype in all_dtypes\n+ for shape in [[9], [10], [101], [102], [3, 5], [3, 17], [5, 7, 11]]\n+ for axes in _get_fftn_test_axes(shape)))\n+ def testFftshift(self, shape, dtype, rng_factory, axes):\n+ rng = rng_factory()\n+ args_maker = lambda: (rng(shape, dtype),)\n+ np_fn = lambda arg: np.fft.fftshift(arg, axes=axes)\n+ onp_fn = lambda arg: onp.fft.fftshift(arg, axes=axes)\n+ self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=True)\n+\n+ @parameterized.named_parameters(jtu.cases_from_list(\n+ {\"testcase_name\": \"dtype={}_axes={}\".format(\n+ jtu.format_shape_dtype_string(shape, dtype), axes),\n+ \"dtype\": dtype, \"shape\": shape, \"rng_factory\": rng_factory, \"axes\": axes}\n+ for rng_factory in [jtu.rand_default]\n+ for dtype in all_dtypes\n+ for shape in [[9], [10], [101], [102], [3, 5], [3, 17], [5, 7, 11]]\n+ for axes in _get_fftn_test_axes(shape)))\n+ def testIfftshift(self, shape, dtype, rng_factory, axes):\n+ rng = rng_factory()\n+ args_maker = lambda: (rng(shape, dtype),)\n+ np_fn = lambda arg: np.fft.ifftshift(arg, axes=axes)\n+ onp_fn = lambda arg: onp.fft.ifftshift(arg, axes=axes)\n+ self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=True)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
Add np.fft.fftshift/ifftshift (#1850)
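A short usage sketch of the two new functions, which follow NumPy's semantics: fftshift moves the zero-frequency component to the center and ifftshift undoes it:

```python
import jax.numpy as np

freqs = np.fft.fftfreq(8)
print(np.fft.fftshift(freqs))                    # zero frequency at the center
print(np.fft.ifftshift(np.fft.fftshift(freqs)))  # round-trips to the original

x = np.arange(12).reshape(3, 4)
print(np.fft.fftshift(x, axes=1))                # shift a single axis only
```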
260,335
07.02.2020 14:25:03
28,800
5c9438864e64c8b02b0e13fce9759d8a8ed3d488
fix cond batching bug reading axis size
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -454,11 +454,12 @@ def _cond_pred_bcast_select(pred, x, y):\ndef _cond_batching_rule(args, dims, true_jaxpr, false_jaxpr, linear):\n# TODO: maybe avoid moving arg axes to front if we're promoting to select?\n+ size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}\nargs = [batching.moveaxis(x, d, 0) if d is not batching.not_mapped and d != 0\nelse x for x, d in zip(args, dims)]\n- (pred,), true_ops, false_ops = split_list(args, [1, len(true_jaxpr.in_avals)])\n- size, = {x.shape[d] for x, d in zip(args, dims) if d is not batching.not_mapped}\norig_bat = [d is not batching.not_mapped for d in dims]\n+ del dims\n+ (pred,), true_ops, false_ops = split_list(args, [1, len(true_jaxpr.in_avals)])\n(pred_bat,), t_bat, f_bat = split_list(orig_bat, [1, len(true_jaxpr.in_avals)])\n_, true_out_bat = batching.batch_jaxpr(true_jaxpr, size, t_bat, False)\n" } ]
Python
Apache License 2.0
google/jax
fix cond batching bug reading axis size (#2193)
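The fix reads the batch size from the batched arguments before any axes are moved. A sketch of the vmap-over-cond pattern the rule serves, with an illustrative pair of branches:

```python
import jax
import jax.numpy as np
from jax import lax

def f(x):
  return lax.cond(x > 0, x, lambda x: x ** 2, x, lambda x: x ** 3)

xs = np.linspace(-1., 1., 5)
print(jax.vmap(f)(xs))  # per-element predicates lower to a batched select
```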
260,299
10.02.2020 18:23:19
0
c999a482b09712879555522c40db6369a29ecd55
Test for PRNG consistency across JAX versions
[ { "change_type": "MODIFY", "old_path": "tests/random_test.py", "new_path": "tests/random_test.py", "diff": "@@ -519,6 +519,39 @@ class LaxRandomTest(jtu.JaxTestCase):\nfinally:\nxla.apply_primitive = apply_primitive\n+ def testPRNGValues(self):\n+ # Test to ensure consistent random values between JAX versions\n+ k = random.PRNGKey(0)\n+\n+ randints = random.randint(k, (3, 3), 0, 8)\n+ if FLAGS.jax_enable_x64:\n+ self.assertAllClose(\n+ random.randint(k, (3, 3), 0, 8),\n+ onp.array([[7, 2, 6],\n+ [2, 1, 0],\n+ [6, 7, 7]], dtype='int64'),\n+ check_dtypes=True)\n+ else:\n+ self.assertAllClose(\n+ random.randint(k, (3, 3), 0, 8),\n+ onp.array([[2, 1, 3],\n+ [6, 1, 5],\n+ [6, 3, 4]], dtype='int32'),\n+ check_dtypes=True)\n+\n+ self.assertAllClose(\n+ random.split(k, 4),\n+ onp.array([[2285895361, 1501764800],\n+ [1518642379, 4090693311],\n+ [ 433833334, 4221794875],\n+ [ 839183663, 3740430601]], dtype='uint32'),\n+ check_dtypes=True)\n+\n+ self.assertAllClose(\n+ random.fold_in(k, 4),\n+ onp.array([2285895361, 433833334], dtype='uint32'),\n+ check_dtypes=True)\n+\nif __name__ == \"__main__\":\nabsltest.main()\n" } ]
Python
Apache License 2.0
google/jax
Test for PRNG consistency across JAX versions
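The test pins down exact draws so future releases cannot silently change the PRNG stream. The reproducibility it relies on looks like this in use (exact integer draws depend on x64 mode, as the test distinguishes):

```python
from jax import random

k = random.PRNGKey(0)
print(random.split(k, 4))              # deterministic subkeys for a given key
print(random.fold_in(k, 4))            # folding in data is reproducible too
print(random.randint(k, (3, 3), 0, 8))
```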
260,335
11.02.2020 07:21:17
28,800
9e6fe64a66a4fac78ef9c8e57bb0818e4af6b619
bump version and update changelog for pypi
[ { "change_type": "MODIFY", "old_path": "CHANGELOG.md", "new_path": "CHANGELOG.md", "diff": "These are the release notes for JAX.\n-## jax 0.1.59 (unreleased)\n+## jax 0.1.60 (unreleased)\n+\n+## jax 0.1.59 (February 11, 2020)\n### Breaking changes\n" }, { "change_type": "MODIFY", "old_path": "jax/version.py", "new_path": "jax/version.py", "diff": "# See the License for the specific language governing permissions and\n# limitations under the License.\n-__version__ = \"0.1.58\"\n+__version__ = \"0.1.59\"\n" } ]
Python
Apache License 2.0
google/jax
bump version and update changelog for pypi
260,335
11.02.2020 15:56:53
28,800
7ca43f0ea96a3e2253b346bad8b2c1764290bd50
more nonlinear evaluation in backward_pass.py * more nonlinear evaluation in backward_pass.py fixes * add tests, fix by not raising error eagerly
[ { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -183,21 +183,12 @@ def backward_pass(jaxpr: core.Jaxpr, consts, args, cotangents_in):\nelse:\nwrite_primal(eqn.outvars[0], ans)\nelse:\n- call_jaxpr = eqn.params[\"call_jaxpr\"]\n+ call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)\nif any(is_linear(v) for v in eqn.invars):\nlinear_eqns.append(eqn)\n- elif eqn.primitive is not pe.remat_call_p:\n- ans = _eval_subjaxpr_primals(\n- eqn.primitive, call_jaxpr,\n- map(read_primal, eqn.invars), eqn.params)\n- map(write_primal, eqn.outvars, ans)\n-\n- # we special-case remat_call here because it can be mixed linear /\n- # nonlinear, so we always evaluate it even if it has a linear part\n- if eqn.primitive is pe.remat_call_p:\n- ans = _eval_subjaxpr_primals(\n- eqn.primitive, call_jaxpr,\n- map(read_primal, eqn.invars), eqn.params)\n+ if any(not is_linear(v) for v in eqn.invars):\n+ ans = _eval_subjaxpr_primals(eqn.primitive, call_jaxpr,\n+ map(read_primal, eqn.invars), params)\nmap(write_primal, eqn.outvars, ans)\nct_env = {}\n@@ -260,12 +251,10 @@ def _eval_primals(jaxpr, args):\nelse:\nwrite_primal(eqn.outvars[0], ans)\nelse:\n- call_jaxpr = eqn.params[\"call_jaxpr\"]\n- if (eqn.primitive is pe.remat_call_p or\n- not any(is_linear(v) for v in eqn.invars)):\n- ans = _eval_subjaxpr_primals(\n- eqn.primitive, call_jaxpr,\n- map(read_primal, eqn.invars), eqn.params)\n+ call_jaxpr, params = core.extract_call_jaxpr(eqn.primitive, eqn.params)\n+ if any(not is_linear(v) for v in eqn.invars):\n+ ans = _eval_subjaxpr_primals(eqn.primitive, call_jaxpr,\n+ map(read_primal, eqn.invars), params)\nmap(write_primal, eqn.outvars, ans)\nreturn map(read_primal, jaxpr.outvars)\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -1006,11 +1006,11 @@ def _scan_transpose(cts, *args, forward, length, num_consts, num_carry, jaxpr, l\nif xs_lin != [True] * (len(xs_lin) - num_eres) + [False] * num_eres:\nraise NotImplementedError\nif not all(init_lin):\n- raise NotImplementedError\n+ pass # TODO(mattjj): error check https://github.com/google/jax/issues/1963\n- consts, init, xs = split_list(args, [num_consts, num_carry])\n- ires, consts = split_list(consts, [num_ires])\n- xs, eres = split_list(xs, [sum(xs_lin)])\n+ consts, _, xs = split_list(args, [num_consts, num_carry])\n+ ires, _ = split_list(consts, [num_ires])\n+ _, eres = split_list(xs, [sum(xs_lin)])\nassert not any(r is ad.undefined_primal for r in ires)\nassert not any(r is ad.undefined_primal for r in eres)\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1660,6 +1660,45 @@ class APITest(jtu.JaxTestCase):\nu0 = np.ones_like(target)\nloss(u0, target, 10) # doesn't crash\n+ def test_remat_jit3(self):\n+ # https://github.com/google/jax/issues/2180\n+ def f(w, x):\n+ a = np.dot(x, w)\n+ b = np.einsum(\"btd,bTd->btT\", a, a)\n+ c = np.einsum(\"btT,btd->btd\", b, a)\n+ return np.sum(c)\n+\n+ w = np.ones([1, 1])\n+ x = np.ones([1, 1, 1])\n+ f = api.remat(f)\n+ api.grad(f)(w, x) # doesn't crash\n+\n+ @api.jit\n+ def mul(a, b):\n+ return a * b\n+\n+ def f(w, x):\n+ a = mul(w, x)\n+ b = mul(a, a)\n+ return b\n+\n+ w = 1.\n+ x = 1.\n+ f = api.remat(f)\n+ api.grad(f)(w, x) # doesn't crash\n+\n+ def test_remat_scan2(self):\n+ # https://github.com/google/jax/issues/1963\n+\n+ def scan_bug(x0):\n+ f = lambda x, _: (x + 1, None)\n+ def scanned_f(x, 
_):\n+ return lax.scan(f, x, xs=None, length=1)[0], None\n+ x, _ = jax.remat(scanned_f)(x0, None)\n+ return x\n+\n+ jax.grad(scan_bug)(1.0) # doesn't crash\n+\ndef test_trivial_computations(self):\nx = np.array([1, 2, 3])\ny = api.jit(lambda x: x)(x)\n" } ]
Python
Apache License 2.0
google/jax
more nonlinear evaluation in backward_pass.py (#2214) * more nonlinear evaluation in backward_pass.py fixes #2180 * add tests, fix #1963 by not raising error eagerly
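The first regression test in the diff, reproduced here as a standalone sketch: a rematerialized function whose intermediate is consumed both linearly and nonlinearly, which the backward pass previously mishandled:

```python
import jax
import jax.numpy as np

def f(w, x):
  a = np.dot(x, w)
  b = np.einsum("btd,bTd->btT", a, a)   # a is used nonlinearly here
  c = np.einsum("btT,btd->btd", b, a)   # and again here
  return np.sum(c)

w = np.ones([1, 1])
x = np.ones([1, 1, 1])
print(jax.grad(jax.remat(f))(w, x))  # previously failed during transposition
```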
260,272
17.02.2020 14:28:56
-3,600
3a0690fa11d303e988ddb8cc05bfcb8ea0cf0e04
Correct sign mistake in complex autodiff docs.
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/autodiff_cookbook.ipynb", "new_path": "docs/notebooks/autodiff_cookbook.ipynb", "diff": "\"source\": [\n\"def grad_f(z):\\n\",\n\" x, y = real(z), imag(z)\\n\",\n- \" return grad(u, 0)(x, y) + grad(u, 1)(x, y) * 1j\"\n+ \" return grad(u, 0)(x, y) - grad(u, 1)(x, y) * 1j\"\n]\n},\n{\n\"id\": \"4j0F28bB8bgK\"\n},\n\"source\": [\n- \"In math symbols, that means we define $\\\\partial f(z) \\\\triangleq \\\\partial_0 u(x, y) + \\\\partial_1 u(x, y)$. So we throw out $v$, ignoring the complex component function of $f$ entirely!\"\n+ \"In math symbols, that means we define $\\\\partial f(z) \\\\triangleq \\\\partial_0 u(x, y) - \\\\partial_1 u(x, y) i$. So we throw out $v$, ignoring the complex component function of $f$ entirely!\"\n]\n},\n{\n" } ]
Python
Apache License 2.0
google/jax
Correct sign mistake in complex autodiff docs.
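The corrected convention defines the gradient as d0_u(x, y) - d1_u(x, y) * i for f = u + iv with real-valued u. A small numeric check, assuming jax.grad implements exactly the convention the notebook describes (the particular u below is illustrative, not from the patch):

```python
import jax
import jax.numpy as np

# u(x, y) = x**2 + 3*y for z = x + i*y, with v identically zero
f = lambda z: np.real(z) ** 2 + 3. * np.imag(z)

print(jax.grad(f)(2. + 1.j))  # expect (4 - 3j): d0_u = 2x = 4, d1_u = 3
```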
260,411
17.02.2020 16:01:10
-3,600
fcd949b695018bede6bc2c3c16f3947c3e326760
Added blank line to autodiff cookbook to trigger an enumeration
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/autodiff_cookbook.ipynb", "new_path": "docs/notebooks/autodiff_cookbook.ipynb", "diff": "},\n\"source\": [\n\"This convention covers three important cases:\\n\",\n+ \"\\n\",\n\"1. If `f` evaluates a holomorphic function, then we get the usual complex derivative, since $\\\\partial_0 u = \\\\partial_1 v$ and $\\\\partial_1 u = - \\\\partial_0 v$.\\n\",\n\"2. If `f` is evaluates the real-valued loss function of a complex parameter `x`, then we get a result that we can use in gradient-based optimization by taking steps in the direction of the conjugate of `grad(f)(x)`.\\n\",\n\"3. If `f` evaluates a real-to-real function, but its implementation uses complex primitives internally (some of which must be non-holomorphic, e.g. FFTs used in convolutions) then we get the same result that an implementation that only used real primitives would have given.\\n\",\n" } ]
Python
Apache License 2.0
google/jax
Added blank line to autodiff cookbook to trigger an enumeration
260,335
18.02.2020 12:39:03
28,800
ae1214de74e9ec42da8ff813dab8577c6bd9231d
add np.copy method to abstract arrays * add np.copy method to abstract arrays fixes * make device_get use onp.asarray, not .copy()
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -1383,11 +1383,10 @@ def device_put(x, device=None):\nreturn tree_map(lambda y: xla.device_put_p.bind(y, device=device), x)\n-# TODO(mattjj): consider revising\ndef _device_get(x):\nif isinstance(x, core.Tracer):\nreturn x\n- return x.copy()\n+ return onp.asarray(x)\ndef device_get(x):\nfor y in tree_leaves(x):\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1816,6 +1816,10 @@ def asarray(a, dtype=None, order=None):\nlax._check_user_dtype_supported(dtype, \"asarray\")\nreturn array(a, dtype=dtype, copy=False, order=order)\n+@_wraps(onp.copy)\n+def copy(a, order='K'):\n+ return array(a, copy=True, order=order)\n+\n@_wraps(onp.zeros_like)\ndef zeros_like(x, dtype=None):\n@@ -3436,7 +3440,8 @@ _nondiff_methods = [\"all\", \"any\", \"argmax\", \"argmin\", \"argpartition\", \"argsort\",\n_diff_methods = [\"clip\", \"compress\", \"conj\", \"conjugate\", \"cumprod\", \"cumsum\",\n\"diagonal\", \"dot\", \"max\", \"mean\", \"min\", \"prod\", \"ptp\",\n\"ravel\", \"repeat\", \"sort\", \"squeeze\", \"std\", \"sum\",\n- \"swapaxes\", \"take\", \"tile\", \"trace\", \"transpose\", \"var\"]\n+ \"swapaxes\", \"take\", \"tile\", \"trace\", \"transpose\", \"var\",\n+ \"copy\"]\n# Set up operator, method, and property forwarding on Tracer instances containing\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1826,6 +1826,9 @@ class APITest(jtu.JaxTestCase):\nre.DOTALL)):\napi.jit(func1)(2.)\n+ def test_array_tracer_copy(self):\n+ api.value_and_grad(lambda x: x.copy().sum())(np.ones(2)) # doesn't crash\n+\nclass JaxprTest(jtu.JaxTestCase):\n" } ]
Python
Apache License 2.0
google/jax
add np.copy method to abstract arrays (#2257) * add np.copy method to abstract arrays fixes #2248 * make device_get use onp.asarray, not .copy()
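With this change, .copy() on tracers lets NumPy-style defensive copies flow through transformations, and device_get returns a host numpy array via onp.asarray. A sketch of both behaviors:

```python
import jax
import jax.numpy as np

print(jax.value_and_grad(lambda x: x.copy().sum())(np.ones(2)))  # (2.0, [1. 1.])
print(type(jax.device_get(np.arange(3))))  # <class 'numpy.ndarray'>
```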
260,411
19.02.2020 16:03:10
-3,600
08eb0ee030cf4fe41eb719dffccd835a25e2e8a9
Disable newly added test on TPU (no float16) Added in
[ { "change_type": "MODIFY", "old_path": "tests/nn_test.py", "new_path": "tests/nn_test.py", "diff": "@@ -55,6 +55,8 @@ class NNFunctionsTest(jtu.JaxTestCase):\n(np.float32, np.bfloat16, np.float16),\n(nn.gelu, nn.relu, nn.softplus, nn.sigmoid)))\ndef testDtypeMatchesInput(self, dtype, fn):\n+ if dtype is np.float16 and jtu.device_under_test() == \"tpu\":\n+ self.skipTest(\"float16 not supported on TPU\")\nx = np.zeros((), dtype=dtype)\nout = fn(x)\nself.assertEqual(out.dtype, dtype)\n" } ]
Python
Apache License 2.0
google/jax
Disable newly added test on TPU (no float16) (#2262) Added in #2259
260,335
19.02.2020 21:57:04
28,800
ab327acab0303c1b6dd47c25515125ccb216a1f3
fix unit handling in vmap of cond, fixes
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -449,6 +449,9 @@ def _cond_translation_rule(c, axis_env, name_stack, pred, *args,\nreturn c.Conditional(pred, true_op, true_c, false_op, false_c)\ndef _cond_pred_bcast_select(pred, x, y):\n+ if core.get_aval(x) is core.get_aval(y) is core.abstract_unit:\n+ return x\n+ else:\nbcast_pred = lax.broadcast_in_dim(pred, onp.shape(x), list(range(onp.ndim(pred))))\nreturn lax.select(bcast_pred, x, y)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_control_flow_test.py", "new_path": "tests/lax_control_flow_test.py", "diff": "@@ -820,6 +820,19 @@ class LaxControlFlowTest(jtu.JaxTestCase):\nexpected = f(4.)\nself.assertAllClose(y, expected, check_dtypes=False)\n+ def testCondVmapGrad(self):\n+ # https://github.com/google/jax/issues/2264\n+ def f_1(x): return x ** 2\n+ def f_2(x): return x ** 3\n+\n+ def f(x): return lax.cond(x > 0, x, f_1, x, f_2)\n+ def g(x): return np.where(x > 0, f_1(x), f_2(x))\n+\n+ x = np.linspace(-1, 1, 20)\n+ ans = api.vmap(api.grad(f))(x)\n+ expected = api.vmap(api.grad(g))(x)\n+ self.assertAllClose(ans, expected, check_dtypes=False)\n+\ndef testIssue1263(self):\ndef f(rng, x):\ncond = random.bernoulli(rng)\n" } ]
Python
Apache License 2.0
google/jax
fix unit handling in vmap of cond, fixes #2264 (#2268)
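A condensed version of the testCondVmapGrad regression test above, composing vmap over grad of a cond; partial evaluation of the branches produces the unit values that used to crash the batched select:

```python
import jax
import jax.numpy as np
from jax import lax

f = lambda x: lax.cond(x > 0, x, lambda x: x ** 2, x, lambda x: x ** 3)
x = np.linspace(-1., 1., 20)
print(jax.vmap(jax.grad(f))(x))  # matches np.where(x > 0, 2 * x, 3 * x ** 2)
```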
260,411
20.02.2020 09:41:08
-3,600
4978e3c285bbf052364b499c6ca9386ecae47629
Disable linalg_test:testMatrix power on TPU Due to internal test failures (b/149870255)
[ { "change_type": "MODIFY", "old_path": "tests/linalg_test.py", "new_path": "tests/linalg_test.py", "diff": "@@ -721,6 +721,7 @@ class NumpyLinalgTest(jtu.JaxTestCase):\nfor dtype in float_types + complex_types\nfor n in [-5, -2, -1, 0, 1, 2, 3, 4, 5, 10]\nfor rng_factory in [jtu.rand_default]))\n+ @jtu.skip_on_devices(\"tpu\") # TODO(b/149870255): Bug in XLA:TPU?.\ndef testMatrixPower(self, shape, dtype, n, rng_factory):\nrng = rng_factory()\n_skip_if_unsupported_type(dtype)\n" } ]
Python
Apache License 2.0
google/jax
Disable linalg_test:testMatrix power on TPU (#2269) Due to internal test failures (b/149870255)
260,411
23.02.2020 19:18:06
-3,600
89514f9278fb3705ca5ecab2fb4656a285ad2da4
Moved CHANGELOG to docs * Moved CHANGELOG to docs This puts the documentation also on RTD, with TOC. Also changed its format to .rst, for consistency. Added GitHub links to the change log. * Actually add the CHANGELOG.rst * Added reminder comments to the CHANGELOG.rst
[ { "change_type": "MODIFY", "old_path": "CHANGELOG.md", "new_path": "CHANGELOG.md", "diff": "-# Changelog\n+# Change Log\n-These are the release notes for JAX.\n-\n-## jax 0.1.60 (unreleased)\n-\n-### New features\n-\n-* `pmap` has `static_broadcast_argnums` argument which allows the user to\n- specify arguments that should be treated as compile-time constants and\n- should be broadcasted to all devices. It works analogously to\n- `static_argnums` in `jit`.\n-\n-## jax 0.1.59 (February 11, 2020)\n-\n-### Breaking changes\n-\n-* The minimum jaxlib version is now 0.1.38.\n-* Simplified `Jaxpr` by removing the `Jaxpr.freevars` and\n- `Jaxpr.bound_subjaxprs`. The call primitives (`xla_call`, `xla_pmap`,\n- `sharded_call`, and `remat_call`) get a new parameter `call_jaxpr` with a\n- fully-closed (no `constvars`) JAXPR.\n-\n-### New features\n-\n-* Reverse-mode automatic differentiation (e.g. `grad`) of `lax.cond`, making it\n- now differentiable in both modes (https://github.com/google/jax/pull/2091)\n-* JAX now supports DLPack, which allows sharing CPU and GPU arrays in a\n- zero-copy way with other libraries, such as PyTorch.\n-* JAX GPU DeviceArrays now support `__cuda_array_interface__`, which is another\n- zero-copy protocol for sharing GPU arrays with other libraries such as CuPy\n- and Numba.\n-* JAX CPU device buffers now implement the Python buffer protocol, which allows\n- zero-copy buffer sharing between JAX and NumPy.\n-* Added JAX_SKIP_SLOW_TESTS environment variable to skip tests known as slow.\n-\n-## jaxlib 0.1.38 (January 29, 2020)\n-\n-* CUDA 9.0 is no longer supported.\n-* CUDA 10.2 wheels are now built by default.\n-\n-## jax 0.1.58 (January 28, 2020)\n-\n-### Breaking changes\n-\n-* JAX has dropped Python 2 support, because Python 2 reached its end of life on\n- January 1, 2020. Please update to Python 3.5 or newer.\n-\n-### New features\n-\n-* Forward-mode automatic differentiation (`jvp`) of while loop\n- (https://github.com/google/jax/pull/1980)\n-* New NumPy and SciPy functions:\n- * `jax.numpy.fft.fft2`\n- * `jax.numpy.fft.ifft2`\n- * `jax.numpy.fft.rfft`\n- * `jax.numpy.fft.irfft`\n- * `jax.numpy.fft.rfft2`\n- * `jax.numpy.fft.irfft2`\n- * `jax.numpy.fft.rfftn`\n- * `jax.numpy.fft.irfftn`\n- * `jax.numpy.fft.fftfreq`\n- * `jax.numpy.fft.rfftfreq`\n- * `jax.numpy.linalg.matrix_rank`\n- * `jax.numpy.linalg.matrix_power`\n- * `jax.scipy.special.betainc`\n-* Batched Cholesky decomposition on GPU now uses a more efficient batched\n- kernel.\n-\n-\n-### Notable bug fixes\n-\n-* With the Python 3 upgrade, JAX no longer depends on `fastcache`, which should\n- help with installation.\n+See [docs/CHANGELOG.rst](https://jax.readthedocs.io/en/latest/CHANGELOG.html).\n\\ No newline at end of file\n" }, { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "[**Quickstart**](#quickstart-colab-in-the-cloud)\n| [**Transformations**](#transformations)\n| [**Install guide**](#installation)\n+| [**Change logs**](https://jax.readthedocs.io/en/latest/CHANGELOG.html)\n| [**Reference docs**](https://jax.readthedocs.io/en/latest/)\n-**Announcement:** JAX 0.1.58 has dropped Python 2 support, and requires Python 3.5 or newer. See [CHANGELOG.md](https://github.com/google/jax/blob/master/CHANGELOG.md).\n+**Announcement:** JAX 0.1.58 has dropped Python 2 support, and requires Python 3.5 or newer. 
See [docs/CHANGELOG.rst](https://jax.readthedocs.io/en/latest/CHANGELOG.html).\n## What is JAX?\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/CHANGELOG.rst", "diff": "+Change Log\n+==========\n+\n+.. This is a comment.\n+ Remember to leave an empty line before the start of an itemized list,\n+ and to align the itemized text with the first line of an item.\n+\n+These are the release notes for JAX.\n+\n+jax 0.1.60 (unreleased)\n+-----------------------\n+\n+.. PLEASE REMEMBER TO CHANGE THE '..master' WITH AN ACTUAL TAG in GITHUB LINK.\n+\n+* `GitHub commits <https://github.com/google/jax/compare/jax-v0.1.59...master>`_.\n+* New features:\n+\n+ * :py:func:`jax.pmap` has ``static_broadcast_argnums`` argument which allows the user to\n+ specify arguments that should be treated as compile-time constants and\n+ should be broadcasted to all devices. It works analogously to\n+ ``static_argnums`` in :py:func:`jax.jit`.\n+ * Improved error messages for when tracers are mistakenly saved in global state.\n+ * Added :py:func:`jax.nn.one_hot` utility function.\n+\n+jax 0.1.59 (February 11, 2020)\n+------------------------------\n+\n+* `GitHub commits <https://github.com/google/jax/compare/jax-v0.1.58...jax-v0.1.59>`_.\n+* Breaking changes\n+\n+ * The minimum jaxlib version is now 0.1.38.\n+ * Simplified :py:class:`Jaxpr` by removing the ``Jaxpr.freevars`` and\n+ ``Jaxpr.bound_subjaxprs``. The call primitives (``xla_call``, ``xla_pmap``,\n+ ``sharded_call``, and ``remat_call``) get a new parameter ``call_jaxpr`` with a\n+ fully-closed (no ``constvars``) JAXPR. Also, added a new field ``call_primitive``\n+ to primitives.\n+* New features:\n+\n+ * Reverse-mode automatic differentiation (e.g. ``grad``) of ``lax.cond``, making it\n+ now differentiable in both modes (https://github.com/google/jax/pull/2091)\n+ * JAX now supports DLPack, which allows sharing CPU and GPU arrays in a\n+ zero-copy way with other libraries, such as PyTorch.\n+ * JAX GPU DeviceArrays now support ``__cuda_array_interface__``, which is another\n+ zero-copy protocol for sharing GPU arrays with other libraries such as CuPy\n+ and Numba.\n+ * JAX CPU device buffers now implement the Python buffer protocol, which allows\n+ zero-copy buffer sharing between JAX and NumPy.\n+ * Added JAX_SKIP_SLOW_TESTS environment variable to skip tests known as slow.\n+\n+jaxlib 0.1.38 (January 29, 2020)\n+--------------------------------\n+\n+* CUDA 9.0 is no longer supported.\n+* CUDA 10.2 wheels are now built by default.\n+\n+jax 0.1.58 (January 28, 2020)\n+-----------------------------\n+\n+* `GitHub commits <https://github.com/google/jax/compare/46014da21...jax-v0.1.58>`_.\n+* Breaking changes\n+\n+ * JAX has dropped Python 2 support, because Python 2 reached its end of life on\n+ January 1, 2020. 
Please update to Python 3.5 or newer.\n+* New features\n+\n+ * Forward-mode automatic differentiation (`jvp`) of while loop\n+ (https://github.com/google/jax/pull/1980)\n+ * New NumPy and SciPy functions:\n+\n+ * :py:func:`jax.numpy.fft.fft2`\n+ * :py:func:`jax.numpy.fft.ifft2`\n+ * :py:func:`jax.numpy.fft.rfft`\n+ * :py:func:`jax.numpy.fft.irfft`\n+ * :py:func:`jax.numpy.fft.rfft2`\n+ * :py:func:`jax.numpy.fft.irfft2`\n+ * :py:func:`jax.numpy.fft.rfftn`\n+ * :py:func:`jax.numpy.fft.irfftn`\n+ * :py:func:`jax.numpy.fft.fftfreq`\n+ * :py:func:`jax.numpy.fft.rfftfreq`\n+ * :py:func:`jax.numpy.linalg.matrix_rank`\n+ * :py:func:`jax.numpy.linalg.matrix_power`\n+ * :py:func:`jax.scipy.special.betainc`\n+ * Batched Cholesky decomposition on GPU now uses a more efficient batched\n+ kernel.\n+\n+\n+Notable bug fixes\n+^^^^^^^^^^^^^^^^^\n+\n+* With the Python 3 upgrade, JAX no longer depends on ``fastcache``, which should\n+ help with installation.\n" }, { "change_type": "MODIFY", "old_path": "docs/index.rst", "new_path": "docs/index.rst", "diff": "@@ -34,6 +34,7 @@ For an introduction to JAX, start at the\n:maxdepth: 1\n:caption: Notes\n+ CHANGELOG\njaxpr\nasync_dispatch\nconcurrency\n" } ]
Python
Apache License 2.0
google/jax
Moved CHANGELOG to docs (#2252) * Moved CHANGELOG to docs This puts the documentation also on RTD, with TOC. Also changed its format to .rst, for consistency. Added GitHub links to the change log. * Actually add the CHANGELOG.rst * Added reminder comments to the CHANGELOG.rst
260,280
24.02.2020 06:04:02
-32,400
f6e1d01f94936c992d9e63810eae5db69c06a026
JIT differentiate -> JIT compile
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/How_JAX_primitives_work.ipynb", "new_path": "docs/notebooks/How_JAX_primitives_work.ipynb", "diff": "\"colab_type\": \"text\"\n},\n\"source\": [\n- \"Below is another use of `jit` where we differentiate only\\n\",\n+ \"Below is another use of `jit` where we compile only\\n\",\n\"with respect to the first argument. Notice how the second argument to `square_add_prim` is concrete, which leads\\n\",\n\"in the third argument to `multiply_add_abstract_eval` being \\n\",\n\"`ConcreteArray`. We see that `multiply_add_abstract_eval` may be used with\\n\",\n" } ]
Python
Apache License 2.0
google/jax
JIT differentiate -> JIT compile (#2279)
260,411
26.02.2020 21:30:21
-3,600
b21e7530f329ccc6ba6c73bd85f5d03dbec9be8d
Added the optix.py documentation to RTD Issue:
[ { "change_type": "MODIFY", "old_path": "docs/developer.rst", "new_path": "docs/developer.rst", "diff": "@@ -140,8 +140,9 @@ To rebuild the documentation, install several packages:\npip install -r docs/requirements.txt\nYou must also install ``pandoc`` in order to regenerate the notebooks.\n-See `Install Pandoc <https://pandoc.org/installing.html>`_. On Mac, I had success with\n-the miniconda installer, then ``conda install -c conda-forge pandoc``.\n+See `Install Pandoc<https://pandoc.org/installing.html>`_,\n+or using `Miniconda<https://docs.conda.io/en/latest/miniconda.html>`_ which\n+I have used successfully on the Mac: ``conda install -c conda-forge pandoc``.\nIf you do not want to install ``pandoc`` then you should regenerate the documentation\nwithout the notebooks.\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/jax.experimental.optix.rst", "diff": "+jax.experimental.optix module\n+==============================\n+\n+.. automodule:: jax.experimental.optix\n+ :members:\n+ :undoc-members:\n+ :show-inheritance:\n" }, { "change_type": "MODIFY", "old_path": "docs/jax.experimental.rst", "new_path": "docs/jax.experimental.rst", "diff": "@@ -6,6 +6,7 @@ jax.experimental package\njax.experimental.loops\njax.experimental.optimizers\n+ jax.experimental.optix\njax.experimental.stax\n.. automodule:: jax.experimental\n" }, { "change_type": "MODIFY", "old_path": "jax/experimental/optix.py", "new_path": "jax/experimental/optix.py", "diff": "\"\"\"A composable gradient processing and optimization library for JAX.\n-The `optix` module implements a number of composable gradient transformations,\n+The ``optix`` module implements a number of composable gradient transformations,\ntypically used in the context of optimizing neural nets.\nEach transformation defines:\n-* an `init_fn`, to initialize a (possibly empty) set of statistics, or `state`.\n-* an `update_fn` to transform an input gradient and update the state.\n+* an ``init_fn``, to initialize a (possibly empty) set of statistics, or ``state``.\n+* an ``update_fn`` to transform an input gradient and update the state.\n-An (optional) `chain` utility can be used to build custom optimizers by\n+An (optional) ``chain`` utility can be used to build custom optimizers by\nchaining arbitrary sequences of transformations. For any sequence of\n-transformations `chain` returns a single `init_fn` and `update_fn`.\n+transformations ``chain`` returns a single ``init_fn`` and ``update_fn``.\n-An (optional) `apply_updates` function can be used to eventually apply the\n+An (optional) ``apply_updates`` function can be used to eventually apply the\ntransformed gradients to the set of parameters of interest.\nSeparating gradient transformations from the parameter update allows to flexibly\n@@ -34,7 +34,7 @@ chain a sequence of transformations of the same gradients, as well as combine\nmultiple updates to the same parameters (e.g. in multi-task settings where the\ndifferent tasks may benefit from different sets of gradient transformations).\n-Many popular optimizers can be implemented using `optix` as one-liners, and,\n+Many popular optimizers can be implemented using ``optix`` as one-liners, and,\nfor convenience, we provide aliases for some of the most popular ones.\n\"\"\"\n" } ]
Python
Apache License 2.0
google/jax
Added the optix.py documentation to RTD (#2312) Issue: #2297
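A minimal sketch of composing optix transformations as the docstring describes. The namedtuple returned by chain is unpacked positionally since its field names are not shown in the diff, and clip_by_global_norm is assumed to be available in this version:

```python
import jax.numpy as jnp
from jax.experimental import optix

params = {'w': jnp.ones(3)}
grads = {'w': jnp.full((3,), 5.0)}

init_fn, update_fn = optix.chain(optix.clip_by_global_norm(1.0),
                                 optix.scale(-0.1))
state = init_fn(params)
updates, state = update_fn(grads, state)
new_params = optix.apply_updates(params, updates)
```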
260,673
27.02.2020 10:20:02
18,000
a473e5b6bb3be5aa032d30ed023abb6b121af0e0
Reference deduped precision issue.
[ { "change_type": "MODIFY", "old_path": "cloud_tpu_colabs/README.md", "new_path": "cloud_tpu_colabs/README.md", "diff": "@@ -46,7 +46,7 @@ By default\\*, matrix multiplication in JAX on TPUs [uses bfloat16](https://cloud\nJAX also adds the `bfloat16` dtype, which you can use to explicitly cast arrays to bfloat16, e.g., `jax.numpy.array(x, dtype=jax.numpy.bfloat16)`.\n-\\* We might change the default precision in the future, since it is arguably surprising. Please comment/vote on [this issue](https://github.com/google/jax/issues/1856) if it affects you!\n+\\* We might change the default precision in the future, since it is arguably surprising. Please comment/vote on [this issue](https://github.com/google/jax/issues/2161) if it affects you!\n## Running JAX on a Cloud TPU from a GCE VM\n" } ]
Python
Apache License 2.0
google/jax
Reference deduped precision issue. (#2319)
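The README text being edited points at the explicit bfloat16 cast; a sketch of that cast, plus the per-operation precision option assumed to exist in this era for keeping full precision on TPU:

```python
import jax.numpy as np
from jax import lax

x = np.ones((4, 4))
xb = np.array(x, dtype=np.bfloat16)   # explicit cast described in the README
print((xb @ xb).dtype)                # bfloat16

y = lax.dot(x, x, precision=lax.Precision.HIGHEST)  # opt out per operation
```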
260,297
27.02.2020 14:43:55
18,000
ce9b03866cf04a378d6cc44a5ce5f99700f0148c
Remove check comparing shift/axis and input dimensions in np.roll
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -2678,8 +2678,6 @@ def roll(a, shift, axis=None):\nif len(b_shape) != 1:\nmsg = \"'shift' and 'axis' arguments to roll must be scalars or 1D arrays\"\nraise ValueError(msg)\n- if b_shape[0] > a_ndim:\n- raise ValueError(\"More shifts/axes than dimensions of input to roll.\")\nfor x, i in zip(broadcast_to(shift, b_shape),\nonp.broadcast_to(axis, b_shape)):\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1802,7 +1802,9 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\n(1, 1),\n((3,), (0,)),\n((-2,), (-2,)),\n- ((1, 2), (0, -1))\n+ ((1, 2), (0, -1)),\n+ ((4, 2, 5, 5, 2, 4), None),\n+ (100, None),\n]\nfor rng_factory in [jtu.rand_default]))\ndef testRoll(self, shape, dtype, shifts, axis, rng_factory):\n" } ]
Python
Apache License 2.0
google/jax
Remove check comparing shift/axis and input dimensions in np.roll (#2327)
260,700
28.02.2020 12:05:38
18,000
0dfa9ef5e4eb77acf4b2156687b8cf673a2d25b2
fix vjp odeint
[ { "change_type": "MODIFY", "old_path": "jax/experimental/ode.py", "new_path": "jax/experimental/ode.py", "diff": "@@ -362,8 +362,8 @@ def vjp_odeint(ofunc, y0, t, *args, **kwargs):\ntime_vjp_list = jax.ops.index_update(result[-1], -1, result[-3])\nvjp_times = np.hstack(time_vjp_list)[::-1]\n-\n- return tuple([result[-4], vjp_times] + list(result[-2]))\n+ vjp_args = unravel_args(result[-2])\n+ return (result[-4], vjp_times, *vjp_args)\nprimals_out = odeint(flat_func, y0, t, flat_args, rtol=rtol, atol=atol, mxstep=mxstep)\nvjp_fun = lambda g: vjp_all(g, primals_out, t)\n" } ]
Python
Apache License 2.0
google/jax
fix vjp odeint (#2321)
260,692
29.02.2020 00:06:38
0
2d9caba3169c7280fd09cb26d97ff8ef82626a30
Address Issue 2330 * fix issue 2330 * Update lax_numpy_test.py * Update lax_numpy_test.py * Update lax_numpy_test.py Fixed error in naming convention jnp -> lnp; np -> onp
[ { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -1721,6 +1721,9 @@ def concatenate(arrays, axis=0):\n# tree of concatenations as a workaround especially for op-by-op mode.\n# (https://github.com/google/jax/issues/653).\nk = 16\n+ if len(arrays) == 1:\n+ return array(arrays[0])\n+ else:\nwhile len(arrays) > 1:\narrays = [lax.concatenate(arrays[i:i+k], axis)\nfor i in range(0, len(arrays), k)]\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1072,6 +1072,29 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nfor repeats in [2, [1,3,2], [2], lnp.array([1,3,2]), lnp.array([2])]:\ntest_single(m_rect, args_maker, repeats, axis=1)\n+ def testIssue2330(self):\n+ '''\n+ Make sure return value of jnp.concatenate is a jax.ndarray and is side-effect save\n+ '''\n+ def attempt_sideeffect(x):\n+ x = [x]\n+ x = lnp.concatenate(x)\n+ x -= 1.\n+ return x\n+\n+ onp_input = onp.ones((1))\n+ lnp_input = lnp.ones((1))\n+ expected_onp_input_after_call = onp.ones((1))\n+ expected_lnp_input_after_call = lnp.ones((1))\n+\n+ self.assertTrue(type(lnp.concatenate([onp_input])) is lnp.DeviceArray)\n+\n+ attempt_sideeffect(onp_input)\n+ attempt_sideeffect(lnp_input)\n+\n+ self.assertAllClose(onp_input, expected_onp_input_after_call, check_dtypes=True)\n+ self.assertAllClose(lnp_input, expected_lnp_input_after_call, check_dtypes=True)\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"op={}_shape=[{}]_axis={}_out_dtype={}\".format(\nop, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),\n" } ]
Python
Apache License 2.0
google/jax
Address Issue 2330 (#2331) * fix issue 2330 * Update lax_numpy_test.py * Update lax_numpy_test.py * Update lax_numpy_test.py Fixed error in naming convention jnp -> lnp; np -> onp
260,358
01.03.2020 02:34:33
0
e7debd732f1e8ea3fe76d0bd054da7ef59217544
Replace jnp.clip_by_value with jnp.clip jnp.clip_by_value does not exist
[ { "change_type": "MODIFY", "old_path": "jax/experimental/optix.py", "new_path": "jax/experimental/optix.py", "diff": "@@ -58,10 +58,10 @@ ClipState = collections.namedtuple(\"ClipState\", \"\")\ndef clip(max_delta):\n- \"\"\"Clip updates element-wise.\n+ \"\"\"Clip updates element-wise, to be between -max_delta and +max_delta.\nArgs:\n- max_delta: the maximum size of an update, for each variable\n+ max_delta: the maximum absolute value for each element in the update.\nReturns:\nAn (init_fn, update_fn) tuple.\n@@ -72,7 +72,7 @@ def clip(max_delta):\ndef update_fn(updates, state):\nupdates = tree_multimap(\n- lambda g: jnp.clip_by_value(g, -max_delta, max_delta), updates)\n+ lambda g: jnp.clip(g, -max_delta, max_delta), updates)\nreturn updates, state\nreturn InitUpdate(init_fn, update_fn)\n" } ]
Python
Apache License 2.0
google/jax
Replace jnp.clip_by_value with jnp.clip (#2339) jnp.clip_by_value does not exist
260,335
03.03.2020 16:27:53
28,800
1e61ba429d950629d2f04ca569e2621f8419d344
improve jax.nn.relu differentiation
[ { "change_type": "MODIFY", "old_path": "jax/nn/functions.py", "new_path": "jax/nn/functions.py", "diff": "import numpy as onp\nfrom jax import dtypes\n+from jax import custom_transforms, defjvp\nfrom jax import lax\nfrom jax import random\nfrom jax.scipy.special import expit\nimport jax.numpy as np\n-from jax import jarrett\n# activations\n+@custom_transforms\ndef relu(x):\nr\"\"\"Rectified linear unit activation function.\n@@ -35,6 +36,7 @@ def relu(x):\n\\mathrm{relu}(x) = \\max(x, 0)\n\"\"\"\nreturn np.maximum(x, 0)\n+defjvp(relu, lambda g, ans, x: lax.select(x > 0, g, lax.full_like(g, 0)))\ndef softplus(x):\nr\"\"\"Softplus activation function.\n" }, { "change_type": "MODIFY", "old_path": "tests/nn_test.py", "new_path": "tests/nn_test.py", "diff": "@@ -36,16 +36,23 @@ class NNFunctionsTest(jtu.JaxTestCase):\n@jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\ndef testSoftplusGrad(self):\n- check_grads(nn.softplus, (1e-8,), 4,\n+ check_grads(nn.softplus, (1e-8,), order=4,\nrtol=1e-2 if jtu.device_under_test() == \"tpu\" else None)\n+ def testReluGrad(self):\n+ rtol = 1e-2 if jtu.device_under_test() == \"tpu\" else None\n+ check_grads(nn.relu, (1.,), order=3, rtol=rtol)\n+ check_grads(nn.relu, (-1.,), order=3, rtol=rtol)\n+ jaxpr = jax.make_jaxpr(jax.grad(nn.relu))(0.)\n+ self.assertEqual(len(jaxpr.jaxpr.eqns), 2)\n+\ndef testSoftplusValue(self):\nval = nn.softplus(89.)\nself.assertAllClose(val, 89., check_dtypes=False)\n@jtu.skip_on_flag(\"jax_skip_slow_tests\", True)\ndef testEluGrad(self):\n- check_grads(nn.elu, (1e4,), 4, eps=1.)\n+ check_grads(nn.elu, (1e4,), order=4, eps=1.)\ndef testEluValue(self):\nval = nn.elu(1e4)\n" } ]
Python
Apache License 2.0
google/jax
improve jax.nn.relu differentiation (#2342)
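A brief usage sketch of the behavior pinned down by the relu commit above (illustrative code, assuming a JAX version that includes the custom JVP from the diff): because the rule `lax.select(x > 0, g, 0)` passes the tangent through only where `x > 0`, the derivative at the otherwise ambiguous kink `x == 0` comes out as exactly zero.

```python
import jax

# The custom JVP selects the incoming tangent only where x > 0, so the
# subgradient chosen at the kink x == 0 is 0 rather than 1.
print(jax.grad(jax.nn.relu)(0.0))  # expected: 0.0
print(jax.grad(jax.nn.relu)(2.0))  # expected: 1.0
```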
260,411
09.03.2020 10:01:09
-3,600
04a027b85309b87e92d8b77f7d425838bc8de593
Replaced self.assertTrue(...===...) with assertEquals This was caught in internal presubmit tests
[ { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1087,7 +1087,7 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nexpected_onp_input_after_call = onp.ones((1))\nexpected_jnp_input_after_call = jnp.ones((1))\n- self.assertTrue(type(jnp.concatenate([onp_input])) is jnp.DeviceArray)\n+ self.assertIs(type(jnp.concatenate([onp_input])), jnp.DeviceArray)\nattempt_sideeffect(onp_input)\nattempt_sideeffect(jnp_input)\n@@ -2186,13 +2186,13 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nself.assertRaises(TypeError, lambda: jnp.arange())\n# test that jnp.arange(N) doesn't instantiate an ndarray\n- self.assertFalse(type(jnp.arange(77)) == type(onp.arange(77)))\n- self.assertTrue(type(jnp.arange(77)) == type(lax.iota(onp.int32, 77)))\n+ self.assertNotEquals(type(jnp.arange(77)), type(onp.arange(77)))\n+ self.assertEquals(type(jnp.arange(77)), type(lax.iota(onp.int32, 77)))\n# test that jnp.arange(N, dtype=int32) doesn't instantiate an ndarray\n- self.assertFalse(type(jnp.arange(77, dtype=jnp.int32)) ==\n+ self.assertNotEquals(type(jnp.arange(77, dtype=jnp.int32)),\ntype(onp.arange(77, dtype=onp.int32)))\n- self.assertTrue(type(jnp.arange(77, dtype=jnp.int32)) ==\n+ self.assertEquals(type(jnp.arange(77, dtype=jnp.int32)),\ntype(lax.iota(onp.int32, 77)))\n# test laziness for int dtypes\n" } ]
Python
Apache License 2.0
google/jax
Replaced self.assertTrue(...===...) with assertEquals (#2383) This was caught in internal presubmit tests
260,699
09.03.2020 17:07:12
-10,800
8339511eb50891b66536b03330e844c415e3e86b
Implement NumPy sorting routines. Implement `np.msort`. Related issue:
[ { "change_type": "MODIFY", "old_path": "docs/jax.numpy.rst", "new_path": "docs/jax.numpy.rst", "diff": "@@ -170,6 +170,7 @@ Not every function in NumPy is implemented; contributions are welcome!\nminimum\nmod\nmoveaxis\n+ msort\nmultiply\nnan_to_num\nnancumprod\n" }, { "change_type": "MODIFY", "old_path": "jax/numpy/lax_numpy.py", "new_path": "jax/numpy/lax_numpy.py", "diff": "@@ -2667,6 +2667,11 @@ def argsort(a, axis=-1, kind='quicksort', order=None):\nreturn perm\n+@_wraps(onp.msort)\n+def msort(a):\n+ return sort(a, axis=0)\n+\n+\n@_wraps(onp.roll)\ndef roll(a, shift, axis=None):\na = asarray(a)\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_numpy_test.py", "new_path": "tests/lax_numpy_test.py", "diff": "@@ -1812,6 +1812,13 @@ class LaxBackedNumpyTests(jtu.JaxTestCase):\nexpected = onp.argsort(x)\nself.assertAllClose(expected, ans, check_dtypes=False)\n+ def testMsortManually(self):\n+ args_maker = lambda: [onp.random.randint(50, size=(5 ,5))]\n+ lnp_op = lambda x: lnp.msort(x)\n+ onp_op = lambda x: onp.msort(x)\n+ self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)\n+ self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)\n+\n@parameterized.named_parameters(jtu.cases_from_list(\n{\"testcase_name\": \"_{}_shifts={}_axis={}\".format(\njtu.format_shape_dtype_string(shape, dtype),\n" } ]
Python
Apache License 2.0
google/jax
Implement NumPy sorting routines. (#2318) Implement `np.msort`. Related issue: #2079
260,323
09.03.2020 11:43:45
25,200
c53ae2c47706d3c6cae5c50b88e8b1f2ef392700
automatic detection of wheel version
[ { "change_type": "MODIFY", "old_path": "README.md", "new_path": "README.md", "diff": "@@ -433,6 +433,12 @@ for Python 3.5, 3.6, 3.7, and 3.8; for anything else, you must build from\nsource. Jax requires Python 3.5 or above. Jax does not support Python 2 any\nmore.\n+To try automatic detection of the correct version for your system, you can run:\n+\n+```bash\n+pip install --upgrade https://storage.googleapis.com/jax-releases/`nvcc -V | sed -En \"s/.* release ([0-9]*)\\.([0-9]*),.*/cuda\\1\\2/p\"`/jaxlib-0.1.40-`python3 -V | sed -En \"s/Python ([0-9]*)\\.([0-9]*).*/cp\\1\\2/p\"`-none-linux_x86_64.whl jax\n+```\n+\nPlease let us know on [the issue tracker](https://github.com/google/jax/issues)\nif you run into any errors or problems with the prebuilt wheels.\n" } ]
Python
Apache License 2.0
google/jax
automatic detection of wheel version (#2373)
260,411
09.03.2020 20:41:01
-3,600
282225f676a4dddd170047de377f831f44ea2304
Added some pytype annotations Tried to catch all uses of linear_util.WrappedFun
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -1427,7 +1427,7 @@ def device_get(x):\nreturn tree_map(_device_get, x)\n-def _argnums_partial(f, dyn_argnums, args):\n+def _argnums_partial(f: lu.WrappedFun, dyn_argnums, args):\nif isinstance(dyn_argnums, int):\ndyn_argnums = (dyn_argnums,)\nelse:\n" }, { "change_type": "MODIFY", "old_path": "jax/core.py", "new_path": "jax/core.py", "diff": "@@ -876,7 +876,7 @@ def process_env_traces(primitive, level, params_tuple, *args):\ntodo.append(cur_todo)\nyield outs, tuple(todo) # Ensure the aux output is immutable\n-def call_bind(primitive, f, *args, **params):\n+def call_bind(primitive, f: lu.WrappedFun, *args, **params):\ntop_trace = find_top_trace(args)\nlevel = trace_state.trace_stack.next_level(True) if top_trace is None else top_trace.level\nparams_tuple = tuple(params.items())\n@@ -890,7 +890,7 @@ def call_bind(primitive, f, *args, **params):\nreturn apply_todos(env_trace_todo(), outs)\n-def call_impl(f, *args, **params):\n+def call_impl(f: lu.WrappedFun, *args, **params):\ndel params # params parameterize the call primitive, not the function\nreturn f.call_wrapped(*args)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/ad.py", "new_path": "jax/interpreters/ad.py", "diff": "@@ -34,7 +34,7 @@ map = safe_map\ndef identity(x): return x\n-def jvp(fun, has_aux=False, instantiate=True) -> Any:\n+def jvp(fun: lu.WrappedFun, has_aux=False, instantiate=True) -> Any:\nif not has_aux:\nreturn jvpfun(jvp_subtrace(fun), instantiate)\nelse:\n@@ -297,7 +297,7 @@ class JVPTrace(Trace):\nelse:\nreturn JVPTracer(self, primal_out, tangent_out)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nassert call_primitive.multiple_results\nprimals = [t.primal for t in tracers]\ntangents = [t.tangent for t in tracers]\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/batching.py", "new_path": "jax/interpreters/batching.py", "diff": "@@ -32,12 +32,12 @@ from . 
import partial_eval as pe\nmap = safe_map\n-def batch(fun, in_vals, in_dims, out_dim_dests):\n+def batch(fun: lu.WrappedFun, in_vals, in_dims, out_dim_dests):\nsize, = {x.shape[d] for x, d in zip(in_vals, in_dims) if d is not not_mapped}\nout_vals, out_dims = batch_fun(fun, in_vals, in_dims)\nreturn map(partial(matchaxis, size), out_dims, out_dim_dests(), out_vals)\n-def batch_fun(fun, in_vals, in_dims):\n+def batch_fun(fun: lu.WrappedFun, in_vals, in_dims):\nwith new_master(BatchTrace) as master:\nfun, out_dims = batch_subtrace(fun, master, in_dims)\nout_vals = fun.call_wrapped(*in_vals)\n@@ -114,7 +114,7 @@ class BatchTrace(Trace):\nelse:\nreturn BatchTracer(self, val_out, dim_out)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nassert call_primitive.multiple_results\nname = params.get('name', f.__name__)\nparams = dict(params, name=wrap_name(name, 'vmap'))\n@@ -128,7 +128,7 @@ class BatchTrace(Trace):\nvals_out = call_primitive.bind(f, *vals, **params)\nreturn [BatchTracer(self, v, d) for v, d in zip(vals_out, dims_out())]\n- def process_map(self, map_primitive, f, tracers, params):\n+ def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):\nvals, dims = unzip2((t.val, t.batch_dim) for t in tracers)\nif all(dim is not_mapped for dim in dims):\nreturn map_primitive.bind(f, *vals, **params)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/masking.py", "new_path": "jax/interpreters/masking.py", "diff": "@@ -386,7 +386,7 @@ class MaskTrace(Trace):\nelse:\nreturn map(partial(MaskTracer, self), out, out_shape)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nraise NotImplementedError # TODO mask-of-jit\nshape_parameterized_primitive_rules = {}\n@@ -412,7 +412,7 @@ def naryop_masking_rule(prim, padded_vals, logical_shapes):\n### definition-time (import-time) shape checker tracer machinery\n-def shapecheck(fun, in_shapes):\n+def shapecheck(fun: lu.WrappedFun, in_shapes):\nwith core.new_master(ShapeCheckTrace) as master:\nout_shapes = check_subtrace(fun, master).call_wrapped(in_shapes)\ndel master\n@@ -460,7 +460,7 @@ class ShapeCheckTrace(Trace):\nout_shape = shape_rule(*avals, **params)\nreturn ShapeCheckTracer(self, out_shape)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\n# TODO apply proper subtrace:\nreturn map(self.full_raise, f.call_wrapped(*tracers))\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/parallel.py", "new_path": "jax/interpreters/parallel.py", "diff": "@@ -115,7 +115,7 @@ class PapplyTrace(Trace):\nval_out, axis_out = rule(name, size, vals, axes, **params)\nreturn PapplyTracer(self, name, size, val_out, axis_out)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nif call_primitive in pe.map_primitives:\nreturn self.process_map(call_primitive, f, tracers, params)\nnames, vals, axes = unzip3((t.name, t.val, t.axis) for t in tracers)\n@@ -138,7 +138,7 @@ class PapplyTrace(Trace):\nreturn PapplyTracer(trace, name, size, x, axis)\nreturn val, todo\n- def process_map(self, map_primitive, f, tracers, params):\n+ def process_map(self, map_primitive, f :lu.WrappedFun, tracers, params):\nraise NotImplementedError # TODO(mattjj,frostig)\n" }, { "change_type": "MODIFY", 
"old_path": "jax/interpreters/partial_eval.py", "new_path": "jax/interpreters/partial_eval.py", "diff": "@@ -115,7 +115,7 @@ class JaxprTrace(Trace):\nout_tracer.recipe = new_eqn_recipe(tracers, [out_tracer], primitive, params)\nreturn out_tracer\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nname = params.get('name', f.__name__)\nif self.master.trace_type is StagingJaxprTrace:\ntracers = map(self.instantiate_const_abstracted, tracers)\n@@ -144,7 +144,7 @@ class JaxprTrace(Trace):\nt.recipe = eqn\nreturn out_tracers\n- def process_map(self, map_primitive, f, tracers, params):\n+ def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):\nin_pvs, in_consts = unzip2([t.pval for t in tracers])\nreduced_pvs = [None if pv is None else _mapped_aval(pv) for pv in in_pvs]\nfun, aux = partial_eval(f, self, reduced_pvs)\n@@ -347,7 +347,7 @@ def partial_val_aval(pv, const):\nelse:\nraise TypeError(pv)\n-def trace_to_jaxpr(fun, pvals, instantiate=False, stage_out_calls=False, bottom=False):\n+def trace_to_jaxpr(fun: lu.WrappedFun, pvals, instantiate=False, stage_out_calls=False, bottom=False):\n\"\"\"Traces a function, given abstract inputs, to a jaxpr.\"\"\"\ntrace_type = StagingJaxprTrace if stage_out_calls else JaxprTrace\nwith new_master(trace_type, bottom=bottom) as master:\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/pxla.py", "new_path": "jax/interpreters/pxla.py", "diff": "@@ -826,7 +826,7 @@ class SplitAxisTrace(core.Trace):\nelse:\nreturn new_tracer(val_out, axis_out)\n- def process_call(self, call_primitive, f, tracers, params):\n+ def process_call(self, call_primitive, f: lu.WrappedFun, tracers, params):\nassert call_primitive.multiple_results\nif call_primitive in pe.map_primitives:\nreturn self.process_map(call_primitive, f, tracers, params)\n@@ -839,7 +839,7 @@ class SplitAxisTrace(core.Trace):\nvals_out = call_primitive.bind(f, *vals, **params)\nreturn [SplitAxisTracer(self, a, x) for a, x in zip(names_out(), vals_out)]\n- def process_map(self, map_primitive, f, tracers, params):\n+ def process_map(self, map_primitive, f: lu.WrappedFun, tracers, params):\nvals, names = unzip2((t.val, t.axis_name) for t in tracers)\nif all(name is not_mapped for name in names):\nreturn map_primitive.bind(f, *vals, **params)\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/sharded_jit.py", "new_path": "jax/interpreters/sharded_jit.py", "diff": "@@ -134,7 +134,7 @@ result_handlers[ConcreteArray] = _array_result_handler\n@lu.cache\n-def _sharded_callable(fun, partitions, name, *abstract_args):\n+def _sharded_callable(fun: lu.WrappedFun, partitions, name, *abstract_args):\nnrep = 1 # TODO generalize\nin_pvals = [pe.PartialVal((aval, core.unit)) for aval in abstract_args]\n@@ -263,7 +263,7 @@ def jaxpr_partitions(jaxpr):\n### sharded_call\n-def _sharded_call_impl(fun, *args, **params):\n+def _sharded_call_impl(fun: lu.WrappedFun, *args, **params):\npartitions = params.pop(\"partitions\")\nname = params.pop(\"name\")\nassert not params, params\n" }, { "change_type": "MODIFY", "old_path": "jax/interpreters/xla.py", "new_path": "jax/interpreters/xla.py", "diff": "@@ -444,7 +444,7 @@ def jaxpr_collectives(jaxpr):\n### xla_call underlying jit\n-def _xla_call_impl(fun, *args, device, backend, name):\n+def _xla_call_impl(fun: lu.WrappedFun, *args, device, backend, name):\ncompiled_fun = _xla_callable(fun, device, backend, name, *map(arg_spec, args))\ntry:\nreturn 
compiled_fun(*args)\n@@ -454,7 +454,7 @@ def _xla_call_impl(fun, *args, device, backend, name):\nreturn fun.call_wrapped(*args) # probably won't return\n@lu.cache\n-def _xla_callable(fun, device, backend, name, *arg_specs):\n+def _xla_callable(fun: lu.WrappedFun, device, backend, name, *arg_specs):\nif device is not None and backend is not None:\nraise ValueError(\"can't specify both a device and a backend for jit, \"\n\"got device={} and backend={}\".format(device, backend))\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax_control_flow.py", "new_path": "jax/lax/lax_control_flow.py", "diff": "@@ -22,6 +22,7 @@ import functools\nimport itertools\nimport operator\nimport threading\n+from typing import Callable\nimport numpy as onp\n@@ -52,7 +53,7 @@ _reduce = functools.reduce\n@cache()\n-def _initial_style_jaxpr(fun, in_tree, in_avals):\n+def _initial_style_jaxpr(fun: Callable, in_tree, in_avals):\nin_pvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\nfun, out_tree = flatten_fun_nokwargs(lu.wrap_init(fun), in_tree)\njaxpr, out_pvals, consts = pe.trace_to_jaxpr(fun, in_pvals, instantiate=True,\n@@ -1068,7 +1069,7 @@ def _transpose_scan_jaxpr(num_res1, num_c, num_res2, jaxpr):\nreturn c_bar + a_bar\nreturn _make_typed_jaxpr(transposed, res1_avals + c_avals + b_avals + res2_avals)\n-def _make_typed_jaxpr(traceable, in_avals):\n+def _make_typed_jaxpr(traceable: lu.WrappedFun, in_avals):\npvals = [pe.PartialVal((aval, core.unit)) for aval in in_avals]\njaxpr, pvals_out, consts = pe.trace_to_jaxpr(traceable, pvals, instantiate=True)\nout_avals, _ = unzip2(pvals_out)\n" }, { "change_type": "MODIFY", "old_path": "jax/linear_util.py", "new_path": "jax/linear_util.py", "diff": "@@ -62,7 +62,7 @@ dynamic positional arguments for the generators, and also the auxiliary output\ndata must be immutable, because it will be stored in function memoization tables.\n\"\"\"\n-\n+from typing import Any, Tuple\nimport weakref\nfrom .util import curry\n@@ -122,7 +122,7 @@ class WrappedFun(object):\ndef __name__(self):\nreturn getattr(self.f, '__name__', '<unnamed wrapped function>')\n- def wrap(self, gen, gen_static_args, out_store):\n+ def wrap(self, gen, gen_static_args, out_store) -> 'WrappedFun':\n\"\"\"Add another transform and its store.\"\"\"\nreturn WrappedFun(self.f, ((gen, gen_static_args),) + self.transforms,\n(out_store,) + self.stores, self.params)\n@@ -172,7 +172,7 @@ class WrappedFun(object):\nself.params == other.params)\n@curry\n-def transformation(gen, fun: WrappedFun, *gen_static_args):\n+def transformation(gen, fun: WrappedFun, *gen_static_args) -> WrappedFun:\n\"\"\"Adds one more transformation to a WrappedFun.\nArgs:\ngen: the transformation generator function\n@@ -182,7 +182,7 @@ def transformation(gen, fun: WrappedFun, *gen_static_args):\nreturn fun.wrap(gen, gen_static_args, None)\n@curry\n-def transformation_with_aux(gen, fun: WrappedFun, *gen_static_args):\n+def transformation_with_aux(gen, fun: WrappedFun, *gen_static_args) -> Tuple[WrappedFun, Any]:\n\"\"\"Adds one more transformation with auxiliary output to a WrappedFun.\"\"\"\nout_store = Store()\nout_thunk = lambda: out_store.val\n@@ -194,7 +194,7 @@ def fun_name(f):\nexcept:\nreturn str(f)\n-def wrap_init(f, params={}):\n+def wrap_init(f, params={}) -> WrappedFun:\n\"\"\"Wraps function `f` as a `WrappedFun`, suitable for transformation.\"\"\"\nreturn WrappedFun(f, (), (), tuple(sorted(params.items())))\n@@ -209,7 +209,7 @@ def cache(call):\n\"\"\"\nfun_caches = weakref.WeakKeyDictionary()\n- def 
memoized_fun(fun, *args):\n+ def memoized_fun(fun: WrappedFun, *args):\ncache = fun_caches.setdefault(fun.f, {})\nkey = (fun.transforms, fun.params, args)\nresult = cache.get(key, None)\n" } ]
Python
Apache License 2.0
google/jax
Added some pytype annotations (#2386) Tried to catch all uses of linear_util.WrappedFun
260,581
10.03.2020 14:40:38
-3,600
5c3b4786b2bd71fccecc5617de7a1d9f4269f2cb
Add a module to apply updates every k steps (and accumulate them otherwise)
[ { "change_type": "MODIFY", "old_path": "jax/experimental/optix.py", "new_path": "jax/experimental/optix.py", "diff": "@@ -324,6 +324,36 @@ def add_noise(eta, gamma, seed):\nreturn InitUpdate(init_fn, update_fn)\n+ApplyEvery = collections.namedtuple(\"ApplyEvery\", \"count grad_acc\")\n+\n+\n+def apply_every(k=1):\n+ \"\"\"accumulate gradients and apply them every k steps.\n+\n+ Args:\n+ k: apply the update every k steps otherwise accumulate the gradients.\n+\n+ Returns:\n+ An (init_fn, update_fn) tuple.\n+ \"\"\"\n+\n+ def init_fn(params):\n+ grad_acc = tree_multimap(jnp.zeros_like, params)\n+ return ApplyEvery(count=jnp.zeros([], jnp.int32), grad_acc=grad_acc)\n+\n+ def update_fn(updates, state):\n+\n+ c = state.count % k\n+ acc = c != 0\n+ grad_acc = tree_multimap(\n+ lambda g, ga: acc * ga + g, updates, state.grad_acc)\n+ emit = c == (k - 1)\n+ updates = tree_multimap(lambda ga: emit * ga, grad_acc)\n+ return updates, ApplyEvery(count=state.count + 1, grad_acc=grad_acc)\n+\n+ return InitUpdate(init_fn, update_fn)\n+\n+\n### Utilities for building and using custom optimizers. ###\n" }, { "change_type": "MODIFY", "old_path": "tests/optix_test.py", "new_path": "tests/optix_test.py", "diff": "@@ -60,6 +60,45 @@ class OptixTest(absltest.TestCase):\nfor x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):\nonp.testing.assert_allclose(x, y, rtol=1e-5)\n+ def test_apply_every(self):\n+ # The frequency of the application of sgd\n+ k = 4\n+ zero_update = (jnp.array([0., 0.]), jnp.array([0., 0.]))\n+\n+ # experimental/optix.py sgd\n+ optix_sgd_params = self.init_params\n+ sgd = optix.sgd(LR, 0.0)\n+ state_sgd = sgd.init(optix_sgd_params)\n+\n+ # experimental/optix.py sgd apply every\n+ optix_sgd_apply_every_params = self.init_params\n+ sgd_apply_every = optix.chain(optix.apply_every(k=k),\n+ optix.trace(decay=0, nesterov=False),\n+ optix.scale(-LR))\n+ state_sgd_apply_every = sgd_apply_every.init(optix_sgd_apply_every_params)\n+ for i in range(STEPS):\n+ # Apply a step of sgd\n+ updates_sgd, state_sgd = sgd.update(self.per_step_updates, state_sgd)\n+ optix_sgd_params = optix.apply_updates(optix_sgd_params, updates_sgd)\n+\n+ # Apply a step of sgd_apply_every\n+ updates_sgd_apply_every, state_sgd_apply_every = sgd_apply_every.update(\n+ self.per_step_updates, state_sgd_apply_every)\n+ optix_sgd_apply_every_params = optix.apply_updates(\n+ optix_sgd_apply_every_params, updates_sgd_apply_every)\n+ if i % k == k-1:\n+ # Check equivalence.\n+ for x, y in zip(\n+ tree_leaves(optix_sgd_apply_every_params),\n+ tree_leaves(optix_sgd_params)):\n+ onp.testing.assert_allclose(x, y, atol=1e-6, rtol=100)\n+ else:\n+ # Check updaue is zero.\n+ for x, y in zip(\n+ tree_leaves(updates_sgd_apply_every),\n+ tree_leaves(zero_update)):\n+ onp.testing.assert_allclose(x, y, atol=1e-10, rtol=1e-5)\n+\ndef test_adam(self):\nb1, b2, eps = 0.9, 0.999, 1e-8\n" } ]
Python
Apache License 2.0
google/jax
Add a module to apply updates every k steps (and accumulate them otherwise) (#2350)
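A minimal standalone sketch (plain Python, variable names hypothetical) of the accumulate-then-emit arithmetic used by `apply_every` in the diff above: gradients are summed for `k` steps, zero updates are emitted in between, and the accumulated sum is emitted on every `k`-th step.

```python
k = 4
count, grad_acc = 0, 0.0
for g in [1.0, 2.0, 3.0, 4.0, 5.0]:
    c = count % k
    grad_acc = (c != 0) * grad_acc + g  # accumulator resets when c == 0
    update = (c == k - 1) * grad_acc    # nonzero only on every k-th step
    count += 1
    print(update)                       # prints 0.0, 0.0, 0.0, 10.0, 0.0
```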
260,335
10.03.2020 06:59:54
25,200
cc53aa956b0571efb3b0237dd87d92d509f8b1fd
skip new optix test on tpu (cf.
[ { "change_type": "MODIFY", "old_path": "tests/optix_test.py", "new_path": "tests/optix_test.py", "diff": "@@ -19,7 +19,7 @@ from absl.testing import absltest\nfrom jax import numpy as jnp\nfrom jax.experimental import optimizers\nfrom jax.experimental import optix\n-import jax.test_util # imported only for flags\n+import jax.test_util\nfrom jax.tree_util import tree_leaves\nimport numpy as onp\n@@ -60,6 +60,7 @@ class OptixTest(absltest.TestCase):\nfor x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):\nonp.testing.assert_allclose(x, y, rtol=1e-5)\n+ jax.test_util.skip_on_devices(\"tpu\")\ndef test_apply_every(self):\n# The frequency of the application of sgd\nk = 4\n" } ]
Python
Apache License 2.0
google/jax
skip new optix test on tpu (cf. #2350)
260,335
10.03.2020 08:29:46
25,200
ebbcbad547e56357e600cd8c19232d4b91cf4f00
allow vmap in_axes to be a list, fixes
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -657,6 +657,14 @@ def vmap(fun: Callable, in_axes=0, out_axes=0):\ndocstr = (\"Vectorized version of {fun}. Takes similar arguments as {fun} \"\n\"but with additional array axes over which {fun} is mapped.\")\n+ if isinstance(in_axes, list):\n+ # To be a tree prefix of the positional args tuple, in_axes can never be a\n+ # list: if in_axes is not a leaf, it must be a tuple of trees. However,\n+ # in cases like these users expect tuples and lists to be treated\n+ # essentially interchangeably, so we canonicalize lists to tuples here\n+ # rather than raising an error. https://github.com/google/jax/issues/2367\n+ in_axes = tuple(in_axes)\n+\n_check_callable(fun)\nif (not isinstance(in_axes, (list, tuple, type(None), int))\nor not isinstance(out_axes, (list, tuple, type(None), int))):\n" }, { "change_type": "MODIFY", "old_path": "tests/api_test.py", "new_path": "tests/api_test.py", "diff": "@@ -1229,6 +1229,20 @@ class APITest(jtu.JaxTestCase):\nb = np.dot(a + np.eye(a.shape[0]), real_x)\nprint(gf(a, b)) # doesn't crash\n+ def test_vmap_in_axes_list(self):\n+ # https://github.com/google/jax/issues/2367\n+ dictionary = {'a': 5., 'b': np.ones(2)}\n+ x = np.zeros(3)\n+ y = np.arange(3.)\n+\n+\n+ def f(dct, x, y):\n+ return dct['a'] + dct['b'] + x + y\n+\n+ out1 = api.vmap(f, (None, 0, 0))(dictionary, x, y)\n+ out2 = api.vmap(f, [None, 0, 0])(dictionary, x, y)\n+ self.assertAllClose(out1, out2, check_dtypes=True)\n+\ndef test_vmap_in_axes_tree_prefix_error(self):\n# https://github.com/google/jax/issues/795\nself.assertRaisesRegex(\n" } ]
Python
Apache License 2.0
google/jax
allow vmap in_axes to be a list, fixes #2367 (#2395)
260,335
10.03.2020 15:01:18
25,200
cfbdb65ad8637e883679a0d0516acdc28dd9e8ea
add register_pytree_node_class, fixes
[ { "change_type": "MODIFY", "old_path": "jax/tree_util.py", "new_path": "jax/tree_util.py", "diff": "@@ -38,6 +38,7 @@ for examples.\nimport functools\nimport collections\n+import operator as op\nfrom .lib import pytree\n@@ -105,6 +106,26 @@ def register_pytree_node(nodetype, flatten_func, unflatten_func):\npytree.register_node(nodetype, flatten_func, unflatten_func)\n_registry[nodetype] = _RegistryEntry(flatten_func, unflatten_func)\n+def register_pytree_node_class(cls):\n+ \"\"\"Extends the set of types that are considered internal nodes in pytrees.\n+\n+ This function is a thin wrapper around ``register_pytree_node``, and provides\n+ a class-oriented interface:\n+\n+ @register_pytree_node_class\n+ class Special:\n+ def __init__(self, x, y):\n+ self.x = x\n+ self.y = y\n+ def tree_flatten(self):\n+ return ((self.x, self.y), None)\n+ @classmethod\n+ def tree_unflatten(cls, aux_data, children):\n+ return cls(*children)\n+ \"\"\"\n+ register_pytree_node(cls, op.methodcaller('tree_flatten'), cls.tree_unflatten)\n+ return cls\n+\ndef tree_map(f, tree):\n\"\"\"Maps a function over a pytree to produce a new pytree.\n" }, { "change_type": "MODIFY", "old_path": "tests/tree_util_tests.py", "new_path": "tests/tree_util_tests.py", "diff": "@@ -47,10 +47,28 @@ class AnObject(object):\ndef __repr__(self):\nreturn \"AnObject({},{},{})\".format(self.x, self.y, self.z)\n-\ntree_util.register_pytree_node(AnObject, lambda o: ((o.x, o.y), o.z),\nlambda z, xy: AnObject(xy[0], xy[1], z))\n+@tree_util.register_pytree_node_class\n+class Special:\n+ def __init__(self, x, y):\n+ self.x = x\n+ self.y = y\n+\n+ def __repr__(self):\n+ return \"Special(x={}, y={})\".format(self.x, self.y)\n+\n+ def tree_flatten(self):\n+ return ((self.x, self.y), None)\n+\n+ @classmethod\n+ def tree_unflatten(cls, aux_data, children):\n+ return cls(*children)\n+\n+ def __eq__(self, other):\n+ return type(self) is type(other) and (self.x, self.y) == (other.x, other.y)\n+\nPYTREES = [\n(\"foo\",),\n((),),\n@@ -60,6 +78,7 @@ PYTREES = [\n([3],),\n([3, ATuple(foo=(3, ATuple(foo=3, bar=None)), bar={\"baz\": 34})],),\n([AnObject(3, None, [4, \"foo\"])],),\n+ (Special(2, 3.),),\n({\"a\": 1, \"b\": 2},),\n(collections.OrderedDict([(\"foo\", 34), (\"baz\", 101), (\"something\", -42)]),),\n(collections.defaultdict(dict,\n" } ]
Python
Apache License 2.0
google/jax
add register_pytree_node_class, fixes #2396 (#2400) Co-authored-by: Stephan Hoyer <shoyer@google.com>
260,335
11.03.2020 09:42:25
25,200
cdf188af2fd4f256c2c5c390ec0d09ed321212d0
add raises-exception notebook cell metadata
[ { "change_type": "MODIFY", "old_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "new_path": "docs/notebooks/Common_Gotchas_in_JAX.ipynb", "diff": "{\n\"cell_type\": \"code\",\n\"execution_count\": 0,\n- \"metadata\": {},\n+ \"metadata\": {\n+ \"tags\": [\n+ \"raises-exception\"\n+ ]\n+ },\n\"outputs\": [\n{\n\"ename\": \"IndexError\",\n" } ]
Python
Apache License 2.0
google/jax
add raises-exception notebook cell metadata (#2402)
260,384
11.03.2020 16:19:46
14,400
2dfeaeb63fa9e884ef5b76bc43cf99b2c5a5c04f
Allow zero tolerance for jax.test_util.tolerance Currently, if a user passes any falsy value to jax.test_util.tolerance, it is changed to the default value. This makes sense when the value passed is None, but not when the value passed is 0 (which indicates a desired tolerance of exactly 0). Disables failing tests for now.
[ { "change_type": "MODIFY", "old_path": "jax/test_util.py", "new_path": "jax/test_util.py", "diff": "@@ -116,7 +116,7 @@ def _assert_numpy_allclose(a, b, atol=None, rtol=None):\nonp.testing.assert_allclose(a, b, **kw)\ndef tolerance(dtype, tol=None):\n- tol = tol or {}\n+ tol = {} if tol is None else tol\nif not isinstance(tol, dict):\nreturn tol\ntol = {onp.dtype(key): value for key, value in tol.items()}\n" }, { "change_type": "MODIFY", "old_path": "tests/lax_test.py", "new_path": "tests/lax_test.py", "diff": "@@ -199,6 +199,9 @@ class LaxTest(jtu.JaxTestCase):\nfor dtype in rec.dtypes)\nfor rec in LAX_OPS))\ndef testOpAgainstNumpy(self, op_name, rng_factory, shapes, dtype, tol):\n+ if op_name == \"nextafter\" and dtype == onp.float64:\n+ raise SkipTest(\"nextafter inconsistent for float64: \"\n+ \"https://github.com/google/jax/issues/2403\")\nrng = rng_factory()\nargs_maker = lambda: [rng(shape, dtype) for shape in shapes]\nop = getattr(lax, op_name)\n" } ]
Python
Apache License 2.0
google/jax
Allow zero tolerance for jax.test_util.tolerance (#2393) Currently, if a user passes any falsy value to jax.test_util.tolerance, it is changed to the default value. This makes sense when the value passed is None, but not when the value passed is 0 (which indicates a desired tolerance of exactly 0). Disables failing tests for now.
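A one-line sketch of the falsy-value pitfall described in the commit message above (names here are illustrative): under the old `tol = tol or {}`, an explicitly requested tolerance of `0` was silently replaced by the default, because `0` is falsy in Python.

```python
tol = 0
old_behavior = tol or {}                   # -> {}  (the requested 0 is dropped)
new_behavior = {} if tol is None else tol  # -> 0   (exact-zero tolerance kept)
print(old_behavior, new_behavior)
```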
260,411
12.03.2020 10:59:30
-3,600
61b430eeb40aeef3254f50dbcb79271e7ab3db96
Added more documentation for how to fix notebook build failures
[ { "change_type": "MODIFY", "old_path": "docs/developer.rst", "new_path": "docs/developer.rst", "diff": "@@ -166,6 +166,16 @@ local repo), update it as needed, ``Run all cells`` then\n``Download ipynb``. You may want to test that it executes properly, using ``sphinx-build`` as\nexplained above.\n+Some of the notebooks are built automatically as part of the Travis pre-submit checks and\n+as part of the [Read the docs](https://jax.readthedocs.io/en/latest) build.\n+The build will fail if cells raise errors. If the errors are intentional, you can either catch them,\n+or tag the cell with `raises-exceptions` metadata ([example PR](https://github.com/google/jax/pull/2402/files)).\n+You have to add this metadata by hand in the `.ipynb` file. It will be preserved when somebody else\n+re-saves the notebook.\n+\n+We exclude some notebooks from the build, e.g., because they contain long computations.\n+See `exclude_patterns` in [conf.py](https://github.com/google/jax/blob/master/docs/conf.py).\n+\nDocumentation building on readthedocs.io\n----------------------------------------\n" }, { "change_type": "ADD", "old_path": null, "new_path": "docs/notebooks/README.md", "diff": "+For instructions on how to change and test notebooks, see\n+[Update Documentation](https://jax.readthedocs.io/en/latest/developer.html#update-documentation).\n" } ]
Python
Apache License 2.0
google/jax
Added more documentation for how to fix notebook build failures (#2404)
260,335
20.02.2020 08:04:21
28,800
e84a621184967618997e2b0018fa88979735a7cd
new jet implementation, with conv-based rules
[ { "change_type": "MODIFY", "old_path": "jax/api.py", "new_path": "jax/api.py", "diff": "@@ -57,6 +57,7 @@ from .interpreters import ad\nfrom .interpreters import batching\nfrom .interpreters import parallel\nfrom .interpreters import masking\n+from .interpreters import taylor\nfrom .interpreters.masking import shapecheck, ensure_poly\nfrom .config import flags, config, bool_env\n@@ -2107,3 +2108,8 @@ def checkpoint(fun: Callable, concrete: bool = False):\nreturn tree_unflatten(out_tree(), out_flat)\nreturn fun_remat\nremat = checkpoint\n+\n+def jet(fun, primals, series):\n+ f = lu.wrap_init(fun)\n+ out_primal, out_terms = taylor.jet(f).call_wrapped(primals, series)\n+ return out_primal, out_terms\n" }, { "change_type": "ADD", "old_path": null, "new_path": "jax/interpreters/taylor.py", "diff": "+from functools import partial\n+from collections import Counter\n+\n+import numpy as onp\n+from scipy.special import factorial as fact\n+\n+from jax import core\n+from jax.util import unzip2, prod\n+import jax.linear_util as lu\n+\n+\n+@lu.transformation\n+def jet(primals, series):\n+ with core.new_master(JetTrace) as master:\n+ trace = JetTrace(master, core.cur_sublevel())\n+ in_tracers = map(partial(JetTracer, trace), primals, series)\n+ ans = yield in_tracers, {}\n+ out_tracer = trace.full_raise(ans) # TODO multiple outputs\n+ out_primal, series_out = out_tracer.primal, out_tracer.terms\n+ yield out_primal, series_out\n+\n+\n+class JetTracer(core.Tracer):\n+ __slots__ = [\"primal\", \"terms\"]\n+\n+ def __init__(self, trace, primal, terms):\n+ assert type(terms) in (ZeroSeries, list, tuple)\n+ self._trace = trace\n+ self.primal = primal\n+ self.terms = terms\n+\n+ @property\n+ def aval(self):\n+ return core.get_aval(self.primal)\n+\n+ def full_lower(self):\n+ return self # TODO symbolic zeros\n+\n+class JetTrace(core.Trace):\n+\n+ def pure(self, val):\n+ return JetTracer(self, val, zero_series)\n+\n+ def lift(self, val):\n+ return JetTracer(self, val, zero_series)\n+\n+ def sublift(self, val):\n+ return JetTracer(self, val.primal, val.terms)\n+\n+ def process_primitive(self, primitive, tracers, params):\n+ primals_in, series_in = unzip2((t.primal, t.terms) for t in tracers)\n+ order, = {len(terms) for terms in series_in if terms is not zero_series}\n+ series_in = [[zero_term] * order if s is zero_series else s\n+ for s in series_in]\n+ series_in = [[onp.zeros(onp.shape(x), dtype=onp.result_type(x))\n+ if t is zero_term else t for t in series]\n+ for x, series in zip(primals_in, series_in)]\n+ rule = prop_rules[primitive]\n+ primal_out, terms_out = rule(primals_in, series_in, **params)\n+ return JetTracer(self, primal_out, terms_out)\n+\n+ def process_call(self, call_primitive, f, tracers, params):\n+ assert False\n+\n+ def post_process_call(self, call_primitive, out_tracer, params):\n+ assert False\n+\n+ def join(self, xt, yt):\n+ assert False\n+\n+\n+class ZeroTerm(object): pass\n+zero_term = ZeroTerm()\n+\n+class ZeroSeries(object): pass\n+zero_series = ZeroSeries()\n+\n+\n+prop_rules = {}\n+\n+def tay_to_deriv_coeff(u_tay):\n+ u_deriv = [ui * fact(i) for (i, ui) in enumerate(u_tay)]\n+ return u_deriv\n+\n+def deriv_to_tay_coeff(u_deriv):\n+ u_tay = [ui / fact(i) for (i, ui) in enumerate(u_deriv)]\n+ return u_tay\n+\n+def taylor_tilde(u_tay):\n+ u_tilde = [i * ui for (i, ui) in enumerate(u_tay)]\n+ return u_tilde\n+\n+def taylor_untilde(u_tilde):\n+ u_tay = [i * ui for (i, ui) in enumerate(u_tilde)]\n+ return u_tay\n+\n+\n+def deflinear(prim):\n+ prop_rules[prim] = partial(linear_prop, 
prim)\n+\n+def linear_prop(prim, primals_in, series_in, **params):\n+ primal_out = prim.bind(*primals_in, **params)\n+ series_out = [prim.bind(*terms_in, **params) for terms_in in zip(*series_in)]\n+ return primal_out, series_out\n" }, { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -24,6 +24,7 @@ from typing import Any\nimport warnings\nimport numpy as onp\n+from scipy.special import factorial as fact # TODO scipy dep?\nfrom ..util import partial, prod\n@@ -43,6 +44,7 @@ from ..interpreters import xla\nfrom ..interpreters import pxla\nfrom ..interpreters import ad\nfrom ..interpreters import batching\n+from ..interpreters import taylor\nfrom ..interpreters import masking\nfrom ..util import curry, cache, safe_zip, unzip2, prod\nfrom ..tree_util import build_tree, tree_unflatten, tree_map\n@@ -1638,6 +1640,7 @@ _bool_or_int = _int | _bool\nneg_p = standard_unop(_num, 'neg')\nad.deflinear(neg_p, lambda t: [neg(t)])\n+taylor.deflinear(neg_p)\ndef _sign_translation_rule(c, x):\nshape = c.GetShape(x)\n@@ -1839,6 +1842,7 @@ def _add_transpose(t, x, y):\nadd_p = standard_naryop([_num, _num], 'add')\nad.defjvp(add_p, lambda g, x, y: _brcast(g, y), lambda g, x, y: _brcast(g, x))\nad.primitive_transposes[add_p] = _add_transpose\n+taylor.deflinear(add_p)\ndef _sub_transpose(t, x, y):\n@@ -1854,6 +1858,19 @@ ad.primitive_transposes[sub_p] = _sub_transpose\nmul_p = standard_naryop([_num, _num], 'mul')\nad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)\n+def prop_mul(primals_in, series_in):\n+ x, y = primals_in\n+ x_terms, y_terms = series_in\n+ u = [x] + x_terms\n+ w = [y] + y_terms\n+ v = [None] * len(u)\n+ def scale(k, j): return 1. / (fact(k - j) * fact(j))\n+ for k in range(0, len(v)):\n+ v[k] = fact(k) * sum([scale(k, j) * u[j] * w[k-j] for j in range(0, k+1)])\n+ primal_out, *series_out = v\n+ return primal_out, series_out\n+taylor.prop_rules[mul_p] = prop_mul\n+\ndef _safe_mul_translation_rule(c, x, y):\ndtype = c.GetShape(x).numpy_dtype()\n" }, { "change_type": "ADD", "old_path": null, "new_path": "mac.py", "diff": "+from jax import jet\n+\n+def f(x, y):\n+ return x + 2 * y\n+\n+out = jet(f, (1., 2.), [(1., 0.), (1., 0.)])\n+print(out)\n" } ]
Python
Apache License 2.0
google/jax
new jet implementation, with conv-based rules Co-authored-by: Jesse Bettencourt <jessebett@cs.toronto.edu> Co-authored-by: David Duvenaud <duvenaud@cs.toronto.edu>
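For reference, the bilinear `prop_mul` rule added to lax.py in the diff above computes the Leibniz formula for higher-order derivatives of a product; in the code's notation, `fact(k) * scale(k, j)` is the binomial coefficient. A sketch of the identity:

```latex
% Leibniz rule for the k-th derivative of a product u * w, matching
% v[k] = fact(k) * sum_j u[j] * w[k-j] / (fact(k-j) * fact(j)) in prop_mul:
v_k = \sum_{j=0}^{k} \binom{k}{j}\, u_j\, w_{k-j},
\qquad \binom{k}{j} = \frac{k!}{j!\,(k-j)!}
```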
260,335
20.02.2020 08:43:40
28,800
a21fdf8669437a7a052983e18112b3379b955290
more jet rules and tests
[ { "change_type": "MODIFY", "old_path": "jax/lax/lax.py", "new_path": "jax/lax/lax.py", "diff": "@@ -1674,6 +1674,18 @@ ad.defjvp_zero(is_finite_p)\nexp_p = standard_unop(_float | _complex, 'exp')\nad.defjvp2(exp_p, lambda g, ans, x: _safe_mul(g, ans))\n+def _exp_taylor(primals_in, series_in):\n+ x, = primals_in\n+ series, = series_in\n+ u = [x] + series\n+ v = [exp(x)] + [None] * len(series)\n+ def scale(k, j): return 1. / (fact(k-j) * fact(j-1))\n+ for k in range(1,len(v)):\n+ v[k] = fact(k-1) * sum([scale(k, j)* v[k-j] * u[j] for j in range(1, k+1)])\n+ primal_out, *series_out = v\n+ return primal_out, series_out\n+taylor.prop_rules[exp_p] = _exp_taylor\n+\nlog_p = standard_unop(_float | _complex, 'log')\nad.defjvp(log_p, lambda g, x: div(g, x))\n@@ -1858,7 +1870,7 @@ ad.primitive_transposes[sub_p] = _sub_transpose\nmul_p = standard_naryop([_num, _num], 'mul')\nad.defbilinear_broadcasting(_brcast, mul_p, mul, mul)\n-def prop_mul(primals_in, series_in):\n+def _mul_taylor(primals_in, series_in):\nx, y = primals_in\nx_terms, y_terms = series_in\nu = [x] + x_terms\n@@ -1869,7 +1881,7 @@ def prop_mul(primals_in, series_in):\nv[k] = fact(k) * sum([scale(k, j) * u[j] * w[k-j] for j in range(0, k+1)])\nprimal_out, *series_out = v\nreturn primal_out, series_out\n-taylor.prop_rules[mul_p] = prop_mul\n+taylor.prop_rules[mul_p] = _mul_taylor\ndef _safe_mul_translation_rule(c, x, y):\n@@ -2343,6 +2355,21 @@ ad.defbilinear(dot_general_p,\nbatching.primitive_batchers[dot_general_p] = _dot_general_batch_rule\nmasking.masking_rules[dot_general_p] = _dot_general_masking_rule\n+# TODO factor out a bilinear rule (mul, dot, conv, ...)\n+def _dot_general_taylor(primals_in, series_in, **params):\n+ x, y = primals_in\n+ x_terms, y_terms = series_in\n+ u = [x] + x_terms\n+ w = [y] + y_terms\n+ v = [None] * len(u)\n+ dot = partial(dot_general_p.bind, **params)\n+ def scale(k, j): return 1. 
/ (fact(k - j) * fact(j))\n+ for k in range(0, len(v)):\n+ v[k] = fact(k) * sum([scale(k, j) * dot(u[j], w[k-j]) for j in range(0, k+1)])\n+ primal_out, *series_out = v\n+ return primal_out, series_out\n+taylor.prop_rules[dot_general_p] = _dot_general_taylor\n+\ndef _broadcast_shape_rule(operand, sizes):\n_check_shapelike('broadcast', 'sizes', sizes)\n" }, { "change_type": "MODIFY", "old_path": "mac.py", "new_path": "mac.py", "diff": "-from jax import jet\n+import jax.numpy as np\n+from jax import jet, jvp\ndef f(x, y):\n- return x + 2 * y\n+ return x + 2 * np.exp(y)\nout = jet(f, (1., 2.), [(1., 0.), (1., 0.)])\nprint(out)\n+\n+out = jvp(f, (1., 2.), (1., 1.))\n+print(out)\n+\n+\n+###\n+\n+from functools import reduce\n+import numpy.random as npr\n+from jax import jacobian\n+\n+from scipy.special import factorial as fact\n+\n+def jvp_taylor(f, primals, series):\n+ def expansion(eps):\n+ tayterms = [\n+ sum([eps**(i + 1) * terms[i] / fact(i + 1) for i in range(len(terms))])\n+ for terms in series\n+ ]\n+ return f(*map(sum, zip(primals, tayterms)))\n+\n+ n_derivs = []\n+ N = len(series[0]) + 1\n+ for i in range(1, N):\n+ d = repeated(jacobian, i)(expansion)(0.)\n+ n_derivs.append(d)\n+ return f(*primals), n_derivs\n+\n+def repeated(f, n):\n+ def rfun(p):\n+ return reduce(lambda x, _: f(x), range(n), p)\n+ return rfun\n+\n+def jvp_test_jet(f, primals, series, atol=1e-5):\n+ y, terms = jet(f, primals, series)\n+ y_jvp, terms_jvp = jvp_taylor(f, primals, series)\n+ # import ipdb; ipdb.set_trace()\n+ assert np.allclose(y, y_jvp)\n+ assert np.allclose(terms, terms_jvp, atol=atol)\n+\n+def test_exp():\n+ npr.seed(0)\n+ D = 3 # dimensionality\n+ N = 6 # differentiation order\n+ x = npr.randn(D)\n+ terms_in = list(npr.randn(N,D))\n+ jvp_test_jet(np.exp, (x,), (terms_in,), atol=1e-4)\n+\n+\n+def test_dot():\n+ D = 6\n+ N = 4\n+ x1 = npr.randn(D)\n+ x2 = npr.randn(D)\n+ primals = (x1, x2)\n+ terms_in1 = list(npr.randn(N,D))\n+ terms_in2 = list(npr.randn(N,D))\n+ series_in = (terms_in1, terms_in2)\n+ jvp_test_jet(np.dot, primals, series_in)\n+\n+\n+\n+def test_mlp():\n+ sigm = lambda x: 1. / (1. + np.exp(-x))\n+ def mlp(M1,M2,x):\n+ return np.dot(sigm(np.dot(x,M1)),M2)\n+ f_mlp = lambda x: mlp(M1,M2,x)\n+ M1,M2 = (npr.randn(10,10), npr.randn(10,5))\n+ x= npr.randn(2,10)\n+ terms_in = [np.ones_like(x), np.zeros_like(x), np.zeros_like(x), np.zeros_like(x)]\n+ jvp_test_jet(f_mlp,(x,),[terms_in])\n+\n+test_exp()\n+test_dot()\n+test_mlp() # TODO add div rule!\n" } ]
Python
Apache License 2.0
google/jax
more jet rules and tests Co-authored-by: Jesse Bettencourt <jessebett@cs.toronto.edu>
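Similarly, the `_exp_taylor` rule in the diff above follows from applying the Leibniz rule to the ODE identity v' = v * u' satisfied by v = exp(u); the code's `fact(k-1) * scale(k, j)` is the binomial coefficient C(k-1, j-1). A sketch of the recurrence:

```latex
% Recurrence for higher derivatives of v = exp(u), obtained from v' = v * u';
% matches v[k] = fact(k-1) * sum_j v[k-j] * u[j] / (fact(k-j) * fact(j-1)):
v_k = \sum_{j=1}^{k} \binom{k-1}{j-1}\, u_j\, v_{k-j}
```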