code stringlengths 118 171k | apis list | extract_api stringlengths 145 164k |
|---|---|---|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
# Shared function_config instance; each make_*_job factory below mutates its
# XLA-JIT / TensorRT flags before building a global function with it.
config = flow.function_config()
def make_job(x_shape, like_shape, dtype=flow.float32):
    """Build a reshape_like job that runs with XLA and TensorRT both disabled."""
    config.use_tensorrt(False)
    config.use_xla_jit(False)

    @flow.global_function(config)
    def reshape_like_job(
        x=flow.FixedTensorDef(x_shape, dtype=dtype),
        like=flow.FixedTensorDef(like_shape, dtype=dtype),
    ):
        return flow.reshape_like(x, like)

    return reshape_like_job
def make_xla_job(x_shape, like_shape, dtype=flow.float32):
    """Build a reshape_like job that runs with the XLA JIT enabled."""
    config.use_tensorrt(False)
    config.use_xla_jit(True)

    @flow.global_function(config)
    def xla_reshape_like_job(
        x=flow.FixedTensorDef(x_shape, dtype=dtype),
        like=flow.FixedTensorDef(like_shape, dtype=dtype),
    ):
        return flow.reshape_like(x, like)

    return xla_reshape_like_job
def make_trt_job(x_shape, like_shape, dtype=flow.float32):
    """Build a reshape_like job that runs with TensorRT enabled."""
    config.use_xla_jit(False)
    config.use_tensorrt(True)

    @flow.global_function(config)
    def trt_reshape_like_job(
        x=flow.FixedTensorDef(x_shape, dtype=dtype),
        like=flow.FixedTensorDef(like_shape, dtype=dtype),
    ):
        return flow.reshape_like(x, like)

    return trt_reshape_like_job
class TestReshapeLike(unittest.TestCase):
    """Checks that reshape_like produces identical results with/without XLA/TensorRT."""

    def _test_body(self, x, like, dtype=np.float32):
        # Baseline vs. XLA result must match in shape and value.
        f1 = make_job(x.shape, like.shape, dtype=flow.float32)
        f2 = make_xla_job(x.shape, like.shape, dtype=flow.float32)
        a = f1(x, like).get()
        b = f2(x, like).get()
        print("without xla: ", a)
        print("with xla: ", b)
        self.assertTrue(a.shape == b.shape)
        self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=1e-03, atol=1e-05))
        flow.clear_default_session()
        # Baseline vs. TensorRT result must also match.
        f3 = make_trt_job(x.shape, like.shape, dtype=flow.float32)
        c = f3(x, like).get()
        print("with tensorrt: ", c)
        self.assertTrue(a.shape == c.shape)
        self.assertTrue(np.allclose(a.numpy(), c.numpy(), rtol=1e-03, atol=1e-05))
        flow.clear_default_session()

    def _test_ones_body(self, x_shape, like_shape, dtype=np.float32):
        ones_x = np.ones(x_shape, dtype=dtype)
        ones_like = np.ones(like_shape, dtype=dtype)
        self._test_body(ones_x, ones_like, dtype=dtype)

    def _test_random_body(self, x_shape, like_shape, dtype=np.float32):
        rand_x = np.random.random(x_shape).astype(dtype)
        rand_like = np.random.random(like_shape).astype(dtype)
        self._test_body(rand_x, rand_like, dtype=dtype)

    def test_ones_input(self):
        for x_shape, like_shape in (
            ((1, 10), (10,)),
            ((2, 10, 2), (4, 10)),
            ((2, 5, 2, 2), (2, 5, 4)),
        ):
            self._test_ones_body(x_shape, like_shape)

    def test_random_input(self):
        for x_shape, like_shape in (
            ((1, 10), (10,)),
            ((2, 10, 2), (4, 10)),
            ((2, 5, 2, 2), (2, 5, 4)),
        ):
            self._test_random_body(x_shape, like_shape)
if __name__ == "__main__":
    # Script entry point: run the reshape_like consistency tests.
    unittest.main()
| [
"oneflow.reshape_like",
"oneflow.function_config",
"oneflow.FixedTensorDef",
"oneflow.global_function",
"oneflow.clear_default_session"
] | [((659, 681), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (679, 681), True, 'import oneflow as flow\n'), ((806, 834), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (826, 834), True, 'import oneflow as flow\n'), ((1178, 1206), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1198, 1206), True, 'import oneflow as flow\n'), ((1558, 1586), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1578, 1586), True, 'import oneflow as flow\n'), ((3464, 3479), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3477, 3479), False, 'import unittest\n'), ((871, 912), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (890, 912), True, 'import oneflow as flow\n'), ((927, 971), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['like_shape'], {'dtype': 'dtype'}), '(like_shape, dtype=dtype)\n', (946, 971), True, 'import oneflow as flow\n'), ((995, 1021), 'oneflow.reshape_like', 'flow.reshape_like', (['x', 'like'], {}), '(x, like)\n', (1012, 1021), True, 'import oneflow as flow\n'), ((1247, 1288), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1266, 1288), True, 'import oneflow as flow\n'), ((1303, 1347), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['like_shape'], {'dtype': 'dtype'}), '(like_shape, dtype=dtype)\n', (1322, 1347), True, 'import oneflow as flow\n'), ((1371, 1397), 'oneflow.reshape_like', 'flow.reshape_like', (['x', 'like'], {}), '(x, like)\n', (1388, 1397), True, 'import oneflow as flow\n'), ((1627, 1668), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1646, 1668), True, 'import oneflow as flow\n'), ((1683, 1727), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['like_shape'], {'dtype': 'dtype'}), '(like_shape, dtype=dtype)\n', (1702, 
1727), True, 'import oneflow as flow\n'), ((1751, 1777), 'oneflow.reshape_like', 'flow.reshape_like', (['x', 'like'], {}), '(x, like)\n', (1768, 1777), True, 'import oneflow as flow\n'), ((2298, 2326), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2324, 2326), True, 'import oneflow as flow\n'), ((2596, 2624), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2622, 2624), True, 'import oneflow as flow\n'), ((2708, 2737), 'numpy.ones', 'np.ones', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2715, 2737), True, 'import numpy as np\n'), ((2753, 2785), 'numpy.ones', 'np.ones', (['like_shape'], {'dtype': 'dtype'}), '(like_shape, dtype=dtype)\n', (2760, 2785), True, 'import numpy as np\n'), ((2917, 2942), 'numpy.random.random', 'np.random.random', (['x_shape'], {}), '(x_shape)\n', (2933, 2942), True, 'import numpy as np\n'), ((2972, 3000), 'numpy.random.random', 'np.random.random', (['like_shape'], {}), '(like_shape)\n', (2988, 3000), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from oneflow.python.ops.constant_op import zeros_like
from test_util import GenArgList
def np_margin_ranking_loss(margin, input1, input2, targets, reduction="none"):
    """NumPy reference implementation of margin ranking loss.

    Computes ``max(0, margin - targets * (input1 - input2))`` element-wise
    and applies the requested reduction.

    Args:
        margin: Scalar margin added before clipping at zero.
        input1: np.ndarray, first score tensor.
        input2: np.ndarray, second score tensor (same shape as input1).
        targets: np.ndarray of +1/-1 labels, broadcastable to the inputs.
        reduction: One of "none", "mean", "sum".

    Returns:
        The element-wise loss array ("none") or its mean/sum scalar.

    Raises:
        ValueError: If `reduction` is not a supported mode (previously an
            unknown mode silently fell through and returned None).
    """
    out = np.clip(margin + (-targets) * (input1 - input2), a_min=0, a_max=None)
    if reduction == "none":
        return out
    if reduction == "mean":
        return out.mean()
    if reduction == "sum":
        return np.sum(out)
    raise ValueError(
        "reduction must be 'none', 'mean' or 'sum', got %r" % (reduction,)
    )
def np_margin_ranking_loss_grad(margin, input1, input2, targets):
    """NumPy reference gradients of the summed margin ranking loss.

    Where the hinge is active (clipped loss > 0), d/d_input1 = -targets and
    d/d_input2 = +targets; elsewhere both gradients are zero.

    Args:
        margin: Scalar margin used by the forward loss.
        input1, input2: np.ndarray score tensors of the same shape.
        targets: np.ndarray of +1/-1 labels (same shape as the inputs).

    Returns:
        Tuple (grad_input1, grad_input2) of arrays shaped like `targets`.
    """
    out = np.clip(margin + (-targets) * (input1 - input2), a_min=0, a_max=None)
    # np.zeros_like already produces zeros; the original `-1 *` factor on the
    # first gradient buffer was a no-op and has been dropped.
    out_grad1 = np.zeros_like(targets)
    out_grad2 = np.zeros_like(targets)
    active = np.nonzero(out)
    out_grad1[active] = -targets[active]
    out_grad2[active] = targets[active]
    return out_grad1, out_grad2
def _test_marginrankingloss_none(test_case, shape, margin, device):
    """Compare MarginRankingLoss(reduction="none") against the NumPy reference."""
    dev = flow.device(device)
    input1 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    input2 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    target_pos = flow.Tensor(np.ones(shape), dtype=flow.float32, device=dev)
    target_neg = flow.Tensor(-1 * np.ones(shape), dtype=flow.float32, device=dev)
    loss_fn = flow.nn.MarginRankingLoss(margin=margin, reduction="none").to(device)
    # Exercise both all-positive and all-negative targets.
    for target in (target_pos, target_neg):
        of_out = loss_fn(input1, input2, target)
        np_out = np_margin_ranking_loss(
            margin, input1.numpy(), input2.numpy(), target.numpy(), reduction="none"
        )
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_marginrankingloss_mean(test_case, shape, margin, device):
    """Compare MarginRankingLoss(reduction="mean") against the NumPy reference."""
    dev = flow.device(device)
    input1 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    input2 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    target_pos = flow.Tensor(np.ones(shape), dtype=flow.float32, device=dev)
    target_neg = flow.Tensor(-1 * np.ones(shape), dtype=flow.float32, device=dev)
    loss_fn = flow.nn.MarginRankingLoss(margin=margin, reduction="mean").to(device)
    # Exercise both all-positive and all-negative targets.
    for target in (target_pos, target_neg):
        of_out = loss_fn(input1, input2, target)
        np_out = np_margin_ranking_loss(
            margin, input1.numpy(), input2.numpy(), target.numpy(), reduction="mean"
        )
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_marginrankingloss_sum(test_case, shape, margin, device):
    """Compare MarginRankingLoss(reduction="sum") against the NumPy reference."""
    dev = flow.device(device)
    input1 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    input2 = flow.Tensor(np.random.randn(*shape), dtype=flow.float32, device=dev)
    target_pos = flow.Tensor(np.ones(shape), dtype=flow.float32, device=dev)
    target_neg = flow.Tensor(-1 * np.ones(shape), dtype=flow.float32, device=dev)
    loss_fn = flow.nn.MarginRankingLoss(margin=margin, reduction="sum").to(device)
    # Exercise both all-positive and all-negative targets.
    for target in (target_pos, target_neg):
        of_out = loss_fn(input1, input2, target)
        np_out = np_margin_ranking_loss(
            margin, input1.numpy(), input2.numpy(), target.numpy(), reduction="sum"
        )
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_marginrankingloss_grad(test_case, shape, margin, device):
    """Check MarginRankingLoss backward gradients against the NumPy reference."""
    dev = flow.device(device)
    input1 = flow.Tensor(
        np.random.randn(*shape), dtype=flow.float32, device=dev, requires_grad=True
    )
    input2 = flow.Tensor(
        np.random.randn(*shape), dtype=flow.float32, device=dev, requires_grad=True
    )
    target = flow.Tensor(np.ones(shape), dtype=flow.float32, device=dev)
    loss_fn = flow.nn.MarginRankingLoss(margin=margin, reduction="sum").to(device)
    loss = loss_fn(input1, input2, target)
    loss.backward()
    np_grad1, np_grad2 = np_margin_ranking_loss_grad(
        margin, input1.numpy(), input2.numpy(), target.numpy()
    )
    test_case.assertTrue(np.allclose(input1.grad.numpy(), np_grad1, 1e-5, 1e-5))
    test_case.assertTrue(np.allclose(input2.grad.numpy(), np_grad2, 1e-5, 1e-5))
@flow.unittest.skip_unless_1n1d()
class TestMarginRankingLossModule(flow.unittest.TestCase):
    """Runs every MarginRankingLoss case over the shape/margin/device grid."""

    def test_margin_ranking_loss(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_marginrankingloss_none,
            _test_marginrankingloss_mean,
            _test_marginrankingloss_sum,
            _test_marginrankingloss_grad,
        ]
        arg_dict["shape"] = [(2, 3), (2, 4, 5, 6)]
        arg_dict["margin"] = [1.0, 0.3, 10]
        arg_dict["device"] = ["cpu", "cuda"]
        # GenArgList yields the cartesian product; element 0 is the test fn.
        for test_fun, *params in GenArgList(arg_dict):
            test_fun(test_case, *params)
if __name__ == "__main__":
    # Script entry point: run the MarginRankingLoss module tests.
    unittest.main()
| [
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow.experimental.nn.MarginRankingLoss",
"oneflow.experimental.device"
] | [((6121, 6153), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6151, 6153), True, 'import oneflow.experimental as flow\n'), ((877, 944), 'numpy.clip', 'np.clip', (['(margin + -targets * (input1 - input2))'], {'a_min': '(0)', 'a_max': 'None'}), '(margin + -targets * (input1 - input2), a_min=0, a_max=None)\n', (884, 944), True, 'import numpy as np\n'), ((1184, 1251), 'numpy.clip', 'np.clip', (['(margin + -targets * (input1 - input2))'], {'a_min': '(0)', 'a_max': 'None'}), '(margin + -targets * (input1 - input2), a_min=0, a_max=None)\n', (1191, 1251), True, 'import numpy as np\n'), ((1314, 1336), 'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (1327, 1336), True, 'import numpy as np\n'), ((2025, 2083), 'oneflow.experimental.nn.MarginRankingLoss', 'flow.nn.MarginRankingLoss', ([], {'margin': 'margin', 'reduction': '"""none"""'}), "(margin=margin, reduction='none')\n", (2050, 2083), True, 'import oneflow.experimental as flow\n'), ((3239, 3297), 'oneflow.experimental.nn.MarginRankingLoss', 'flow.nn.MarginRankingLoss', ([], {'margin': 'margin', 'reduction': '"""mean"""'}), "(margin=margin, reduction='mean')\n", (3264, 3297), True, 'import oneflow.experimental as flow\n'), ((4452, 4509), 'oneflow.experimental.nn.MarginRankingLoss', 'flow.nn.MarginRankingLoss', ([], {'margin': 'margin', 'reduction': '"""sum"""'}), "(margin=margin, reduction='sum')\n", (4477, 4509), True, 'import oneflow.experimental as flow\n'), ((5623, 5680), 'oneflow.experimental.nn.MarginRankingLoss', 'flow.nn.MarginRankingLoss', ([], {'margin': 'margin', 'reduction': '"""sum"""'}), "(margin=margin, reduction='sum')\n", (5648, 5680), True, 'import oneflow.experimental as flow\n'), ((6755, 6770), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6768, 6770), False, 'import unittest\n'), ((989, 1000), 'numpy.sum', 'np.sum', (['out'], {}), '(out)\n', (995, 1000), True, 'import numpy as np\n'), ((1275, 1297), 
'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (1288, 1297), True, 'import numpy as np\n'), ((1351, 1366), 'numpy.nonzero', 'np.nonzero', (['out'], {}), '(out)\n', (1361, 1366), True, 'import numpy as np\n'), ((1410, 1425), 'numpy.nonzero', 'np.nonzero', (['out'], {}), '(out)\n', (1420, 1425), True, 'import numpy as np\n'), ((1437, 1452), 'numpy.nonzero', 'np.nonzero', (['out'], {}), '(out)\n', (1447, 1452), True, 'import numpy as np\n'), ((1590, 1613), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1605, 1613), True, 'import numpy as np\n'), ((1702, 1725), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1717, 1725), True, 'import numpy as np\n'), ((1818, 1832), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1825, 1832), True, 'import numpy as np\n'), ((2804, 2827), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2819, 2827), True, 'import numpy as np\n'), ((2916, 2939), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2931, 2939), True, 'import numpy as np\n'), ((3032, 3046), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (3039, 3046), True, 'import numpy as np\n'), ((4017, 4040), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (4032, 4040), True, 'import numpy as np\n'), ((4129, 4152), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (4144, 4152), True, 'import numpy as np\n'), ((4245, 4259), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4252, 4259), True, 'import numpy as np\n'), ((5228, 5251), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (5243, 5251), True, 'import numpy as np\n'), ((5385, 5408), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (5400, 5408), True, 'import numpy as np\n'), ((5533, 5547), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (5540, 5547), True, 'import numpy as np\n'), ((6277, 
6290), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6288, 6290), False, 'from collections import OrderedDict\n'), ((6660, 6680), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6670, 6680), False, 'from test_util import GenArgList\n'), ((1379, 1394), 'numpy.nonzero', 'np.nonzero', (['out'], {}), '(out)\n', (1389, 1394), True, 'import numpy as np\n'), ((1642, 1661), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1653, 1661), True, 'import oneflow.experimental as flow\n'), ((1754, 1773), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1765, 1773), True, 'import oneflow.experimental as flow\n'), ((1861, 1880), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1872, 1880), True, 'import oneflow.experimental as flow\n'), ((1930, 1944), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (1937, 1944), True, 'import numpy as np\n'), ((1973, 1992), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1984, 1992), True, 'import oneflow.experimental as flow\n'), ((2856, 2875), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2867, 2875), True, 'import oneflow.experimental as flow\n'), ((2968, 2987), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2979, 2987), True, 'import oneflow.experimental as flow\n'), ((3075, 3094), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3086, 3094), True, 'import oneflow.experimental as flow\n'), ((3144, 3158), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (3151, 3158), True, 'import numpy as np\n'), ((3187, 3206), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3198, 3206), True, 'import oneflow.experimental as flow\n'), ((4069, 4088), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4080, 4088), True, 'import 
oneflow.experimental as flow\n'), ((4181, 4200), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4192, 4200), True, 'import oneflow.experimental as flow\n'), ((4288, 4307), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4299, 4307), True, 'import oneflow.experimental as flow\n'), ((4357, 4371), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4364, 4371), True, 'import numpy as np\n'), ((4400, 4419), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4411, 4419), True, 'import oneflow.experimental as flow\n'), ((5296, 5315), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5307, 5315), True, 'import oneflow.experimental as flow\n'), ((5453, 5472), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5464, 5472), True, 'import oneflow.experimental as flow\n'), ((5576, 5595), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5587, 5595), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
from typing import Union, Optional, Sequence
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.module as module_util
import oneflow.python.ops.math_unary_elementwise_ops as math_unary_elementwise_ops
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("math.add")
def add(
    x: Union[int, float, remote_blob_util.BlobDef],
    y: Union[int, float, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Dispatch x + y to the scalar, element-wise, or broadcast add op.

    Args:
        x: A `Blob` or a Python scalar.
        y: A `Blob` or a Python scalar.
        name: Optional op name.
    Returns:
        A `Blob` holding the sum.
    """
    if isinstance(x, (int, float)):
        return scalar_add(y, x, name)
    if isinstance(y, (int, float)):
        return scalar_add(x, y, name)
    if x.shape == y.shape and x.batch_axis == y.batch_axis:
        return element_wise_add(x, y, name)
    if x.shape == (1,):
        return scalar_add_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_add_by_tensor(x, y, name)
    return broadcast_add(x, y, name)
def _recursive_build_add_n(inputs, name=None):
    """Sum any number of blobs by chaining add_n ops of at most 8 inputs each.

    The add_n kernel accepts up to 8 operands, so longer input lists are
    folded recursively: the first 8 are summed into a partial result that is
    appended to the remainder.
    """
    inputs = list(inputs)
    kernel_max_inputs = 8
    if len(inputs) == 1:
        return inputs[0]
    if len(inputs) <= kernel_max_inputs:
        op_name = name if name is not None else id_util.UniqueStr("AddN_")
        op = (
            flow.user_op_builder(op_name)
            .Op("add_n")
            .Input("in", inputs)
            .Output("out")
            .Build()
        )
        return op.InferAndTryRun().RemoteBlobList()[0]
    # More than 8 operands: reduce the first chunk, then recurse on the rest.
    remainder = inputs[kernel_max_inputs:]
    remainder.append(_recursive_build_add_n(inputs[:kernel_max_inputs]))
    return _recursive_build_add_n(remainder)
@oneflow_export("math.add_n")
def add_n(
    inputs: Sequence[remote_blob_util.BlobDef], name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Return the element-wise sum of a sequence of blobs."""
    return _recursive_build_add_n(inputs, name)
@oneflow_export("math.subtract")
def subtract(
    x: Union[int, float, remote_blob_util.BlobDef],
    y: Union[int, float, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Dispatch x - y to the scalar or broadcast subtraction op.

    Args:
        x: A `Blob` or a Python scalar.
        y: A `Blob` or a Python scalar.
        name: Optional op name.
    Returns:
        A `Blob` holding the difference.
    """
    if isinstance(x, (int, float)):
        # scalar - blob: negate the blob, then add the scalar.
        return scalar_add(-1 * y, x, name)
    if isinstance(y, (int, float)):
        return scalar_add(x, -1 * y, name)
    if x.shape == y.shape:
        # TODO: add element-wise op
        return broadcast_sub(x, y, name)
    if x.shape == (1,):
        return scalar_sub_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_sub_by_tensor(x, y, name)
    return broadcast_sub(x, y, name)
@oneflow_export("math.multiply")
def multiply(
    x: Union[int, float, remote_blob_util.BlobDef],
    y: Union[int, float, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Dispatch x * y to the scalar, element-wise, or broadcast multiply op.

    Args:
        x: A `Blob` or a Python scalar.
        y: A `Blob` or a Python scalar.
        name: Optional op name.
    Returns:
        A `Blob` holding the product.
    """
    if isinstance(x, (int, float)):
        return scalar_mul(y, x, name)
    if isinstance(y, (int, float)):
        return scalar_mul(x, y, name)
    if x.shape == y.shape and x.batch_axis == y.batch_axis:
        return element_wise_mul(x, y, name)
    if x.shape == (1,):
        return scalar_mul_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_mul_by_tensor(x, y, name)
    return broadcast_mul(x, y, name)
@oneflow_export("math.divide")
def divide(
    x: Union[int, float, remote_blob_util.BlobDef],
    y: Union[int, float, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Dispatch x / y to the scalar or broadcast division op.

    Division by a scalar y is lowered to multiplication by 1/y; division by a
    scalar zero multiplies by 0.0 instead (mirroring reciprocal_no_nan).

    Args:
        x: A `Blob` or a Python scalar.
        y: A `Blob` or a Python scalar.
        name: Optional op name.
    Returns:
        A `Blob` holding the quotient.
    """
    if isinstance(x, (int, float)):
        return scalar_mul(math_unary_elementwise_ops.reciprocal_no_nan(y), x, name)
    if isinstance(y, (int, float)):
        # `y == 0` already matches both 0 and 0.0; the original
        # `y == 0 or y == 0.0` double-check was redundant.
        factor = 0.0 if y == 0 else 1.0 / float(y)
        return scalar_mul(x, factor, name)
    if x.shape == y.shape:
        # TODO: add element-wise op
        return broadcast_div(x, y, name)
    if x.shape == (1,):
        return scalar_div_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_div_by_tensor(x, y, name)
    return broadcast_div(x, y, name)
@oneflow_export("math.mod")
def floor_mod(
    x: Union[int, float, remote_blob_util.BlobDef],
    y: Union[int, float, remote_blob_util.BlobDef],
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Element-wise floor modulus of two blobs via the broadcast op.

    Args:
        x: A `Blob` (scalar operands are not supported).
        y: A `Blob` (scalar operands are not supported).
        name: Optional op name.
    Returns:
        A `Blob` holding x mod y.
    Raises:
        NotImplementedError: If either operand is a Python scalar.
    """
    if isinstance(x, (int, float)) or isinstance(y, (int, float)):
        raise NotImplementedError
    # Both the equal-shape and unequal-shape cases lower to the same broadcast
    # op, so the original `x.shape == y.shape` branch was a dead duplicate.
    return broadcast_floor_mod(x, y, name)
def scalar_add(x, operand, name=None):
    """Add a Python scalar `operand` to every element of blob `x`.

    The op carries both an int and a float attr slot; only the one matching
    the operand's type is populated.
    """
    op_name = name if name is not None else id_util.UniqueStr("ScalarAdd_")
    builder = flow.user_op_builder(op_name).Op("scalar_add").Input("in", [x]).Output("out")
    if isinstance(operand, int):
        attrs = (
            ("has_int_operand", True),
            ("has_float_operand", False),
            ("int_operand", operand),
            ("float_operand", 0.0),
        )
    elif isinstance(operand, float):
        attrs = (
            ("has_int_operand", False),
            ("has_float_operand", True),
            ("int_operand", 0),
            ("float_operand", operand),
        )
    else:
        attrs = ()
    for key, value in attrs:
        builder = builder.Attr(key, value)
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def scalar_add_by_tensor(x, scalar, name=None):
    """Add a single-element blob `scalar` to every element of blob `x`."""
    op_name = name or id_util.UniqueStr("ScalarAddByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_add_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def element_wise_add(x, y, name=None):
    """Element-wise sum of two same-shape blobs, implemented via add_n."""
    return flow.math.add_n([x, y], name)
def build_broadcast_binary_op(math_op, x, y, name=None):
    """Build and run a binary broadcast user op whose op type is `math_op`.

    Inputs are bound to slots "x" and "y"; the result comes out of slot "z".
    """
    if name is None:
        name = id_util.UniqueStr(math_op + "_")
    op = (
        flow.user_op_builder(name)
        .Op(math_op)
        .Input("x", [x])
        .Input("y", [y])
        .Output("z")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def broadcast_add(x, y, name=None):
    """Broadcasting element-wise addition."""
    return build_broadcast_binary_op("broadcast_add", x, y, name)


def broadcast_sub(x, y, name=None):
    """Broadcasting element-wise subtraction."""
    return build_broadcast_binary_op("broadcast_sub", x, y, name)
def scalar_sub_by_tensor(x, scalar, name=None):
    """Subtract a single-element blob `scalar` from every element of blob `x`."""
    op_name = name or id_util.UniqueStr("ScalarSubByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_sub_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def element_wise_mul(x, y, name=None):
    """Element-wise product of two same-shape blobs."""
    op_name = name or id_util.UniqueStr("ElementWiseMul_")
    op = (
        flow.user_op_builder(op_name)
        .Op("multiply")
        .Input("x", [x])
        .Input("y", [y])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def broadcast_mul(x, y, name=None):
    """Broadcasting element-wise multiplication."""
    return build_broadcast_binary_op("broadcast_mul", x, y, name)
def scalar_mul(x, operand, name=None):
    """Multiply every element of blob `x` by a Python scalar `operand`.

    The op carries both an int and a float attr slot; only the one matching
    the operand's type is populated.
    """
    op_name = name if name is not None else id_util.UniqueStr("ScalarMul_")
    builder = flow.user_op_builder(op_name).Op("scalar_mul").Input("in", [x]).Output("out")
    if isinstance(operand, int):
        attrs = (
            ("has_int_operand", True),
            ("has_float_operand", False),
            ("int_operand", operand),
            ("float_operand", 0.0),
        )
    elif isinstance(operand, float):
        attrs = (
            ("has_int_operand", False),
            ("has_float_operand", True),
            ("int_operand", 0),
            ("float_operand", operand),
        )
    else:
        attrs = ()
    for key, value in attrs:
        builder = builder.Attr(key, value)
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def scalar_mul_by_tensor(x, scalar, name=None):
    """Multiply every element of blob `x` by a single-element blob `scalar`."""
    op_name = name or id_util.UniqueStr("ScalarMulByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_mul_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]


def broadcast_div(x, y, name=None):
    """Broadcasting element-wise division."""
    return build_broadcast_binary_op("broadcast_div", x, y, name)
def scalar_div_by_tensor(x, scalar, name=None):
    """Divide every element of blob `x` by a single-element blob `scalar`."""
    op_name = name or id_util.UniqueStr("ScalarDivByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_div_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]


def broadcast_floor_mod(x, y, name=None):
    """Broadcasting element-wise floor modulus."""
    return build_broadcast_binary_op("broadcast_floor_mod", x, y, name)
@oneflow_export("math.tanh")
def tanh(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Computes the hyperbolic tangent of `x` element-wise.

    Args:
        x: Input `Blob`.
        name: Optional op name.
    Returns:
        A `Blob`
    """
    op_name = name if name is not None else id_util.UniqueStr("TanH_")
    op = (
        flow.user_op_builder(op_name)
        .Op("tanh")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.gelu")
def gelu(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Gaussian Error Linear Unit activation, applied element-wise.

    Args:
        x: Input `Blob`.
        name: Optional op name.
    Returns:
        A `Blob`
    """
    op_name = name if name is not None else id_util.UniqueStr("Gelu_")
    op = (
        flow.user_op_builder(op_name)
        .Op("gelu")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.relu", "nn.relu")
def relu(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Rectified Linear Unit activation, applied element-wise.

    Args:
        x: Input `Blob`.
        name: Optional op name.
    Returns:
        A `Blob`
    """
    op_name = name if name is not None else id_util.UniqueStr("Relu_")
    op = (
        flow.user_op_builder(op_name)
        .Op("relu")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.sigmoid")
def sigmoid(
    x: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Computes the sigmoid of `x` element-wise.

    Args:
        x: Input `Blob`.
        name: Optional op name.
    Returns:
        A `Blob`
    """
    op_name = name if name is not None else id_util.UniqueStr("Sigmoid_")
    op = (
        flow.user_op_builder(op_name)
        .Op("sigmoid")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_segment_sum", "unsorted_segment_sum")
def unsorted_segment_sum(
    data: remote_blob_util.BlobDef,
    segment_ids: remote_blob_util.BlobDef,
    num_segments: int,
    axis: int = 0,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Sum slices of `data` along `axis`, grouped by `segment_ids`.

    Args:
        data: Input `Blob` to be reduced.
        segment_ids: `Blob` of segment indices selecting the output bucket.
        num_segments: Number of output segments.
        axis: Axis of `data` the segment ids index into.
        name: Optional op name.
    Returns:
        A `Blob` of per-segment sums.
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedSegmentSum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_segment_sum")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Output("out")
        .Attr("axis", int(axis))
        .Attr("num_segments", int(num_segments))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_segment_sum_like", "unsorted_segment_sum_like")
def unsorted_segment_sum_like(
    data: remote_blob_util.BlobDef,
    segment_ids: remote_blob_util.BlobDef,
    like: remote_blob_util.BlobDef,
    axis: int = 0,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Segment-sum `data` into an output shaped like the `like` blob.

    Args:
        data: Input `Blob` to be reduced.
        segment_ids: `Blob` of segment indices selecting the output bucket.
        like: `Blob` whose shape determines the output shape.
        axis: Axis of `data` the segment ids index into.
        name: Optional op name.
    Returns:
        A `Blob` of per-segment sums, shaped like `like`.
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedSegmentSumLike_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_segment_sum_like")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Input("like", [like])
        .Output("out")
        .Attr("axis", int(axis))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_batch_segment_sum", "unsorted_batch_segment_sum")
def unsorted_batch_segment_sum(
    data: remote_blob_util.BlobDef,
    segment_ids: remote_blob_util.BlobDef,
    num_segments: int,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Per-batch variant of unsorted segment sum.

    Args:
        data: Input `Blob` to be reduced.
        segment_ids: `Blob` of segment indices selecting the output bucket.
        num_segments: Number of output segments.
        name: Optional op name.
    Returns:
        A `Blob` of per-segment sums.
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedBatchSegmentSum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_batch_segment_sum")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Output("out")
        .Attr("num_segments", int(num_segments))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("cast")
def cast(
    x: remote_blob_util.BlobDef, dtype: dtype_util.dtype, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    r"""Return a `Blob` of data type `dtype` with a shape identical to `x`.

    Args:
        x: A `Blob`.
        dtype: A OneFlow data type. For instance, `oneflow.float`.
        name: Optional op name.
    Returns:
        A `Blob`
    """
    # Already the requested dtype: return the input unchanged, no op built.
    if x.dtype == dtype:
        return x
    op_name = name if name is not None else id_util.UniqueStr("Cast_")
    op = (
        flow.user_op_builder(op_name)
        .Op("cast")
        .Input("in", [x])
        .Output("out")
        .Attr("dtype", dtype)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.equal")
def equal(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x == y."""
    return build_broadcast_binary_op("broadcast_equal", x, y, name)


@oneflow_export("math.not_equal")
def not_equal(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x != y."""
    return build_broadcast_binary_op("broadcast_not_equal", x, y, name)


@oneflow_export("math.less")
def less(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x < y."""
    return build_broadcast_binary_op("broadcast_less", x, y, name)


@oneflow_export("math.less_equal")
def less_equal(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x <= y."""
    return build_broadcast_binary_op("broadcast_less_equal", x, y, name)


@oneflow_export("math.greater")
def greater(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x > y."""
    return build_broadcast_binary_op("broadcast_greater", x, y, name)


@oneflow_export("math.greater_equal")
def greater_equal(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise x >= y."""
    return build_broadcast_binary_op("broadcast_greater_equal", x, y, name)


@oneflow_export("math.logical_and")
def logical_and(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise logical AND."""
    return build_broadcast_binary_op("broadcast_logical_and", x, y, name)


@oneflow_export("math.minimum")
def broadcast_min(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise minimum."""
    return build_broadcast_binary_op("broadcast_minimum", x, y, name)


@oneflow_export("math.maximum")
def broadcast_max(
    x: remote_blob_util.BlobDef, y: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
    """Broadcasting element-wise maximum."""
    return build_broadcast_binary_op("broadcast_maximum", x, y, name)
@oneflow_export("math.reduced_shape_elem_cnt")
def elem_cnt(
input_blob: remote_blob_util.BlobDef,
axis: Optional[Sequence[int]] = None,
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
op_conf = op_conf_util.OperatorConf()
setattr(
op_conf,
"name",
name if name is not None else id_util.UniqueStr("ShapeElemCnt_"),
)
op_conf.shape_elem_cnt_conf.x = input_blob.unique_name
if axis is None:
op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
else:
assert isinstance(axis, (tuple, list))
op_conf.shape_elem_cnt_conf.include_axis_conf.axis.extend(axis)
if dtype is not None:
op_conf.shape_elem_cnt_conf.data_type = dtype.oneflow_proto_dtype
op_conf.shape_elem_cnt_conf.y = "y"
interpret_util.Forward(op_conf)
out_lbi = logical_blob_id_util.LogicalBlobId()
out_lbi.op_name = op_conf.name
out_lbi.blob_name = "y"
return remote_blob_util.RemoteBlob(out_lbi)
@oneflow_export("math.top_k")
def top_k(
input: remote_blob_util.BlobDef,
k: int = 1,
sorted: bool = True,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
return (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("TopK_"))
.Op("top_k")
.Input("in", [input])
.Output("out")
.Attr("k", k)
.Attr("sorted", sorted)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.argmax")
def argmax(
input: remote_blob_util.BlobDef, name: Optional[str] = None
) -> remote_blob_util.BlobDef:
return (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("ArgMax_"))
.Op("argmax")
.Input("in", [input])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.broadcast_to_compatible_with", "broadcast_to_compatible_with")
def broadcast_to_compatible_with(
x: remote_blob_util.BlobDef,
compatible: Sequence[remote_blob_util.BlobDef],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
assert isinstance(compatible, (list, tuple))
if name is None:
name = id_util.UniqueStr("BroadcastToCompatibleWith_")
op_conf = op_conf_util.OperatorConf()
setattr(op_conf, "name", name)
setattr(op_conf.broadcast_to_compatible_with_conf, "x", x.unique_name)
setattr(op_conf.broadcast_to_compatible_with_conf, "y", "y")
op_conf.broadcast_to_compatible_with_conf.compatible.extend(
[cp.unique_name for cp in compatible]
)
interpret_util.Forward(op_conf)
ret_lbi = logical_blob_id_util.LogicalBlobId()
ret_lbi.op_name = op_conf.name
ret_lbi.blob_name = "y"
return remote_blob_util.RemoteBlob(ret_lbi)
@oneflow_export(
"math.clip_by_value", "clip_by_value", "clip_by_scalar", "clip", "clamp"
)
def clip_by_value(
values: remote_blob_util.BlobDef,
min_value: Optional[Union[int, float]] = None,
max_value: Optional[Union[int, float]] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
if name is None:
name = id_util.UniqueStr("ClipByValue_")
if min_value is not None and max_value is not None:
op_builder = (
flow.user_op_builder(name)
.Op("clip_by_scalar")
.Attr("floating_min", float(min_value))
.Attr("integral_min", int(min_value))
.Attr("floating_max", float(max_value))
.Attr("integral_max", int(max_value))
)
elif min_value is not None:
op_builder = (
flow.user_op_builder(name)
.Op("clip_by_scalar_min")
.Attr("floating_min", float(min_value))
.Attr("integral_min", int(min_value))
)
elif max_value is not None:
op_builder = (
flow.user_op_builder(name)
.Op("clip_by_scalar_max")
.Attr("floating_max", float(max_value))
.Attr("integral_max", int(max_value))
)
else:
raise ValueError("min_value and max_value cannot be None at the same time")
return (
op_builder.Input("x", [values])
.Output("y")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.l2_normalize")
def l2_normalize(
input: remote_blob_util.BlobDef,
axis: Optional[int] = None,
epsilon: float = 1e-12,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
if axis < 0:
axis += len(input.shape)
assert axis >= 0 and axis < len(input.shape)
y, square_x_sum = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("L2Normalize_")
)
.Op("l2_normalize")
.Input("x", [input])
.Output("y")
.Output("square_x_sum")
.Attr("axis", int(axis))
.Attr("epsilon", float(epsilon))
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
return y
@oneflow_export("math.squared_difference")
def squared_difference(
x: Union[int, float, remote_blob_util.BlobDef],
y: Union[int, float, remote_blob_util.BlobDef],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
name_subtract, name_square = None, None
if name is not None:
name_subtract = name + "_subtract"
name_square = name + "_square"
return flow.math.square(flow.math.subtract(x, y, name_subtract), name_square)
@oneflow_export("math.gelu_grad")
def gelu_grad(
x: remote_blob_util.BlobDef,
dy: remote_blob_util.BlobDef,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("GeluGrad_")
)
.Op("gelu_grad")
.Input("x", [x])
.Input("dy", [dy])
.Output("dx")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.tanh_grad")
def tanh_grad(
y: remote_blob_util.BlobDef,
dy: remote_blob_util.BlobDef,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("TanhGrad_")
)
.Op("tanh_grad")
.Input("y", [y])
.Input("dy", [dy])
.Output("dx")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.python.framework.interpret_util.Forward",
"oneflow.core.operator.op_conf_pb2.OperatorConf",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.math.subtract",
"oneflow.user_op_builder",
"oneflow.math.add_n",
"oneflow.core.register.logical_blob_id_pb2.LogicalBlobId",
"oneflow.python.o... | [((1268, 1294), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.add"""'], {}), "('math.add')\n", (1282, 1294), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2673, 2701), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.add_n"""'], {}), "('math.add_n')\n", (2687, 2701), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2870, 2901), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.subtract"""'], {}), "('math.subtract')\n", (2884, 2901), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3551, 3582), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.multiply"""'], {}), "('math.multiply')\n", (3565, 3582), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4222, 4251), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.divide"""'], {}), "('math.divide')\n", (4236, 4251), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5033, 5059), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.mod"""'], {}), "('math.mod')\n", (5047, 5059), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9656, 9683), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.tanh"""'], {}), "('math.tanh')\n", (9670, 9683), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((10167, 10194), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.gelu"""'], {}), "('math.gelu')\n", (10181, 10194), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((10657, 10695), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.relu"""', '"""nn.relu"""'], {}), "('math.relu', 'nn.relu')\n", (10671, 10695), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11146, 11176), 
'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.sigmoid"""'], {}), "('math.sigmoid')\n", (11160, 11176), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11679, 11746), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_segment_sum"""', '"""unsorted_segment_sum"""'], {}), "('math.unsorted_segment_sum', 'unsorted_segment_sum')\n", (11693, 11746), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12391, 12468), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_segment_sum_like"""', '"""unsorted_segment_sum_like"""'], {}), "('math.unsorted_segment_sum_like', 'unsorted_segment_sum_like')\n", (12405, 12468), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((13122, 13201), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_batch_segment_sum"""', '"""unsorted_batch_segment_sum"""'], {}), "('math.unsorted_batch_segment_sum', 'unsorted_batch_segment_sum')\n", (13136, 13201), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((13811, 13833), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""cast"""'], {}), "('cast')\n", (13825, 13833), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14510, 14538), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.equal"""'], {}), "('math.equal')\n", (14524, 14538), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14741, 14773), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.not_equal"""'], {}), "('math.not_equal')\n", (14755, 14773), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14984, 15011), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.less"""'], {}), "('math.less')\n", (14998, 15011), False, 'from oneflow.python.oneflow_export import 
oneflow_export\n'), ((15212, 15245), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.less_equal"""'], {}), "('math.less_equal')\n", (15226, 15245), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15458, 15488), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.greater"""'], {}), "('math.greater')\n", (15472, 15488), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15695, 15731), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.greater_equal"""'], {}), "('math.greater_equal')\n", (15709, 15731), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15950, 15984), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.logical_and"""'], {}), "('math.logical_and')\n", (15964, 15984), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16199, 16229), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.minimum"""'], {}), "('math.minimum')\n", (16213, 16229), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16442, 16472), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.maximum"""'], {}), "('math.maximum')\n", (16456, 16472), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16685, 16730), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduced_shape_elem_cnt"""'], {}), "('math.reduced_shape_elem_cnt')\n", (16699, 16730), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((17724, 17752), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.top_k"""'], {}), "('math.top_k')\n", (17738, 17752), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((18214, 18243), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.argmax"""'], {}), "('math.argmax')\n", (18228, 18243), False, 
'from oneflow.python.oneflow_export import oneflow_export\n'), ((18609, 18696), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.broadcast_to_compatible_with"""', '"""broadcast_to_compatible_with"""'], {}), "('math.broadcast_to_compatible_with',\n 'broadcast_to_compatible_with')\n", (18623, 18696), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((19545, 19637), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.clip_by_value"""', '"""clip_by_value"""', '"""clip_by_scalar"""', '"""clip"""', '"""clamp"""'], {}), "('math.clip_by_value', 'clip_by_value', 'clip_by_scalar',\n 'clip', 'clamp')\n", (19559, 19637), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((21037, 21072), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.l2_normalize"""'], {}), "('math.l2_normalize')\n", (21051, 21072), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((21765, 21806), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.squared_difference"""'], {}), "('math.squared_difference')\n", (21779, 21806), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((22234, 22266), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.gelu_grad"""'], {}), "('math.gelu_grad')\n", (22248, 22266), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((22718, 22750), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.tanh_grad"""'], {}), "('math.tanh_grad')\n", (22732, 22750), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6687, 6716), 'oneflow.math.add_n', 'flow.math.add_n', (['[x, y]', 'name'], {}), '([x, y], name)\n', (6702, 6716), True, 'import oneflow as flow\n'), ((16952, 16979), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (16977, 16979), True, 'import 
oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((17527, 17558), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (17549, 17558), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((17573, 17609), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (17607, 17609), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((17684, 17720), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['out_lbi'], {}), '(out_lbi)\n', (17711, 17720), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((19023, 19050), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (19048, 19050), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((19347, 19378), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (19369, 19378), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((19394, 19430), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (19428, 19430), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((19505, 19541), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['ret_lbi'], {}), '(ret_lbi)\n', (19532, 19541), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((5630, 5661), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarAdd_"""'], {}), "('ScalarAdd_')\n", (5647, 5661), True, 'import oneflow.python.framework.id_util as id_util\n'), ((6812, 6844), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (["(math_op + '_')"], {}), "(math_op + '_')\n", (6829, 6844), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((8092, 8123), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarMul_"""'], {}), "('ScalarMul_')\n", (8109, 8123), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14254, 14280), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Cast_"""'], {}), "('Cast_')\n", (14271, 14280), True, 'import oneflow.python.framework.id_util as id_util\n'), ((18960, 19007), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BroadcastToCompatibleWith_"""'], {}), "('BroadcastToCompatibleWith_')\n", (18977, 19007), True, 'import oneflow.python.framework.id_util as id_util\n'), ((19898, 19931), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ClipByValue_"""'], {}), "('ClipByValue_')\n", (19915, 19931), True, 'import oneflow.python.framework.id_util as id_util\n'), ((22177, 22216), 'oneflow.math.subtract', 'flow.math.subtract', (['x', 'y', 'name_subtract'], {}), '(x, y, name_subtract)\n', (22195, 22216), True, 'import oneflow as flow\n'), ((4493, 4540), 'oneflow.python.ops.math_unary_elementwise_ops.reciprocal_no_nan', 'math_unary_elementwise_ops.reciprocal_no_nan', (['y'], {}), '(y)\n', (4537, 4540), True, 'import oneflow.python.ops.math_unary_elementwise_ops as math_unary_elementwise_ops\n'), ((17064, 17098), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ShapeElemCnt_"""'], {}), "('ShapeElemCnt_')\n", (17081, 17098), True, 'import oneflow.python.framework.id_util as id_util\n'), ((5676, 5702), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (5696, 5702), True, 'import oneflow as flow\n'), ((8138, 8164), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (8158, 8164), True, 'import oneflow as flow\n'), ((20366, 20392), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (20386, 20392), True, 'import oneflow as 
flow\n'), ((20610, 20636), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (20630, 20636), True, 'import oneflow as flow\n'), ((20024, 20050), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (20044, 20050), True, 'import oneflow as flow\n'), ((6866, 6892), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (6886, 6892), True, 'import oneflow as flow\n'), ((9989, 10015), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TanH_"""'], {}), "('TanH_')\n", (10006, 10015), True, 'import oneflow.python.framework.id_util as id_util\n'), ((10479, 10505), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Gelu_"""'], {}), "('Gelu_')\n", (10496, 10505), True, 'import oneflow.python.framework.id_util as id_util\n'), ((10968, 10994), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Relu_"""'], {}), "('Relu_')\n", (10985, 10994), True, 'import oneflow.python.framework.id_util as id_util\n'), ((11486, 11515), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Sigmoid_"""'], {}), "('Sigmoid_')\n", (11503, 11515), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14303, 14329), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (14323, 14329), True, 'import oneflow as flow\n'), ((18423, 18451), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ArgMax_"""'], {}), "('ArgMax_')\n", (18440, 18451), True, 'import oneflow.python.framework.id_util as id_util\n'), ((2217, 2243), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""AddN_"""'], {}), "('AddN_')\n", (2234, 2243), True, 'import oneflow.python.framework.id_util as id_util\n'), ((6399, 6438), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarAddByTensor_"""'], {}), "('ScalarAddByTensor_')\n", (6416, 6438), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((7371, 7410), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarSubByTensor_"""'], {}), "('ScalarSubByTensor_')\n", (7388, 7410), True, 'import oneflow.python.framework.id_util as id_util\n'), ((7698, 7734), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ElementWiseMul_"""'], {}), "('ElementWiseMul_')\n", (7715, 7734), True, 'import oneflow.python.framework.id_util as id_util\n'), ((8861, 8900), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarMulByTensor_"""'], {}), "('ScalarMulByTensor_')\n", (8878, 8900), True, 'import oneflow.python.framework.id_util as id_util\n'), ((9301, 9340), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarDivByTensor_"""'], {}), "('ScalarDivByTensor_')\n", (9318, 9340), True, 'import oneflow.python.framework.id_util as id_util\n'), ((22497, 22527), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""GeluGrad_"""'], {}), "('GeluGrad_')\n", (22514, 22527), True, 'import oneflow.python.framework.id_util as id_util\n'), ((22981, 23011), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TanhGrad_"""'], {}), "('TanhGrad_')\n", (22998, 23011), True, 'import oneflow.python.framework.id_util as id_util\n'), ((13484, 13529), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedBatchSegmentSum_"""'], {}), "('UnsortedBatchSegmentSum_')\n", (13501, 13529), True, 'import oneflow.python.framework.id_util as id_util\n'), ((17977, 18003), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TopK_"""'], {}), "('TopK_')\n", (17994, 18003), True, 'import oneflow.python.framework.id_util as id_util\n'), ((21446, 21479), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""L2Normalize_"""'], {}), "('L2Normalize_')\n", (21463, 21479), True, 'import oneflow.python.framework.id_util as 
id_util\n'), ((12042, 12082), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedSegmentSum_"""'], {}), "('UnsortedSegmentSum_')\n", (12059, 12082), True, 'import oneflow.python.framework.id_util as id_util\n'), ((12782, 12826), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedSegmentSumLike_"""'], {}), "('UnsortedSegmentSumLike_')\n", (12799, 12826), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow._oneflow_internal
import numpy as np
import inspect
from typing import Union
import oneflow._oneflow_internal.oneflow.core.job.placement as placement_cfg
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.check_point_v2 as check_point_v2
from oneflow.python.framework.function_util import global_function_or_identity
import oneflow.python.framework.runtime_mode as rt_mode
import oneflow.python.framework.ofblob as ofblob_util
import oneflow.python.lib.core.async_util as async_util
import oneflow.python.ops.initializer_util as initializer_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.tensor_str as tensor_str_util
import oneflow as flow
def register_local_tensor_method(name=None):
    """Decorator factory: attach the wrapped function as a method of the
    C++ local tensor type (oneflow._oneflow_internal.Tensor).

    When `name` is None the function's own __name__ is used.
    """

    def decorator(method):
        attr_name = method.__name__ if name is None else name
        setattr(oneflow._oneflow_internal.Tensor, attr_name, method)
        return method

    return decorator
@register_local_tensor_method("numpy")
def _local_tensor_numpy(eager_local_tensor):
if eager_local_tensor.dtype == flow.tensor_buffer:
shapes, dtypes = eager_local_tensor._tensor_buffer_shapes_and_dtypes
tensors = flow.experimental.tensor_buffer_to_list_of_tensors(
Tensor(eager_local_tensor), shapes, dtypes
)
return [t.numpy() for t in tensors]
method_name = eager_local_tensor._get_copy_mirrored_tensor_to_numpy_func_name()
copy_to_numpy = getattr(eager_local_tensor, method_name)
ndarray = np.empty(
tuple(eager_local_tensor.shape),
dtype=flow.convert_oneflow_dtype_to_numpy_dtype(eager_local_tensor.dtype),
)
copy_to_numpy(ndarray)
return ndarray
@register_local_tensor_method("copy_")
def _copy_from_numpy_to_eager_local_tensor(eager_local_tensor, np_arr):
method_name = eager_local_tensor._get_copy_mirrored_tensor_from_numpy_func_name()
copy_from_numpy = getattr(eager_local_tensor, method_name)
assert np_arr.dtype == flow.convert_oneflow_dtype_to_numpy_dtype(
eager_local_tensor.dtype
)
if np_arr.shape == ():
assert tuple(eager_local_tensor.shape) == (1,)
else:
assert np_arr.shape == tuple(eager_local_tensor.shape)
copy_from_numpy(np_arr)
@register_local_tensor_method("_init_by_initializer_conf")
def _init_eager_local_tensor_by_initializer_conf(
eager_local_tensor, initializer_conf, random_seed=0
):
shape = tuple(eager_local_tensor.shape)
initializer = initializer_util.GetInitializer(initializer_conf, random_seed, shape)
# initializer is None if and only if the initializer_conf is empty_initializer
if initializer is None:
return
_copy_from_numpy_to_eager_local_tensor(
eager_local_tensor,
check_point_v2.generate_values_by_initializer(
initializer, shape, eager_local_tensor.dtype
),
)
@oneflow_export("tensor")
def construct_tensor(
data,
dtype=None,
device=None,
requires_grad=False,
placement=None,
sbp=None,
is_consistent=False,
is_lazy=False,
):
if _is_scalar(data) or _input_args_is_data(data):
if (
not _input_args_is_numpy(data)
and dtype is None
and _input_dtype_is_float(data)
):
dtype = flow.float32
data = np.array(data)
if dtype is None:
dtype = dtype_util.convert_numpy_dtype_to_oneflow_dtype(data.dtype)
return Tensor(
data,
dtype=dtype,
device=device,
requires_grad=requires_grad,
placement=placement,
sbp=sbp,
is_consistent=is_consistent,
is_lazy=is_lazy,
)
else:
raise TypeError("Construction error, invalid combination of arguments")
@oneflow_export("Tensor")
class Tensor:
    def __init__(
        self,
        *args,
        dtype=None,
        device=None,
        requires_grad=False,
        placement=None,
        sbp=None,
        is_consistent=False,
        is_lazy=False,
        data_initializer=None,
        determining_initializer=None,
    ):
        """Construct a Tensor; `*args` selects the construction mode.

        Modes, tried in order:
          - an existing Tensor: copied via `flow.to` onto its own device/dtype;
          - a C++ local/consistent tensor: wrapped directly;
          - python/numpy data: materialized via `_construct_with_data`;
          - integers (a shape): stored as an UndeterminedTensor whose storage
            is created later by `determining_initializer`.
        """
        assert len(args) > 0
        dtype = dtype if dtype is not None else oneflow._oneflow_internal.float32
        if isinstance(device, str):
            device = flow.device(device)
        if placement is None:
            # Without an explicit placement, default the device to CPU.
            device = (
                device
                if device is not None
                else oneflow._oneflow_internal.device("cpu")
            )
        if _input_args_is_tensor(*args):
            self._local_or_consistent_tensor = flow.to(
                *args, device=args[0].device, dtype=args[0].dtype, copy=True
            )
            self._undetermined_tensor = None
        elif _input_args_is_consistent_or_local(*args):
            self._local_or_consistent_tensor = args[0]
            self._undetermined_tensor = None
        elif _input_args_is_data(*args):
            self._local_or_consistent_tensor = None
            self._construct_with_data(
                *args,
                dtype=dtype,
                device=device,
                requires_grad=requires_grad,
                placement=placement,
                sbp=sbp,
                is_consistent=is_consistent,
                is_lazy=is_lazy,
            )
        elif _input_args_is_shape(*args):
            shape = args
            self._local_or_consistent_tensor = None
            self._undetermined_tensor = UndeterminedTensor(
                shape,
                dtype,
                device=device,
                requires_grad=requires_grad,
                placement=placement,
                sbp=sbp,
                is_consistent=is_consistent,
                is_lazy=is_lazy,
                data_initializer=data_initializer,
            )
            if determining_initializer is None:
                determining_initializer = _default_initializer_for_determining
            self._determining_initializer = determining_initializer
        else:
            # Maybe some other arguments to be supported, reported as error for now
            raise TypeError("new() received an invalid combination of arguments")
@property
def shape(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.shape
else:
return self._undetermined_tensor.shape
@property
def device(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.device
else:
return self._undetermined_tensor.device
@register_local_tensor_method("ndim")
@property
def ndim(self):
return len(self.shape)
@property
def is_cuda(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_cuda
else:
return self._undetermined_tensor.is_cuda
@property
def dtype(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.dtype
else:
return self._undetermined_tensor.dtype
# internal decorator
def _auto_determine(func):
def wrapped_func(*args, **kwargs):
tensor = args[0]
if not tensor.is_determined:
tensor.determine()
return func(*args, **kwargs)
return wrapped_func
@property
@_auto_determine
def data(self):
if self._local_or_consistent_tensor is not None:
return flow.Tensor(self._local_or_consistent_tensor.data)
else:
return None
@property
def grad(self):
if self._local_or_consistent_tensor is not None:
if self._local_or_consistent_tensor.grad is not None:
return flow.Tensor(self._local_or_consistent_tensor.grad)
else:
return None
    @grad.setter
    @_auto_determine
    def grad(self, new_grad):
        """Replace the gradient; `None` clears it.

        Accepts a flow Tensor or an internal tensor. The stored grad is a
        detached copy, validated against the current grad's metadata.
        """
        def check_grad(grad, new_grad):
            # The new grad must exactly match the existing grad's
            # shape, device and dtype.
            assert (
                grad.shape == new_grad.shape
            ), f"Shape of grads are not equal, {grad.shape} vs {new_grad.shape}"
            assert (
                grad.device == new_grad.device
            ), f"Device of grads are not equal, {grad.device} vs {new_grad.device}"
            assert (
                grad.dtype == new_grad.dtype
            ), f"Data type of grads are not equal, {grad.dtype} vs {new_grad.dtype}"
        if self._local_or_consistent_tensor is not None:
            if new_grad is None:
                self._local_or_consistent_tensor.set_grad(None)
            else:
                # Unwrap a python-level Tensor to its internal tensor first.
                if isinstance(new_grad, Tensor):
                    if not new_grad.is_determined:
                        new_grad.determine()
                    new_grad = new_grad._local_or_consistent_tensor
                new_grad_detach = new_grad.detach()
                check_grad(self.grad, new_grad_detach)
                self._local_or_consistent_tensor.set_grad(new_grad_detach)
@property
def grad_fn(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.grad_fn
else:
return None
@property
def requires_grad(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.requires_grad
else:
return self._undetermined_tensor.requires_grad
@property
def is_leaf(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_leaf
else:
return True
@requires_grad.setter
def requires_grad(self, requires_grad):
if self._local_or_consistent_tensor is not None:
self._local_or_consistent_tensor.requires_grad = requires_grad
else:
self._undetermined_tensor.requires_grad = requires_grad
@register_local_tensor_method()
def size(self, idx=None):
if idx is None:
return self.shape
else:
return self.shape[idx]
@register_local_tensor_method()
def dim(self):
return self.ndim
@register_local_tensor_method()
def ndimension(self):
return self.ndim
@_auto_determine
def detach(self):
if self._local_or_consistent_tensor is not None:
return flow.Tensor(self._local_or_consistent_tensor.detach())
else:
return None
@_auto_determine
def clone(self):
if self._local_or_consistent_tensor is not None:
return flow.Tensor(self._local_or_consistent_tensor.clone())
else:
return None
def requires_grad_(self, requires_grad=True):
self.requires_grad = requires_grad
def get_device(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.device
else:
return self._undetermined_tensor.device
@register_local_tensor_method()
def nelement(self):
prod = 1
for dim in self.shape:
prod *= dim
return prod
@register_local_tensor_method()
def numel(self):
return self.nelement()
def retain_grad(self):
assert self.is_determined
self._local_or_consistent_tensor.retain_grad()
def data_ptr(self):
TODO()
def element_size(self):
return self.dtype.bytes
@_auto_determine
def numpy(self):
internal_tensor = self._local_or_consistent_tensor
if not internal_tensor.is_lazy and not internal_tensor.is_consistent:
return _local_tensor_numpy(internal_tensor)
raise NotImplementedError()
@register_local_tensor_method()
def tolist(self):
return self.numpy().tolist()
@_auto_determine
@register_local_tensor_method()
def backward(self, gradient=None, retain_graph=False, create_graph=False):
flow.autograd.backward(self, gradient, retain_graph, create_graph)
@register_local_tensor_method()
def _transform_ellipsis_type(self, key):
d = self.ndim - len(key) # exclude all Ellipsis type
new_key = list()
for k in key:
if isinstance(k, type(Ellipsis)):
new_key.append(slice(None, None, None))
while d > 0:
new_key.append(slice(None, None, None))
d -= 1
else:
new_key.append(k)
return tuple(new_key)
@register_local_tensor_method()
def _get_slice_obj(self, key):
def get_or_default(x, default):
return x if x is not None else default
def get_canonical_index(index, length, *, start=0):
if index < 0:
index += length
if index > length or index < 0:
raise IndexError(f"Index should be in [0, {length}), but got {index}")
return max(min(index, length), start)
def get_slice_if_int(x):
if isinstance(x, slice):
return x
return slice(x, x + 1)
if isinstance(key, tuple):
assert all(isinstance(x, (slice, int)) for x in key)
else:
assert isinstance(key, (slice, int))
key = (key,)
key = list(map(get_slice_if_int, key))
assert len(key) <= len(self.shape)
for i in range(len(key), len(self.shape)):
key += (slice(None, None, None),)
starts = [
get_canonical_index(get_or_default(x.start, 0), self.shape[i])
for i, x in enumerate(key)
]
stops = [
get_canonical_index(
get_or_default(x.stop, self.shape[i]), self.shape[i], start=starts[i]
)
for i, x in enumerate(key)
]
steps = [get_or_default(x.step, 1) for x in key]
assert all(x > 0 for x in steps)
# np.abs is for compatibility of negative steps in the future
shape = (np.abs(np.array(stops) - np.array(starts)) - 1) // np.abs(
np.array(steps)
) + 1
shape = shape.tolist()
return starts, stops, steps, shape
@_auto_determine
@register_local_tensor_method()
def __getitem__(self, key):
return flow.F.tensor_getitem(self, key)
@_auto_determine
@register_local_tensor_method()
def __setitem__(self, key, value):
if isinstance(key, tuple):
key = self._transform_ellipsis_type(key)
unsqueeze_dims = list(
filter(lambda idx: isinstance(key[idx], int), range(len(key)))
)
elif isinstance(key, int):
if key < 0:
key = self.shape[0] + key
unsqueeze_dims = [0]
else:
unsqueeze_dims = []
start, stop, step, shape = self._get_slice_obj(key)
if isinstance(value, (int, float)):
scalar = value
value = flow.Tensor(*shape)
value.fill_(scalar)
else:
prepended_broadcasting_dims = range(
len(self.shape) - len(unsqueeze_dims) - len(value.shape)
)
for dim in prepended_broadcasting_dims:
value = flow.experimental.unsqueeze(value, dim)
for dim in unsqueeze_dims:
value = flow.experimental.unsqueeze(value, dim)
value = flow.experimental.expand(value, *shape)
flow.experimental.tmp.logical_slice_assign(
self, value, list(zip(start, stop, step))
)
return self
@register_local_tensor_method()
def __str__(self):
return self.__repr__()
@register_local_tensor_method()
def __repr__(self):
return tensor_str_util._gen_tensor_str(self)
@register_local_tensor_method()
def __gt__(self, other):
return self.gt(other)
@register_local_tensor_method()
def __lt__(self, other):
return self.lt(other)
@register_local_tensor_method()
def __ge__(self, other):
return self.ge(other)
@register_local_tensor_method()
def __le__(self, other):
return self.le(other)
def __array__(self):
TODO()
def __sizeof__(self):
TODO()
def __deepcopy__(self, memo):
TODO()
@register_local_tensor_method()
def __mul__(self, other):
return self.mul(other)
@register_local_tensor_method()
def __rmul__(self, other):
return self.mul(other)
@register_local_tensor_method()
def __add__(self, other):
return self.add(other)
@register_local_tensor_method()
def __iadd__(self, other):
return self.add_(other)
@register_local_tensor_method()
def __radd__(self, other):
return self.add(other)
@register_local_tensor_method()
def __sub__(self, other):
return self.sub(other)
@register_local_tensor_method()
def __rsub__(self, other):
return flow.experimental.sub(other, self)
@register_local_tensor_method()
def __truediv__(self, other):
return self.div(other)
@register_local_tensor_method()
def __rtruediv__(self, other):
return flow.experimental.div(other, self)
@register_local_tensor_method()
def __neg__(self):
return flow.experimental.neg(self)
@register_local_tensor_method()
def __pow__(self, b):
return flow.experimental.pow(self, b)
def _determine_if_needed(self, determining_initializer=None):
if not self.is_determined:
self.determine(determining_initializer)
def determine(self, determining_initializer=None):
assert not self.is_determined
if determining_initializer is None:
determining_initializer = self._determining_initializer
self._local_or_consistent_tensor = determining_initializer(self)
self._undetermined_tensor = None
@property
def is_determined(self):
if self._local_or_consistent_tensor is not None:
assert self._undetermined_tensor is None
return True
else:
assert self._undetermined_tensor is not None
return False
def set_placement(self, placement):
assert isinstance(placement, flow.placement)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.placement = placement
self._undetermined_tensor.device = None
def set_sbp(self, sbp):
assert isinstance(sbp, oneflow._oneflow_internal.Distribute)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.sbp = sbp
def set_is_consistent(self, is_consistent):
assert isinstance(is_consistent, bool)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.is_consistent = is_consistent
def set_is_lazy(self, is_lazy):
assert isinstance(is_lazy, bool)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.is_lazy = is_lazy
def set_data_initializer(self, data_initializer):
assert isinstance(data_initializer, initializer_conf_util.InitializerConf)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.data_initializer = data_initializer
@property
def placement(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.placement
else:
return self._undetermined_tensor.placement
@property
def is_lazy(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_lazy
else:
return self._undetermined_tensor.is_lazy
@property
def is_consistent(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_consistent
else:
return self._undetermined_tensor.is_consistent
@property
def sbp(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.sbp
else:
return self._undetermined_tensor.sbp
@register_local_tensor_method()
def uniform_(self, a=0, b=1):
initializer_conf = flow.random_uniform_initializer(
minval=a, maxval=b, dtype=self.dtype
)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def kaiming_uniform_(
self, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
initializer_conf = flow.kaiming_initializer(
shape=self.shape,
distribution="random_uniform",
mode=mode,
nonlinearity=nonlinearity,
negative_slope=a,
data_format=data_format,
)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def kaiming_normal_(
self, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
initializer_conf = flow.kaiming_initializer(
shape=self.shape,
distribution="random_normal",
mode=mode,
nonlinearity=nonlinearity,
negative_slope=a,
data_format=data_format,
)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def xavier_normal_(self, gain=1.0, *, data_format="NCHW"):
assert gain == 1.0, "Only gain == 1.0 is supported now"
initializer_conf = flow.xavier_normal_initializer(data_format=data_format)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def xavier_uniform_(self, gain=1.0, *, data_format="NCHW"):
assert gain == 1.0, "Only gain == 1.0 is supported now"
initializer_conf = flow.xavier_uniform_initializer(data_format=data_format)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def normal_(self, mean=0, std=1):
initializer_conf = flow.random_normal_initializer(mean=mean, stddev=std)
return self._init_by_initializer_conf(initializer_conf)
@register_local_tensor_method()
def fill_(self, value):
initializer_conf = flow.constant_initializer(value=value, dtype=self.dtype)
return self._init_by_initializer_conf(initializer_conf)
@_auto_determine
def zeros_(self):
internal_tensor = self._local_or_consistent_tensor
if internal_tensor.is_lazy:
TODO()
if internal_tensor.is_consistent:
TODO()
internal_tensor.zeros_()
@_auto_determine
@register_local_tensor_method()
def register_hook(self, hook):
assert self.is_leaf, "register_hook only supports leaf tensor for now"
assert (
self.requires_grad
), "register_hook only supports tensor with requires_grad=True"
def hook_returning_determined_tensor(grad):
new_grad = hook(grad)
if isinstance(new_grad, Tensor) and not new_grad.is_determined:
new_grad.determine()
new_grad = new_grad._local_or_consistent_tensor
return new_grad
self._local_or_consistent_tensor._register_hook(
hook_returning_determined_tensor
)
@_auto_determine
def copy_(self, other: Union["Tensor", np.ndarray]):
internal_tensor = self._local_or_consistent_tensor
if internal_tensor.is_lazy:
TODO()
if internal_tensor.is_consistent:
TODO()
if isinstance(other, (Tensor, check_point_v2.FileBackendVariableBlob)):
src_np = other.numpy()
else:
assert isinstance(other, np.ndarray)
src_np = other
_copy_from_numpy_to_eager_local_tensor(internal_tensor, src_np)
def _init_by_initializer_conf(self, initializer_conf):
if self.is_determined:
if self.is_consistent:
with self._placement_scope():
check_point_v2.init_by_initializer_conf(
self, initializer_conf, True, None
)
else:
_init_eager_local_tensor_by_initializer_conf(
self._local_or_consistent_tensor, initializer_conf
)
else:
self.set_data_initializer(initializer_conf)
return self
def _placement_scope(self):
if self.is_consistent:
return _convert_to_placement_scope(self.placement)
else:
return _convert_to_placement_scope(self.device)
def _construct_with_data(
self,
*args,
dtype=None,
device=None,
requires_grad=False,
placement=None,
sbp=None,
is_consistent=False,
is_lazy=False,
):
numpy_data = None
if _input_args_is_tuple_or_list(*args):
numpy_data = np.array(args[0])
elif _input_args_is_numpy(*args):
numpy_data = np.ascontiguousarray(args[0])
numpy_data = numpy_data.astype(flow.convert_oneflow_dtype_to_numpy_dtype(dtype))
shape = oneflow._oneflow_internal.Size(tuple(numpy_data.shape))
self._determining_initializer = _numpy_initializer_for_determining
self._undetermined_tensor = UndeterminedTensor(
shape,
dtype,
device=device,
requires_grad=requires_grad,
placement=placement,
sbp=sbp,
is_consistent=is_consistent,
is_lazy=is_lazy,
numpy_data=numpy_data,
)
class UndeterminedTensor:
def __init__(
self,
shape,
dtype,
device=None,
requires_grad=False,
placement=None,
sbp=None,
is_consistent=False,
is_lazy=False,
data_initializer=None,
numpy_data=None,
):
if not isinstance(shape, oneflow._oneflow_internal.Size):
if not isinstance(shape, tuple):
shape = tuple(shape)
shape = oneflow._oneflow_internal.Size(shape)
data_initializer = (
data_initializer
if data_initializer is not None
else flow.empty_initializer(dtype=dtype)
)
device = (
device if device is not None else oneflow._oneflow_internal.device("cpu")
)
self.shape = shape
self.dtype = dtype
self.device = device
self.requires_grad = requires_grad
self.placement = placement
self.sbp = sbp
self.is_consistent = is_consistent
self.is_lazy = is_lazy
self.data_initializer = data_initializer
self.numpy_data = numpy_data
@property
def is_cuda(self):
device_type = None
if self.placement is not None:
device_type = self.placement.device_tag
elif self.device is not None:
device_type = self.device.type
else:
raise ValueError("Neither placement nor device found.")
return device_type == "gpu" or device_type == "cuda"
def _default_initializer_for_determining(tensor):
assert not tensor.is_determined
undetermined_tensor = tensor._undetermined_tensor
if undetermined_tensor.is_consistent:
raise NotImplementedError()
else:
shape = undetermined_tensor.shape
dtype = undetermined_tensor.dtype
determined_tensor = oneflow._oneflow_internal.Tensor(
shape,
dtype,
undetermined_tensor.device,
undetermined_tensor.is_lazy,
undetermined_tensor.requires_grad,
True,
)
_init_eager_local_tensor_by_initializer_conf(
determined_tensor, undetermined_tensor.data_initializer
)
return determined_tensor
def _numpy_initializer_for_determining(tensor):
assert not tensor.is_determined
undetermined_tensor = tensor._undetermined_tensor
numpy_data = undetermined_tensor.numpy_data
assert numpy_data is not None
if undetermined_tensor.is_consistent:
raise NotImplementedError()
else:
determined_tensor = oneflow._oneflow_internal.Tensor(
undetermined_tensor.shape,
undetermined_tensor.dtype,
undetermined_tensor.device,
undetermined_tensor.is_lazy,
undetermined_tensor.requires_grad,
True,
)
_copy_from_numpy_to_eager_local_tensor(determined_tensor, numpy_data)
return determined_tensor
def _input_args_is_tuple_or_list(*args):
return len(args) == 1 and isinstance(args[0], (tuple, list))
def _input_args_is_numpy(*args):
return len(args) == 1 and isinstance(args[0], np.ndarray)
def _input_args_is_consistent_or_local(*args):
return len(args) == 1 and isinstance(args[0], oneflow._oneflow_internal.Tensor)
def _input_args_is_tensor(*args):
return len(args) == 1 and isinstance(args[0], flow.Tensor)
def _input_args_is_data(*args):
return _input_args_is_numpy(*args) or _input_args_is_tuple_or_list(*args)
def _input_args_is_shape(*args):
return all(isinstance(x, int) for x in args)
def register_tensor_op(op_name):
def set_tensor_op(method):
setattr(Tensor, op_name, method)
setattr(oneflow._oneflow_internal.Tensor, op_name, method)
return method
return set_tensor_op
def _convert_to_placement_scope(placement_or_device):
if isinstance(placement_or_device, flow.placement):
placement = placement_or_device
return flow.scope.placement(
placement.device_tag,
list(placement.parallel_conf.device_name()),
placement.hierarchy,
)
else:
device = placement_or_device
# TODO(jianhao): replace 0 with real machine id
machine_id = 0
# TODO(jianhao): support cuda in of
if device.type == "cuda":
device_tag = "gpu"
else:
device_tag = device.type
return flow.scope.placement(
device_tag, "{}:{}".format(machine_id, device.index), None
)
def _is_scalar(data):
return isinstance(data, (int, float, bool, complex))
def _flatten_list_or_tuple(list_or_tuple):
for item in list_or_tuple:
if isinstance(item, (list, tuple)):
yield from _flatten_list_or_tuple(item)
else:
yield item
def _input_dtype_is_float(data):
if _is_scalar(data):
return isinstance(data, float)
elif isinstance(data, (list, tuple)):
return any(isinstance(x, float) for x in _flatten_list_or_tuple(data))
return False
| [
"oneflow.convert_oneflow_dtype_to_numpy_dtype",
"oneflow.python.framework.dtype.convert_numpy_dtype_to_oneflow_dtype",
"oneflow.to",
"oneflow.empty_initializer",
"oneflow.experimental.sub",
"oneflow.kaiming_initializer",
"oneflow.xavier_normal_initializer",
"oneflow.xavier_uniform_initializer",
"one... | [((3729, 3753), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor"""'], {}), "('tensor')\n", (3743, 3753), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4650, 4674), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""Tensor"""'], {}), "('Tensor')\n", (4664, 4674), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3328, 3397), 'oneflow.python.ops.initializer_util.GetInitializer', 'initializer_util.GetInitializer', (['initializer_conf', 'random_seed', 'shape'], {}), '(initializer_conf, random_seed, shape)\n', (3359, 3397), True, 'import oneflow.python.ops.initializer_util as initializer_util\n'), ((2831, 2898), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['eager_local_tensor.dtype'], {}), '(eager_local_tensor.dtype)\n', (2872, 2898), True, 'import oneflow as flow\n'), ((3605, 3700), 'oneflow.python.framework.check_point_v2.generate_values_by_initializer', 'check_point_v2.generate_values_by_initializer', (['initializer', 'shape', 'eager_local_tensor.dtype'], {}), '(initializer, shape,\n eager_local_tensor.dtype)\n', (3650, 3700), True, 'import oneflow.python.framework.check_point_v2 as check_point_v2\n'), ((4168, 4182), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (4176, 4182), True, 'import numpy as np\n'), ((12862, 12928), 'oneflow.autograd.backward', 'flow.autograd.backward', (['self', 'gradient', 'retain_graph', 'create_graph'], {}), '(self, gradient, retain_graph, create_graph)\n', (12884, 12928), True, 'import oneflow as flow\n'), ((15201, 15233), 'oneflow.F.tensor_getitem', 'flow.F.tensor_getitem', (['self', 'key'], {}), '(self, key)\n', (15222, 15233), True, 'import oneflow as flow\n'), ((16664, 16701), 'oneflow.python.framework.tensor_str._gen_tensor_str', 'tensor_str_util._gen_tensor_str', (['self'], {}), '(self)\n', (16695, 16701), True, 'import oneflow.python.framework.tensor_str as 
tensor_str_util\n'), ((17894, 17928), 'oneflow.experimental.sub', 'flow.experimental.sub', (['other', 'self'], {}), '(other, self)\n', (17915, 17928), True, 'import oneflow as flow\n'), ((18118, 18152), 'oneflow.experimental.div', 'flow.experimental.div', (['other', 'self'], {}), '(other, self)\n', (18139, 18152), True, 'import oneflow as flow\n'), ((18228, 18255), 'oneflow.experimental.neg', 'flow.experimental.neg', (['self'], {}), '(self)\n', (18249, 18255), True, 'import oneflow as flow\n'), ((18334, 18364), 'oneflow.experimental.pow', 'flow.experimental.pow', (['self', 'b'], {}), '(self, b)\n', (18355, 18364), True, 'import oneflow as flow\n'), ((21494, 21563), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': 'a', 'maxval': 'b', 'dtype': 'self.dtype'}), '(minval=a, maxval=b, dtype=self.dtype)\n', (21525, 21563), True, 'import oneflow as flow\n'), ((21830, 21993), 'oneflow.kaiming_initializer', 'flow.kaiming_initializer', ([], {'shape': 'self.shape', 'distribution': '"""random_uniform"""', 'mode': 'mode', 'nonlinearity': 'nonlinearity', 'negative_slope': 'a', 'data_format': 'data_format'}), "(shape=self.shape, distribution='random_uniform',\n mode=mode, nonlinearity=nonlinearity, negative_slope=a, data_format=\n data_format)\n", (21854, 21993), True, 'import oneflow as flow\n'), ((22311, 22473), 'oneflow.kaiming_initializer', 'flow.kaiming_initializer', ([], {'shape': 'self.shape', 'distribution': '"""random_normal"""', 'mode': 'mode', 'nonlinearity': 'nonlinearity', 'negative_slope': 'a', 'data_format': 'data_format'}), "(shape=self.shape, distribution='random_normal',\n mode=mode, nonlinearity=nonlinearity, negative_slope=a, data_format=\n data_format)\n", (22335, 22473), True, 'import oneflow as flow\n'), ((22803, 22858), 'oneflow.xavier_normal_initializer', 'flow.xavier_normal_initializer', ([], {'data_format': 'data_format'}), '(data_format=data_format)\n', (22833, 22858), True, 'import oneflow as flow\n'), ((23115, 
23171), 'oneflow.xavier_uniform_initializer', 'flow.xavier_uniform_initializer', ([], {'data_format': 'data_format'}), '(data_format=data_format)\n', (23146, 23171), True, 'import oneflow as flow\n'), ((23338, 23391), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'mean': 'mean', 'stddev': 'std'}), '(mean=mean, stddev=std)\n', (23368, 23391), True, 'import oneflow as flow\n'), ((23548, 23604), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {'value': 'value', 'dtype': 'self.dtype'}), '(value=value, dtype=self.dtype)\n', (23573, 23604), True, 'import oneflow as flow\n'), ((2421, 2488), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['eager_local_tensor.dtype'], {}), '(eager_local_tensor.dtype)\n', (2462, 2488), True, 'import oneflow as flow\n'), ((4229, 4288), 'oneflow.python.framework.dtype.convert_numpy_dtype_to_oneflow_dtype', 'dtype_util.convert_numpy_dtype_to_oneflow_dtype', (['data.dtype'], {}), '(data.dtype)\n', (4276, 4288), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((5144, 5163), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5155, 5163), True, 'import oneflow as flow\n'), ((5441, 5510), 'oneflow.to', 'flow.to', (['*args'], {'device': 'args[0].device', 'dtype': 'args[0].dtype', 'copy': '(True)'}), '(*args, device=args[0].device, dtype=args[0].dtype, copy=True)\n', (5448, 5510), True, 'import oneflow as flow\n'), ((8425, 8475), 'oneflow.Tensor', 'flow.Tensor', (['self._local_or_consistent_tensor.data'], {}), '(self._local_or_consistent_tensor.data)\n', (8436, 8475), True, 'import oneflow as flow\n'), ((15879, 15898), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {}), '(*shape)\n', (15890, 15898), True, 'import oneflow as flow\n'), ((16320, 16359), 'oneflow.experimental.expand', 'flow.experimental.expand', (['value', '*shape'], {}), '(value, *shape)\n', (16344, 16359), True, 'import oneflow as flow\n'), ((26255, 26272), 
'numpy.array', 'np.array', (['args[0]'], {}), '(args[0])\n', (26263, 26272), True, 'import numpy as np\n'), ((26409, 26457), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['dtype'], {}), '(dtype)\n', (26450, 26457), True, 'import oneflow as flow\n'), ((27559, 27594), 'oneflow.empty_initializer', 'flow.empty_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (27581, 27594), True, 'import oneflow as flow\n'), ((8695, 8745), 'oneflow.Tensor', 'flow.Tensor', (['self._local_or_consistent_tensor.grad'], {}), '(self._local_or_consistent_tensor.grad)\n', (8706, 8745), True, 'import oneflow as flow\n'), ((16157, 16196), 'oneflow.experimental.unsqueeze', 'flow.experimental.unsqueeze', (['value', 'dim'], {}), '(value, dim)\n', (16184, 16196), True, 'import oneflow as flow\n'), ((16260, 16299), 'oneflow.experimental.unsqueeze', 'flow.experimental.unsqueeze', (['value', 'dim'], {}), '(value, dim)\n', (16287, 16299), True, 'import oneflow as flow\n'), ((26340, 26369), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['args[0]'], {}), '(args[0])\n', (26360, 26369), True, 'import numpy as np\n'), ((14992, 15007), 'numpy.array', 'np.array', (['steps'], {}), '(steps)\n', (15000, 15007), True, 'import numpy as np\n'), ((25343, 25418), 'oneflow.python.framework.check_point_v2.init_by_initializer_conf', 'check_point_v2.init_by_initializer_conf', (['self', 'initializer_conf', '(True)', 'None'], {}), '(self, initializer_conf, True, None)\n', (25382, 25418), True, 'import oneflow.python.framework.check_point_v2 as check_point_v2\n'), ((14928, 14943), 'numpy.array', 'np.array', (['stops'], {}), '(stops)\n', (14936, 14943), True, 'import numpy as np\n'), ((14946, 14962), 'numpy.array', 'np.array', (['starts'], {}), '(starts)\n', (14954, 14962), True, 'import numpy as np\n')] |
# Copyright 2021 Fedlearn authors.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import math
import numpy as np
import oneflow as flow
import pickle
from omegaconf import open_dict
from classifier_util import GlueBERT
from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig
import config as configs
from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score
from typing import List, Dict
from config import logger
args = configs.get_config()
# Select per-dataset example counts and optimization hyper-parameters.
# Only the "CoLA" branch is live (dataset is hard-coded just above); the else
# branch keeps numbers for a different GLUE task.
# NOTE(review): several of these values are overridden again inside the
# open_dict block below (e.g. train_example_num) -- confirm which set wins.
dataset = "CoLA"
if dataset == "CoLA":
    # train_example_num=8551
    # eval_example_num=1043
    train_example_num=370000//4
    eval_example_num=10430
    test_example_num=1063
    learning_rate=1e-5
    wd=0.01
else:
    train_example_num=3668
    eval_example_num=408
    test_example_num=1725
    learning_rate=2e-6
    wd=0.001
# Populate the (struct-locked) OmegaConf args object with task, data, model
# and training settings; open_dict allows adding keys not in the base schema.
with open_dict(args):
    args.task_name = 'CoLA'
    args.num_epochs = 3
    args.train_data_prefix = 'train.of_record-'
    args.train_example_num = 370000//4
    args.train_data_part_num = 1
    args.eval_data_prefix = 'eval.of_record-'
    # NOTE(review): this 10833 is overwritten below by
    # `args.eval_example_num = eval_example_num` (10430) -- confirm intent.
    args.eval_example_num = 10833
    args.eval_batch_size_per_device = 64
    args.eval_data_part_num = 1
    args.label_num = 15
    #args.label_num = 2
    # ----------- Model and Data Path ----------------------------------
    #Root_Dir = '/data/tzeng/source/project/OneFlow-Benchmark/LanguageModeling/BERT'
    Root_Dir = '../OneFlow-Benchmark/LanguageModeling/BERT'
    # dataset = 'dataset/glue_ofrecord/CoLA'
    # NOTE(review): this rebinds the module-level `dataset` ("CoLA") to the
    # toutiao data path, so the CoLA branch above only supplied the counts
    # and learning rate -- confirm that is intended.
    dataset ='toutiao-text-classfication-dataset/of_data'
    args.train_data_dir=f'{Root_Dir}/{dataset}/train'
    args.eval_data_dir=f'{Root_Dir}/{dataset}/eval'
    # args.model_load_dir=f'{Root_Dir}/bert_model/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12-oneflow'
    args.model_load_dir=f'{Root_Dir}/bert_model/chinese_L-12_H-768_A-12/chinese_L-12_H-768_A-12-oneflow'
    args.model_save_dir=f'./snapshots'
    args.gpu_num_per_node=3
    # Overrides the num_epochs = 3 assignment above.
    args.num_epochs=1
    args.eval_example_num=eval_example_num
    # args.eval_batch_size_per_device = 4
    args.loss_print_every_n_iter = 20
    args.batch_size_per_device=32
    # Duplicate of the loss_print_every_n_iter assignment above (same value).
    args.loss_print_every_n_iter=20
    args.save_last_snapshot=True
    args.seq_length=64
    #args.seq_length=128
    # BERT-base geometry; vocab_size 21128 matches the Chinese BERT checkpoint.
    args.num_hidden_layers=12
    args.num_attention_heads=12
    args.max_position_embeddings=512
    args.type_vocab_size=2
    args.vocab_size=21128
    args.attention_probs_dropout_prob=0.1
    args.hidden_dropout_prob=0.1
    args.hidden_size_per_head=64
    args.learning_rate=learning_rate
    # NOTE(review): attribute name "weigt_decay" is misspelled; downstream
    # code presumably reads this exact key -- confirm before renaming.
    args.weigt_decay=wd
    # Global batch sizes across all nodes and devices.
    args.batch_size = args.num_nodes * args.gpu_num_per_node * args.batch_size_per_device
    args.eval_batch_size = args.num_nodes * args.gpu_num_per_node * args.eval_batch_size_per_device
# Derived step counts: steps per epoch, eval steps, and total training steps.
args.epoch_size = math.ceil(args.train_example_num / args.batch_size)
args.num_eval_steps = math.ceil(args.eval_example_num / args.eval_batch_size)
args.iter_num = args.epoch_size * args.num_epochs
# ---- Fedlearn Deep Model weights obtain-compute-update params ----#
# When True, train() also returns the model weights for federated aggregation.
TEST_WEIGHT_GET_UPDATE = True
configs.print_args(args)
def BertDataDecoder(
    data_dir, batch_size, data_part_num, seq_length, part_name_prefix, shuffle=True
):
    """Read and decode one batch of BERT features from OFRecord files.

    Builds an OFRecord reader pinned to CPU and decodes the four standard
    BERT classification fields from each record.

    Args:
        data_dir: directory holding the OFRecord part files.
        batch_size: examples per batch.
        data_part_num: number of part files to read from.
        seq_length: token length of input_ids / input_mask / segment_ids.
        part_name_prefix: filename prefix of the part files.
        shuffle: enables both random shuffling and re-shuffling after each
            epoch (the two reader options are tied together here).

    Returns:
        dict mapping field name -> decoded int32 blob.
    """
    # (field name, per-example shape) pairs; every field is int32.
    field_specs = [
        ("input_ids", [seq_length]),
        ("input_mask", [seq_length]),
        ("segment_ids", [seq_length]),
        ("label_ids", [1]),
    ]
    with flow.scope.placement("cpu", "0:0"):
        record_batch = flow.data.ofrecord_reader(
            data_dir,
            batch_size=batch_size,
            data_part_num=data_part_num,
            part_name_prefix=part_name_prefix,
            random_shuffle=shuffle,
            shuffle_after_epoch=shuffle,
        )
        return {
            field: flow.data.OFRecordRawDecoder(
                record_batch, field, shape=shape, dtype=flow.int32
            )
            for field, shape in field_specs
        }
def BuildBert(
    batch_size,
    data_part_num,
    data_dir,
    part_name_prefix,
    args,
    shuffle=True
):
    """Assemble the GlueBERT classification graph on one decoded data batch.

    Args:
        batch_size: examples per batch.
        data_part_num: number of OFRecord part files.
        data_dir: directory holding the OFRecord data.
        part_name_prefix: filename prefix of the part files.
        args: config object providing model geometry and dropout settings.
        shuffle: forwarded to the data decoder.

    Returns:
        (loss, logits, label_ids) blobs for the batch.
    """
    # Per-head width is fixed at 64, so total hidden size scales with head
    # count; feed-forward layer uses the standard 4x expansion.
    hidden_dim = 64 * args.num_attention_heads
    ffn_dim = hidden_dim * 4
    features = BertDataDecoder(
        data_dir, batch_size, data_part_num, args.seq_length, part_name_prefix,
        shuffle=shuffle,
    )
    #is_real_example = decoders['is_real_example']
    labels = features['label_ids']
    loss, logits = GlueBERT(
        features['input_ids'],
        features['input_mask'],
        features['segment_ids'],
        labels,
        args.vocab_size,
        seq_length=args.seq_length,
        hidden_size=hidden_dim,
        num_hidden_layers=args.num_hidden_layers,
        num_attention_heads=args.num_attention_heads,
        intermediate_size=ffn_dim,
        hidden_act="gelu",
        hidden_dropout_prob=args.hidden_dropout_prob,
        attention_probs_dropout_prob=args.attention_probs_dropout_prob,
        max_position_embeddings=args.max_position_embeddings,
        type_vocab_size=args.type_vocab_size,
        initializer_range=0.02,
        label_num=args.label_num,
    )
    return loss, logits, labels
@flow.global_function(type='train', function_config=GetFunctionConfig(args))
def BertGlueFinetuneJob():
    """One training step: build the BERT classifier graph on a training
    batch, register the loss, and minimize it with the configured optimizer.

    Returns:
        dict: {'loss': loss} so callers can fetch the per-step loss.
    """
    loss, logits, _ = BuildBert(
        args.batch_size,
        args.train_data_part_num,
        args.train_data_dir,
        args.train_data_prefix,
        args
    )
    # Register the scalar loss with the job before attaching the optimizer.
    flow.losses.add_loss(loss)
    opt = CreateOptimizer(args)
    opt.minimize(loss)
    #frozen_bottom_layers()
    return {'loss': loss}
@flow.global_function(type='predict', function_config=GetFunctionConfig(args))
def BertGlueEvalTrainJob():
    """Forward-only pass over the *training* split (shuffle disabled) so
    training-set metrics can be computed deterministically.

    Returns:
        (logits, label_ids) for one batch of args.batch_size examples.
    """
    _, logits, label_ids = BuildBert(
        args.batch_size,
        args.train_data_part_num,
        args.train_data_dir,
        args.train_data_prefix,
        args,
        shuffle=False
    )
    return logits, label_ids
@flow.global_function(type='predict', function_config=GetFunctionConfig(args))
def BertGlueEvalValJob():
    """Forward-only pass over the evaluation split (shuffle disabled).

    Returns:
        (logits, label_ids) for one batch of args.eval_batch_size examples.
    """
    #8551 or 1042
    _, logits, label_ids = BuildBert(
        args.eval_batch_size,
        args.eval_data_part_num,
        args.eval_data_dir,
        args.eval_data_prefix,
        args,
        shuffle=False
    )
    return logits, label_ids
def run_eval_job(eval_job_func, num_steps, desc='train'):
    """Run an evaluation job for ``num_steps`` batches and report metrics.

    Each call to ``eval_job_func`` yields (logits, labels) for one batch;
    predictions are the argmax over the logit axis.

    Args:
        eval_job_func: zero-arg OneFlow job returning (logits, label_ids).
        num_steps: number of batches to evaluate.
        desc: tag printed in front of the metric summary line.

    Returns:
        dict with accuracy, matthews_corrcoef, and macro-averaged
        precision / recall / f1.
    """
    y_true = []
    y_pred = []
    for _ in range(num_steps):
        logits, label = eval_job_func().get()
        y_pred.extend(list(logits.numpy().argmax(axis=1)))
        y_true.extend(list(label))
    metric_dict = {
        "accuracy": accuracy_score(y_true, y_pred),
        "matthews_corrcoef": matthews_corrcoef(y_true, y_pred),
        "precision": precision_score(y_true, y_pred, average='macro'),
        "recall": recall_score(y_true, y_pred, average='macro'),
        "f1": f1_score(y_true, y_pred, average='macro'),
    }
    print(desc, ', '.join('{}: {:.3f}'.format(k, v) for k, v in metric_dict.items()))
    return metric_dict
def get_bert_variable(adm_param_filter=True):
    """Fetch the BERT model parameters as a dict of numpy arrays.

    Args:
        adm_param_filter (bool): when True, drop the Adam optimizer moment
            buffers (variables whose stripped names end with '-v' or '-m').

    Returns:
        dict: variable name -> numpy value, always excluding the internal
        train-step counter variable.
    """
    variables = flow.get_all_variables()
    def filter_cond(name):
        # Parenthesized to make the `and`/`or` precedence explicit:
        # drop Adam moments (when requested) and always drop the step counter.
        return (adm_param_filter and name.strip()[-2:] in ['-v', '-m']) \
            or name == 'System-Train-TrainStep-BertGlueFinetuneJob'
    logger.debug("Obtain Bert model parameters ... ")
    V = {name: data.numpy() for name, data in variables.items() if not filter_cond(name)}
    return V
class oneFlowBertClassifier():
    """Thin wrapper around the OneFlow BERT GLUE fine-tuning jobs.

    Handles device/env setup, checkpoint loading/saving, fine-tuning for
    ``config.num_epochs`` epochs, and evaluation on the validation split.
    """
    def __init__(self, config, load_model=True):
        """
        Args:
            config: argparse/omegaconf namespace with training settings.
            load_model (bool): load weights from ``model_load_dir`` when True;
                otherwise only set up the save directory.
        """
        self.CFGs = config
        args = self.CFGs
        flow.config.gpu_device_num(args.gpu_num_per_node)
        flow.env.log_dir(args.log_dir)
        InitNodes(args)
        logger.debug("oneFlowBertClassifier -- 完成初始化...")
        if load_model:
            self.snapshot = Snapshot(self.CFGs.model_save_dir, self.CFGs.model_load_dir)
        else:
            # Fresh model: only prepare the save directory, load nothing.
            self.snapshot = Snapshot(self.CFGs.model_save_dir, None)
    def eval(self):
        """Evaluate on the validation split and return the accuracy score."""
        eval_metrics = run_eval_job(BertGlueEvalValJob, self.CFGs.num_eval_steps, desc='eval')
        return eval_metrics['accuracy']
    def get_model_parameters(self)->Dict[str, np.ndarray]:
        """Return the current model parameters (optimizer state excluded)."""
        return get_bert_variable()
    def load(self, modelPara):
        """Load a dict of parameter arrays into the OneFlow variables."""
        flow.load_variables(modelPara)
    def train(self):
        """Fine-tune for ``num_epochs`` epochs, evaluating after each epoch.

        Returns:
            (model_variables, eval_metrics): model_variables is the parameter
            dict when TEST_WEIGHT_GET_UPDATE is set, otherwise None;
            eval_metrics holds the last validation metrics.
        """
        logger.debug("Start Train oneFlowBert Classifier ...")
        for epoch in range(self.CFGs.num_epochs):
            metric = Metric(desc='finetune', print_steps=self.CFGs.loss_print_every_n_iter,
                            batch_size=self.CFGs.batch_size, keys=['loss'])
            for step in range(self.CFGs.epoch_size):
                BertGlueFinetuneJob().async_get(metric.metric_cb(step, epoch=epoch))
            run_eval_job(BertGlueEvalTrainJob, self.CFGs.epoch_size, desc='train')
            eval_metrics = run_eval_job(BertGlueEvalValJob, self.CFGs.num_eval_steps, desc='eval')
        if self.CFGs.save_last_snapshot:
            try:
                self.snapshot.save("last_snapshot")
            except Exception as e:
                logger.debug(e)
        dummy_train_samples = 999
        # BUG FIX: previously the variables dict was only assigned inside the
        # TEST_WEIGHT_GET_UPDATE branch, so the unconditional return below
        # raised NameError whenever the flag was false.
        model_variables = None
        if TEST_WEIGHT_GET_UPDATE:
            model_variables = get_bert_variable()
            eval_metrics['acc'] = eval_metrics['accuracy']
            eval_metrics['train_samples'] = dummy_train_samples
        return model_variables, eval_metrics
def test():
    """Entry point: derive batch/step counts from `args`, point the data and
    model paths at the CoLA GLUE task, then train the classifier.
    """
    with open_dict(args):
        # Effective batch sizes across all nodes and devices.
        args.batch_size = args.num_nodes * args.gpu_num_per_node * args.batch_size_per_device
        args.eval_batch_size = args.num_nodes * args.gpu_num_per_node * args.eval_batch_size_per_device
        args.epoch_size = math.ceil(args.train_example_num / args.batch_size)
        args.num_eval_steps = math.ceil(args.eval_example_num / args.eval_batch_size)
        args.iter_num = args.epoch_size * args.num_epochs
        # ----------- Model and Data Path ----------------------------------
        # Root_Dir = '/data/tzeng/source/project/OneFlow-Benchmark/LanguageModeling/BERT'
        Root_Dir = '../LanguageModeling/BERT'
        dataset = 'dataset/glue_ofrecord/CoLA'
        args.train_data_dir=f'{Root_Dir}/{dataset}/train'
        args.eval_data_dir=f'{Root_Dir}/{dataset}/eval'
        args.model_load_dir=f'{Root_Dir}/bert_model/uncased_L-12_H-768_A-12/uncased_L-12_H-768_A-12-oneflow'
        args.model_save_dir=f'./snapshots'
    configs.print_args(args)
    trainer = oneFlowBertClassifier(args)
    trainer.train()
if __name__ == '__main__':
test() | [
"oneflow.load_variables",
"oneflow.data.OFRecordRawDecoder",
"oneflow.get_all_variables",
"oneflow.env.log_dir",
"oneflow.losses.add_loss",
"oneflow.scope.placement",
"oneflow.config.gpu_device_num",
"oneflow.data.ofrecord_reader"
] | [((1006, 1026), 'config.get_config', 'configs.get_config', ([], {}), '()\n', (1024, 1026), True, 'import config as configs\n'), ((3580, 3604), 'config.print_args', 'configs.print_args', (['args'], {}), '(args)\n', (3598, 3604), True, 'import config as configs\n'), ((1347, 1362), 'omegaconf.open_dict', 'open_dict', (['args'], {}), '(args)\n', (1356, 1362), False, 'from omegaconf import open_dict\n'), ((3281, 3332), 'math.ceil', 'math.ceil', (['(args.train_example_num / args.batch_size)'], {}), '(args.train_example_num / args.batch_size)\n', (3290, 3332), False, 'import math\n'), ((3359, 3414), 'math.ceil', 'math.ceil', (['(args.eval_example_num / args.eval_batch_size)'], {}), '(args.eval_example_num / args.eval_batch_size)\n', (3368, 3414), False, 'import math\n'), ((5050, 5655), 'classifier_util.GlueBERT', 'GlueBERT', (["decoders['input_ids']", "decoders['input_mask']", "decoders['segment_ids']", "decoders['label_ids']", 'args.vocab_size'], {'seq_length': 'args.seq_length', 'hidden_size': 'hidden_size', 'num_hidden_layers': 'args.num_hidden_layers', 'num_attention_heads': 'args.num_attention_heads', 'intermediate_size': 'intermediate_size', 'hidden_act': '"""gelu"""', 'hidden_dropout_prob': 'args.hidden_dropout_prob', 'attention_probs_dropout_prob': 'args.attention_probs_dropout_prob', 'max_position_embeddings': 'args.max_position_embeddings', 'type_vocab_size': 'args.type_vocab_size', 'initializer_range': '(0.02)', 'label_num': 'args.label_num'}), "(decoders['input_ids'], decoders['input_mask'], decoders[\n 'segment_ids'], decoders['label_ids'], args.vocab_size, seq_length=args\n .seq_length, hidden_size=hidden_size, num_hidden_layers=args.\n num_hidden_layers, num_attention_heads=args.num_attention_heads,\n intermediate_size=intermediate_size, hidden_act='gelu',\n hidden_dropout_prob=args.hidden_dropout_prob,\n attention_probs_dropout_prob=args.attention_probs_dropout_prob,\n max_position_embeddings=args.max_position_embeddings, type_vocab_size=\n 
args.type_vocab_size, initializer_range=0.02, label_num=args.label_num)\n", (5058, 5655), False, 'from classifier_util import GlueBERT\n'), ((6096, 6122), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['loss'], {}), '(loss)\n', (6116, 6122), True, 'import oneflow as flow\n'), ((6133, 6154), 'util.CreateOptimizer', 'CreateOptimizer', (['args'], {}), '(args)\n', (6148, 6154), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((7850, 7874), 'oneflow.get_all_variables', 'flow.get_all_variables', ([], {}), '()\n', (7872, 7874), True, 'import oneflow as flow\n'), ((8051, 8100), 'config.logger.debug', 'logger.debug', (['"""Abtain Bert model parameters ... """'], {}), "('Abtain Bert model parameters ... ')\n", (8063, 8100), False, 'from config import logger\n'), ((11348, 11372), 'config.print_args', 'configs.print_args', (['args'], {}), '(args)\n', (11366, 11372), True, 'import config as configs\n'), ((3724, 3758), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (3744, 3758), True, 'import oneflow as flow\n'), ((3779, 3964), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['data_dir'], {'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'part_name_prefix': 'part_name_prefix', 'random_shuffle': 'shuffle', 'shuffle_after_epoch': 'shuffle'}), '(data_dir, batch_size=batch_size, data_part_num=\n data_part_num, part_name_prefix=part_name_prefix, random_shuffle=\n shuffle, shuffle_after_epoch=shuffle)\n', (3804, 3964), True, 'import oneflow as flow\n'), ((5863, 5886), 'util.GetFunctionConfig', 'GetFunctionConfig', (['args'], {}), '(args)\n', (5880, 5886), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((6288, 6311), 'util.GetFunctionConfig', 'GetFunctionConfig', (['args'], {}), '(args)\n', (6305, 6311), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((6626, 
6649), 'util.GetFunctionConfig', 'GetFunctionConfig', (['args'], {}), '(args)\n', (6643, 6649), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((8346, 8395), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (8372, 8395), True, 'import oneflow as flow\n'), ((8404, 8434), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (8420, 8434), True, 'import oneflow as flow\n'), ((8443, 8458), 'util.InitNodes', 'InitNodes', (['args'], {}), '(args)\n', (8452, 8458), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((8467, 8516), 'config.logger.debug', 'logger.debug', (['"""oneFlowBertClassifier -- 完成初始化..."""'], {}), "('oneFlowBertClassifier -- 完成初始化...')\n", (8479, 8516), False, 'from config import logger\n'), ((9055, 9085), 'oneflow.load_variables', 'flow.load_variables', (['modelPara'], {}), '(modelPara)\n', (9074, 9085), True, 'import oneflow as flow\n'), ((9173, 9227), 'config.logger.debug', 'logger.debug', (['"""Start Train oneFlowBert Classifier ..."""'], {}), "('Start Train oneFlowBert Classifier ...')\n", (9185, 9227), False, 'from config import logger\n'), ((10377, 10392), 'omegaconf.open_dict', 'open_dict', (['args'], {}), '(args)\n', (10386, 10392), False, 'from omegaconf import open_dict\n'), ((10619, 10670), 'math.ceil', 'math.ceil', (['(args.train_example_num / args.batch_size)'], {}), '(args.train_example_num / args.batch_size)\n', (10628, 10670), False, 'import math\n'), ((10701, 10756), 'math.ceil', 'math.ceil', (['(args.eval_example_num / args.eval_batch_size)'], {}), '(args.eval_example_num / args.eval_batch_size)\n', (10710, 10756), False, 'import math\n'), ((4290, 4360), 'oneflow.data.OFRecordRawDecoder', 'flow.data.OFRecordRawDecoder', (['ofrecord', 'name'], {'shape': 'shape', 'dtype': 'dtype'}), '(ofrecord, name, shape=shape, dtype=dtype)\n', (4318, 
4360), True, 'import oneflow as flow\n'), ((7286, 7321), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (7300, 7321), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((7357, 7395), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['labels', 'predictions'], {}), '(labels, predictions)\n', (7374, 7395), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((7423, 7476), 'sklearn.metrics.precision_score', 'precision_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (7438, 7476), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((7501, 7551), 'sklearn.metrics.recall_score', 'recall_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (7513, 7551), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((7570, 7616), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'predictions'], {'average': '"""macro"""'}), "(labels, predictions, average='macro')\n", (7578, 7616), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((8577, 8637), 'util.Snapshot', 'Snapshot', (['self.CFGs.model_save_dir', 'self.CFGs.model_load_dir'], {}), '(self.CFGs.model_save_dir, self.CFGs.model_load_dir)\n', (8585, 8637), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((8710, 8750), 'util.Snapshot', 'Snapshot', (['self.CFGs.model_save_dir', 'None'], {}), '(self.CFGs.model_save_dir, None)\n', (8718, 8750), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((9312, 9434), 'util.Metric', 'Metric', ([], 
{'desc': '"""finetune"""', 'print_steps': 'self.CFGs.loss_print_every_n_iter', 'batch_size': 'self.CFGs.batch_size', 'keys': "['loss']"}), "(desc='finetune', print_steps=self.CFGs.loss_print_every_n_iter,\n batch_size=self.CFGs.batch_size, keys=['loss'])\n", (9318, 9434), False, 'from util import Snapshot, InitNodes, Metric, CreateOptimizer, GetFunctionConfig\n'), ((10035, 10050), 'config.logger.debug', 'logger.debug', (['e'], {}), '(e)\n', (10047, 10050), False, 'from config import logger\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from collections.abc import Mapping
import oneflow as flow
from libai.utils import distributed as dist
def pad_batch(x_dict, batch_size, last_batch_lack, is_last_batch):
    """Pad a (possibly short) last batch up to `batch_size` samples.

    Args:
        x_dict: dict of name -> global tensor; all entries are assumed to
            share the same dim-0 (batch) size — TODO confirm with callers.
        batch_size: the full per-iteration batch size.
        last_batch_lack: how many samples the last batch is short of.
        is_last_batch: whether this is the final batch of the epoch.

    Returns:
        (padded_dict, valid_sample): tensors padded to `batch_size` along
        dim 0, and the count of genuine (non-padding) samples.
    """
    x = list(x_dict.values())[0]
    tensor_batch = x.shape[0]
    assert tensor_batch <= batch_size
    # Fast path: a full batch that is not the last one needs no padding.
    if tensor_batch == batch_size and not is_last_batch:
        return x_dict, batch_size
    valid_sample = tensor_batch - last_batch_lack
    data_parallel_size = dist.get_data_parallel_size()
    assert tensor_batch % data_parallel_size == 0
    tensor_micro_batch_size = tensor_batch // data_parallel_size
    padded_dict = {}
    for key, xi in x_dict.items():
        pad_shape = (batch_size, *xi.shape[1:])
        # Broadcast the tensor to every device so padding can be done locally.
        local_xi = xi.to_global(
            sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cuda")
        ).to_local()
        padded_xi = flow.zeros(pad_shape, dtype=xi.dtype, device="cuda")
        padded_xi[:tensor_batch, ...] = padded_xi[:tensor_batch, ...] + local_xi
        # Shift samples so that padding slots line up with each data-parallel
        # rank's micro-batch. NOTE(review): verify this matches the intended
        # per-rank layout for last_batch_lack > 1.
        for i in range(last_batch_lack - 1):
            start_idx = tensor_micro_batch_size * (data_parallel_size - i - 1) - 1
            padded_xi[start_idx:-1] = padded_xi[start_idx + 1 :]
        # Restore the original SBP/placement of the input tensor.
        padded_xi = padded_xi.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]), placement=xi.placement
        ).to_global(sbp=xi.sbp)
        padded_dict[key] = padded_xi
    return padded_dict, valid_sample
def print_csv_format(results):
    """
    Log the main metrics in a copy-paste friendly (CSV-like) format
    so they are easy to paste into a spreadsheet.

    Args:
        results (OrderedDict[dict]): task_name -> {metric -> score};
            an unordered dict is also accepted (arbitrary order).
    """
    assert isinstance(results, Mapping) or not len(results), results
    logger = logging.getLogger(__name__)
    for task, res in results.items():
        if not isinstance(res, Mapping):
            logger.info(f"copypaste: {task}={res}")
            continue
        # Skip "AP-category" style metrics; they are usually not tracked.
        important_res = [(k, v) for k, v in res.items() if "-" not in k]
        keys_csv = ",".join(k for k, _ in important_res)
        vals_csv = ",".join("{0:.4f}".format(v) for _, v in important_res)
        logger.info("copypaste: Task: {}".format(task))
        logger.info("copypaste: " + keys_csv)
        logger.info("copypaste: " + vals_csv)
def flatten_results_dict(results):
    """
    Expand a hierarchical dict of scalars into a flat dict of scalars.

    If results[k1][k2][k3] = v, the returned dict will have the entry
    {"k1/k2/k3": v}.

    Args:
        results (dict):
    """
    flat = {}
    for key, value in results.items():
        if not isinstance(value, Mapping):
            flat[key] = value
            continue
        # Recurse into the sub-dict, then prefix nested keys with "<key>/".
        for sub_key, sub_value in flatten_results_dict(value).items():
            flat[key + "/" + sub_key] = sub_value
    return flat
| [
"oneflow.zeros",
"oneflow.env.all_device_placement"
] | [((1079, 1108), 'libai.utils.distributed.get_data_parallel_size', 'dist.get_data_parallel_size', ([], {}), '()\n', (1106, 1108), True, 'from libai.utils import distributed as dist\n'), ((2444, 2471), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2461, 2471), False, 'import logging\n'), ((1486, 1538), 'oneflow.zeros', 'flow.zeros', (['pad_shape'], {'dtype': 'xi.dtype', 'device': '"""cuda"""'}), "(pad_shape, dtype=xi.dtype, device='cuda')\n", (1496, 1538), True, 'import oneflow as flow\n'), ((1407, 1444), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (1436, 1444), True, 'import oneflow as flow\n'), ((1870, 1927), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (1885, 1927), True, 'from libai.utils import distributed as dist\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _np_l1loss(np_input, np_target):
np_l1 = np.abs(np_target - np_input)
np_l1_sum = np.sum(np_l1)
np_l1_mean = np.mean(np_l1)
return {
"none": np_l1,
"mean": np_l1_mean,
"sum": np_l1_sum,
}
def _np_l1loss_grad(np_input, np_target):
elem_cnt = np_input.size
np_grad = np.where(np_target - np_input > 0, -1, 1)
np_l1_grad_sum = np_grad
np_l1_grad_mean = np_l1_grad_sum / elem_cnt
return {
"none": np_grad,
"mean": np_l1_grad_mean,
"sum": np_l1_grad_sum,
}
def _test_l1loss_impl(test_case, device, shape, reduction):
    """Check flow.nn.L1Loss forward and backward against the numpy reference.

    Args:
        test_case: the unittest.TestCase instance providing assertions.
        device (str): "cpu" or "cuda".
        shape (tuple): input/target tensor shape.
        reduction (str): "none", "sum", or "mean".
    """
    x = np.random.randn(*shape)
    y = np.random.randn(*shape)
    input = flow.Tensor(
        x, dtype=flow.float32, requires_grad=True, device=flow.device(device)
    )
    target = flow.Tensor(y, dtype=flow.float32, device=flow.device(device))
    loss = flow.nn.L1Loss(reduction)
    loss = loss.to(device)
    of_out = loss(input, target)
    np_out = _np_l1loss(x, y)[reduction]
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
    # Reduce to a scalar so backward() also works for reduction="none".
    of_out = of_out.sum()
    of_out.backward()
    np_grad = _np_l1loss_grad(x, y)[reduction]
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestL1LossModule(flow.unittest.TestCase):
    """Parameterized test of flow.nn.L1Loss over devices, shapes, reductions."""
    def test_l1loss(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_l1loss_impl,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        arg_dict["shape"] = [
            (3, 5),
            (10, 9, 21),
            (14, 22, 9, 21),
            (3, 2, 4, 16, 5),
            (1,),
        ]
        arg_dict["reduction"] = ["none", "sum", "mean"]
        # Run the implementation once per cartesian combination of arguments.
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.L1Loss",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.device"
] | [((783, 811), 'numpy.abs', 'np.abs', (['(np_target - np_input)'], {}), '(np_target - np_input)\n', (789, 811), True, 'import numpy as np\n'), ((828, 841), 'numpy.sum', 'np.sum', (['np_l1'], {}), '(np_l1)\n', (834, 841), True, 'import numpy as np\n'), ((859, 873), 'numpy.mean', 'np.mean', (['np_l1'], {}), '(np_l1)\n', (866, 873), True, 'import numpy as np\n'), ((1058, 1099), 'numpy.where', 'np.where', (['(np_target - np_input > 0)', '(-1)', '(1)'], {}), '(np_target - np_input > 0, -1, 1)\n', (1066, 1099), True, 'import numpy as np\n'), ((1356, 1379), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1371, 1379), True, 'import numpy as np\n'), ((1388, 1411), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1403, 1411), True, 'import numpy as np\n'), ((1609, 1634), 'oneflow.experimental.nn.L1Loss', 'flow.nn.L1Loss', (['reduction'], {}), '(reduction)\n', (1623, 1634), True, 'import oneflow.experimental as flow\n'), ((2666, 2681), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2679, 2681), False, 'import unittest\n'), ((2201, 2214), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2212, 2214), False, 'from collections import OrderedDict\n'), ((2571, 2591), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2581, 2591), False, 'from test_util import GenArgList\n'), ((2013, 2056), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2054, 2056), True, 'import oneflow.experimental as flow\n'), ((1495, 1514), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1506, 1514), True, 'import oneflow.experimental as flow\n'), ((1576, 1595), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1587, 1595), True, 'import oneflow.experimental as flow\n')] |
import numpy as np
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from abc import ABCMeta
from typing import Any
from typing import Dict
from typing import List
from typing import Type
from typing import Union
from typing import Callable
from typing import Optional
from ...types import arrays_type
from ...protocol import LossProtocol
from ...protocol import TrainerState
from ...misc.toolkit import iou
from ...misc.toolkit import to_flow
@LossProtocol.register("iou")
class IOULoss(LossProtocol):
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
logits = forward_results[0]
labels = batch[1]
return 1.0 - iou(logits, labels)
@LossProtocol.register("bce")
class BCELoss(LossProtocol):
def _init_config(self) -> None:
self.bce = nn.BCEWithLogitsLoss(reduction="none")
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1]
losses = self.bce(predictions, labels)
return losses.mean(tuple(range(1, len(losses.shape))))
@LossProtocol.register("mae")
class MAELoss(LossProtocol):
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1]
return F.l1_loss(predictions, labels, reduction="none")
@LossProtocol.register("sigmoid_mae")
class SigmoidMAELoss(LossProtocol):
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1]
losses = F.l1_loss(flow.sigmoid(predictions), labels, reduction="none")
return losses.mean((1, 2, 3))
@LossProtocol.register("mse")
class MSELoss(LossProtocol):
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1]
return F.mse_loss(predictions, labels, reduction="none")
@LossProtocol.register("quantile")
class QuantileLoss(LossProtocol):
def _init_config(self) -> None:
q = self.config.get("q")
if q is None:
raise ValueError("'q' should be provided in Quantile loss")
if isinstance(q, float):
self.register_buffer("q", flow.tensor([q], flow.float32))
else:
q = np.asarray(q, np.float32).reshape([1, -1])
self.register_buffer("q", to_flow(q))
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
quantile_error = batch[1] - forward_results[0] # type: ignore
neg_errors = self.q * quantile_error # type: ignore
pos_errors = (self.q - 1.0) * quantile_error # type: ignore
quantile_losses = flow.max(neg_errors, pos_errors)
return quantile_losses.mean(1, keepdim=True)
@LossProtocol.register("cross_entropy")
class CrossEntropyLoss(LossProtocol):
def _init_config(self) -> None:
self.ce = nn.CrossEntropyLoss(reduction="none")
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1].squeeze() # type: ignore
return self.ce(predictions, labels)
@LossProtocol.register("focal")
class FocalLoss(LossProtocol):
def _init_config(self) -> None:
self._input_logits = self.config.setdefault("input_logits", True)
self._eps = self.config.setdefault("eps", 1e-6)
self._gamma = self.config.setdefault("gamma", 2.0)
alpha = self.config.setdefault("alpha", None)
if isinstance(alpha, (int, float)):
alpha = [alpha, 1 - alpha]
elif isinstance(alpha, (list, tuple)):
alpha = list(alpha)
if alpha is None:
self.alpha = None
else:
self.register_buffer("alpha", to_flow(np.array(alpha, np.float32)))
def _core(
self,
forward_results: arrays_type,
batch: arrays_type,
state: Optional[TrainerState] = None,
**kwargs: Any,
) -> arrays_type:
predictions = forward_results[0]
labels = batch[1]
if not self._input_logits:
prob_mat = predictions.view(-1, predictions.shape[-1]) + self._eps # type: ignore
else:
logits_mat = predictions.view(-1, predictions.shape[-1]) # type: ignore
prob_mat = F.softmax(logits_mat, dim=1) + self._eps
gathered_prob_flat = prob_mat.gather(dim=1, index=labels).view(-1)
gathered_log_prob_flat = gathered_prob_flat.log()
if self.alpha is not None:
alpha_target = self.alpha.gather(dim=0, index=labels.view(-1))
gathered_log_prob_flat = gathered_log_prob_flat * alpha_target
return -gathered_log_prob_flat * (1 - gathered_prob_flat) ** self._gamma
# Registry: multi-loss prefix ("multi_task" / "multi_stage") -> subclass.
multi_prefix_mapping: Dict[str, Type["MultiLoss"]] = {}
class MultiLoss(LossProtocol, metaclass=ABCMeta):
    """Base class for losses composed of several registered base losses."""
    # Subclass contract:
    #   prefix      - registry tag prefix, e.g. "multi_task".
    #   names       - base loss name(s) to instantiate.
    #   configs     - per-base-loss config dicts (keyed by name).
    #   base_losses - filled by _init_config with the instantiated losses.
    prefix: str
    names: Union[str, List[str]]
    configs: Dict[str, Any]
    base_losses: nn.ModuleList
    def _init_config(self) -> None:
        # A single string means exactly one base loss sharing `configs`.
        if isinstance(self.names, str):
            base_losses = [LossProtocol.make(self.names, self.configs)]
        else:
            base_losses = [
                LossProtocol.make(name, self.configs.get(name, {}))
                for name in self.names
            ]
        self.base_losses = nn.ModuleList(base_losses)
    @classmethod
    def register_(
        cls,
        base_loss_names: Union[str, List[str]],
        base_configs: Optional[Dict[str, Any]] = None,
        *,
        tag: Optional[str] = None,
    ) -> None:
        """Register a concrete multi-loss under `<prefix>_<names>` (or `tag`)."""
        if tag is None:
            if isinstance(base_loss_names, str):
                tag = f"{cls.prefix}_{base_loss_names}"
            else:
                tag = f"{cls.prefix}_{'_'.join(base_loss_names)}"
        if tag in cls.d:
            # Already registered; keep the existing entry untouched.
            return None
        @cls.register(tag)
        class _(cls):  # type: ignore
            names = base_loss_names
            configs = base_configs or {}
    @classmethod
    def record_prefix(cls) -> Callable[[Type["MultiLoss"]], Type["MultiLoss"]]:
        """Class decorator recording the subclass in `multi_prefix_mapping`."""
        def _(cls_: Type[MultiLoss]) -> Type[MultiLoss]:
            global multi_prefix_mapping
            multi_prefix_mapping[cls_.prefix] = cls_
            return cls_
        return _
@MultiLoss.record_prefix()
class MultiTaskLoss(MultiLoss):
    """Applies every base loss to the same forward results and sums them."""
    prefix = "multi_task"
    def _core(
        self,
        forward_results: arrays_type,
        batch: arrays_type,
        state: Optional[TrainerState] = None,
        **kwargs: Any,
    ) -> arrays_type:
        losses = []
        for loss_ins in self.base_losses:
            loss = loss_ins._core(forward_results, batch, state, **kwargs)
            # TODO : expose these keys
            # losses[f"{loss_ins.__identifier__}"] = loss
            losses.append(loss)
        # First element is the total; individual losses follow.
        losses.insert(0, sum(losses))
        return losses
@MultiLoss.record_prefix()
class MultiStageLoss(MultiLoss):
    """Applies every base loss to each stage's predictions and sums them.

    Assumes forward_results[0] is an iterable of per-stage predictions
    (NOTE(review): verify against the multi-stage model outputs).
    """
    prefix = "multi_stage"
    def _core(
        self,
        forward_results: arrays_type,
        batch: arrays_type,
        state: Optional[TrainerState] = None,
        **kwargs: Any,
    ) -> arrays_type:
        fr_list = list(forward_results)
        predictions = fr_list[0]
        losses = []
        for i, pred in enumerate(predictions):
            # Substitute the i-th stage's predictions before scoring.
            fr_list[0] = pred
            for loss_ins in self.base_losses:
                loss = loss_ins._core(fr_list, batch, state, **kwargs)
                # TODO : expose these keys
                # losses[f"{loss_ins.__identifier__}{i}"] = loss
                losses.append(loss)
        # First element is the total; individual stage losses follow.
        losses.insert(0, sum(losses))
        return losses
# Public API of this module; BCELoss and MultiTaskLoss were defined and
# registered above but missing from the export list — added for consistency.
__all__ = [
    "IOULoss",
    "BCELoss",
    "MAELoss",
    "MSELoss",
    "QuantileLoss",
    "SigmoidMAELoss",
    "CrossEntropyLoss",
    "FocalLoss",
    "MultiTaskLoss",
    "MultiStageLoss",
]
| [
"oneflow.nn.functional.l1_loss",
"oneflow.max",
"oneflow.sigmoid",
"oneflow.nn.functional.mse_loss",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.functional.softmax",
"oneflow.tensor",
"oneflow.nn.BCEWithLogitsLoss",
"oneflow.nn.ModuleList"
] | [((943, 981), 'oneflow.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (963, 981), True, 'import oneflow.nn as nn\n'), ((1675, 1723), 'oneflow.nn.functional.l1_loss', 'F.l1_loss', (['predictions', 'labels'], {'reduction': '"""none"""'}), "(predictions, labels, reduction='none')\n", (1684, 1723), True, 'import oneflow.nn.functional as F\n'), ((2500, 2549), 'oneflow.nn.functional.mse_loss', 'F.mse_loss', (['predictions', 'labels'], {'reduction': '"""none"""'}), "(predictions, labels, reduction='none')\n", (2510, 2549), True, 'import oneflow.nn.functional as F\n'), ((3424, 3456), 'oneflow.max', 'flow.max', (['neg_errors', 'pos_errors'], {}), '(neg_errors, pos_errors)\n', (3432, 3456), True, 'import oneflow as flow\n'), ((3644, 3681), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (3663, 3681), True, 'import oneflow.nn as nn\n'), ((6166, 6192), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['base_losses'], {}), '(base_losses)\n', (6179, 6192), True, 'import oneflow.nn as nn\n'), ((2080, 2105), 'oneflow.sigmoid', 'flow.sigmoid', (['predictions'], {}), '(predictions)\n', (2092, 2105), True, 'import oneflow as flow\n'), ((2855, 2885), 'oneflow.tensor', 'flow.tensor', (['[q]', 'flow.float32'], {}), '([q], flow.float32)\n', (2866, 2885), True, 'import oneflow as flow\n'), ((5168, 5196), 'oneflow.nn.functional.softmax', 'F.softmax', (['logits_mat'], {'dim': '(1)'}), '(logits_mat, dim=1)\n', (5177, 5196), True, 'import oneflow.nn.functional as F\n'), ((2917, 2942), 'numpy.asarray', 'np.asarray', (['q', 'np.float32'], {}), '(q, np.float32)\n', (2927, 2942), True, 'import numpy as np\n'), ((4632, 4659), 'numpy.array', 'np.array', (['alpha', 'np.float32'], {}), '(alpha, np.float32)\n', (4640, 4659), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestDiagonal(flow.unittest.TestCase):
    """Autotest flow.diagonal against torch for float and bool tensors."""

    @autotest(n=10, check_graph=True)
    def test_flow_diagonal_with_random_data(test_case):
        device = random_device()
        offset = random(-5, 5).to(int)
        dim1 = random(-4, 4).to(int)
        dim2 = random(-4, 4).to(int)
        x = random_tensor(
            ndim=4,
            dim1=random(4, 6),
            dim2=random(4, 6),
            dim3=random(4, 6),
            dim4=random(4, 6),
        ).to(device)
        z = torch.diagonal(x, offset, dim1, dim2)
        return z

    # BUG FIX: this method previously reused the name
    # test_flow_diagonal_with_random_data, so it shadowed the float test
    # above and only one of the two tests was ever collected and run.
    @autotest(auto_backward=False, n=10, check_graph=True)
    def test_flow_diagonal_bool_with_random_data(test_case):
        device = random_device()
        offset = random(-5, 5).to(int)
        dim1 = random(-4, 4).to(int)
        dim2 = random(-4, 4).to(int)
        x = random_tensor(
            ndim=4,
            dim1=random(4, 6),
            dim2=random(4, 6),
            dim3=random(4, 6),
            dim4=random(4, 6),
        ).to(device, torch.bool)
        z = torch.diagonal(x, offset, dim1, dim2)
        return z


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((729, 761), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (759, 761), True, 'import oneflow as flow\n'), ((1873, 1888), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1886, 1888), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestRNN(flow.unittest.TestCase):
@autotest(n=5, check_graph=True)
def test_rnn_tanh_cell(test_case):
device = random_device()
batch_size = random(1, 6)
time_steps = random(1, 6)
input_size = random(1, 6) * 2
hidden_size = random(1, 6) * 2
m = torch.nn.RNNCell(
input_size=input_size,
hidden_size=hidden_size,
bias=random().to(bool),
nonlinearity="tanh",
).to(device)
input = random_tensor(
ndim=3, dim0=time_steps, dim1=batch_size, dim2=input_size
).to(device)
hx = random_tensor(ndim=2, dim0=batch_size, dim1=hidden_size).to(device)
for i in range(time_steps.to(int).value()):
hx = m(input[i], hx)
return hx
@autotest(n=5, check_graph=True)
def test_rnn_relu_cell(test_case):
device = random_device()
batch_size = random(1, 6)
time_steps = random(1, 6)
input_size = random(1, 6) * 2
hidden_size = random(1, 6) * 2
m = torch.nn.RNNCell(
input_size=input_size,
hidden_size=hidden_size,
bias=random().to(bool),
nonlinearity="relu",
).to(device)
input = random_tensor(
ndim=3, dim0=time_steps, dim1=batch_size, dim2=input_size
).to(device)
hx = random_tensor(ndim=2, dim0=batch_size, dim1=hidden_size).to(device)
for i in range(time_steps.to(int).value()):
hx = m(input[i], hx)
return hx
@autotest(n=5, check_graph=True)
def test_lstm_cell(test_case):
device = random_device()
batch_size = random(1, 6)
time_steps = random(1, 6)
input_size = random(1, 6) * 2
hidden_size = random(1, 6) * 2
has_bias = random().to(bool)
cx_requires_grad = random().to(bool)
m = torch.nn.LSTMCell(
input_size=input_size, hidden_size=hidden_size, bias=has_bias,
).to(device)
input = random_tensor(
ndim=3, dim0=time_steps, dim1=batch_size, dim2=input_size
).to(device)
hx = random_tensor(
ndim=2, dim0=batch_size, dim1=hidden_size, requires_grad=False
).to(device)
cx = random_tensor(
ndim=2, dim0=batch_size, dim1=hidden_size, requires_grad=cx_requires_grad
).to(device)
for i in range(time_steps.to(int).value()):
res = m(input[i], (hx, cx))
hx = res[0]
cx = res[1]
return res[0]
@autotest(n=5, check_graph=True)
def test_gru_cell(test_case):
device = random_device()
batch_size = random(1, 6)
time_steps = random(1, 6)
input_size = random(1, 6) * 2
hidden_size = random(1, 6) * 2
has_bias = random().to(bool)
m = torch.nn.GRUCell(
input_size=input_size, hidden_size=hidden_size, bias=has_bias
).to(device)
input = random_tensor(
ndim=3, dim0=time_steps, dim1=batch_size, dim2=input_size
).to(device)
hx = random_tensor(ndim=2, dim0=batch_size, dim1=hidden_size).to(device)
for i in range(time_steps.to(int).value()):
hx = m(input[i], hx)
return hx
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((710, 742), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (740, 742), True, 'import oneflow as flow\n'), ((4042, 4057), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4055, 4057), False, 'import unittest\n')] |
import backbones
import oneflow as flow
from utils.utils_callbacks import CallBackVerification
from backbones import get_model
from graph import TrainGraph, EvalGraph
import logging
import argparse
from utils.utils_config import get_config
from function import EvalGraph
def main(args):
cfg = get_config(args.config)
logging.basicConfig(level=logging.NOTSET)
logging.info(args.model_path)
backbone = get_model(cfg.network, dropout=0.0, num_features=cfg.embedding_size).to(
"cuda"
)
val_callback = CallBackVerification(1, 0, cfg.val_targets, cfg.ofrecord_path)
state_dict = flow.load(args.model_path)
new_parameters = dict()
for key, value in state_dict.items():
if "num_batches_tracked" not in key:
if key == "fc.weight":
continue
new_key = key.replace("backbone.", "")
new_parameters[new_key] = value
backbone.load_state_dict(new_parameters)
infer_graph = EvalGraph(backbone)
val_callback(1000, backbone, infer_graph)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="OneFlow ArcFace val")
parser.add_argument("config", type=str, help="py config file")
parser.add_argument("--model_path", type=str, help="model path")
main(parser.parse_args())
| [
"oneflow.load"
] | [((300, 323), 'utils.utils_config.get_config', 'get_config', (['args.config'], {}), '(args.config)\n', (310, 323), False, 'from utils.utils_config import get_config\n'), ((328, 369), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.NOTSET'}), '(level=logging.NOTSET)\n', (347, 369), False, 'import logging\n'), ((374, 403), 'logging.info', 'logging.info', (['args.model_path'], {}), '(args.model_path)\n', (386, 403), False, 'import logging\n'), ((533, 595), 'utils.utils_callbacks.CallBackVerification', 'CallBackVerification', (['(1)', '(0)', 'cfg.val_targets', 'cfg.ofrecord_path'], {}), '(1, 0, cfg.val_targets, cfg.ofrecord_path)\n', (553, 595), False, 'from utils.utils_callbacks import CallBackVerification\n'), ((614, 640), 'oneflow.load', 'flow.load', (['args.model_path'], {}), '(args.model_path)\n', (623, 640), True, 'import oneflow as flow\n'), ((977, 996), 'function.EvalGraph', 'EvalGraph', (['backbone'], {}), '(backbone)\n', (986, 996), False, 'from function import EvalGraph\n'), ((1086, 1144), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OneFlow ArcFace val"""'}), "(description='OneFlow ArcFace val')\n", (1109, 1144), False, 'import argparse\n'), ((420, 488), 'backbones.get_model', 'get_model', (['cfg.network'], {'dropout': '(0.0)', 'num_features': 'cfg.embedding_size'}), '(cfg.network, dropout=0.0, num_features=cfg.embedding_size)\n', (429, 488), False, 'from backbones import get_model\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Union
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class Arange(Module):
def __init__(
self,
start: int = 0,
end: int = None,
step: int = 1,
dtype: flow.dtype = None,
device: Union[str, flow.device] = "cpu",
requires_grad: bool = False,
) -> None:
super().__init__()
assert end > start, "end should be larger than start"
assert step <= end - start, "step is ilegal"
self.start = start
self.end = end
self.step = step
self.dtype = dtype
self.device = device
self.requires_grad = requires_grad
self._op_arange = (
flow.builtin_op("range").Output("out").Attr("dtype", flow.int64).Build()
)
def forward(self):
tmp = self._op_arange(start=self.start, delta=self.step, limit=self.end)[0]
tmp.requires_grad = self.requires_grad
if isinstance(self.device, str):
device = flow.device(self.device)
else:
device = self.device
res = tmp.to(device, dtype=self.dtype)
return res
@oneflow_export("arange")
@experimental_api
def arange_op(
start: int = 0,
end: int = None,
step: int = 1,
dtype: flow.dtype = flow.int64,
device: Union[str, flow.device] = "cpu",
requires_grad: bool = False,
):
r"""
Returns a 1-D tensor of size :math:`\left\lfloor \frac{\text{end} - \text{start}}{\text{step}} \right\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\text{out}_{i+1} = \text{out}_i + \text{step}.
Args:
start (int): the starting value for the set of points. Default: ``0``.
end (int): the ending value for the set of points
step (int): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
dtype(flow.dtype, optional): If `dtype` is not given, the `dtype` is inferred to be `flow.int64`.
device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> y = flow.arange(0, 5)
>>> print(y)
tensor([0, 1, 2, 3, 4], dtype=oneflow.int64)
"""
if end is None:
end = start
start = 0
return Arange(start, end, step, dtype, device, requires_grad)()
if __name__ == "__main__":
import doctest
doctest.testmod()
| [
"oneflow.builtin_op",
"oneflow.device",
"oneflow.python.oneflow_export.oneflow_export"
] | [((1885, 1909), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""arange"""'], {}), "('arange')\n", (1899, 1909), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((3498, 3515), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (3513, 3515), False, 'import doctest\n'), ((1743, 1767), 'oneflow.device', 'flow.device', (['self.device'], {}), '(self.device)\n', (1754, 1767), True, 'import oneflow as flow\n'), ((1442, 1466), 'oneflow.builtin_op', 'flow.builtin_op', (['"""range"""'], {}), "('range')\n", (1457, 1466), True, 'import oneflow as flow\n')] |
#-*- coding:utf-8 -*-
"""
@author: scorpio.lu
@datetime:2020-06-11 15:22
@software: PyCharm
@contact: <EMAIL>
----------
路有敬亭山
----------
"""
import os
import argparse
import sys
from datetime import datetime
import numpy as np
import math
import cv2
from scipy.spatial.distance import cdist
import oneflow as flow
from reid_model import resreid, HS_reid
from data_loader import Market1501
parser = argparse.ArgumentParser(description="flags for person re-identification")
parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("--model", type=str, default="resreid", required=False, help="resreid or pcbreid")
parser.add_argument("--batch_size", type=int, default=300, required=False)
parser.add_argument("--data_dir", type=str, default='/home/oneflow_reid/person_reid/dataset', required=False, help="dataset directory")
parser.add_argument("-image_height", "--image_height", type=int, default=256, required=False)
parser.add_argument("-image_width", "--image_width", type=int, default=128, required=False)
parser.add_argument("--use_tensorrt", dest="use_tensorrt", action="store_true", default=False, required=False, help="inference with tensorrt")
parser.add_argument("--model_load_dir", type=str, default='/home/oneflow_reid/person_reid/model', required=False, help="model load directory")
parser.add_argument("--log_dir", type=str, default="./output", required=False, help="log info save directory")
args = parser.parse_args()
model={'resreid': resreid, 'HS-reid': HS_reid}
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow.config.gpu_device_num(args.gpu_num_per_node)
if args.use_tensorrt:
func_config.use_tensorrt(True)
input_blob = flow.FixedTensorDef((args.batch_size, 3, args.image_height, args.image_width), dtype=flow.float)
#input_blob = flow.MirroredTensorDef((args.batch_size, 3, args.image_height, args.image_width), dtype=flow.float)
def resize_image(img, origin_h, origin_w, image_height, image_width):
w = image_width
h = image_height
resized=np.zeros((3, image_height, image_width), dtype=np.float32)
part=np.zeros((3, origin_h, image_width), dtype = np.float32)
w_scale = (float)(origin_w - 1) / (w - 1)
h_scale = (float)(origin_h - 1) / (h - 1)
for c in range(w):
if c == w-1 or origin_w == 1:
val = img[:, :, origin_w-1]
else:
sx = c * w_scale
ix = int(sx)
dx = sx - ix
val = (1 - dx) * img[:, :, ix] + dx * img[:, :, ix+1]
part[:, :, c] = val
for r in range(h):
sy = r * h_scale
iy = int(sy)
dy = sy - iy
val = (1-dy)*part[:, iy, :]
resized[:, r, :] = val
if r==h-1 or origin_h==1:
continue
resized[:, r, :] = resized[:, r, :] + dy * part[:, iy+1, :]
return resized
def batch_image_preprocess(img_paths, img_height, img_weidth):
result_list = []
base = np.ones([args.image_height, args.image_width])
norm_mean = [base * 0.485, base * 0.456, base * 0.406] # imagenet mean
norm_std = [0.229, 0.224, 0.225] # imagenet std
for img_path in img_paths:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = img.transpose(2, 0, 1).astype(np.float32) # hwc->chw
img = img / 255 # /255 # to tensor
img[[0, 1, 2], :, :] = img[[2, 1, 0], :, :] # bgr2rgb
w = img_weidth
h = img_height
origin_h = img.shape[1]
origin_w = img.shape[2]
resize_img = resize_image(img, origin_h, origin_w, h, w)
# normalize
resize_img[0] = (resize_img[0] - norm_mean[0])/ norm_std[0]
resize_img[1] = (resize_img[1] - norm_mean[1]) / norm_std[1]
resize_img[2] = (resize_img[2] - norm_mean[2]) / norm_std[2]
result_list.append(resize_img)
results = np.asarray(result_list).astype(np.float32)
return results
def evaluate(qf, q_pids, q_camids, gf, g_pids, g_camids, max_rank=50):
num_g = len(gf)
num_q = len(qf)
print('Computing distance matrix ...')
dist = cdist(qf, gf).astype(np.float16)
dist = np.power(dist, 2).astype(np.float16)
print('Computing CMC and mAP ...')
if num_g < max_rank:
max_rank = num_g
print('Note: number of gallery samples is quite small, got {}'.format(num_g))
indices = np.argsort(dist, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
all_cmc = []
all_AP = []
num_valid_q = 0.
for q_idx in range(num_q):
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
raw_cmc = matches[q_idx][keep]
if not np.any(raw_cmc):
continue
cmc = raw_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
num_rel = raw_cmc.sum()
tmp_cmc = raw_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * raw_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, 'Error: all query identities do not appear in gallery'
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
@flow.function(func_config)
def reid_eval_job(image=input_blob):
features = resreid(image, trainable=False)
return features
class ReIDInference(object):
def __init__(self):
check_point = flow.train.CheckPoint()
if args.model_load_dir:
assert os.path.isdir(args.model_load_dir)
print("Restoring model from {}.".format(args.model_load_dir))
check_point.load(args.model_load_dir)
else:
print("Init model on demand.")
check_point.init()
snapshot_save_path = os.path.join(args.model_save_dir, "last_snapshot")
if not os.path.exists(snapshot_save_path):
os.makedirs(snapshot_save_path)
print("Saving model to {}.".format(snapshot_save_path))
check_point.save(snapshot_save_path)
def inference(self, imgs):
query_images = batch_image_preprocess(imgs, args.image_height, args.image_width)
batch_times = math.ceil(len(imgs)/args.batch_size)
features = []
for i in range(batch_times):
start = max(0, i*args.batch_size)
end = min((i+1)*args.batch_size, len(query_images))
array = query_images[start:end]
feature = reid_eval_job([array]).get()
features.extend(feature.ndarray_list_[0])
return features
def main():
print("=".ljust(66, "="))
print("Running {}: num_gpu = {}.".format(args.model, args.gpu_num_per_node))
print("=".ljust(66, "="))
for arg in vars(args):
print("{} = {}".format(arg, getattr(args, arg)))
print("-".ljust(66, "-"))
print("Time stamp: {}".format(str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))))
flow.env.grpc_use_no_signal()
flow.env.log_dir(args.log_dir)
obj = ReIDInference()
print("Loading data from {}".format(args.data_dir))
dataset = Market1501(root=args.data_dir)
query_img, query_id, query_cam_id = zip(*dataset.query)
gallery_img, gallery_id, gallery_cam_id = zip(*dataset.gallery)
print('extracting query features...')
query_features = obj.inference(query_img)
print('extracting query features done...')
print('extracting gallery features...')
gallery_features = obj.inference(gallery_img)
print('extracting gallery features done...')
cmc, mAP = evaluate(query_features, np.array(query_id), np.array(query_cam_id), gallery_features, np.array(gallery_id), np.array(gallery_cam_id))
print("=".ljust(30, "=")+" Result "+ "=".ljust(30, "="))
print('mAP: {:.1%}'.format(mAP))
print('CMC curve')
for r in [1, 5, 10]:
print('Rank-{:<3}: {:.1%}'.format(r, cmc[r - 1]))
print("=".ljust(66, "="))
if __name__ == "__main__":
main() | [
"oneflow.FunctionConfig",
"oneflow.env.grpc_use_no_signal",
"oneflow.env.log_dir",
"oneflow.train.CheckPoint",
"oneflow.function",
"oneflow.FixedTensorDef",
"oneflow.config.gpu_device_num"
] | [((445, 518), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for person re-identification"""'}), "(description='flags for person re-identification')\n", (468, 518), False, 'import argparse\n'), ((1584, 1605), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1603, 1605), True, 'import oneflow as flow\n'), ((1648, 1697), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (1674, 1697), True, 'import oneflow as flow\n'), ((1770, 1871), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['(args.batch_size, 3, args.image_height, args.image_width)'], {'dtype': 'flow.float'}), '((args.batch_size, 3, args.image_height, args.\n image_width), dtype=flow.float)\n', (1789, 1871), True, 'import oneflow as flow\n'), ((5482, 5508), 'oneflow.function', 'flow.function', (['func_config'], {}), '(func_config)\n', (5495, 5508), True, 'import oneflow as flow\n'), ((2105, 2163), 'numpy.zeros', 'np.zeros', (['(3, image_height, image_width)'], {'dtype': 'np.float32'}), '((3, image_height, image_width), dtype=np.float32)\n', (2113, 2163), True, 'import numpy as np\n'), ((2173, 2227), 'numpy.zeros', 'np.zeros', (['(3, origin_h, image_width)'], {'dtype': 'np.float32'}), '((3, origin_h, image_width), dtype=np.float32)\n', (2181, 2227), True, 'import numpy as np\n'), ((3006, 3052), 'numpy.ones', 'np.ones', (['[args.image_height, args.image_width]'], {}), '([args.image_height, args.image_width])\n', (3013, 3052), True, 'import numpy as np\n'), ((4399, 4423), 'numpy.argsort', 'np.argsort', (['dist'], {'axis': '(1)'}), '(dist, axis=1)\n', (4409, 4423), True, 'import numpy as np\n'), ((5438, 5453), 'numpy.mean', 'np.mean', (['all_AP'], {}), '(all_AP)\n', (5445, 5453), True, 'import numpy as np\n'), ((5561, 5592), 'reid_model.resreid', 'resreid', (['image'], {'trainable': '(False)'}), '(image, trainable=False)\n', (5568, 5592), False, 'from reid_model import resreid, 
HS_reid\n'), ((7198, 7227), 'oneflow.env.grpc_use_no_signal', 'flow.env.grpc_use_no_signal', ([], {}), '()\n', (7225, 7227), True, 'import oneflow as flow\n'), ((7232, 7262), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (7248, 7262), True, 'import oneflow as flow\n'), ((7360, 7390), 'data_loader.Market1501', 'Market1501', ([], {'root': 'args.data_dir'}), '(root=args.data_dir)\n', (7370, 7390), False, 'from data_loader import Market1501\n'), ((3227, 3265), 'cv2.imread', 'cv2.imread', (['img_path', 'cv2.IMREAD_COLOR'], {}), '(img_path, cv2.IMREAD_COLOR)\n', (3237, 3265), False, 'import cv2\n'), ((4770, 4787), 'numpy.invert', 'np.invert', (['remove'], {}), '(remove)\n', (4779, 4787), True, 'import numpy as np\n'), ((5690, 5713), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (5711, 5713), True, 'import oneflow as flow\n'), ((7838, 7856), 'numpy.array', 'np.array', (['query_id'], {}), '(query_id)\n', (7846, 7856), True, 'import numpy as np\n'), ((7858, 7880), 'numpy.array', 'np.array', (['query_cam_id'], {}), '(query_cam_id)\n', (7866, 7880), True, 'import numpy as np\n'), ((7900, 7920), 'numpy.array', 'np.array', (['gallery_id'], {}), '(gallery_id)\n', (7908, 7920), True, 'import numpy as np\n'), ((7922, 7946), 'numpy.array', 'np.array', (['gallery_cam_id'], {}), '(gallery_cam_id)\n', (7930, 7946), True, 'import numpy as np\n'), ((3899, 3922), 'numpy.asarray', 'np.asarray', (['result_list'], {}), '(result_list)\n', (3909, 3922), True, 'import numpy as np\n'), ((4129, 4142), 'scipy.spatial.distance.cdist', 'cdist', (['qf', 'gf'], {}), '(qf, gf)\n', (4134, 4142), False, 'from scipy.spatial.distance import cdist\n'), ((4173, 4190), 'numpy.power', 'np.power', (['dist', '(2)'], {}), '(dist, 2)\n', (4181, 4190), True, 'import numpy as np\n'), ((4843, 4858), 'numpy.any', 'np.any', (['raw_cmc'], {}), '(raw_cmc)\n', (4849, 4858), True, 'import numpy as np\n'), ((5154, 5173), 'numpy.asarray', 'np.asarray', 
(['tmp_cmc'], {}), '(tmp_cmc)\n', (5164, 5173), True, 'import numpy as np\n'), ((5346, 5365), 'numpy.asarray', 'np.asarray', (['all_cmc'], {}), '(all_cmc)\n', (5356, 5365), True, 'import numpy as np\n'), ((5765, 5799), 'os.path.isdir', 'os.path.isdir', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (5778, 5799), False, 'import os\n'), ((6045, 6095), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""last_snapshot"""'], {}), "(args.model_save_dir, 'last_snapshot')\n", (6057, 6095), False, 'import os\n'), ((6115, 6149), 'os.path.exists', 'os.path.exists', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (6129, 6149), False, 'import os\n'), ((6167, 6198), 'os.makedirs', 'os.makedirs', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (6178, 6198), False, 'import os\n'), ((7146, 7160), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7158, 7160), False, 'from datetime import datetime\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow as flow
import oneflow.python.framework.id_util as id_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.remote_blob as remote_blob_util
from typing import Optional
@oneflow_export("smooth_l1_loss")
def smooth_l1_loss(
prediction: remote_blob_util.BlobDef,
label: remote_blob_util.BlobDef,
beta: float = 1.0,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
op = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("SmoothL1Loss_")
)
.Op("smooth_l1_loss")
.Input("prediction", [prediction])
.Input("label", [label])
.Output("loss")
)
op.Attr("beta", float(beta))
return op.Build().InferAndTryRun().RemoteBlobList()[0]
| [
"oneflow.python.framework.id_util.UniqueStr",
"oneflow.python.oneflow_export.oneflow_export"
] | [((856, 888), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""smooth_l1_loss"""'], {}), "('smooth_l1_loss')\n", (870, 888), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1157, 1191), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SmoothL1Loss_"""'], {}), "('SmoothL1Loss_')\n", (1174, 1191), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
from typing import Optional, Sequence, Union
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("constant")
def constant(
value: Union[int, float],
dtype: Optional[dtype_util.dtype] = None,
shape: Optional[Sequence[int]] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a constant Blob.
Args:
value (Union[int, float]): The constant value of Blob.
dtype (Optional[dtype_util.dtype], optional): The data type of Blob. Defaults to None.
shape (Optional[Sequence[int]], optional): The shape of Blob. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Raises:
NotImplementedError: The data type of value should be int or float.
Returns:
remote_blob_util.BlobDef: The result blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def constant_Job() -> tp.Numpy:
constant_blob = flow.constant(value=1.5,
shape=(1, 3, 3),
dtype=flow.float)
return constant_blob
out = constant_Job()
# out [[[1.5 1.5 1.5]
# [1.5 1.5 1.5]
# [1.5 1.5 1.5]]]
"""
if name is None:
name = id_util.UniqueStr("Constant_")
assert value is not None
assert dtype is not None
if not isinstance(value, (int, float)):
raise NotImplementedError
if isinstance(value, float):
is_floating_value = True
floating_value = float(value)
integer_value = int(0)
else:
is_floating_value = False
floating_value = float(0)
integer_value = int(value)
if shape is not None:
assert isinstance(shape, (list, tuple))
else:
shape = []
return (
flow.user_op_builder(name)
.Op("constant")
.Output("out")
.Attr("floating_value", floating_value)
.Attr("integer_value", integer_value)
.Attr("is_floating_value", is_floating_value)
.Attr("dtype", dtype)
.Attr("shape", shape)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("constant_scalar")
def constant_scalar(
value: Union[int, float],
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a constant scalar Blob.
Args:
value (Union[int, float]): The constant value of Blob.
dtype (Optional[dtype_util.dtype], optional): The data type of Blob. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
remote_blob_util.BlobDef: The result blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def constant_scalar_Job() -> tp.Numpy:
constant_scalar = flow.constant_scalar(value=2.5,
dtype=flow.float)
return constant_scalar
out = constant_scalar_Job()
# out [2.5]
"""
return flow.constant(value, dtype=dtype, shape=[1])
@oneflow_export("constant_like")
def constant_like(
like: remote_blob_util.BlobDef,
value: Union[int, float],
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a constant Blob that has the same shape as `like`.
Args:
like (remote_blob_util.BlobDef): A Blob.
value (Union[int, float]): The constant value of Blob.
dtype (Optional[dtype_util.dtype], optional): The data type of Blob. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Raises:
NotImplementedError: The data type of value should be int or float.
Returns:
remote_blob_util.BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def constant_like_Job() -> tp.Numpy:
constant_blob = flow.constant(value=1.5,
shape=(1, 3, 3),
dtype=flow.float)
constant_like_blob = flow.constant_like(like=constant_blob,
value=5.5,
dtype=flow.float)
return constant_like_blob
out = constant_like_Job()
# out [[[5.5 5.5 5.5]
# [5.5 5.5 5.5]
# [5.5 5.5 5.5]]]
"""
op_conf = op_conf_util.OperatorConf()
setattr(
op_conf,
"name",
name if name is not None else id_util.UniqueStr("ConstantLike_"),
)
setattr(op_conf.constant_like_conf, "like", like.unique_name)
if isinstance(value, int):
op_conf.constant_like_conf.int_operand = value
elif isinstance(value, float):
op_conf.constant_like_conf.float_operand = value
else:
raise NotImplementedError
if dtype is not None:
setattr(op_conf.constant_like_conf, "data_type", dtype.oneflow_proto_dtype)
setattr(op_conf.constant_like_conf, "out", "out")
interpret_util.Forward(op_conf)
out_lbi = logical_blob_id_util.LogicalBlobId()
setattr(out_lbi, "op_name", op_conf.name)
setattr(out_lbi, "blob_name", "out")
return remote_blob_util.RemoteBlob(out_lbi)
@oneflow_export("ones_like")
def ones_like(
like: remote_blob_util.BlobDef,
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a Blob with all elements set to `1` that has the same shape as `like`.
Args:
like (remote_blob_util.BlobDef): A Blob.
dtype (Optional[dtype_util.dtype], optional): The data type of Blob. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
remote_blob_util.BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def ones_like_Job() -> tp.Numpy:
constant_blob = flow.constant(value=1.5,
shape=(1, 3, 3),
dtype=flow.float)
ones_like_blob = flow.ones_like(like=constant_blob,
dtype=flow.float)
return ones_like_blob
out = ones_like_Job()
# out [[[1. 1. 1.]
# [1. 1. 1.]
# [1. 1. 1.]]]
"""
return constant_like(like, 1, dtype=dtype, name=name)
@oneflow_export("zeros_like")
def zeros_like(
like: remote_blob_util.BlobDef,
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a Blob that has the same shape as `like` whose all elements are set to `0`.
Args:
like (remote_blob_util.BlobDef): A Blob.
dtype (Optional[dtype_util.dtype], optional): The data type of Blob. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
remote_blob_util.BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def zeros_like_Job() -> tp.Numpy:
constant_blob = flow.constant(value=1.5,
shape=(1, 3, 3),
dtype=flow.float)
zeros_like_blob = flow.zeros_like(like=constant_blob,
dtype=flow.float)
return zeros_like_blob
out = zeros_like_Job()
# out [[[0. 0. 0.]
# [0. 0. 0.]
# [0. 0. 0.]]]
"""
return constant_like(like, 0, dtype=dtype, name=name)
| [
"oneflow.python.framework.interpret_util.Forward",
"oneflow.constant",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.core.operator.op_conf_pb2.OperatorConf",
"oneflow.python.framework.id_util.UniqueStr",
"oneflow.... | [((1131, 1157), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""constant"""'], {}), "('constant')\n", (1145, 1157), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3393, 3426), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""constant_scalar"""'], {}), "('constant_scalar')\n", (3407, 3426), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4477, 4508), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""constant_like"""'], {}), "('constant_like')\n", (4491, 4508), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6865, 6892), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""ones_like"""'], {}), "('ones_like')\n", (6879, 6892), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8196, 8224), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""zeros_like"""'], {}), "('zeros_like')\n", (8210, 8224), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4429, 4473), 'oneflow.constant', 'flow.constant', (['value'], {'dtype': 'dtype', 'shape': '[1]'}), '(value, dtype=dtype, shape=[1])\n', (4442, 4473), True, 'import oneflow as flow\n'), ((6034, 6061), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (6059, 6061), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((6644, 6675), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (6666, 6675), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((6690, 6726), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (6724, 6726), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((6825, 6861), 'oneflow.python.framework.remote_blob.RemoteBlob', 
'remote_blob_util.RemoteBlob', (['out_lbi'], {}), '(out_lbi)\n', (6852, 6861), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((2489, 2519), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Constant_"""'], {}), "('Constant_')\n", (2506, 2519), True, 'import oneflow.python.framework.id_util as id_util\n'), ((6146, 6180), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ConstantLike_"""'], {}), "('ConstantLike_')\n", (6163, 6180), True, 'import oneflow.python.framework.id_util as id_util\n'), ((3030, 3056), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (3050, 3056), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.nn as nn
import oneflow.unittest
import oneflow.utils.data as data
class LinearNet(nn.Module):
    """Single-output linear regression model (one fully-connected layer)."""

    def __init__(self, n_feature):
        super().__init__()
        self.linear = nn.Linear(n_feature, 1)

    def forward(self, x):
        return self.linear(x)
@flow.unittest.skip_unless_1n1d()
class TestTensorDataset(flow.unittest.TestCase):
    def test_tensor_dataset(test_case):
        """Fit a linear regressor on synthetic data served via TensorDataset
        and assert the final batch loss is small."""
        num_inputs = 2
        num_examples = 1000
        true_w = [2, -3.4]
        true_b = 4.2
        net = LinearNet(num_inputs)
        flow.nn.init.normal_(net.linear.weight, mean=0, std=0.01)
        flow.nn.init.constant_(net.linear.bias, val=0)
        criterion = nn.MSELoss()
        optimizer = flow.optim.SGD(net.parameters(), lr=0.03)
        # Synthetic linear data with small gaussian label noise.
        features = flow.tensor(
            np.random.normal(0, 1, (num_examples, num_inputs)), dtype=flow.float
        )
        labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
        labels += flow.tensor(
            np.random.normal(0, 0.01, size=labels.size()), dtype=flow.float
        )
        dataset = data.TensorDataset(features, labels)
        loader = data.DataLoader(dataset, 10, shuffle=True, num_workers=0)
        num_epochs = 10
        for epoch in range(1, num_epochs + 1):
            for batch_x, batch_y in loader:
                batch_loss = criterion(net(batch_x), batch_y)
                optimizer.zero_grad()
                batch_loss.backward()
                optimizer.step()
            if epoch == num_epochs:
                test_case.assertLess(batch_loss.numpy(), 0.00019)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.Linear",
"oneflow.utils.data.DataLoader",
"oneflow.nn.MSELoss",
"oneflow.utils.data.TensorDataset",
"oneflow.nn.init.constant_",
"oneflow.nn.init.normal_"
] | [((960, 992), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (990, 992), True, 'import oneflow as flow\n'), ((2317, 2332), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2330, 2332), False, 'import unittest\n'), ((862, 885), 'oneflow.nn.Linear', 'nn.Linear', (['n_feature', '(1)'], {}), '(n_feature, 1)\n', (871, 885), True, 'import oneflow.nn as nn\n'), ((1225, 1282), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['net.linear.weight'], {'mean': '(0)', 'std': '(0.01)'}), '(net.linear.weight, mean=0, std=0.01)\n', (1245, 1282), True, 'import oneflow as flow\n'), ((1291, 1337), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['net.linear.bias'], {'val': '(0)'}), '(net.linear.bias, val=0)\n', (1313, 1337), True, 'import oneflow as flow\n'), ((1353, 1365), 'oneflow.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (1363, 1365), True, 'import oneflow.nn as nn\n'), ((1792, 1828), 'oneflow.utils.data.TensorDataset', 'data.TensorDataset', (['features', 'labels'], {}), '(features, labels)\n', (1810, 1828), True, 'import oneflow.utils.data as data\n'), ((1849, 1914), 'oneflow.utils.data.DataLoader', 'data.DataLoader', (['dataset', 'batch_size'], {'shuffle': '(True)', 'num_workers': '(0)'}), '(dataset, batch_size, shuffle=True, num_workers=0)\n', (1864, 1914), True, 'import oneflow.utils.data as data\n'), ((1472, 1522), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(num_examples, num_inputs)'], {}), '(0, 1, (num_examples, num_inputs))\n', (1488, 1522), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import tensorflow as tf
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
# Enable on-demand GPU memory growth so TensorFlow does not pre-allocate
# the whole device while the OneFlow side of the comparison is running.
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_multi_optimizer_with_tensorflow(
    device_type,
    var1_shape,
    var2_shape,
    var3_shape,
    sgd_opt_args,
    rmsprop_opt_args,
    adam_opt_args,
    train_iters,
):
    """Train three variables with a OneFlow CombinedOptimizer (SGD, RMSProp
    and Adam, one optimizer per variable) and compare the resulting values
    against the equivalent TensorFlow Keras optimizers after `train_iters`
    steps.

    Args:
        device_type: "gpu" or "cpu".
        var1_shape, var2_shape, var3_shape: shapes of the trained variables.
        sgd_opt_args: dict with keys "lr" and "momentum".
        rmsprop_opt_args: dict with keys "lr", "decay_rate" and "centered".
        adam_opt_args: dict with keys "lr", "beta1", "beta2" and "epsilon".
        train_iters: number of optimization steps to run on each side.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    @flow.global_function(type="train", function_config=func_config)
    def TestMultiOptimizerJob():
        with flow.scope.placement(device_type, "0:0-0"):
            var1 = flow.get_variable(
                name="var1",
                shape=var1_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            var2 = flow.get_variable(
                name="var2",
                shape=var2_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            var3 = flow.get_variable(
                name="var3",
                shape=var3_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            # The loss depends on all three variables, so every optimizer
            # receives a gradient each step.
            loss = flow.math.reduce_sum(var1 + var2 + var3)
            # Each optimizer is restricted to exactly one variable via the
            # `variables=[...]` argument.
            sgd_opt = flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [sgd_opt_args["lr"]]),
                momentum=sgd_opt_args["momentum"],
                variables=["var1"],
            )
            rmsprop_opt = flow.optimizer.RMSProp(
                flow.optimizer.PiecewiseConstantScheduler([], [rmsprop_opt_args["lr"]]),
                decay_rate=rmsprop_opt_args["decay_rate"],
                epsilon=0,
                centered=rmsprop_opt_args["centered"],
                variables=["var2"],
            )
            adam_opt = flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [adam_opt_args["lr"]]),
                beta1=adam_opt_args["beta1"],
                beta2=adam_opt_args["beta2"],
                epsilon=adam_opt_args["epsilon"],
                do_bias_correction=True,
                variables=["var3"],
            )
            flow.optimizer.CombinedOptimizer([sgd_opt, rmsprop_opt, adam_opt]).minimize(
                loss
            )
        return (var1, var2, var3)
    init_var1 = None
    init_var2 = None
    init_var3 = None
    # Run train_iters + 1 times: iteration 0 only snapshots the random
    # initial values so TensorFlow can start from the identical state.
    for i in range(train_iters + 1):
        (var1, var2, var3) = TestMultiOptimizerJob().get()
        if i == 0:
            init_var1 = np.copy(var1.numpy())
            init_var2 = np.copy(var2.numpy())
            init_var3 = np.copy(var3.numpy())
    # Mirror the same setup in TensorFlow with matching hyper-parameters.
    tf_var1 = tf.Variable(init_var1)
    tf_var2 = tf.Variable(init_var2)
    tf_var3 = tf.Variable(init_var3)
    tf_sgd_opt = tf.keras.optimizers.SGD(
        learning_rate=sgd_opt_args["lr"],
        momentum=sgd_opt_args["momentum"],
        nesterov=False,
    )
    tf_rmsprop_opt = tf.keras.optimizers.RMSprop(
        learning_rate=rmsprop_opt_args["lr"],
        rho=rmsprop_opt_args["decay_rate"],
        momentum=0.0,
        epsilon=0,
        centered=rmsprop_opt_args["centered"],
    )
    tf_adam_opt = tf.keras.optimizers.Adam(
        learning_rate=adam_opt_args["lr"],
        beta_1=adam_opt_args["beta1"],
        beta_2=adam_opt_args["beta2"],
        epsilon=adam_opt_args["epsilon"],
        amsgrad=False,
    )
    for i in range(train_iters):
        with tf.GradientTape(persistent=True) as tape:
            loss = tf.math.reduce_sum(tf_var1 + tf_var2 + tf_var3)
        tf_var1_grad = tape.gradient([loss], tf_var1)
        tf_var2_grad = tape.gradient([loss], tf_var2)
        tf_var3_grad = tape.gradient([loss], tf_var3)
        tf_sgd_opt.apply_gradients([(tf_var1_grad, tf_var1)])
        tf_rmsprop_opt.apply_gradients([(tf_var2_grad, tf_var2)])
        tf_adam_opt.apply_gradients([(tf_var3_grad, tf_var3)])
    # RMSProp gets a looser tolerance than SGD/Adam below.
    assert np.allclose(
        var1.flatten(), tf_var1.numpy().flatten(), rtol=0.0001, atol=0.0001
    )
    assert np.allclose(
        var2.flatten(), tf_var2.numpy().flatten(), rtol=0.005, atol=0.005
    )
    assert np.allclose(
        var3.flatten(), tf_var3.numpy().flatten(), rtol=0.0001, atol=0.0001
    )
@flow.unittest.skip_unless_1n1d()
class TestMultiOptimizer(flow.unittest.TestCase):
    """Check a CombinedOptimizer (SGD + RMSProp + Adam) against TensorFlow."""

    def test_multi_optimizer(test_case):
        # Keyword arguments preserve insertion order (Python 3.7+), which
        # GenArgList relies on when expanding the cartesian product.
        cases = OrderedDict(
            device_type=["cpu", "gpu"],
            var1_shape=[(10,)],
            var2_shape=[(10,)],
            var3_shape=[(10,)],
            sgd_opt_args=[{"lr": 1, "momentum": 0.9}],
            rmsprop_opt_args=[{"lr": 0.5, "decay_rate": 0.9, "centered": False}],
            adam_opt_args=[{"lr": 2, "beta1": 0.9, "beta2": 0.99, "epsilon": 1e-09}],
            train_iters=[10],
        )
        for params in GenArgList(cases):
            compare_multi_optimizer_with_tensorflow(*params)
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.math.reduce_sum",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.random_uniform_initializer",
"oneflow.compatible.singl... | [((831, 882), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (875, 882), True, 'import tensorflow as tf\n'), ((5292, 5324), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5322, 5324), True, 'from oneflow.compatible import single_client as flow\n'), ((904, 955), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (944, 955), True, 'import tensorflow as tf\n'), ((1192, 1220), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1218, 1220), True, 'from oneflow.compatible import single_client as flow\n'), ((1239, 1260), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1258, 1260), True, 'from oneflow.compatible import single_client as flow\n'), ((1315, 1378), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1335, 1378), True, 'from oneflow.compatible import single_client as flow\n'), ((3741, 3763), 'tensorflow.Variable', 'tf.Variable', (['init_var1'], {}), '(init_var1)\n', (3752, 3763), True, 'import tensorflow as tf\n'), ((3778, 3800), 'tensorflow.Variable', 'tf.Variable', (['init_var2'], {}), '(init_var2)\n', (3789, 3800), True, 'import tensorflow as tf\n'), ((3815, 3837), 'tensorflow.Variable', 'tf.Variable', (['init_var3'], {}), '(init_var3)\n', (3826, 3837), True, 'import tensorflow as tf\n'), ((3855, 3968), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': "sgd_opt_args['lr']", 'momentum': "sgd_opt_args['momentum']", 'nesterov': '(False)'}), "(learning_rate=sgd_opt_args['lr'], momentum=\n sgd_opt_args['momentum'], 
nesterov=False)\n", (3878, 3968), True, 'import tensorflow as tf\n'), ((4016, 4191), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': "rmsprop_opt_args['lr']", 'rho': "rmsprop_opt_args['decay_rate']", 'momentum': '(0.0)', 'epsilon': '(0)', 'centered': "rmsprop_opt_args['centered']"}), "(learning_rate=rmsprop_opt_args['lr'], rho=\n rmsprop_opt_args['decay_rate'], momentum=0.0, epsilon=0, centered=\n rmsprop_opt_args['centered'])\n", (4043, 4191), True, 'import tensorflow as tf\n'), ((4247, 4427), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': "adam_opt_args['lr']", 'beta_1': "adam_opt_args['beta1']", 'beta_2': "adam_opt_args['beta2']", 'epsilon': "adam_opt_args['epsilon']", 'amsgrad': '(False)'}), "(learning_rate=adam_opt_args['lr'], beta_1=\n adam_opt_args['beta1'], beta_2=adam_opt_args['beta2'], epsilon=\n adam_opt_args['epsilon'], amsgrad=False)\n", (4271, 4427), True, 'import tensorflow as tf\n'), ((6086, 6101), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6099, 6101), False, 'import unittest\n'), ((5435, 5448), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5446, 5448), False, 'from collections import OrderedDict\n'), ((5973, 5993), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5983, 5993), False, 'from test_util import GenArgList\n'), ((1425, 1467), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (1445, 1467), True, 'from oneflow.compatible import single_client as flow\n'), ((2286, 2326), 'oneflow.compatible.single_client.math.reduce_sum', 'flow.math.reduce_sum', (['(var1 + var2 + var3)'], {}), '(var1 + var2 + var3)\n', (2306, 2326), True, 'from oneflow.compatible import single_client as flow\n'), ((4511, 4543), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (4526, 4543), True, 
'import tensorflow as tf\n'), ((4572, 4619), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['(tf_var1 + tf_var2 + tf_var3)'], {}), '(tf_var1 + tf_var2 + tf_var3)\n', (4590, 4619), True, 'import tensorflow as tf\n'), ((2385, 2452), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', "[sgd_opt_args['lr']]"], {}), "([], [sgd_opt_args['lr']])\n", (2426, 2452), True, 'from oneflow.compatible import single_client as flow\n'), ((2621, 2692), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', "[rmsprop_opt_args['lr']]"], {}), "([], [rmsprop_opt_args['lr']])\n", (2662, 2692), True, 'from oneflow.compatible import single_client as flow\n'), ((2945, 3013), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', "[adam_opt_args['lr']]"], {}), "([], [adam_opt_args['lr']])\n", (2986, 3013), True, 'from oneflow.compatible import single_client as flow\n'), ((1634, 1687), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (1665, 1687), True, 'from oneflow.compatible import single_client as flow\n'), ((1900, 1953), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (1931, 1953), True, 'from oneflow.compatible import single_client as flow\n'), ((2166, 2219), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (2197, 2219), True, 'from oneflow.compatible import single_client as flow\n'), ((3260, 3326), 'oneflow.compatible.single_client.optimizer.CombinedOptimizer', 'flow.optimizer.CombinedOptimizer', (['[sgd_opt, 
rmsprop_opt, adam_opt]'], {}), '([sgd_opt, rmsprop_opt, adam_opt])\n', (3292, 3326), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_type_as(test_case, shape, src_dtype, tgt_dtype, placement, sbp):
    """type_as must make the tensor adopt the reference tensor's dtype."""
    data = np.random.rand(*shape)
    source = flow.tensor(data, dtype=src_dtype).to_global(placement, sbp)
    reference = flow.tensor(data, dtype=tgt_dtype).to_global(placement, sbp)
    converted = source.type_as(reference)
    test_case.assertEqual(converted.dtype, reference.dtype)
def _test_is_floating_point(test_case, shape, dtype, placement, sbp):
    """is_floating_point must be True exactly for the floating dtypes."""
    data = np.random.rand(*shape)
    tensor = flow.tensor(data, dtype=dtype).to_global(placement, sbp)
    expected = tensor.dtype in (flow.float, flow.float16, flow.float32, flow.double)
    test_case.assertEqual(tensor.is_floating_point(), expected)
@autotest(n=1, check_graph=False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def _test_global_cuda(test_case, placement, sbp):
    """Move a global tensor to CUDA; autotest compares against torch."""
    src = random_tensor(2, 8, 16).to_global(placement, sbp)
    return src.cuda().sum()
class TestConsistentCuda(flow.unittest.TestCase):
    """Exercise tensor.cuda() under every placement/sbp combination."""

    @globaltest
    def test_global_cuda(test_case):
        combos = (
            (placement, sbp)
            for placement in all_placement()
            for sbp in all_sbp(placement, max_dim=2)
        )
        for placement, sbp in combos:
            _test_global_cuda(test_case, placement, sbp)
@autotest(n=1, check_graph=False)
def _test_global_cpu(test_case, placement, sbp):
    """Move a global tensor to CPU; autotest compares against torch."""
    src = random_tensor(2, 8, 16).to_global(placement, sbp)
    return src.cpu().sum()
# PyTorch error if open auto_backward:
# element 0 of tensors does not require grad and does not have a grad_fn
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_long(test_case, placement, sbp):
    """Casting to int64 must detach the result from autograd."""
    src = random_tensor(2, 8, 16, requires_grad=True).to_global(placement, sbp)
    out = src.long()
    test_case.assertFalse(out.oneflow.requires_grad)
    return out
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_int(test_case, placement, sbp):
    """Casting to int32 must detach the result from autograd."""
    src = random_tensor(2, 8, 16, requires_grad=True).to_global(placement, sbp)
    out = src.int()
    test_case.assertFalse(out.oneflow.requires_grad)
    return out
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_float(test_case, placement, sbp):
    """Cast an integer global tensor to float32."""
    src = random_tensor(2, 8, 16, dtype=int).to_global(placement, sbp)
    return src.float()
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_double(test_case, placement, sbp):
    """Cast an integer global tensor to float64."""
    src = random_tensor(2, 8, 16, dtype=int).to_global(placement, sbp)
    return src.double()
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_item(test_case, placement, sbp):
    """item() on a one-element global tensor returns a Python scalar."""
    src = random_tensor(ndim=1, dim0=1, dtype=int).to_global(placement, sbp)
    return torch.tensor(src.item())
@autotest(n=1, auto_backward=False, check_graph=False)
def _test_global_tolist(test_case, placement, sbp):
    """tolist() round-trips a 4-D global tensor through nested lists."""
    src = random_tensor(ndim=4, dim0=8, dim1=16, dim2=24, dim3=32, dtype=int)
    src = src.to_global(placement, sbp)
    return torch.tensor(src.tolist())
class TestConsistentTensorOps(flow.unittest.TestCase):
    """Global-tensor conversion ops exercised over every placement/sbp combo."""

    def _run_with_all_placements(test_case, check, **sbp_kwargs):
        # Shared driver: invoke check(test_case, placement, sbp) for every
        # placement and every sbp signature matching sbp_kwargs.
        for placement in all_placement():
            for sbp in all_sbp(placement, **sbp_kwargs):
                check(test_case, placement, sbp)

    @globaltest
    def test_global_cpu(test_case):
        test_case._run_with_all_placements(_test_global_cpu, max_dim=2)

    @globaltest
    def test_global_long(test_case):
        test_case._run_with_all_placements(_test_global_long, max_dim=2)

    @globaltest
    def test_global_int(test_case):
        test_case._run_with_all_placements(_test_global_int, max_dim=2)

    @globaltest
    def test_global_float(test_case):
        test_case._run_with_all_placements(_test_global_float, max_dim=2)

    @globaltest
    def test_global_double(test_case):
        test_case._run_with_all_placements(_test_global_double, max_dim=2)

    @globaltest
    def test_global_item(test_case):
        test_case._run_with_all_placements(
            _test_global_item, max_dim=1, except_split=True
        )

    @globaltest
    def test_global_tolist(test_case):
        test_case._run_with_all_placements(_test_global_tolist, max_dim=4)

    @globaltest
    def test_type_as(test_case):
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(8, 16), (8, 16, 24), (8, 16, 24, 32)]
        arg_dict["src_dtype"] = [flow.int64, flow.int32, flow.float32, flow.float64]
        arg_dict["tgt_dtype"] = [flow.int64, flow.int32, flow.float32, flow.float64]
        for shape, src_dtype, tgt_dtype in GenArgList(arg_dict):
            test_case._run_with_all_placements(
                lambda tc, placement, sbp: _test_type_as(
                    tc, shape, src_dtype, tgt_dtype, placement, sbp
                ),
                max_dim=len(shape),
            )

    @globaltest
    def test_is_floating_point(test_case):
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(8, 16), (8, 16, 24), (8, 16, 24, 32)]
        arg_dict["dtype"] = [
            # flow.uint8 is omitted: nccl doesn't support uint8
            flow.int8,
            flow.int32,
            flow.int64,
            flow.float32,
            flow.float64,
            flow.double,
            flow.float,
            flow.int,
        ]
        for shape, dtype in GenArgList(arg_dict):
            test_case._run_with_all_placements(
                lambda tc, placement, sbp: _test_is_floating_point(
                    tc, shape, dtype, placement, sbp
                ),
                max_dim=len(shape),
            )
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.tensor",
"oneflow.test_utils.test_util.GenArgList"
] | [((909, 931), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (923, 931), True, 'import numpy as np\n'), ((1261, 1283), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1275, 1283), True, 'import numpy as np\n'), ((6657, 6672), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6670, 6672), False, 'import unittest\n'), ((5472, 5485), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5483, 5485), False, 'from collections import OrderedDict\n'), ((5743, 5763), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5753, 5763), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((6025, 6038), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6036, 6038), False, 'from collections import OrderedDict\n'), ((6411, 6431), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6421, 6431), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((944, 982), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'src_dtype'}), '(np_input, dtype=src_dtype)\n', (955, 982), True, 'import oneflow as flow\n'), ((1022, 1060), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'tgt_dtype'}), '(np_input, dtype=tgt_dtype)\n', (1033, 1060), True, 'import oneflow as flow\n'), ((1296, 1330), 'oneflow.tensor', 'flow.tensor', (['np_input'], {'dtype': 'dtype'}), '(np_input, dtype=dtype)\n', (1307, 1330), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from typing import Tuple
import oneflow.typing as tp
import numpy as np
import imageio
import os
import pix_layers as layers
import matplotlib.pyplot as plt
import time, shutil
import glob
os.environ['CUDA_VISIBLE_DEVICES'] = '2'
class Pix2Pix:
def __init__(self, args):
self.lr = args.learning_rate
self.out_channels = 3
self.img_size = 256
self.LAMBDA = args.LAMBDA
self.eval_interval = 10
self.label_smooth = args.label_smooth
self.batch_size = args.batch_size
self.path = args.path
if not os.path.exists(self.path):
os.mkdir(self.path)
print("Make new dir '{}' done.".format(self.path))
self.checkpoint_path = os.path.join(self.path, "checkpoint")
if not os.path.exists(self.checkpoint_path):
os.mkdir(self.checkpoint_path)
self.test_images_path = os.path.join(self.path, "test_images")
if not os.path.exists(self.test_images_path):
os.mkdir(self.test_images_path)
def _downsample(
self,
inputs,
filters,
size,
name,
reuse=False,
apply_batchnorm=True,
trainable=True,
const_init=True,
):
out = layers.conv2d(
inputs,
filters,
size,
const_init=const_init,
reuse=reuse,
trainable=trainable,
use_bias=False,
name=name + "_conv",
)
if apply_batchnorm: #and not const_init:
out = layers.batchnorm(out, name=name + "_bn", reuse=reuse, trainable=trainable)
out = flow.nn.leaky_relu(out, alpha=0.3)
return out
def _upsample(
self,
inputs,
filters,
size,
name,
apply_dropout=False,
trainable=True,
const_init=True,
reuse=False,
):
out = layers.deconv2d(
inputs,
filters,
size,
const_init=const_init,
trainable=trainable,
use_bias=False,
name=name + "_deconv",
)
# out = layers.batchnorm(out, name=name + "_bn", trainable=trainable)
out = layers.batchnorm(out, name=name + "_bn", reuse=reuse, trainable=trainable)
if apply_dropout:
out = flow.nn.dropout(out, rate=0.5)
out = flow.nn.relu(out)
return out
def generator(self, inputs, trainable=True, const_init=False):
if const_init:
apply_dropout = False
else:
apply_dropout = True
# (n, 64, 128, 128)
d1 = self._downsample(
inputs,
64,
4,
const_init=const_init,
trainable=trainable,
apply_batchnorm=False,
name="g_d1",
)
# (n, 128, 64, 64)
d2 = self._downsample(
d1, 128, 4, const_init=const_init, trainable=trainable, name="g_d2"
)
# (n, 256, 32, 32)
d3 = self._downsample(
d2, 256, 4, const_init=const_init, trainable=trainable, name="g_d3"
)
# (n, 512, 16, 16)
d4 = self._downsample(
d3, 512, 4, const_init=const_init, trainable=trainable, name="g_d4"
)
# (n, 512, 8, 8)
d5 = self._downsample(
d4, 512, 4, const_init=const_init, trainable=trainable, name="g_d5"
)
# (n, 512, 4, 4)
d6 = self._downsample(
d5, 512, 4, const_init=const_init, trainable=trainable, name="g_d6"
)
# (n, 512, 2, 2)
d7 = self._downsample(
d6, 512, 4, const_init=const_init, trainable=trainable, name="g_d7"
)
# (n, 512, 1, 1)
d8 = self._downsample(
d7, 512, 4, const_init=const_init, trainable=trainable, name="g_d8"
)
# (n, 1024, 2, 2)
u7 = self._upsample(
d8,
512,
4,
const_init=const_init,
trainable=trainable,
apply_dropout=apply_dropout,
name="g_u7",
)
u7 = flow.concat([u7, d7], axis=1)
# (n, 1024, 4, 4)
u6 = self._upsample(
u7,
512,
4,
const_init=const_init,
trainable=trainable,
apply_dropout=apply_dropout,
name="g_u6",
)
u6 = flow.concat([u6, d6], axis=1)
# (n, 1024, 8, 8)
u5 = self._upsample(
u6,
512,
4,
const_init=const_init,
trainable=trainable,
apply_dropout=apply_dropout,
name="g_u5",
)
u5 = flow.concat([u5, d5], axis=1)
# (n, 1024, 16, 16)
u4 = self._upsample(
u5, 512, 4, const_init=const_init, trainable=trainable, name="g_u4"
)
u4 = flow.concat([u4, d4], axis=1)
# (n, 512, 32, 32)
u3 = self._upsample(
u4, 256, 4, const_init=const_init, trainable=trainable, name="g_u3"
)
u3 = flow.concat([u3, d3], axis=1)
# (n, 256, 64, 64)
u2 = self._upsample(
u3, 128, 4, const_init=const_init, trainable=trainable, name="g_u2"
)
u2 = flow.concat([u2, d2], axis=1)
# (n, 128, 128, 128)
u1 = self._upsample(
u2, 64, 4, const_init=const_init, trainable=trainable, name="g_u1"
)
u1 = flow.concat([u1, d1], axis=1)
# (n, 3, 256, 256)
u0 = layers.deconv2d(
u1,
self.out_channels,
4,
name="g_u0_deconv",
const_init=const_init,
trainable=trainable,
)
u0 = flow.math.tanh(u0)
return u0
def discriminator(
self, inputs, targets, trainable=True, reuse=False, const_init=False
):
# (n, 6, 256, 256)
d0 = flow.concat([inputs, targets], axis=1)
# (n, 64, 128, 128)
d1 = self._downsample(
d0,
64,
4,
name="d_d1",
apply_batchnorm=False,
reuse=reuse,
const_init=const_init,
trainable=trainable,
)
# (n, 64, 64, 64)
d2 = self._downsample(
d1, 128, 4, name="d_d2", reuse=reuse, const_init=const_init
)
# (n, 256, 32, 32)
d3 = self._downsample(
d2, 256, 4, name="d_d3", reuse=reuse, const_init=const_init
)
# (n, 256, 34, 34)
pad1 = flow.pad(d3, [[0, 0], [0, 0], [1, 1], [1, 1]])
# (n, 512, 31, 31)
conv1 = layers.conv2d(
pad1,
512,
4,
strides=1,
padding="valid",
name="d_conv1",
trainable=trainable,
reuse=reuse,
const_init=const_init,
use_bias=False,
)
bn1 = layers.batchnorm(conv1, name="d_bn", reuse=reuse, trainable=trainable)
leaky_relu = flow.nn.leaky_relu(bn1, alpha=0.3)
# (n, 512, 33, 33)
pad2 = flow.pad(leaky_relu, [[0, 0], [0, 0], [1, 1], [1, 1]])
# (n, 1, 30, 30)
conv2 = layers.conv2d(
pad2,
1,
4,
strides=1,
padding="valid",
name="d_conv2",
trainable=trainable,
reuse=reuse,
const_init=const_init,
)
return conv2
def load_facades(self, mode="train"):
from PIL import Image, ImageOps
data_path = "./data/facades"
seed=np.random.randint(1024)
if not os.path.exists(data_path):
print("not Found Facades - start download")
import tensorflow as tf
if not os.path.exists("data"):
os.mkdir("data")
_PATH = os.path.join(os.getcwd(), "data/facades.tar.gz")
_URL = "https://people.eecs.berkeley.edu/~tinghuiz/projects/pix2pix/datasets/facades.tar.gz"
path_to_zip = tf.keras.utils.get_file(_PATH, origin=_URL, extract=True)
else:
# print("Found Facades - skip download")
pass
input_imgs, real_imgs = [], []
if mode == "train":
modes = ["train", "val"]
else:
modes = ["test"]
for mode in modes:
for d in os.listdir(os.path.join(data_path, mode)):
d = os.path.join(data_path, mode, d)
img = np.asarray(Image.open(d))
real_img = Image.fromarray(img[:, :256, :])
input_img = Image.fromarray(img[:, 256:, :])
# resize to 286 x 286 x 3, and randomly crop to 256 x 256 x 3
r1, r2 = np.random.randint(30, size=2)
real_img = real_img.resize((256 + 30, 256 + 30))
input_img = input_img.resize((256 + 30, 256 + 30))
real_img = real_img.crop((r1, r2, r1 + 256, r2 + 256))
input_img = input_img.crop((r1, r2, r1 + 256, r2 + 256))
if np.random.rand() > 0.5:
# random mirroring
real_img = ImageOps.mirror(real_img)
input_img = ImageOps.mirror(input_img)
real_imgs.append(np.asarray(real_img))
input_imgs.append(np.asarray(input_img))
input_imgs = np.array(input_imgs).transpose(0, 3, 1, 2)
real_imgs = np.array(real_imgs).transpose(0, 3, 1, 2)
# normalizing the images to [-1, 1]
input_imgs = input_imgs / 127.5 - 1
real_imgs = real_imgs / 127.5 - 1
np.random.seed(seed)
np.random.shuffle(input_imgs)
np.random.seed(seed)
np.random.shuffle(real_imgs)
return input_imgs, real_imgs
def save_images(self, images, real_input, target, epoch_idx, name, path=None):
if name == "eval":
plot_size = epoch_idx
else:
plot_size = self.batch_size
if name == "train":
images_path = self.train_images_path
elif name == "test":
images_path = self.test_images_path
plt.figure(figsize=(6, 8))
display_list = list(zip(real_input, target, images))
# title = ["Input Image", "Ground Truth", "Predicted Image"]
idx = 1
row = 4
# save 4 images of title
for i in range(plot_size):
dis = display_list[i]
for j in range(3):
plt.subplot(row, 6, idx)
# plt.title(title[j])
# getting the pixel values between [0, 1] to plot it.
plt.imshow(np.array(dis[j]).transpose(1, 2, 0) * 0.5 + 0.5)
plt.axis("off")
idx = idx + 1
if idx > row * 6:
break
if name == "eval":
save_path = path
else:
save_path = os.path.join(images_path, "{}_image_{:02d}.png".format(name, epoch_idx + 1))
plt.savefig(save_path)
plt.close()
    def test(self, eval_size, model_path, save_path):
        """Restore a trained generator and translate one random test batch.

        Args:
            eval_size: number of test images translated in a single batch.
            model_path: directory holding a saved flow.train.CheckPoint.
            save_path: output file path for the composite image grid.
        """
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        @flow.global_function(type="predict", function_config=func_config)
        def eval_generator(
            input: tp.Numpy.Placeholder((eval_size, 3, 256, 256))
        ) -> tp.Numpy:
            # Inference-only graph: generator weights are not updated here.
            g_out = self.generator(input, trainable=False)
            return g_out
        check_point = flow.train.CheckPoint()
        check_point.load(model_path)
        test_x, test_y = self.load_facades(mode="test")
        # Pick one random, contiguous batch of eval_size images.
        ind = np.random.choice(len(test_x) // eval_size)
        test_inp = test_x[ind * eval_size : (ind + 1) * eval_size].astype(np.float32, order="C")
        test_target = test_y[ind * eval_size : (ind + 1) * eval_size].astype(np.float32, order="C")
        print(test_inp.shape)
        print(test_target.shape)
        gout = eval_generator(test_inp)
        # save images
        self.save_images(gout, test_inp, test_target, eval_size, name="eval", path=save_path)
    def train(self, epochs, save=True):
        """Run the pix2pix adversarial training loop.

        Builds three OneFlow global functions (generator step, discriminator
        step, evaluation pass), then alternates discriminator / generator
        updates over the facades dataset for ``epochs`` epochs.

        Args:
            epochs: number of passes over the training split.
            save: if True, write a timestamped checkpoint and persist the
                recorded loss curves as ``.npy`` files under ``self.path``.
        """
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        # Constant learning rate: no decay milestones are configured.
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [self.lr])
        @flow.global_function(type="train", function_config=func_config)
        def train_generator(
            input: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256)),
            target: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256)),
            label1: tp.Numpy.Placeholder((self.batch_size, 1, 30, 30)),
        )-> Tuple[tp.Numpy, tp.Numpy, tp.Numpy, tp.Numpy]:
            # Generator step: fool a frozen discriminator while staying close
            # to the target image in L1 (weighted by self.LAMBDA).
            g_out = self.generator(input, trainable=True)
            g_logits = self.discriminator(input, g_out, trainable=False)
            gan_loss = flow.nn.sigmoid_cross_entropy_with_logits(
                label1, g_logits, name="Gloss_sigmoid_cross_entropy_with_logits"
            )
            l1_loss = flow.math.reduce_mean(flow.math.abs(g_out - target))
            g_loss = gan_loss + self.LAMBDA * l1_loss
            flow.optimizer.Adam(lr_scheduler, beta1=0.5).minimize(g_loss)
            return (g_out, gan_loss, l1_loss, g_loss)
        @flow.global_function(type="train", function_config=func_config)
        def train_discriminator(
            input: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256)),
            target: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256)),
            label0: tp.Numpy.Placeholder((self.batch_size, 1, 30, 30)),
            label1: tp.Numpy.Placeholder((self.batch_size, 1, 30, 30)),
        ) -> tp.Numpy:
            # Discriminator step: reject generated pairs, accept real pairs.
            g_out = self.generator(input, trainable=False)
            # NOTE(review): the fake pair is scored as (g_out, target) here,
            # while train_generator scores (input, g_out) — the argument
            # order given to self.discriminator is inconsistent between the
            # two paths; confirm which order is intended.
            g_logits = self.discriminator(g_out, target, trainable=True)
            d_fake_loss = flow.nn.sigmoid_cross_entropy_with_logits(
                label0, g_logits, name="Dloss_fake_sigmoid_cross_entropy_with_logits"
            )
            d_logits = self.discriminator(input, target, trainable=True, reuse=True)
            d_real_loss = flow.nn.sigmoid_cross_entropy_with_logits(
                label1, d_logits, name="Dloss_real_sigmoid_cross_entropy_with_logits"
            )
            d_loss = d_fake_loss + d_real_loss
            flow.optimizer.Adam(lr_scheduler, beta1=0.5).minimize(d_loss)
            return d_loss
        @flow.global_function(type="predict", function_config=func_config)
        def eval_generator(
            input: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256)),
            target: tp.Numpy.Placeholder((self.batch_size, 3, 256, 256))
        ) -> Tuple[tp.Numpy, tp.Numpy]:
            # Evaluation pass: generated image plus its mean L1 error.
            g_out = self.generator(input, trainable=False)
            error = flow.math.reduce_mean(flow.math.abs(g_out - target))
            return (g_out, error)
        check_point = flow.train.CheckPoint()
        check_point.init()
        # Per-batch loss histories, dumped to .npy files after training.
        G_image_loss, G_GAN_loss, G_total_loss, D_loss = [], [], [], []
        test_G_image_error = []  # NOTE(review): never appended to or read.
        x, _ = self.load_facades()
        batch_num = len(x) // self.batch_size
        # Real/fake targets sized for the 30x30 patch discriminator output.
        label1 = np.ones((self.batch_size, 1, 30, 30)).astype(np.float32)
        if self.label_smooth != 0:
            # One-sided label smoothing: soften only the "real" labels.
            label1_smooth = label1 - self.label_smooth
        label0 = np.zeros((self.batch_size, 1, 30, 30)).astype(np.float32)
        # NOTE(review): the two values below are never read afterwards —
        # presumably leftovers of a removed best-checkpoint heuristic.
        smallest_image_error = 0.5
        pre_smallest = -1
        for epoch_idx in range(epochs):
            start = time.time()
            # Reload (and thereby reshuffle) the dataset every epoch.
            x, y = self.load_facades()
            for batch_idx in range(batch_num):
                inp = x[
                    batch_idx * self.batch_size : (batch_idx + 1) * self.batch_size
                ].astype(np.float32, order="C")
                target = y[
                    batch_idx * self.batch_size : (batch_idx + 1) * self.batch_size
                ].astype(np.float32, order="C")
                # One-sided label smoothing, when configured.
                if self.label_smooth != 0:
                    d_loss = train_discriminator(inp, target, label0, label1_smooth)
                else:
                    d_loss = train_discriminator(inp, target, label0, label1)
                g_out, g_gan_loss, g_image_loss, g_total_loss = train_generator(inp, target, label1)
                # Reduce each returned blob to a scalar for logging.
                g_gan_loss = g_gan_loss.mean()
                g_image_loss = g_image_loss.mean()
                g_total_loss = g_total_loss.mean()
                d_loss = d_loss.mean()
                G_GAN_loss.append(g_gan_loss)
                G_image_loss.append(g_image_loss)
                G_total_loss.append(g_total_loss)
                D_loss.append(d_loss)
                if (batch_idx + 1) % self.eval_interval == 0:
                    print("############## train ###############")
                    print(
                        "{}th epoch, {}th batch, dloss:{}, g_gan_loss:{}, g_image_loss:{}, g_total_loss:{}".format(
                            epoch_idx + 1, batch_idx + 1, d_loss, g_gan_loss, g_image_loss, g_total_loss
                        )
                    )
            # Every 50 epochs: measure test error on one random test batch
            # to validate the model being trained.
            if (epoch_idx + 1) % 50 == 0:
                test_x, test_y = self.load_facades(mode="test")
                ind = np.random.choice(len(test_x) // self.batch_size)
                test_inp = test_x[ind * self.batch_size : (ind + 1) * self.batch_size].astype(np.float32, order="C")
                test_target = test_y[ind * self.batch_size : (ind + 1) * self.batch_size].astype(np.float32, order="C")
                gout, test_image_error = eval_generator(test_inp, test_target)
                # Save a side-by-side comparison image for this epoch.
                # self.save_images(g_out, inp, target, epoch_idx, name="train")
                self.save_images(gout, test_inp, test_target, epoch_idx, name="test")
                print("############## evaluation ###############")
                print("{}th epoch, {}th batch, test_image_error:{}".format(epoch_idx + 1, batch_idx + 1, test_image_error.mean()))
            print("Time for epoch {} is {} sec.".format(epoch_idx + 1, time.time() - start))
        if save:
            from datetime import datetime
            # Checkpoint directory name carries a human-readable timestamp.
            check_point.save(
                os.path.join(self.checkpoint_path, "pix2pix_{}".format(
                    str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S")))
                )
            )
        # Persist train-loss curves for later plotting.
        np.save(os.path.join(self.path, 'G_image_loss_{}.npy'.format(epochs)), G_image_loss)
        np.save(os.path.join(self.path, 'G_GAN_loss_{}.npy'.format(epochs)), G_GAN_loss)
        np.save(os.path.join(self.path, 'G_total_loss_{}.npy'.format(epochs)), G_total_loss)
        np.save(os.path.join(self.path, 'D_loss_{}.npy'.format(epochs)), D_loss)
        print("*************** Train {} done ***************** ".format(self.path))
def save_to_gif(self):
anim_file = os.path.join(self.path, "pix2pix .gif")
with imageio.get_writer(anim_file, mode="I") as writer:
filenames = glob.glob(os.path.join(self.test_images_path, "*image*.png"))
filenames = sorted(filenames)
last = -1
for i, filename in enumerate(filenames):
frame = 2 * (i ** 0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
print("Generate {} done.".format(anim_file))
if __name__ == "__main__":
    os.environ["ENABLE_USER_OP"] = "True"
    import argparse

    def _parse_args():
        """Build and parse the command-line flags for training / evaluation."""
        parser = argparse.ArgumentParser(description="flags for multi-node and resource")
        parser.add_argument("-e", "--epoch_num", type=int, default=200, required=False)
        parser.add_argument("-lr", "--learning_rate", type=float, default=2e-4, required=False)
        parser.add_argument("--LAMBDA", type=float, default=100, required=False)
        parser.add_argument("--path", type=str, default="./of_pix2pix", required=False)
        parser.add_argument("--batch_size", type=int, default=32, required=False)
        parser.add_argument("--label_smooth", type=float, default=0, required=False)
        parser.add_argument("--test", action="store_true", default=False)
        return parser.parse_args()

    args = _parse_args()
    print(args)
    pix2pix = Pix2Pix(args)
    if args.test:
        # Evaluate a saved checkpoint on a 16-image batch.
        pix2pix.test(16, "./models/pix2pix", "eval_images.png")
    else:
        # Training is currently disabled here; only the GIF is regenerated.
        # pix2pix.train(epochs=args.epoch_num)
        pix2pix.save_to_gif()
| [
"oneflow.nn.leaky_relu",
"oneflow.math.abs",
"oneflow.nn.dropout",
"oneflow.concat",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.math.tanh",
"oneflow.nn.sigmoid_cross_entropy_with_logits",
"oneflow.train.CheckPoint",
"oneflow.optimizer.Adam",
"oneflow.global_function",
"oneflow.pad"... | [((20419, 20491), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for multi-node and resource"""'}), "(description='flags for multi-node and resource')\n", (20442, 20491), False, 'import argparse\n'), ((1335, 1372), 'os.path.join', 'os.path.join', (['self.path', '"""checkpoint"""'], {}), "(self.path, 'checkpoint')\n", (1347, 1372), False, 'import os\n'), ((1501, 1539), 'os.path.join', 'os.path.join', (['self.path', '"""test_images"""'], {}), "(self.path, 'test_images')\n", (1513, 1539), False, 'import os\n'), ((1856, 1990), 'pix_layers.conv2d', 'layers.conv2d', (['inputs', 'filters', 'size'], {'const_init': 'const_init', 'reuse': 'reuse', 'trainable': 'trainable', 'use_bias': '(False)', 'name': "(name + '_conv')"}), "(inputs, filters, size, const_init=const_init, reuse=reuse,\n trainable=trainable, use_bias=False, name=name + '_conv')\n", (1869, 1990), True, 'import pix_layers as layers\n'), ((2253, 2287), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['out'], {'alpha': '(0.3)'}), '(out, alpha=0.3)\n', (2271, 2287), True, 'import oneflow as flow\n'), ((2522, 2648), 'pix_layers.deconv2d', 'layers.deconv2d', (['inputs', 'filters', 'size'], {'const_init': 'const_init', 'trainable': 'trainable', 'use_bias': '(False)', 'name': "(name + '_deconv')"}), "(inputs, filters, size, const_init=const_init, trainable=\n trainable, use_bias=False, name=name + '_deconv')\n", (2537, 2648), True, 'import pix_layers as layers\n'), ((2832, 2906), 'pix_layers.batchnorm', 'layers.batchnorm', (['out'], {'name': "(name + '_bn')", 'reuse': 'reuse', 'trainable': 'trainable'}), "(out, name=name + '_bn', reuse=reuse, trainable=trainable)\n", (2848, 2906), True, 'import pix_layers as layers\n'), ((2998, 3015), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (3010, 3015), True, 'import oneflow as flow\n'), ((4743, 4772), 'oneflow.concat', 'flow.concat', (['[u7, d7]'], {'axis': '(1)'}), '([u7, d7], axis=1)\n', (4754, 4772), 
True, 'import oneflow as flow\n'), ((5033, 5062), 'oneflow.concat', 'flow.concat', (['[u6, d6]'], {'axis': '(1)'}), '([u6, d6], axis=1)\n', (5044, 5062), True, 'import oneflow as flow\n'), ((5323, 5352), 'oneflow.concat', 'flow.concat', (['[u5, d5]'], {'axis': '(1)'}), '([u5, d5], axis=1)\n', (5334, 5352), True, 'import oneflow as flow\n'), ((5513, 5542), 'oneflow.concat', 'flow.concat', (['[u4, d4]'], {'axis': '(1)'}), '([u4, d4], axis=1)\n', (5524, 5542), True, 'import oneflow as flow\n'), ((5702, 5731), 'oneflow.concat', 'flow.concat', (['[u3, d3]'], {'axis': '(1)'}), '([u3, d3], axis=1)\n', (5713, 5731), True, 'import oneflow as flow\n'), ((5891, 5920), 'oneflow.concat', 'flow.concat', (['[u2, d2]'], {'axis': '(1)'}), '([u2, d2], axis=1)\n', (5902, 5920), True, 'import oneflow as flow\n'), ((6081, 6110), 'oneflow.concat', 'flow.concat', (['[u1, d1]'], {'axis': '(1)'}), '([u1, d1], axis=1)\n', (6092, 6110), True, 'import oneflow as flow\n'), ((6151, 6261), 'pix_layers.deconv2d', 'layers.deconv2d', (['u1', 'self.out_channels', '(4)'], {'name': '"""g_u0_deconv"""', 'const_init': 'const_init', 'trainable': 'trainable'}), "(u1, self.out_channels, 4, name='g_u0_deconv', const_init=\n const_init, trainable=trainable)\n", (6166, 6261), True, 'import pix_layers as layers\n'), ((6353, 6371), 'oneflow.math.tanh', 'flow.math.tanh', (['u0'], {}), '(u0)\n', (6367, 6371), True, 'import oneflow as flow\n'), ((6539, 6577), 'oneflow.concat', 'flow.concat', (['[inputs, targets]'], {'axis': '(1)'}), '([inputs, targets], axis=1)\n', (6550, 6577), True, 'import oneflow as flow\n'), ((7168, 7214), 'oneflow.pad', 'flow.pad', (['d3', '[[0, 0], [0, 0], [1, 1], [1, 1]]'], {}), '(d3, [[0, 0], [0, 0], [1, 1], [1, 1]])\n', (7176, 7214), True, 'import oneflow as flow\n'), ((7258, 7406), 'pix_layers.conv2d', 'layers.conv2d', (['pad1', '(512)', '(4)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""d_conv1"""', 'trainable': 'trainable', 'reuse': 'reuse', 'const_init': 'const_init', 
'use_bias': '(False)'}), "(pad1, 512, 4, strides=1, padding='valid', name='d_conv1',\n trainable=trainable, reuse=reuse, const_init=const_init, use_bias=False)\n", (7271, 7406), True, 'import pix_layers as layers\n'), ((7548, 7618), 'pix_layers.batchnorm', 'layers.batchnorm', (['conv1'], {'name': '"""d_bn"""', 'reuse': 'reuse', 'trainable': 'trainable'}), "(conv1, name='d_bn', reuse=reuse, trainable=trainable)\n", (7564, 7618), True, 'import pix_layers as layers\n'), ((7640, 7674), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['bn1'], {'alpha': '(0.3)'}), '(bn1, alpha=0.3)\n', (7658, 7674), True, 'import oneflow as flow\n'), ((7717, 7771), 'oneflow.pad', 'flow.pad', (['leaky_relu', '[[0, 0], [0, 0], [1, 1], [1, 1]]'], {}), '(leaky_relu, [[0, 0], [0, 0], [1, 1], [1, 1]])\n', (7725, 7771), True, 'import oneflow as flow\n'), ((7813, 7943), 'pix_layers.conv2d', 'layers.conv2d', (['pad2', '(1)', '(4)'], {'strides': '(1)', 'padding': '"""valid"""', 'name': '"""d_conv2"""', 'trainable': 'trainable', 'reuse': 'reuse', 'const_init': 'const_init'}), "(pad2, 1, 4, strides=1, padding='valid', name='d_conv2',\n trainable=trainable, reuse=reuse, const_init=const_init)\n", (7826, 7943), True, 'import pix_layers as layers\n'), ((8214, 8237), 'numpy.random.randint', 'np.random.randint', (['(1024)'], {}), '(1024)\n', (8231, 8237), True, 'import numpy as np\n'), ((10256, 10276), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10270, 10276), True, 'import numpy as np\n'), ((10285, 10314), 'numpy.random.shuffle', 'np.random.shuffle', (['input_imgs'], {}), '(input_imgs)\n', (10302, 10314), True, 'import numpy as np\n'), ((10323, 10343), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (10337, 10343), True, 'import numpy as np\n'), ((10352, 10380), 'numpy.random.shuffle', 'np.random.shuffle', (['real_imgs'], {}), '(real_imgs)\n', (10369, 10380), True, 'import numpy as np\n'), ((10782, 10808), 'matplotlib.pyplot.figure', 'plt.figure', ([], 
{'figsize': '(6, 8)'}), '(figsize=(6, 8))\n', (10792, 10808), True, 'import matplotlib.pyplot as plt\n'), ((11623, 11645), 'matplotlib.pyplot.savefig', 'plt.savefig', (['save_path'], {}), '(save_path)\n', (11634, 11645), True, 'import matplotlib.pyplot as plt\n'), ((11654, 11665), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (11663, 11665), True, 'import matplotlib.pyplot as plt\n'), ((11743, 11764), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (11762, 11764), True, 'import oneflow as flow\n'), ((11824, 11889), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'func_config'}), "(type='predict', function_config=func_config)\n", (11844, 11889), True, 'import oneflow as flow\n'), ((12118, 12141), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (12139, 12141), True, 'import oneflow as flow\n'), ((12772, 12793), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (12791, 12793), True, 'import oneflow as flow\n'), ((12867, 12923), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[self.lr]'], {}), '([], [self.lr])\n', (12908, 12923), True, 'import oneflow as flow\n'), ((12934, 12997), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (12954, 12997), True, 'import oneflow as flow\n'), ((13865, 13928), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (13885, 13928), True, 'import oneflow as flow\n'), ((14991, 15056), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'func_config'}), "(type='predict', function_config=func_config)\n", (15011, 15056), True, 'import oneflow as flow\n'), ((15464, 15487), 
'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (15485, 15487), True, 'import oneflow as flow\n'), ((19612, 19651), 'os.path.join', 'os.path.join', (['self.path', '"""pix2pix .gif"""'], {}), "(self.path, 'pix2pix .gif')\n", (19624, 19651), False, 'import os\n'), ((1182, 1207), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (1196, 1207), False, 'import os\n'), ((1221, 1240), 'os.mkdir', 'os.mkdir', (['self.path'], {}), '(self.path)\n', (1229, 1240), False, 'import os\n'), ((1388, 1424), 'os.path.exists', 'os.path.exists', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (1402, 1424), False, 'import os\n'), ((1438, 1468), 'os.mkdir', 'os.mkdir', (['self.checkpoint_path'], {}), '(self.checkpoint_path)\n', (1446, 1468), False, 'import os\n'), ((1555, 1592), 'os.path.exists', 'os.path.exists', (['self.test_images_path'], {}), '(self.test_images_path)\n', (1569, 1592), False, 'import os\n'), ((1606, 1637), 'os.mkdir', 'os.mkdir', (['self.test_images_path'], {}), '(self.test_images_path)\n', (1614, 1637), False, 'import os\n'), ((2163, 2237), 'pix_layers.batchnorm', 'layers.batchnorm', (['out'], {'name': "(name + '_bn')", 'reuse': 'reuse', 'trainable': 'trainable'}), "(out, name=name + '_bn', reuse=reuse, trainable=trainable)\n", (2179, 2237), True, 'import pix_layers as layers\n'), ((2952, 2982), 'oneflow.nn.dropout', 'flow.nn.dropout', (['out'], {'rate': '(0.5)'}), '(out, rate=0.5)\n', (2967, 2982), True, 'import oneflow as flow\n'), ((8253, 8278), 'os.path.exists', 'os.path.exists', (['data_path'], {}), '(data_path)\n', (8267, 8278), False, 'import os\n'), ((8648, 8705), 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', (['_PATH'], {'origin': '_URL', 'extract': '(True)'}), '(_PATH, origin=_URL, extract=True)\n', (8671, 8705), True, 'import tensorflow as tf\n'), ((13459, 13571), 'oneflow.nn.sigmoid_cross_entropy_with_logits', 'flow.nn.sigmoid_cross_entropy_with_logits', (['label1', 'g_logits'], 
{'name': '"""Gloss_sigmoid_cross_entropy_with_logits"""'}), "(label1, g_logits, name=\n 'Gloss_sigmoid_cross_entropy_with_logits')\n", (13500, 13571), True, 'import oneflow as flow\n'), ((14434, 14551), 'oneflow.nn.sigmoid_cross_entropy_with_logits', 'flow.nn.sigmoid_cross_entropy_with_logits', (['label0', 'g_logits'], {'name': '"""Dloss_fake_sigmoid_cross_entropy_with_logits"""'}), "(label0, g_logits, name=\n 'Dloss_fake_sigmoid_cross_entropy_with_logits')\n", (14475, 14551), True, 'import oneflow as flow\n'), ((14689, 14806), 'oneflow.nn.sigmoid_cross_entropy_with_logits', 'flow.nn.sigmoid_cross_entropy_with_logits', (['label1', 'd_logits'], {'name': '"""Dloss_real_sigmoid_cross_entropy_with_logits"""'}), "(label1, d_logits, name=\n 'Dloss_real_sigmoid_cross_entropy_with_logits')\n", (14730, 14806), True, 'import oneflow as flow\n'), ((16070, 16081), 'time.time', 'time.time', ([], {}), '()\n', (16079, 16081), False, 'import time, shutil\n'), ((19665, 19704), 'imageio.get_writer', 'imageio.get_writer', (['anim_file'], {'mode': '"""I"""'}), "(anim_file, mode='I')\n", (19683, 19704), False, 'import imageio\n'), ((20200, 20224), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (20214, 20224), False, 'import imageio\n'), ((8391, 8413), 'os.path.exists', 'os.path.exists', (['"""data"""'], {}), "('data')\n", (8405, 8413), False, 'import os\n'), ((8431, 8447), 'os.mkdir', 'os.mkdir', (['"""data"""'], {}), "('data')\n", (8439, 8447), False, 'import os\n'), ((8481, 8492), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (8490, 8492), False, 'import os\n'), ((8998, 9027), 'os.path.join', 'os.path.join', (['data_path', 'mode'], {}), '(data_path, mode)\n', (9010, 9027), False, 'import os\n'), ((9050, 9082), 'os.path.join', 'os.path.join', (['data_path', 'mode', 'd'], {}), '(data_path, mode, d)\n', (9062, 9082), False, 'import os\n'), ((9158, 9190), 'PIL.Image.fromarray', 'Image.fromarray', (['img[:, :256, :]'], {}), '(img[:, :256, :])\n', (9173, 9190), 
False, 'from PIL import Image, ImageOps\n'), ((9219, 9251), 'PIL.Image.fromarray', 'Image.fromarray', (['img[:, 256:, :]'], {}), '(img[:, 256:, :])\n', (9234, 9251), False, 'from PIL import Image, ImageOps\n'), ((9356, 9385), 'numpy.random.randint', 'np.random.randint', (['(30)'], {'size': '(2)'}), '(30, size=2)\n', (9373, 9385), True, 'import numpy as np\n'), ((10012, 10032), 'numpy.array', 'np.array', (['input_imgs'], {}), '(input_imgs)\n', (10020, 10032), True, 'import numpy as np\n'), ((10075, 10094), 'numpy.array', 'np.array', (['real_imgs'], {}), '(real_imgs)\n', (10083, 10094), True, 'import numpy as np\n'), ((11120, 11144), 'matplotlib.pyplot.subplot', 'plt.subplot', (['row', '(6)', 'idx'], {}), '(row, 6, idx)\n', (11131, 11144), True, 'import matplotlib.pyplot as plt\n'), ((11345, 11360), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (11353, 11360), True, 'import matplotlib.pyplot as plt\n'), ((11937, 11983), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(eval_size, 3, 256, 256)'], {}), '((eval_size, 3, 256, 256))\n', (11957, 11983), True, 'import oneflow.typing as tp\n'), ((13046, 13098), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (13066, 13098), True, 'import oneflow.typing as tp\n'), ((13120, 13172), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (13140, 13172), True, 'import oneflow.typing as tp\n'), ((13194, 13244), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (13214, 13244), True, 'import oneflow.typing as tp\n'), ((13641, 13670), 'oneflow.math.abs', 'flow.math.abs', (['(g_out - target)'], {}), '(g_out - target)\n', (13654, 13670), True, 'import oneflow as flow\n'), ((13981, 14033), 'oneflow.typing.Numpy.Placeholder', 
'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (14001, 14033), True, 'import oneflow.typing as tp\n'), ((14055, 14107), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (14075, 14107), True, 'import oneflow.typing as tp\n'), ((14129, 14179), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (14149, 14179), True, 'import oneflow.typing as tp\n'), ((14201, 14251), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (14221, 14251), True, 'import oneflow.typing as tp\n'), ((15104, 15156), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (15124, 15156), True, 'import oneflow.typing as tp\n'), ((15178, 15230), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(self.batch_size, 3, 256, 256)'], {}), '((self.batch_size, 3, 256, 256))\n', (15198, 15230), True, 'import oneflow.typing as tp\n'), ((15376, 15405), 'oneflow.math.abs', 'flow.math.abs', (['(g_out - target)'], {}), '(g_out - target)\n', (15389, 15405), True, 'import oneflow as flow\n'), ((15726, 15763), 'numpy.ones', 'np.ones', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (15733, 15763), True, 'import numpy as np\n'), ((15891, 15929), 'numpy.zeros', 'np.zeros', (['(self.batch_size, 1, 30, 30)'], {}), '((self.batch_size, 1, 30, 30))\n', (15899, 15929), True, 'import numpy as np\n'), ((19750, 19800), 'os.path.join', 'os.path.join', (['self.test_images_path', '"""*image*.png"""'], {}), "(self.test_images_path, '*image*.png')\n", (19762, 19800), False, 'import os\n'), ((20113, 20137), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (20127, 20137), 
False, 'import imageio\n'), ((9116, 9129), 'PIL.Image.open', 'Image.open', (['d'], {}), '(d)\n', (9126, 9129), False, 'from PIL import Image, ImageOps\n'), ((9682, 9698), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (9696, 9698), True, 'import numpy as np\n'), ((9776, 9801), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['real_img'], {}), '(real_img)\n', (9791, 9801), False, 'from PIL import Image, ImageOps\n'), ((9834, 9860), 'PIL.ImageOps.mirror', 'ImageOps.mirror', (['input_img'], {}), '(input_img)\n', (9849, 9860), False, 'from PIL import Image, ImageOps\n'), ((9911, 9931), 'numpy.asarray', 'np.asarray', (['real_img'], {}), '(real_img)\n', (9921, 9931), True, 'import numpy as np\n'), ((9967, 9988), 'numpy.asarray', 'np.asarray', (['input_img'], {}), '(input_img)\n', (9977, 9988), True, 'import numpy as np\n'), ((13739, 13783), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', (['lr_scheduler'], {'beta1': '(0.5)'}), '(lr_scheduler, beta1=0.5)\n', (13758, 13783), True, 'import oneflow as flow\n'), ((14892, 14936), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', (['lr_scheduler'], {'beta1': '(0.5)'}), '(lr_scheduler, beta1=0.5)\n', (14911, 14936), True, 'import oneflow as flow\n'), ((18788, 18799), 'time.time', 'time.time', ([], {}), '()\n', (18797, 18799), False, 'import time, shutil\n'), ((11280, 11296), 'numpy.array', 'np.array', (['dis[j]'], {}), '(dis[j])\n', (11288, 11296), True, 'import numpy as np\n'), ((18996, 19010), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (19008, 19010), False, 'from datetime import datetime\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def test_simple(test_case):
    """Watch the input blob of a single-device job; it must arrive unchanged."""
    flow.config.gpu_device_num(1)
    ones = np.ones((10,), dtype=np.float32)

    def _expect_ones(blob):
        # The watched blob should still hold the all-ones input.
        test_case.assertTrue(np.allclose(ones, blob.numpy()))

    @flow.global_function()
    def ReluJob(x: oft.Numpy.Placeholder((10,))):
        flow.watch(x, _expect_ones)

    ReluJob(ones)
def test_two_device(test_case):
    """Watch a relu output under a two-GPU mirrored view; relu(1) == 1."""
    flow.config.gpu_device_num(2)
    ones = np.ones((10,), dtype=np.float32)

    def _expect_ones(blob):
        # relu leaves an all-ones blob unchanged.
        test_case.assertTrue(np.allclose(ones, blob.numpy()))

    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(func_config)
    def ReluJob(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.relu(x)
        flow.watch(y, _expect_ones)

    ReluJob(ones)
| [
"oneflow.global_function",
"oneflow.watch",
"oneflow.scope.mirrored_view",
"oneflow.typing.Numpy.Placeholder",
"oneflow.math.relu",
"oneflow.config.gpu_device_num",
"oneflow.FunctionConfig"
] | [((695, 724), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (721, 724), True, 'import oneflow as flow\n'), ((736, 768), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (743, 768), True, 'import numpy as np\n'), ((854, 876), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (874, 876), True, 'import oneflow as flow\n'), ((1014, 1043), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1040, 1043), True, 'import oneflow as flow\n'), ((1055, 1087), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (1062, 1087), True, 'import numpy as np\n'), ((1186, 1207), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1205, 1207), True, 'import oneflow as flow\n'), ((1279, 1312), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1299, 1312), True, 'import oneflow as flow\n'), ((935, 956), 'oneflow.watch', 'flow.watch', (['x', 'EqOnes'], {}), '(x, EqOnes)\n', (945, 956), True, 'import oneflow as flow\n'), ((1245, 1271), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1269, 1271), True, 'import oneflow as flow\n'), ((1375, 1392), 'oneflow.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (1389, 1392), True, 'import oneflow as flow\n'), ((1401, 1422), 'oneflow.watch', 'flow.watch', (['y', 'EqOnes'], {}), '(y, EqOnes)\n', (1411, 1422), True, 'import oneflow as flow\n'), ((896, 924), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (917, 924), True, 'import oneflow.typing as oft\n'), ((1332, 1360), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (1353, 1360), True, 'import oneflow.typing as oft\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Optional, Union, Tuple
@oneflow_export("nn.Upsample")
@experimental_api
class Upsample(Module):
    r"""Upsamples a multi-channel 1D (temporal), 2D (spatial) or 3D
    (volumetric) input. The interface is consistent with PyTorch; see
    https://pytorch.org/docs/1.9.0/_modules/torch/nn/modules/upsampling.html#Upsample

    The input is expected as
    `minibatch x channels x [optional depth] x [optional height] x width`,
    i.e. a 3D, 4D or 5D tensor. Available algorithms are nearest neighbor
    and linear, bilinear, bicubic and trilinear interpolation.

    Give either the target output :attr:`size` or a :attr:`scale_factor`
    to derive the output size — supplying both is ambiguous.

    Args:
        size (int or Tuple[int] or Tuple[int, int] or Tuple[int, int, int], optional):
            output spatial sizes
        scale_factor (float or Tuple[float] or Tuple[float, float] or Tuple[float, float, float], optional):
            multiplier for spatial size. Has to match input size if it is a tuple.
        mode (str, optional): the upsampling algorithm: one of ``'nearest'``,
            ``'linear'``, ``'bilinear'``, ``'bicubic'`` and ``'trilinear'``.
            Default: ``'nearest'``
        align_corners (bool, optional): if ``True``, corner pixels of the
            input and output tensors are aligned, preserving their values.
            Only effective for ``'linear'``, ``'bilinear'`` and
            ``'trilinear'`` modes. Default: ``False``

    Shape:
        - Input: :math:`(N, C, W_{in})`, :math:`(N, C, H_{in}, W_{in})` or :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, W_{out})`, :math:`(N, C, H_{out}, W_{out})`
          or :math:`(N, C, D_{out}, H_{out}, W_{out})`, where each spatial
          output dimension is
          :math:`\left\lfloor \text{in} \times \text{scale_factor} \right\rfloor`

    .. warning::
        With ``align_corners = True`` the linearly interpolating modes do
        not proportionally align output and input pixels, so output values
        can depend on the input size (the pre-0.3.1 default behavior).
        Since then the default is ``align_corners = False``.

    .. note::
        For downsampling/general resizing use :func:`~nn.functional.interpolate`.

    For example:

    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow

        >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32)
        >>> input = input.to("cuda")
        >>> m = flow.nn.Upsample(scale_factor=2.0, mode="nearest")
        >>> output = m(input)
        >>> output #doctest: +ELLIPSIS
        tensor([[[[1., 1., 2., 2.],
                ...
                [3., 3., 4., 4.]]]], device='cuda:0', dtype=oneflow.float32)

    """

    def __init__(
        self,
        size: Optional[Union[int, Tuple[int, ...]]] = None,
        scale_factor: Optional[Union[float, Tuple[float, ...]]] = None,
        mode: str = "nearest",
        align_corners: Optional[bool] = None,
    ):
        super().__init__()
        # Stored verbatim; validation happens inside functional.interpolate.
        self.size = size
        self.scale_factor = scale_factor
        self.mode = mode
        self.align_corners = align_corners

    def forward(self, x):
        # Delegate to the functional implementation with the stored config.
        interpolate = flow.experimental.nn.functional.interpolate
        return interpolate(
            x,
            size=self.size,
            scale_factor=self.scale_factor,
            mode=self.mode,
            align_corners=self.align_corners,
        )

    def extra_repr(self) -> str:
        # Report whichever of scale_factor / size is configured, plus mode.
        if self.scale_factor is None:
            target = "size=" + str(self.size)
        else:
            target = "scale_factor=" + str(self.scale_factor)
        return target + ", mode=" + self.mode
@oneflow_export("nn.UpsamplingNearest2d")
@experimental_api
class UpsamplingNearest2d(Upsample):
    r"""Applies 2D nearest-neighbor upsampling to an input composed of
    several input channels.

    A thin convenience wrapper over :class:`Upsample` with
    ``mode='nearest'`` fixed. Specify the target either via :attr:`size`
    (output image size `(h, w)`) or via :attr:`scale_factor`.

    Args:
        size (int or Tuple[int, int], optional): output spatial sizes
        scale_factor (float or Tuple[float, float], optional): multiplier for
            spatial size.

    .. warning::
        This class is deprecated in favor of :func:`~nn.functional.interpolate`.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where

    .. math::
        H_{out} = \left\lfloor H_{in} \times \text{scale_factor} \right\rfloor

    .. math::
        W_{out} = \left\lfloor W_{in} \times \text{scale_factor} \right\rfloor

    For example:

    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()

        >>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32)
        >>> input = input.to("cuda")
        >>> m = flow.nn.UpsamplingNearest2d(scale_factor=2.0)
        >>> output = m(input)
        >>> output #doctest: +ELLIPSIS
        tensor([[[[1., 1., 2., 2.],
                ...
                [3., 3., 4., 4.]]]], device='cuda:0', dtype=oneflow.float32)

    """

    def __init__(
        self,
        size: Optional[Tuple[int, int]] = None,
        scale_factor: Optional[Tuple[float, float]] = None,
    ) -> None:
        # Fix the interpolation mode; everything else is handled by Upsample.
        super().__init__(size, scale_factor, mode="nearest")
@oneflow_export("nn.UpsamplingBilinear2d")
@experimental_api
class UpsamplingBilinear2d(Upsample):
r"""Applies a 2D bilinear upsampling to an input signal composed of several input
channels.
To specify the scale, it takes either the :attr:`size` or the :attr:`scale_factor`
as it's constructor argument.
When :attr:`size` is given, it is the output size of the image `(h, w)`.
Args:
size (int or Tuple[int, int], optional): output spatial sizes
scale_factor (float or Tuple[float, float], optional): multiplier for
spatial size.
.. warning::
This class is deprecated in favor of :func:`~nn.functional.interpolate`. It is
equivalent to ``nn.functional.interpolate(..., mode='bilinear', align_corners=True)``.
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
.. math::
H_{out} = \left\lfloor H_{in} \times \text{scale_factor} \right\rfloor
.. math::
W_{out} = \left\lfloor W_{in} \times \text{scale_factor} \right\rfloor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> input = flow.Tensor(np.arange(1, 5).reshape((1, 1, 2, 2)), dtype=flow.float32)
>>> input = input.to("cuda")
>>> m = flow.nn.UpsamplingBilinear2d(scale_factor=2.0)
>>> output = m(input)
>>> output #doctest: +ELLIPSIS
tensor([[[[1. , 1.3333, 1.6667, 2. ],
...
[3. , 3.3333, 3.6667, 4. ]]]], device='cuda:0',
dtype=oneflow.float32)
"""
def __init__(
self,
size: Optional[Tuple[int, int]] = None,
scale_factor: Optional[Tuple[float, float]] = None,
) -> None:
super(UpsamplingBilinear2d, self).__init__(
size, scale_factor, mode="bilinear", align_corners=True
)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.experimental.nn.functional.interpolate"
] | [((840, 869), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Upsample"""'], {}), "('nn.Upsample')\n", (854, 869), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((5184, 5224), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.UpsamplingNearest2d"""'], {}), "('nn.UpsamplingNearest2d')\n", (5198, 5224), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((7000, 7041), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.UpsamplingBilinear2d"""'], {}), "('nn.UpsamplingBilinear2d')\n", (7014, 7041), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((9049, 9085), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (9064, 9085), False, 'import doctest\n'), ((4713, 4862), 'oneflow.experimental.nn.functional.interpolate', 'flow.experimental.nn.functional.interpolate', (['x'], {'size': 'self.size', 'scale_factor': 'self.scale_factor', 'mode': 'self.mode', 'align_corners': 'self.align_corners'}), '(x, size=self.size, scale_factor\n =self.scale_factor, mode=self.mode, align_corners=self.align_corners)\n', (4756, 4862), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.tensor,
r"""
Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,
otherwise return a local tensor.
Arguments:
data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.
Keyword Arguments:
dtype (oneflow.dtype, optional) – the desired data type of returned tensor.
Default: if None, infers data type from data.
device (oneflow.device, optional): the desired device of returned tensor. If placement
and sbp is None, uses the current cpu for the default tensor type.
placement (oneflow.placement, optional): the desired placement of returned tensor.
sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False
Noted:
The Keyword Argument device is mutually exclusive with placement and sbp.
Consistent tensor only can be constructed from tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([1,2,3])
>>> x
tensor([1, 2, 3], dtype=oneflow.int64)
""",
)
add_docstr(
oneflow.Tensor.atan2,
r"""
See :func:`oneflow.atan2`
""",
)
add_docstr(
oneflow.Tensor.expand_as,
"""
expand_as(other) -> Tensor
Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
Please see :meth:`~Tensor.expand` for more information about ``expand``.
Args:
other (:class:`oneflow.Tensor`): The result tensor has the same size
as :attr:`other`.
""",
)
add_docstr(
oneflow.Tensor.numel,
"""
See :func:`oneflow.numel`
""",
)
add_docstr(
oneflow.Tensor.transpose,
"""
See :func:`oneflow.transpose`
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1959), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.tensor', '"""\n Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,\n otherwise return a local tensor. \n \n Arguments:\n data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.\n Keyword Arguments:\n dtype (oneflow.dtype, optional) – the desired data type of returned tensor.\n Default: if None, infers data type from data.\n device (oneflow.device, optional): the desired device of returned tensor. If placement\n and sbp is None, uses the current cpu for the default tensor type.\n placement (oneflow.placement, optional): the desired placement of returned tensor.\n sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False\n\n Noted:\n The Keyword Argument device is mutually exclusive with placement and sbp.\n Consistent tensor only can be constructed from tensor.\n\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([1,2,3])\n >>> x\n tensor([1, 2, 3], dtype=oneflow.int64)\n\n """'], {}), '(oneflow.tensor,\n """\n Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,\n otherwise return a local tensor. \n \n Arguments:\n data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.\n Keyword Arguments:\n dtype (oneflow.dtype, optional) – the desired data type of returned tensor.\n Default: if None, infers data type from data.\n device (oneflow.device, optional): the desired device of returned tensor. 
If placement\n and sbp is None, uses the current cpu for the default tensor type.\n placement (oneflow.placement, optional): the desired placement of returned tensor.\n sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False\n\n Noted:\n The Keyword Argument device is mutually exclusive with placement and sbp.\n Consistent tensor only can be constructed from tensor.\n\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([1,2,3])\n >>> x\n tensor([1, 2, 3], dtype=oneflow.int64)\n\n """\n )\n', (670, 1959), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1964, 2039), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.atan2', '"""\n See :func:`oneflow.atan2`\n """'], {}), '(oneflow.Tensor.atan2, """\n See :func:`oneflow.atan2`\n """)\n', (1974, 2039), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2053, 2475), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.expand_as', '"""\n expand_as(other) -> Tensor\n\n Expand this tensor to the same size as :attr:`other`.\n ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.\n\n Please see :meth:`~Tensor.expand` for more information about ``expand``.\n\n Args:\n other (:class:`oneflow.Tensor`): The result tensor has the same size\n as :attr:`other`.\n """'], {}), '(oneflow.Tensor.expand_as,\n """\n expand_as(other) -> Tensor\n\n Expand this tensor to the same size as :attr:`other`.\n ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.\n\n Please see :meth:`~Tensor.expand` for more information about ``expand``.\n\n Args:\n other (:class:`oneflow.Tensor`): The result tensor has the same size\n as :attr:`other`.\n """\n )\n', (2063, 2475), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), 
((2479, 2554), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.numel', '"""\n See :func:`oneflow.numel`\n """'], {}), '(oneflow.Tensor.numel, """\n See :func:`oneflow.numel`\n """)\n', (2489, 2554), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2567, 2654), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.transpose', '"""\n See :func:`oneflow.transpose`\n """'], {}), '(oneflow.Tensor.transpose,\n """\n See :func:`oneflow.transpose`\n """)\n', (2577, 2654), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def MakeFuncConfig(enable_inplace):
func_config = flow.FunctionConfig()
func_config.enable_inplace(enable_inplace)
return func_config
def TrainCompare(test_case, func):
func_config = MakeFuncConfig(True)
@flow.global_function(type="train", function_config=func_config)
def EnableInplace():
return func("w0")
func_config.enable_inplace(False)
@flow.global_function(type="train", function_config=func_config)
def DisableInplace():
return func("w1")
num_iter = 10
enable_inplace_losses = np.array(
[EnableInplace().get().tolist() for _ in range(num_iter)]
)
disable_inplace_losses = np.array(
[DisableInplace().get().tolist() for _ in range(num_iter)]
)
test_case.assertTrue(np.allclose(enable_inplace_losses, disable_inplace_losses))
@flow.unittest.skip_unless_1n1d()
class TestInplace(flow.unittest.TestCase):
def test_loss_inplace(test_case):
def IdentityLoss(name):
w = flow.get_variable(
name, (10,), initializer=flow.constant_initializer(100)
)
y = flow.math.reduce_sum(w)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [5]), momentum=0
).minimize(y)
return y
TrainCompare(test_case, IdentityLoss)
def test_inplace_variable(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def InplaceVariable():
w = flow.get_variable("w", (10,), initializer=flow.constant_initializer(1))
y = flow.math.relu(w)
return y
test_case.assertTrue(
np.allclose(InplaceVariable().get().numpy(), np.ones((10,), np.float32))
)
def test_deadlock(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
Foo(np.ones((10,), dtype=np.float32))
def test_nodeadlock_with_return(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
return y
Foo(np.ones((10,), dtype=np.float32)).get()
def test_reentrant_lock_check_failed(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def Foo(x: oft.Numpy.Placeholder((10,))):
y = flow.math.relu(x)
y = flow.math.relu(y)
Foo(np.ones((10,), dtype=np.float32))
def test_const_inplace_variable(test_case):
@flow.global_function(function_config=MakeFuncConfig(True))
def InplaceVariable():
w = flow.get_variable("w", (2, 5), initializer=flow.constant_initializer(1))
y = flow.reshape(w, (10,))
return y
of_ret = InplaceVariable().get().numpy()
test_case.assertTrue(np.allclose(of_ret, np.ones((10,), np.float32)))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.math.reduce_sum",
"oneflow.compatible.single_client.math.relu",
"oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_client.reshape",
"oneflow.compatible.single_cl... | [((1572, 1604), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1602, 1604), True, 'from oneflow.compatible import single_client as flow\n'), ((793, 814), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (812, 814), True, 'from oneflow.compatible import single_client as flow\n'), ((967, 1030), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (987, 1030), True, 'from oneflow.compatible import single_client as flow\n'), ((1127, 1190), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1147, 1190), True, 'from oneflow.compatible import single_client as flow\n'), ((3817, 3832), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3830, 3832), False, 'import unittest\n'), ((1509, 1567), 'numpy.allclose', 'np.allclose', (['enable_inplace_losses', 'disable_inplace_losses'], {}), '(enable_inplace_losses, disable_inplace_losses)\n', (1520, 1567), True, 'import numpy as np\n'), ((1855, 1878), 'oneflow.compatible.single_client.math.reduce_sum', 'flow.math.reduce_sum', (['w'], {}), '(w)\n', (1875, 1878), True, 'from oneflow.compatible import single_client as flow\n'), ((2330, 2347), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['w'], {}), '(w)\n', (2344, 2347), True, 'from oneflow.compatible import single_client as flow\n'), ((2664, 2681), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (2678, 2681), True, 'from oneflow.compatible import single_client as flow\n'), ((2698, 2715), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['y'], {}), '(y)\n', (2712, 2715), True, 'from 
oneflow.compatible import single_client as flow\n'), ((2729, 2761), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (2736, 2761), True, 'import numpy as np\n'), ((2946, 2963), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (2960, 2963), True, 'from oneflow.compatible import single_client as flow\n'), ((2980, 2997), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['y'], {}), '(y)\n', (2994, 2997), True, 'from oneflow.compatible import single_client as flow\n'), ((3260, 3277), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (3274, 3277), True, 'from oneflow.compatible import single_client as flow\n'), ((3294, 3311), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['y'], {}), '(y)\n', (3308, 3311), True, 'from oneflow.compatible import single_client as flow\n'), ((3325, 3357), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (3332, 3357), True, 'import numpy as np\n'), ((3612, 3634), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['w', '(10,)'], {}), '(w, (10,))\n', (3624, 3634), True, 'from oneflow.compatible import single_client as flow\n'), ((2457, 2483), 'numpy.ones', 'np.ones', (['(10,)', 'np.float32'], {}), '((10,), np.float32)\n', (2464, 2483), True, 'import numpy as np\n'), ((2617, 2645), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (2638, 2645), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2899, 2927), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (2920, 2927), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3213, 3241), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (3234, 3241), True, 
'from oneflow.compatible.single_client import typing as oft\n'), ((3755, 3781), 'numpy.ones', 'np.ones', (['(10,)', 'np.float32'], {}), '((10,), np.float32)\n', (3762, 3781), True, 'import numpy as np\n'), ((1794, 1824), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(100)'], {}), '(100)\n', (1819, 1824), True, 'from oneflow.compatible import single_client as flow\n'), ((2284, 2312), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(1)'], {}), '(1)\n', (2309, 2312), True, 'from oneflow.compatible import single_client as flow\n'), ((3032, 3064), 'numpy.ones', 'np.ones', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (3039, 3064), True, 'import numpy as np\n'), ((3566, 3594), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(1)'], {}), '(1)\n', (3591, 3594), True, 'from oneflow.compatible import single_client as flow\n'), ((1927, 1977), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[5]'], {}), '([], [5])\n', (1968, 1977), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from google.protobuf import text_format
import os
import oneflow
import oneflow as flow
import oneflow._oneflow_internal
import oneflow._oneflow_internal._C as _C
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.session_context as session_ctx
import oneflow.unittest
from oneflow.framework.multi_client_session import MultiClientSession
def _get_c_tensor(t):
if isinstance(t, oneflow._oneflow_internal.Tensor):
return t
else:
raise NotImplementError
def _test_user_op_graph(test_case, is_cuda):
test_case.assertTrue(oneflow.framework.env_util.HasAllMultiClientEnvVars())
x0 = flow.tensor(np.random.rand(20, 30), dtype=flow.float32)
weight0 = flow.tensor(np.random.rand(30, 50), dtype=flow.float32)
x1 = flow.tensor(np.random.rand(50, 70), dtype=flow.float32)
if is_cuda:
x0 = x0.to(device=flow.device("cuda"))
weight0 = weight0.to(device=flow.device("cuda"))
x1 = x1.to(device=flow.device("cuda"))
# NOTE(chengcheng): this tiny net is:
# x0 * weight0 -> out0
# relu(out0) -> y0
# y0 * x1 -> out1
# relu(out1) -> y1
session = session_ctx.GetDefaultSession()
test_case.assertTrue(isinstance(session, MultiClientSession))
session.TryInit()
with oneflow._oneflow_internal.lazy_mode.guard(True):
oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
"cc_test_user_op_expr_job_with_cuda" + str(is_cuda)
)
job_conf = oneflow.core.job.job_conf_pb2.JobConfigProto()
job_conf.job_name = "cc_test_user_op_expr_job_with_cuda" + str(is_cuda)
job_conf.predict_conf.SetInParent()
c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
x0_conf = oneflow.core.operator.op_conf_pb2.FeedInputOpConf()
x0_conf.in_0 = "in_0"
x0_conf.out_0 = "out_0"
x0_conf_str = text_format.MessageToString(x0_conf)
x0_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
"cc_Input_0", x0_conf_str, ["in_0"], ["out_0"]
)
x1_conf = oneflow.core.operator.op_conf_pb2.FeedInputOpConf()
x1_conf.in_0 = "in_0"
x1_conf.out_0 = "out_0"
x1_conf_str = text_format.MessageToString(x1_conf)
x1_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
"cc_Input_1", x1_conf_str, ["in_0"], ["out_0"]
)
weight0_conf = oneflow.core.operator.op_conf_pb2.FeedVariableOpConf()
weight0_conf.in_0 = "in_0"
weight0_conf.out_0 = "out_0"
weight0_conf_str = text_format.MessageToString(weight0_conf)
weight0_op = oneflow._oneflow_internal.one.FeedVariableOpExpr(
"cc_Variable_0", weight0_conf_str, ["in_0"], ["out_0"]
)
output_conf = oneflow.core.operator.op_conf_pb2.FetchOutputOpConf()
output_conf.in_0 = "in_0"
output_conf.out_0 = "out_0"
output_conf_str = text_format.MessageToString(output_conf)
output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
"cc_Output_0", output_conf_str, ["in_0"], ["out_0"]
)
x0_lazy_tensor = _C.dispatch_feed_input(x0_op, x0)
x1_lazy_tensor = _C.dispatch_feed_input(x1_op, x1)
weight0_lazy_tensor = _C.dispatch_feed_input(weight0_op, weight0)
test_case.assertEqual(x0_lazy_tensor.shape, (20, 30))
test_case.assertTrue(x0_lazy_tensor.is_lazy)
test_case.assertEqual(weight0_lazy_tensor.shape, (30, 50))
test_case.assertTrue(weight0_lazy_tensor.is_lazy)
test_case.assertEqual(x1_lazy_tensor.shape, (50, 70))
test_case.assertTrue(x1_lazy_tensor.is_lazy)
out0 = flow._C.matmul(x0_lazy_tensor, weight0_lazy_tensor)
test_case.assertEqual(out0.shape, (20, 50))
test_case.assertTrue(out0.is_lazy)
y0 = flow._C.relu(out0)
test_case.assertEqual(y0.shape, (20, 50))
test_case.assertTrue(y0.is_lazy)
out1 = flow._C.matmul(y0, x1_lazy_tensor)
test_case.assertEqual(out1.shape, (20, 70))
test_case.assertTrue(out1.is_lazy)
y1 = flow._C.relu(out1)
test_case.assertEqual(y1.shape, (20, 70))
test_case.assertTrue(y1.is_lazy)
eager_output = _C.dispatch_fetch_output(output_op, y1)
test_case.assertEqual(eager_output.shape, (20, 70))
test_case.assertTrue(not eager_output.is_lazy)
if is_cuda:
test_case.assertTrue(x0_lazy_tensor.is_cuda)
test_case.assertTrue(x1_lazy_tensor.is_cuda)
test_case.assertTrue(weight0_lazy_tensor.is_cuda)
test_case.assertTrue(out0.is_cuda)
test_case.assertTrue(y0.is_cuda)
test_case.assertTrue(out1.is_cuda)
test_case.assertTrue(y1.is_cuda)
oneflow._oneflow_internal.JobBuildAndInferCtx_Close()
@flow.unittest.skip_unless_1n1d()
class TestUserOpGraph(unittest.TestCase):
def test_user_op_graph_cpu(test_case):
_test_user_op_graph(test_case, False)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_user_op_graph_gpu(test_case):
_test_user_op_graph(test_case, True)
if __name__ == "__main__":
unittest.main()
| [
"oneflow._oneflow_internal._C.dispatch_feed_input",
"oneflow._oneflow_internal.lazy_mode.guard",
"oneflow._oneflow_internal.one.FeedVariableOpExpr",
"oneflow._C.relu",
"oneflow._oneflow_internal.one.FetchOutputOpExpr",
"oneflow.framework.session_context.GetDefaultSession",
"oneflow.core.job.job_conf_pb2... | [((5462, 5494), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5492, 5494), True, 'import oneflow as flow\n'), ((1790, 1821), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1819, 1821), True, 'import oneflow.framework.session_context as session_ctx\n'), ((5828, 5843), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5841, 5843), False, 'import unittest\n'), ((1201, 1254), 'oneflow.framework.env_util.HasAllMultiClientEnvVars', 'oneflow.framework.env_util.HasAllMultiClientEnvVars', ([], {}), '()\n', (1252, 1254), False, 'import oneflow\n'), ((1278, 1300), 'numpy.random.rand', 'np.random.rand', (['(20)', '(30)'], {}), '(20, 30)\n', (1292, 1300), True, 'import numpy as np\n'), ((1348, 1370), 'numpy.random.rand', 'np.random.rand', (['(30)', '(50)'], {}), '(30, 50)\n', (1362, 1370), True, 'import numpy as np\n'), ((1413, 1435), 'numpy.random.rand', 'np.random.rand', (['(50)', '(70)'], {}), '(50, 70)\n', (1427, 1435), True, 'import numpy as np\n'), ((1920, 1967), 'oneflow._oneflow_internal.lazy_mode.guard', 'oneflow._oneflow_internal.lazy_mode.guard', (['(True)'], {}), '(True)\n', (1961, 1967), False, 'import oneflow\n'), ((2123, 2169), 'oneflow.core.job.job_conf_pb2.JobConfigProto', 'oneflow.core.job.job_conf_pb2.JobConfigProto', ([], {}), '()\n', (2167, 2169), False, 'import oneflow\n'), ((2302, 2356), 'oneflow.framework.c_api_util.CurJobBuildAndInferCtx_SetJobConf', 'c_api_util.CurJobBuildAndInferCtx_SetJobConf', (['job_conf'], {}), '(job_conf)\n', (2346, 2356), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((2376, 2427), 'oneflow.core.operator.op_conf_pb2.FeedInputOpConf', 'oneflow.core.operator.op_conf_pb2.FeedInputOpConf', ([], {}), '()\n', (2425, 2427), False, 'import oneflow\n'), ((2512, 2548), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['x0_conf'], {}), '(x0_conf)\n', 
(2539, 2548), False, 'from google.protobuf import text_format\n'), ((2565, 2663), 'oneflow._oneflow_internal.one.FeedInputOpExpr', 'oneflow._oneflow_internal.one.FeedInputOpExpr', (['"""cc_Input_0"""', 'x0_conf_str', "['in_0']", "['out_0']"], {}), "('cc_Input_0', x0_conf_str, [\n 'in_0'], ['out_0'])\n", (2610, 2663), False, 'import oneflow\n'), ((2700, 2751), 'oneflow.core.operator.op_conf_pb2.FeedInputOpConf', 'oneflow.core.operator.op_conf_pb2.FeedInputOpConf', ([], {}), '()\n', (2749, 2751), False, 'import oneflow\n'), ((2836, 2872), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['x1_conf'], {}), '(x1_conf)\n', (2863, 2872), False, 'from google.protobuf import text_format\n'), ((2889, 2987), 'oneflow._oneflow_internal.one.FeedInputOpExpr', 'oneflow._oneflow_internal.one.FeedInputOpExpr', (['"""cc_Input_1"""', 'x1_conf_str', "['in_0']", "['out_0']"], {}), "('cc_Input_1', x1_conf_str, [\n 'in_0'], ['out_0'])\n", (2934, 2987), False, 'import oneflow\n'), ((3029, 3083), 'oneflow.core.operator.op_conf_pb2.FeedVariableOpConf', 'oneflow.core.operator.op_conf_pb2.FeedVariableOpConf', ([], {}), '()\n', (3081, 3083), False, 'import oneflow\n'), ((3183, 3224), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['weight0_conf'], {}), '(weight0_conf)\n', (3210, 3224), False, 'from google.protobuf import text_format\n'), ((3246, 3354), 'oneflow._oneflow_internal.one.FeedVariableOpExpr', 'oneflow._oneflow_internal.one.FeedVariableOpExpr', (['"""cc_Variable_0"""', 'weight0_conf_str', "['in_0']", "['out_0']"], {}), "('cc_Variable_0',\n weight0_conf_str, ['in_0'], ['out_0'])\n", (3294, 3354), False, 'import oneflow\n'), ((3395, 3448), 'oneflow.core.operator.op_conf_pb2.FetchOutputOpConf', 'oneflow.core.operator.op_conf_pb2.FetchOutputOpConf', ([], {}), '()\n', (3446, 3448), False, 'import oneflow\n'), ((3545, 3585), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['output_conf'], {}), 
'(output_conf)\n', (3572, 3585), False, 'from google.protobuf import text_format\n'), ((3606, 3710), 'oneflow._oneflow_internal.one.FetchOutputOpExpr', 'oneflow._oneflow_internal.one.FetchOutputOpExpr', (['"""cc_Output_0"""', 'output_conf_str', "['in_0']", "['out_0']"], {}), "('cc_Output_0',\n output_conf_str, ['in_0'], ['out_0'])\n", (3653, 3710), False, 'import oneflow\n'), ((3755, 3788), 'oneflow._oneflow_internal._C.dispatch_feed_input', '_C.dispatch_feed_input', (['x0_op', 'x0'], {}), '(x0_op, x0)\n', (3777, 3788), True, 'import oneflow._oneflow_internal._C as _C\n'), ((3814, 3847), 'oneflow._oneflow_internal._C.dispatch_feed_input', '_C.dispatch_feed_input', (['x1_op', 'x1'], {}), '(x1_op, x1)\n', (3836, 3847), True, 'import oneflow._oneflow_internal._C as _C\n'), ((3878, 3921), 'oneflow._oneflow_internal._C.dispatch_feed_input', '_C.dispatch_feed_input', (['weight0_op', 'weight0'], {}), '(weight0_op, weight0)\n', (3900, 3921), True, 'import oneflow._oneflow_internal._C as _C\n'), ((4295, 4346), 'oneflow._C.matmul', 'flow._C.matmul', (['x0_lazy_tensor', 'weight0_lazy_tensor'], {}), '(x0_lazy_tensor, weight0_lazy_tensor)\n', (4309, 4346), True, 'import oneflow as flow\n'), ((4456, 4474), 'oneflow._C.relu', 'flow._C.relu', (['out0'], {}), '(out0)\n', (4468, 4474), True, 'import oneflow as flow\n'), ((4582, 4616), 'oneflow._C.matmul', 'flow._C.matmul', (['y0', 'x1_lazy_tensor'], {}), '(y0, x1_lazy_tensor)\n', (4596, 4616), True, 'import oneflow as flow\n'), ((4726, 4744), 'oneflow._C.relu', 'flow._C.relu', (['out1'], {}), '(out1)\n', (4738, 4744), True, 'import oneflow as flow\n'), ((4860, 4899), 'oneflow._oneflow_internal._C.dispatch_fetch_output', '_C.dispatch_fetch_output', (['output_op', 'y1'], {}), '(output_op, y1)\n', (4884, 4899), True, 'import oneflow._oneflow_internal._C as _C\n'), ((5405, 5458), 'oneflow._oneflow_internal.JobBuildAndInferCtx_Close', 'oneflow._oneflow_internal.JobBuildAndInferCtx_Close', ([], {}), '()\n', (5456, 5458), False, 'import 
oneflow\n'), ((5648, 5682), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5657, 5682), False, 'import os\n'), ((1500, 1519), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1511, 1519), True, 'import oneflow as flow\n'), ((1557, 1576), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1568, 1576), True, 'import oneflow as flow\n'), ((1604, 1623), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1615, 1623), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import imp
import importlib.machinery
import importlib.util
import os
from typing import List

import oneflow
import oneflow._oneflow_internal
def get_include() -> str:
    """Return the path of OneFlow's bundled C++ header directory."""
    package_dir = os.path.dirname(oneflow.__file__)
    return os.path.join(package_dir, "include")
def get_lib() -> str:
    """Return the directory containing the installed oneflow package."""
    package_dir = os.path.dirname(oneflow.__file__)
    return package_dir
def get_compile_flags() -> List[str]:
    """Return C++ compile flags needed to build extensions against OneFlow.

    Includes the OneFlow header path and preprocessor defines that mirror how
    this build of OneFlow was compiled (CUDA availability, C++11 ABI).
    """
    internal_flags = oneflow._oneflow_internal.flags
    compile_flags = [
        "-I{}".format(get_include()),
        "-DHALF_ENABLE_CPP11_USER_LITERALS=0",
    ]
    if internal_flags.with_cuda():
        compile_flags.append("-DWITH_CUDA")
    # The extension must use the same C++11 ABI setting as OneFlow itself.
    if internal_flags.use_cxx11_abi():
        compile_flags.append("-D_GLIBCXX_USE_CXX11_ABI=1")
    else:
        compile_flags.append("-D_GLIBCXX_USE_CXX11_ABI=0")
    return compile_flags
def get_liboneflow_link_flags() -> List[str]:
    """Return linker flags for linking against the OneFlow shared libraries."""
    # Layout of a pip release: the libs live next to the package in a
    # sibling "<package>.libs" directory.
    libs_dir = "{}.libs".format(get_lib())
    if not os.path.exists(libs_dir):
        # Layout of a cmake build directory.
        from oneflow.version import __cmake_project_binary_dir__

        libs_dir = __cmake_project_binary_dir__
    link_flags = ["-L{}".format(libs_dir)]
    link_flags += ["-l:oneflow", "-l:of_pyext_obj", "-l:of_protoobj"]
    return link_flags
def get_link_flags() -> List[str]:
    """Return linker flags for linking against the ``_oneflow_internal``
    extension module.

    Raises:
        ImportError: if the extension cannot be located in :func:`get_lib`.

    Note: the lookup previously used ``imp.find_module``, but the ``imp``
    module is deprecated since Python 3.4 and removed in Python 3.12; the
    equivalent ``importlib.machinery.PathFinder`` API is used instead (it
    also avoids the open file handle ``imp.find_module`` returned).
    """
    flags = []
    flags.append("-L{}".format(get_lib()))
    spec = importlib.machinery.PathFinder.find_spec("_oneflow_internal", [get_lib()])
    if spec is None or spec.origin is None:
        # imp.find_module raised ImportError in this situation as well.
        raise ImportError(
            "cannot locate _oneflow_internal in {}".format(get_lib())
        )
    flags.append("-l:{}".format(os.path.basename(spec.origin)))
    return flags
def with_cuda() -> bool:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.with_cuda()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.with_cuda()
def get_cuda_version() -> int:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.cuda_version()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.cuda_version()
def has_rpc_backend_grpc() -> bool:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.has_rpc_backend_grpc()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.has_rpc_backend_grpc()
def has_rpc_backend_local() -> bool:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.has_rpc_backend_local()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.has_rpc_backend_local()
def cmake_build_type() -> str:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.cmake_build_type()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.cmake_build_type()
def with_rdma() -> bool:
    """Thin wrapper over ``oneflow._oneflow_internal.flags.with_rdma()``."""
    flags = oneflow._oneflow_internal.flags
    return flags.with_rdma()
| [
"oneflow._oneflow_internal.flags.has_rpc_backend_local",
"oneflow._oneflow_internal.flags.cmake_build_type",
"oneflow._oneflow_internal.flags.with_cuda",
"oneflow._oneflow_internal.flags.has_rpc_backend_grpc",
"oneflow._oneflow_internal.flags.with_rdma",
"oneflow._oneflow_internal.flags.cuda_version",
"... | [((839, 872), 'os.path.dirname', 'os.path.dirname', (['oneflow.__file__'], {}), '(oneflow.__file__)\n', (854, 872), False, 'import os\n'), ((1038, 1081), 'oneflow._oneflow_internal.flags.with_cuda', 'oneflow._oneflow_internal.flags.with_cuda', ([], {}), '()\n', (1079, 1081), False, 'import oneflow\n'), ((1126, 1173), 'oneflow._oneflow_internal.flags.use_cxx11_abi', 'oneflow._oneflow_internal.flags.use_cxx11_abi', ([], {}), '()\n', (1171, 1173), False, 'import oneflow\n'), ((2213, 2256), 'oneflow._oneflow_internal.flags.with_cuda', 'oneflow._oneflow_internal.flags.with_cuda', ([], {}), '()\n', (2254, 2256), False, 'import oneflow\n'), ((2301, 2347), 'oneflow._oneflow_internal.flags.cuda_version', 'oneflow._oneflow_internal.flags.cuda_version', ([], {}), '()\n', (2345, 2347), False, 'import oneflow\n'), ((2397, 2451), 'oneflow._oneflow_internal.flags.has_rpc_backend_grpc', 'oneflow._oneflow_internal.flags.has_rpc_backend_grpc', ([], {}), '()\n', (2449, 2451), False, 'import oneflow\n'), ((2502, 2557), 'oneflow._oneflow_internal.flags.has_rpc_backend_local', 'oneflow._oneflow_internal.flags.has_rpc_backend_local', ([], {}), '()\n', (2555, 2557), False, 'import oneflow\n'), ((2602, 2652), 'oneflow._oneflow_internal.flags.cmake_build_type', 'oneflow._oneflow_internal.flags.cmake_build_type', ([], {}), '()\n', (2650, 2652), False, 'import oneflow\n'), ((2691, 2734), 'oneflow._oneflow_internal.flags.with_rdma', 'oneflow._oneflow_internal.flags.with_rdma', ([], {}), '()\n', (2732, 2734), False, 'import oneflow\n'), ((758, 791), 'os.path.dirname', 'os.path.dirname', (['oneflow.__file__'], {}), '(oneflow.__file__)\n', (773, 791), False, 'import os\n'), ((1534, 1574), 'os.path.exists', 'os.path.exists', (['oneflow_python_libs_path'], {}), '(oneflow_python_libs_path)\n', (1548, 1574), False, 'import os\n'), ((2112, 2155), 'os.path.basename', 'os.path.basename', (['oneflow_internal_lib_path'], {}), '(oneflow_internal_lib_path)\n', (2128, 2155), False, 'import os\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow.unittest
import oneflow as flow
class TestTensorIndexError(flow.unittest.TestCase):
    """Check that invalid tensor indexing raises the expected error messages."""

    def test_PrepareSliceIndices_indices_amount_index_error(test_case):
        # More index positions than tensor dimensions must raise IndexError.
        with test_case.assertRaises(IndexError) as ctx:
            t = flow.arange(16).reshape(4, 4)
            t[0, 0, 0] = 0
        msg = str(ctx.exception)
        test_case.assertTrue("Too many indices for tensor of dimension" in msg)

    def test_PrepareSliceIndices_slice_step_runtime_error(test_case):
        # A negative slice step is rejected at runtime.
        with test_case.assertRaises(RuntimeError) as ctx:
            t = flow.tensor([0, 1, 2, 3], dtype=flow.int32)
            _ = t[slice(0, 2, -1)]
        test_case.assertTrue("Step must be greater than zero" in str(ctx.exception))

    def test_ApplySelectIndexing_input_dim_runtime_error(test_case):
        # Integer indexing is undefined on a 0-dim (scalar) tensor.
        with test_case.assertRaises(RuntimeError) as ctx:
            scalar = flow.tensor(5, dtype=flow.int32)
            _ = scalar[0]
        msg = str(ctx.exception)
        test_case.assertTrue("select() cannot be applied to a 0-dim tensor." in msg)

    def test_ApplySelectIndexing_index_error(test_case):
        # An out-of-range integer index raises IndexError.
        with test_case.assertRaises(IndexError) as ctx:
            t = flow.ones(2, 3, dtype=flow.int32)
            _ = t[3]
        msg = str(ctx.exception)
        test_case.assertTrue("Index out of range (expected to be in range of" in msg)

    def test_ApplyAdvancedIndexing_index_error(test_case):
        # Advanced indexing with more index tensors than dimensions fails.
        with test_case.assertRaises(IndexError) as ctx:
            t = flow.ones(2, 2, dtype=flow.int32)
            idx = tuple(flow.tensor(1, dtype=flow.int32) for _ in range(3))
            _ = t[idx]
        test_case.assertTrue(
            "Too many indices for tensor of dimension" in str(ctx.exception)
        )
# Allow running this test module directly via `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.arange",
"oneflow.tensor",
"oneflow.ones"
] | [((2547, 2562), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2560, 2562), False, 'import unittest\n'), ((1182, 1225), 'oneflow.tensor', 'flow.tensor', (['[0, 1, 2, 3]'], {'dtype': 'flow.int32'}), '([0, 1, 2, 3], dtype=flow.int32)\n', (1193, 1225), True, 'import oneflow as flow\n'), ((1516, 1548), 'oneflow.tensor', 'flow.tensor', (['(5)'], {'dtype': 'flow.int32'}), '(5, dtype=flow.int32)\n', (1527, 1548), True, 'import oneflow as flow\n'), ((1830, 1863), 'oneflow.ones', 'flow.ones', (['(2)', '(3)'], {'dtype': 'flow.int32'}), '(2, 3, dtype=flow.int32)\n', (1839, 1863), True, 'import oneflow as flow\n'), ((2148, 2181), 'oneflow.ones', 'flow.ones', (['(2)', '(2)'], {'dtype': 'flow.int32'}), '(2, 2, dtype=flow.int32)\n', (2157, 2181), True, 'import oneflow as flow\n'), ((2220, 2252), 'oneflow.tensor', 'flow.tensor', (['(1)'], {'dtype': 'flow.int32'}), '(1, dtype=flow.int32)\n', (2231, 2252), True, 'import oneflow as flow\n'), ((2270, 2302), 'oneflow.tensor', 'flow.tensor', (['(1)'], {'dtype': 'flow.int32'}), '(1, dtype=flow.int32)\n', (2281, 2302), True, 'import oneflow as flow\n'), ((2320, 2352), 'oneflow.tensor', 'flow.tensor', (['(1)'], {'dtype': 'flow.int32'}), '(1, dtype=flow.int32)\n', (2331, 2352), True, 'import oneflow as flow\n'), ((855, 870), 'oneflow.arange', 'flow.arange', (['(16)'], {}), '(16)\n', (866, 870), True, 'import oneflow as flow\n')] |
import os
import math
import random
import logging
import warnings
import csv
import sys
warnings.filterwarnings('ignore')
sys.path.insert(0, "../../")
import glob
import numpy as np
import oneflow as of
from oneflow.utils.data import Dataset
import soundfile as sf
from scipy import signal
from libs.utils.utils import read_config
def loadWAV(filename, max_frames, evalmode=True, num_eval=10):
    """Load a wav file and cut (or wrap-pad) it into fixed-length segments.

    Args:
        filename: path of the wav file to read.
        max_frames: segment length in frames; the segment holds
            ``max_frames * 160 + 240`` samples.
        evalmode: if True, take ``num_eval`` evenly spaced segments;
            otherwise take a single segment at a random start offset.
        num_eval: number of segments in eval mode.

    Returns:
        np.ndarray of shape (num_segments, max_audio), dtype float64.
    """
    # Maximum audio length in samples.
    max_audio = max_frames * 160 + 240
    audio, _ = sf.read(filename)
    audiosize = audio.shape[0]
    if audiosize <= max_audio:
        # Signal too short: wrap-pad so at least one full segment fits.
        shortage = max_audio - audiosize + 1
        audio = np.pad(audio, (0, shortage), 'wrap')
        audiosize = audio.shape[0]
    if evalmode:
        startframe = np.linspace(0, audiosize - max_audio, num=num_eval)
    else:
        startframe = np.array([np.int64(random.random() * (audiosize - max_audio))])
    feats = []
    if evalmode and max_frames == 0:
        # max_frames == 0 in eval mode keeps the whole utterance.
        feats.append(audio)
    else:
        for asf in startframe:
            feats.append(audio[int(asf):int(asf) + max_audio])
    # FIX: the old code cast with np.float, an alias removed in NumPy 1.24.
    # np.float meant the builtin float (i.e. float64), so cast explicitly.
    feat = np.stack(feats, axis=0).astype(np.float64)
    return feat
class AugmentWAV(object):
    """Waveform augmentation: MUSAN additive noise and RIR reverberation.

    Args:
        musan_path: root of the MUSAN corpus (noise/speech/music subsets).
        rir_path: root of the room-impulse-response corpus.
        max_frames: segment length (in frames) used when loading noise clips.
    """

    def __init__(self, musan_path, rir_path, max_frames):
        self.max_frames = max_frames
        self.max_audio = max_frames * 160 + 240
        self.noisetypes = ['noise', 'speech', 'music']
        # SNR ranges (dB) and the number of simultaneous clips per category.
        self.noisesnr = {'noise': [0, 15], 'speech': [13, 20], 'music': [5, 15]}
        self.numnoise = {'noise': [1, 1], 'speech': [3, 7], 'music': [1, 1]}
        self.noiselist = {}
        augment_files = glob.glob(os.path.join(musan_path, '*/*/*.wav'))
        for file in augment_files:
            # Category name is the 3rd path component from the end:
            # <musan>/<category>/<subset>/<file>.wav
            if not file.split('/')[-3] in self.noiselist:
                self.noiselist[file.split('/')[-3]] = []
            self.noiselist[file.split('/')[-3]].append(file)
        self.rir_files = glob.glob(os.path.join(rir_path, '*/*/*.wav'))

    def additive_noise(self, noisecat, audio):
        """Mix random noise clips of category ``noisecat`` into ``audio`` at a random SNR."""
        clean_db = 10 * np.log10(np.mean(audio ** 2) + 1e-4)
        numnoise = self.numnoise[noisecat]
        noiselist = random.sample(self.noiselist[noisecat], random.randint(numnoise[0], numnoise[1]))
        noises = []
        for noise in noiselist:
            noiseaudio = loadWAV(noise, self.max_frames, evalmode=False)
            noise_snr = random.uniform(self.noisesnr[noisecat][0], self.noisesnr[noisecat][1])
            noise_db = 10 * np.log10(np.mean(noiseaudio[0] ** 2) + 1e-4)
            # Scale the noise so the resulting SNR equals noise_snr.
            noises.append(np.sqrt(10 ** ((clean_db - noise_db - noise_snr) / 10)) * noiseaudio)
        return np.sum(np.concatenate(noises, axis=0), axis=0, keepdims=True) + audio

    def reverberate(self, audio):
        """Convolve ``audio`` with a randomly chosen room impulse response."""
        rir_file = random.choice(self.rir_files)
        rir, _ = sf.read(rir_file)
        # FIX: np.float was removed in NumPy 1.24; np.float64 is the same dtype.
        rir = np.expand_dims(rir.astype(np.float64), 0)
        rir = rir / np.sqrt(np.sum(rir ** 2))  # energy-normalize the RIR
        return signal.convolve(audio, rir, mode='full')[:, :self.max_audio]
class SpeechTrainDataset(Dataset):
    """Training dataset for speaker models, driven by a CSV manifest.

    The manifest rows are ``(speaker_id, _, filename, duration, samplerate)``;
    consecutive rows with the same speaker id are grouped per speaker.
    A small dev split is carved out at construction time.
    """

    def __init__(self, opts):
        # read config from opts
        frame_range = opts['frames']  # frame number range in training
        self.lower_frame_num = frame_range[0]
        self.higher_frame_num = frame_range[1]
        TRAIN_MANIFEST = opts['train_manifest']
        self.rate = opts['rate']
        self.win_len = opts['win_len']
        self.win_shift = opts['win_shift']
        feat_type = opts['feat_type']
        # musan_path = opts.get('musan', None)
        # rirs_path = opts.get('rirs', None)
        # self.augment_wav = AugmentWAV(musan_path, rirs_path, frame_range[-1])
        # self.augment = False
        # if (not musan_path is None) and (not rirs_path is None):
        #     self.augment = True
        if 'repeat' in opts:
            repeat = opts['repeat']
        else:
            repeat = True
        self.labels = []
        # read audio file path from manifest
        self.dataset = []
        current_sid = -1
        total_duration = 0
        count = 0
        with open(TRAIN_MANIFEST, 'r') as f:
            reader = csv.reader(f)
            for sid, _, filename, duration, samplerate in reader:
                if sid != current_sid:
                    self.dataset.append([])
                    current_sid = sid
                self.dataset[-1].append((filename, float(duration), int(samplerate)))
                # FIX: was eval(duration) -- the manifest is untrusted text and
                # the same field is parsed with float() on the line above.
                total_duration += float(duration)
                count += 1
        self.n_spk = len(self.dataset)
        # split dev dataset
        self.split_train_dev(opts['dev_number'])
        # compute the length of dataset according to mean duration and total duration
        total_duration -= self.dev_total_duration
        mean_duration_per_utt = (np.mean(frame_range) - 1) * self.win_shift + self.win_len
        if repeat:
            # make sure each sampling point in data will be used
            self.count = math.floor(total_duration / mean_duration_per_utt)
        else:
            self.count = count - opts['dev_number']
        from libs.dataio.feature import FeatureExtractor
        try:
            feature_opts = read_config("conf/data/{}.yaml".format(feat_type))
        except Exception:  # narrowed from a bare except; fall back for tests
            feature_opts = read_config("../../conf/data/{}.yaml".format(feat_type))
        self.feature_extractor = FeatureExtractor(self.rate, feat_type.split("_")[-1], feature_opts)

    def split_train_dev(self, dev_number=1000):
        """Move ``dev_number`` random utterances out of training into a dev list."""
        self.dev = []
        self.dev_total_duration = 0
        self.dev_number = dev_number
        i = 0
        while i < dev_number:
            spk = random.randint(0, self.n_spk - 1)
            # Never empty a speaker completely.
            if len(self.dataset[spk]) <= 1:
                continue
            utt_idx = random.randint(0, len(self.dataset[spk]) - 1)
            utt = self.dataset[spk][utt_idx]
            self.dev.append((utt, spk))
            self.dev_total_duration += utt[1]
            del self.dataset[spk][utt_idx]
            i += 1

    def __len__(self):
        return self.count

    def __getitem__(self, idx):
        # Items are speaker ids; audio is assembled later in collate_fn.
        idx = idx % self.n_spk
        return idx

    def collate_fn(self, batch):
        """Assemble one batch: a random-length segment per speaker id in ``batch``."""
        # random select a frame number in uniform distribution
        frame = random.randint(self.lower_frame_num, self.higher_frame_num)
        # duration in time of one training speech segment
        duration = (frame - 1) * self.win_shift + self.win_len
        # duration in sample points of one training speech segment
        samples_num = int(duration * self.rate)
        wave = []
        for sid in batch:
            speaker = self.dataset[sid]
            y = []
            n_samples = 0
            # Concatenate random utterances of this speaker until long enough.
            while n_samples < samples_num:
                aid = random.randrange(0, len(speaker))
                audio = speaker[aid]
                t, sr = audio[1], audio[2]
                samples_len = int(t * sr)
                if n_samples == 0:
                    # random select start point of speech
                    start = int(random.uniform(0, t - 1.0) * sr)
                else:
                    start = 0
                # read speech data from start point to the end
                _y, _ = self._load_audio(audio[0], start=start, stop=samples_len)
                if _y is not None:
                    y.append(_y)
                    n_samples += len(_y)
            y = np.hstack(y)[:samples_num]
            # if self.augment:
            #     augtype = random.randint(0,4)
            #     if augtype == 1:
            #         y = self.augment_wav.reverberate(y)
            #     elif augtype == 2:
            #         y = self.augment_wav.additive_noise('music',y)
            #     elif augtype == 3:
            #         y = self.augment_wav.additive_noise('speech',y)
            #     elif augtype == 4:
            #         y = self.augment_wav.additive_noise('noise',y)
            wave.append(y)
        feature = self.feature_extractor(wave)
        labels = of.tensor(batch)
        return feature, labels

    def _load_audio(self, path, start=0, stop=None, resample=True):
        """Read one channel of a wav file as float32 samples."""
        y, sr = sf.read(path, start=start, stop=stop, dtype='float32', always_2d=True)
        y = y[:, 0]
        return y, sr

    def get_dev_data(self):
        """Yield (feature, label) pairs for the held-out dev utterances."""
        idx = 0
        while idx < self.dev_number:
            (wav_path, _, __), spk = self.dev[idx]
            data, _ = self._load_audio(wav_path)
            feat = self.feature_extractor([data])
            yield feat, of.tensor([spk], dtype=of.int64)
            idx += 1

    def __call__(self):
        """Yield (feature, speaker_id, npy_name) for every training utterance."""
        idx = 0
        wavlist = []
        spk = []
        for ind, i in enumerate(self.dataset):
            wavlist.extend(i)
            spk.extend([ind] * len(i))
        while idx < len(wavlist):
            wav_path, _, __ = wavlist[idx]
            data, _ = self._load_audio(wav_path)
            feat = self.feature_extractor(data)
            yield feat, spk[idx], os.path.basename(wav_path).replace('.wav', '.npy')
            idx += 1
if __name__ == "__main__":
    # Smoke test: build the dataset from the default config and pull one batch.
    from oneflow.utils.data import DataLoader
    # from libs.utils.utils import BalancedBatchSamplerV2
    config = read_config("../../conf/data.yaml")
    dataset = SpeechTrainDataset(config)
    # batch_sampler = BalancedBatchSamplerV2(dataset.n_spk, len(dataset), 500, 5)
    loader = DataLoader(dataset, shuffle=True, collate_fn=dataset.collate_fn, num_workers=0)
    batch_feature, batch_label = next(iter(loader))
    print(batch_feature.shape)
    print(batch_label)
    # output = of.unique(batch_label)
    # print(len(output))
| [
"oneflow.tensor",
"oneflow.utils.data.DataLoader"
] | [((89, 122), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (112, 122), False, 'import warnings\n'), ((123, 151), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (138, 151), False, 'import sys\n'), ((531, 548), 'soundfile.read', 'sf.read', (['filename'], {}), '(filename)\n', (538, 548), True, 'import soundfile as sf\n'), ((9481, 9516), 'libs.utils.utils.read_config', 'read_config', (['"""../../conf/data.yaml"""'], {}), "('../../conf/data.yaml')\n", (9492, 9516), False, 'from libs.utils.utils import read_config\n'), ((9676, 9771), 'oneflow.utils.data.DataLoader', 'DataLoader', (['train_dataset'], {'shuffle': '(True)', 'collate_fn': 'train_dataset.collate_fn', 'num_workers': '(0)'}), '(train_dataset, shuffle=True, collate_fn=train_dataset.collate_fn,\n num_workers=0)\n', (9686, 9771), False, 'from oneflow.utils.data import DataLoader\n'), ((684, 720), 'numpy.pad', 'np.pad', (['audio', '(0, shortage)', '"""wrap"""'], {}), "(audio, (0, shortage), 'wrap')\n", (690, 720), True, 'import numpy as np\n'), ((797, 848), 'numpy.linspace', 'np.linspace', (['(0)', '(audiosize - max_audio)'], {'num': 'num_eval'}), '(0, audiosize - max_audio, num=num_eval)\n', (808, 848), True, 'import numpy as np\n'), ((2740, 2769), 'random.choice', 'random.choice', (['self.rir_files'], {}), '(self.rir_files)\n', (2753, 2769), False, 'import random\n'), ((2792, 2809), 'soundfile.read', 'sf.read', (['rir_file'], {}), '(rir_file)\n', (2799, 2809), True, 'import soundfile as sf\n'), ((6481, 6540), 'random.randint', 'random.randint', (['self.lower_frame_num', 'self.higher_frame_num'], {}), '(self.lower_frame_num, self.higher_frame_num)\n', (6495, 6540), False, 'import random\n'), ((8290, 8306), 'oneflow.tensor', 'of.tensor', (['batch'], {}), '(batch)\n', (8299, 8306), True, 'import oneflow as of\n'), ((8429, 8499), 'soundfile.read', 'sf.read', (['path'], {'start': 'start', 'stop': 'stop', 'dtype': 
'"""float32"""', 'always_2d': '(True)'}), "(path, start=start, stop=stop, dtype='float32', always_2d=True)\n", (8436, 8499), True, 'import soundfile as sf\n'), ((1135, 1158), 'numpy.stack', 'np.stack', (['feats'], {'axis': '(0)'}), '(feats, axis=0)\n', (1143, 1158), True, 'import numpy as np\n'), ((1632, 1669), 'os.path.join', 'os.path.join', (['musan_path', '"""*/*/*.wav"""'], {}), "(musan_path, '*/*/*.wav')\n", (1644, 1669), False, 'import os\n'), ((1918, 1953), 'os.path.join', 'os.path.join', (['rir_path', '"""*/*/*.wav"""'], {}), "(rir_path, '*/*/*.wav')\n", (1930, 1953), False, 'import os\n'), ((2170, 2210), 'random.randint', 'random.randint', (['numnoise[0]', 'numnoise[1]'], {}), '(numnoise[0], numnoise[1])\n', (2184, 2210), False, 'import random\n'), ((2363, 2433), 'random.uniform', 'random.uniform', (['self.noisesnr[noisecat][0]', 'self.noisesnr[noisecat][1]'], {}), '(self.noisesnr[noisecat][0], self.noisesnr[noisecat][1])\n', (2377, 2433), False, 'import random\n'), ((2938, 2978), 'scipy.signal.convolve', 'signal.convolve', (['audio', 'rir'], {'mode': '"""full"""'}), "(audio, rir, mode='full')\n", (2953, 2978), False, 'from scipy import signal\n'), ((4119, 4132), 'csv.reader', 'csv.reader', (['f'], {}), '(f)\n', (4129, 4132), False, 'import csv\n'), ((4871, 4921), 'math.floor', 'math.floor', (['(total_duration / mean_duration_per_utt)'], {}), '(total_duration / mean_duration_per_utt)\n', (4881, 4921), False, 'import math\n'), ((5933, 5966), 'random.randint', 'random.randint', (['(0)', '(self.n_spk - 1)'], {}), '(0, self.n_spk - 1)\n', (5947, 5966), False, 'import random\n'), ((2623, 2653), 'numpy.concatenate', 'np.concatenate', (['noises'], {'axis': '(0)'}), '(noises, axis=0)\n', (2637, 2653), True, 'import numpy as np\n'), ((2907, 2923), 'numpy.sum', 'np.sum', (['(rir ** 2)'], {}), '(rir ** 2)\n', (2913, 2923), True, 'import numpy as np\n'), ((7671, 7683), 'numpy.hstack', 'np.hstack', (['y'], {}), '(y)\n', (7680, 7683), True, 'import numpy as np\n'), 
((2035, 2054), 'numpy.mean', 'np.mean', (['(audio ** 2)'], {}), '(audio ** 2)\n', (2042, 2054), True, 'import numpy as np\n'), ((2531, 2586), 'numpy.sqrt', 'np.sqrt', (['(10 ** ((clean_db - noise_db - noise_snr) / 10))'], {}), '(10 ** ((clean_db - noise_db - noise_snr) / 10))\n', (2538, 2586), True, 'import numpy as np\n'), ((4769, 4789), 'numpy.mean', 'np.mean', (['frame_range'], {}), '(frame_range)\n', (4776, 4789), True, 'import numpy as np\n'), ((8805, 8837), 'oneflow.tensor', 'of.tensor', (['[spk]'], {'dtype': 'of.int64'}), '([spk], dtype=of.int64)\n', (8814, 8837), True, 'import oneflow as of\n'), ((895, 910), 'random.random', 'random.random', ([], {}), '()\n', (908, 910), False, 'import random\n'), ((2470, 2497), 'numpy.mean', 'np.mean', (['(noiseaudio[0] ** 2)'], {}), '(noiseaudio[0] ** 2)\n', (2477, 2497), True, 'import numpy as np\n'), ((7232, 7258), 'random.uniform', 'random.uniform', (['(0)', '(t - 1.0)'], {}), '(0, t - 1.0)\n', (7246, 7258), False, 'import random\n'), ((9264, 9290), 'os.path.basename', 'os.path.basename', (['wav_path'], {}), '(wav_path)\n', (9280, 9290), False, 'import os\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _np_mseloss(np_input, np_target):
np_mse = np.square(np_target - np_input)
np_mse_mean = np.mean(np_mse)
np_mse_sum = np.sum(np_mse)
return {
"none": np_mse,
"mean": np_mse_mean,
"sum": np_mse_sum,
}
def _np_mseloss_grad(np_input, np_target):
elem_cnt = np_input.size
np_mse_grad_sum = -2 * (np_target - np_input)
np_mse_grad_mean = np_mse_grad_sum / elem_cnt
return {
"none": np_mse_grad_sum,
"mean": np_mse_grad_mean,
"sum": np_mse_grad_sum,
}
def _test_mseloss_impl(test_case, device, shape, reduction):
    """Compare flow.nn.MSELoss forward and backward against the NumPy reference."""
    np_x = np.random.randn(*shape)
    np_y = np.random.randn(*shape)
    dev = flow.device(device)
    of_input = flow.Tensor(np_x, dtype=flow.float32, requires_grad=True, device=dev)
    of_target = flow.Tensor(np_y, dtype=flow.float32, device=dev)
    loss_fn = flow.nn.MSELoss(reduction=reduction).to(device)
    of_out = loss_fn(of_input, of_target)
    np_out = _np_mseloss(np_x, np_y)[reduction]
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
    # Reduce to a scalar so backward() is defined for every reduction mode.
    of_out.sum().backward()
    np_grad = _np_mseloss_grad(np_x, np_y)[reduction]
    test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_grad, 1e-5, 1e-5))
@flow.unittest.skip_unless_1n1d()
class TestMSELossModule(flow.unittest.TestCase):
    """Run the MSELoss check over every device/shape/reduction combination."""

    def test_mseloss(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_mseloss_impl]
        arg_dict["device"] = ["cpu", "cuda"]
        arg_dict["shape"] = [
            (3, 5),
            (10, 9, 21),
            (14, 22, 9, 21),
            (3, 2, 4, 16, 5),
            (1,),
        ]
        arg_dict["reduction"] = ["none", "mean", "sum"]
        for arg in GenArgList(arg_dict):
            test_fun, rest = arg[0], arg[1:]
            test_fun(test_case, *rest)
# Allow running this test module directly via `python <this file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.MSELoss",
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow.experimental.device"
] | [((1992, 2024), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2022, 2024), True, 'import oneflow.experimental as flow\n'), ((785, 816), 'numpy.square', 'np.square', (['(np_target - np_input)'], {}), '(np_target - np_input)\n', (794, 816), True, 'import numpy as np\n'), ((835, 850), 'numpy.mean', 'np.mean', (['np_mse'], {}), '(np_mse)\n', (842, 850), True, 'import numpy as np\n'), ((868, 882), 'numpy.sum', 'np.sum', (['np_mse'], {}), '(np_mse)\n', (874, 882), True, 'import numpy as np\n'), ((1347, 1370), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1362, 1370), True, 'import numpy as np\n'), ((1379, 1402), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1394, 1402), True, 'import numpy as np\n'), ((1600, 1636), 'oneflow.experimental.nn.MSELoss', 'flow.nn.MSELoss', ([], {'reduction': 'reduction'}), '(reduction=reduction)\n', (1615, 1636), True, 'import oneflow.experimental as flow\n'), ((2592, 2607), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2605, 2607), False, 'import unittest\n'), ((2126, 2139), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2137, 2139), False, 'from collections import OrderedDict\n'), ((2497, 2517), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2507, 2517), False, 'from test_util import GenArgList\n'), ((1486, 1505), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1497, 1505), True, 'import oneflow.experimental as flow\n'), ((1567, 1586), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1578, 1586), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
from typing import Union, Optional, Sequence
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.module as module_util
import oneflow.python.ops.math_unary_elementwise_ops as math_unary_elementwise_ops
from oneflow.python.oneflow_export import oneflow_export
import oneflow._oneflow_internal
@oneflow_export("combined_margin_loss")
def combined_margin_loss(
    x: oneflow._oneflow_internal.BlobDesc,
    label: oneflow._oneflow_internal.BlobDesc,
    m1: float = 1,
    m2: float = 0,
    m3: float = 0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Build and run the ``combined_margin_loss`` user op.

    Applies the margins m1/m2/m3 to the logits ``x`` at the positions given
    by ``label``. Returns the adjusted logits ``y``; the op's second output
    (``theta``) is produced but not returned.
    """
    if name is None:
        name = id_util.UniqueStr("CombinedMarginLoss_")
    depth = x.shape[1]
    op = (
        flow.user_op_builder(name)
        .Op("combined_margin_loss")
        .Input("x", [x])
        .Input("label", [label])
        .Output("y")
        .Output("theta")
        .Attr("m1", float(m1))
        .Attr("m2", float(m2))
        .Attr("m3", float(m3))
        .Attr("depth", int(depth))
        .Build()
    )
    y, theta = op.InferAndTryRun().RemoteBlobList()
    return y
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((1250, 1288), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""combined_margin_loss"""'], {}), "('combined_margin_loss')\n", (1264, 1288), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1647, 1687), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""CombinedMarginLoss_"""'], {}), "('CombinedMarginLoss_')\n", (1664, 1687), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def ccrelu(x, name):
    """Run the custom ``ccrelu`` user op on blob ``x`` and return its output."""
    op = (
        flow.user_op_builder(name)
        .Op("ccrelu")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def fixed_tensor_def_test(test_case, func_config):
    """Check ccrelu against np.maximum for a fixed-shape placeholder."""
    func_config.default_data_type(flow.float)

    @flow.global_function(func_config)
    def ReluJob(a: oft.Numpy.Placeholder((5, 2))):
        return ccrelu(a, "my_cc_relu_op")

    x = np.random.rand(5, 2).astype(np.float32)
    result = ReluJob(x).get().numpy()
    test_case.assertTrue(np.array_equal(result, np.maximum(x, 0)))
def mirrored_tensor_def_test(test_case, func_config):
    """Check ccrelu against np.maximum for a mirrored (list) placeholder."""
    func_config.default_data_type(flow.float)

    @flow.global_function(func_config)
    def ReluJob(a: oft.ListNumpy.Placeholder((5, 2))):
        return ccrelu(a, "my_cc_relu_op")

    x = np.random.rand(3, 1).astype(np.float32)
    result = ReluJob([x]).get().numpy_list()[0]
    test_case.assertTrue(np.array_equal(result, np.maximum(x, 0)))
def test_ccrelu(test_case):
    """Run the fixed-shape ccrelu check under a consistent view."""
    config = flow.FunctionConfig()
    config.default_logical_view(flow.scope.consistent_view())
    fixed_tensor_def_test(test_case, config)
def test_mirror_ccrelu(test_case):
    """Run the mirrored ccrelu check under a mirrored view."""
    config = flow.FunctionConfig()
    config.default_logical_view(flow.scope.mirrored_view())
    mirrored_tensor_def_test(test_case, config)
def test_1n2c_mirror_dynamic_ccrelu(test_case):
    """ccrelu with 2 GPU devices and dynamic (per-device) input shapes."""
    flow.config.gpu_device_num(2)
    config = flow.FunctionConfig()
    config.default_logical_view(flow.scope.mirrored_view())
    config.default_data_type(flow.float)

    @flow.global_function(config)
    def ReluJob(a: oft.ListNumpy.Placeholder((5, 2))):
        return ccrelu(a, "my_cc_relu_op")

    x1 = np.random.rand(3, 1).astype(np.float32)
    x2 = np.random.rand(4, 2).astype(np.float32)
    out1, out2 = ReluJob([x1, x2]).get().numpy_list()
    test_case.assertTrue(np.array_equal(out1, np.maximum(x1, 0)))
    test_case.assertTrue(np.array_equal(out2, np.maximum(x2, 0)))
@flow.unittest.num_nodes_required(2)
def test_ccrelu_2n1c(test_case):
    """Same consistent-view ccrelu check, run on a two-node cluster."""
    config = flow.FunctionConfig()
    config.default_logical_view(flow.scope.consistent_view())
    fixed_tensor_def_test(test_case, config)
| [
"oneflow.global_function",
"oneflow.scope.mirrored_view",
"oneflow.typing.Numpy.Placeholder",
"oneflow.unittest.num_nodes_required",
"oneflow.user_op_builder",
"oneflow.config.gpu_device_num",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((2684, 2719), 'oneflow.unittest.num_nodes_required', 'flow.unittest.num_nodes_required', (['(2)'], {}), '(2)\n', (2716, 2719), True, 'import oneflow as flow\n'), ((986, 1019), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1006, 1019), True, 'import oneflow as flow\n'), ((1365, 1398), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1385, 1398), True, 'import oneflow as flow\n'), ((1698, 1719), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1717, 1719), True, 'import oneflow as flow\n'), ((1892, 1913), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1911, 1913), True, 'import oneflow as flow\n'), ((2086, 2115), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (2112, 2115), True, 'import oneflow as flow\n'), ((2134, 2155), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2153, 2155), True, 'import oneflow as flow\n'), ((2273, 2306), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (2293, 2306), True, 'import oneflow as flow\n'), ((2771, 2792), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2790, 2792), True, 'import oneflow as flow\n'), ((1757, 1785), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1783, 1785), True, 'import oneflow as flow\n'), ((1951, 1977), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1975, 1977), True, 'import oneflow as flow\n'), ((2193, 2219), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2217, 2219), True, 'import oneflow as flow\n'), ((2830, 2858), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2856, 2858), True, 'import oneflow as flow\n'), ((1039, 1068), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1060, 
1068), True, 'import oneflow.typing as oft\n'), ((1122, 1142), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (1136, 1142), True, 'import numpy as np\n'), ((1238, 1254), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (1248, 1254), True, 'import numpy as np\n'), ((1418, 1451), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1443, 1451), True, 'import oneflow.typing as oft\n'), ((1505, 1525), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (1519, 1525), True, 'import numpy as np\n'), ((1631, 1647), 'numpy.maximum', 'np.maximum', (['x', '(0)'], {}), '(x, 0)\n', (1641, 1647), True, 'import numpy as np\n'), ((2326, 2359), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (2351, 2359), True, 'import oneflow.typing as oft\n'), ((2414, 2434), 'numpy.random.rand', 'np.random.rand', (['(3)', '(1)'], {}), '(3, 1)\n', (2428, 2434), True, 'import numpy as np\n'), ((2463, 2483), 'numpy.random.rand', 'np.random.rand', (['(4)', '(2)'], {}), '(4, 2)\n', (2477, 2483), True, 'import numpy as np\n'), ((2597, 2614), 'numpy.maximum', 'np.maximum', (['x1', '(0)'], {}), '(x1, 0)\n', (2607, 2614), True, 'import numpy as np\n'), ((2661, 2678), 'numpy.maximum', 'np.maximum', (['x2', '(0)'], {}), '(x2, 0)\n', (2671, 2678), True, 'import numpy as np\n'), ((705, 731), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (725, 731), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow
import oneflow.python.framework.input_blob_def as input_blob_def
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.python_callback as python_callback
import oneflow.python.framework.balanced_splitter as balanced_splitter
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.blob_register as blob_register_util
import oneflow.python.eager.object as object_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import numpy
from functools import reduce
def AsyncPush(session, job_func, *arg):
    """Asynchronously push each positional argument to the matching input
    blob definition declared on *job_func*."""
    blob_defs = job_func.__oneflow_input_blob_defs__
    assert len(arg) == len(blob_defs)
    # Arguments are matched to blob definitions positionally.
    for blob_def, arg_value in zip(blob_defs, arg):
        _AsyncPushArg(session, blob_def, arg_value)
def _AsyncPushArg(session, arg_blob_def, arg_ndarray):
    """Recursively walk matching blob-def / value structures (lists, tuples,
    dicts) and asynchronously push every leaf value."""
    if isinstance(arg_blob_def, (list, tuple)):
        assert isinstance(arg_ndarray, (list, tuple)), "type(arg_ndarray): %s" % (
            type(arg_ndarray)
        )
        assert len(arg_blob_def) == len(arg_ndarray), "%s v.s. %s" % (
            len(arg_blob_def),
            len(arg_ndarray),
        )
        for child_def, child_value in zip(arg_blob_def, arg_ndarray):
            _AsyncPushArg(session, child_def, child_value)
        return
    if isinstance(arg_blob_def, dict):
        assert type(arg_blob_def) is type(arg_ndarray)
        assert set(arg_blob_def.keys()) == set(arg_ndarray.keys())
        for key, child_def in arg_blob_def.items():
            _AsyncPushArg(session, child_def, arg_ndarray[key])
        return
    # Leaf case: a single blob definition receives a single value.
    assert isinstance(arg_blob_def, input_blob_def.ArgBlobDef)
    arg_blob_def.CheckAndAsyncPush(session, arg_ndarray)
def MakeEagerInputBlobs(arg_blob_def, arg_ndarray):
    """Build eager input blobs mirroring the nested structure (list/tuple/dict)
    of *arg_blob_def*, feeding each leaf with the corresponding value."""
    if isinstance(arg_blob_def, (list, tuple)):
        assert isinstance(arg_ndarray, (list, tuple)), "type(arg_ndarray): %s" % (
            type(arg_ndarray)
        )
        assert len(arg_blob_def) == len(arg_ndarray)
        # Preserve the container type (list vs tuple) of the blob-def side.
        children = (
            MakeEagerInputBlobs(child_def, child_value)
            for child_def, child_value in zip(arg_blob_def, arg_ndarray)
        )
        return type(arg_blob_def)(children)
    if isinstance(arg_blob_def, dict):
        assert type(arg_blob_def) is type(arg_ndarray)
        assert set(arg_blob_def.keys()) == set(arg_ndarray.keys())
        return {
            key: MakeEagerInputBlobs(child_def, arg_ndarray[key])
            for key, child_def in arg_blob_def.items()
        }
    # Leaf: create one input blob and feed it the given value.
    return _CreateEagerInputBlobAndFeedValue(arg_blob_def, arg_ndarray)
def _CheckInputArgBlobDefValueMatch(arg_blob_def, arg_value):
    """Validate that *arg_value* is structurally compatible with its blob
    definition; raises AssertionError on mismatch and NotImplementedError
    for unsupported blob-def kinds."""
    if isinstance(arg_blob_def, input_blob_def.FixedTensorDef):
        # Fixed tensors require an exact shape match.
        assert isinstance(arg_value, numpy.ndarray)
        assert arg_blob_def.shape == arg_value.shape
    elif isinstance(arg_blob_def, input_blob_def.MirroredTensorDef):
        # One ndarray per device; each must have the same rank and fit
        # within the declared static shape.
        assert isinstance(arg_value, (list, tuple))
        for per_device in arg_value:
            assert isinstance(per_device, numpy.ndarray)
            assert len(per_device.shape) == len(arg_blob_def.shape)
            assert numpy.prod(per_device.shape) <= numpy.prod(arg_blob_def.shape)
    elif isinstance(arg_blob_def, input_blob_def.MirroredTensorListDef):
        # A list of tensor lists, one inner list per device.
        assert isinstance(arg_value, (list, tuple))
        for tensor_list in arg_value:
            for tensor in tensor_list:
                assert isinstance(tensor, numpy.ndarray)
                assert len(tensor.shape) == len(arg_blob_def.shape)
                assert numpy.prod(tensor.shape) <= numpy.prod(
                    arg_blob_def.shape
                ), "%s v.s. %s" % (tensor.shape, arg_blob_def.shape)
    else:
        raise NotImplementedError
def FeedValueToEagerBlob(blob_object, blob_def, ndarray):
    """Copy `ndarray` into every physical (per-device) blob backing the
    logical `blob_object`, one rank at a time."""
    physical_blob_objects = _GetPhysicalBlobObjects(blob_object, None)
    # One shared FeedContext; the rank selects which slice/mirror of
    # `ndarray` each physical blob receives.
    feed_ctx = FeedContext(blob_object.op_arg_parallel_attr, ndarray)
    for i, physical_blob_object in enumerate(physical_blob_objects):
        feed_ctx.set_rank(i)
        _FeedValueToInputPhysicalBlob(feed_ctx, blob_def, physical_blob_object)
def _CreateEagerInputBlobAndFeedValue(arg_blob_def, arg_ndarray):
    """Create an eager input blob for `arg_blob_def`, feed it `arg_ndarray`,
    and wrap it in the remote-blob type matching the blob-def kind."""
    _CheckInputArgBlobDefValueMatch(arg_blob_def, arg_ndarray)
    arg_blob_object, lbi = _MakeInputBlobObject(arg_blob_def)
    FeedValueToEagerBlob(arg_blob_object, arg_blob_def, arg_ndarray)
    get_blob = None
    if isinstance(arg_blob_def, input_blob_def.FixedTensorDef):
        # Consistent blobs are passed through `identity` under a consistent
        # view scope before being returned.
        def get_blob(lbi, blob_object):
            blob = remote_blob_util.EagerConsistentBlob(lbi, blob_object)
            with oneflow.scope.consistent_view():
                return oneflow.identity(blob)
    elif isinstance(arg_blob_def, input_blob_def.MirroredTensorDef):
        get_blob = remote_blob_util.EagerMirroredBlob
    elif isinstance(arg_blob_def, input_blob_def.MirroredTensorListDef):
        get_blob = remote_blob_util.EagerMirroredBlob
    else:
        raise NotImplementedError
    return get_blob(lbi, blob_object=arg_blob_object)
def _MakeInputBlobObject(arg_blob_def):
    """Build and run an input op for `arg_blob_def`; return its output blob
    object and the logical blob id of the op's "out" blob."""
    input_op_conf, lbi = _MakeInputOpConfAndRetLbi(arg_blob_def)
    # Populated by the StatelessCall below; "out" holds the op's output blob.
    bn_in_op2blob_object = {}
    def BuildInputInstruction(builder):
        op_attribute = arg_blob_def.EagerAddAndInferOp(input_op_conf)
        scope = oneflow.current_scope()
        parallel_conf = scope.device_parallel_desc_symbol.parallel_conf
        builder.StatelessCall(
            op_attribute, parallel_conf, bn_in_op2blob_object=bn_in_op2blob_object
        )
    vm_util.LogicalRun(BuildInputInstruction)
    return bn_in_op2blob_object["out"], lbi
def _GetPhysicalBlobObjects(logical_blob_object, lbi):
    """Unpack a logical blob object into its per-device physical blob objects.

    NOTE(review): neither `lbi` nor `blob_register` is used in this body;
    `lbi` is kept for interface compatibility (callers pass None) and the
    GetDefaultBlobRegister() call looks side-effect free — confirm before
    removing either.
    """
    blob_register = blob_register_util.GetDefaultBlobRegister()
    physical_blob_objects = None
    def BuildLogical2PhysicalInstruction(builder):
        # Captured result; assigned when the instruction actually runs.
        nonlocal physical_blob_objects
        physical_blob_objects = builder.UnpackLogicalBlobToPhysicalBlobs(
            logical_blob_object
        )
    vm_util.LogicalRun(BuildLogical2PhysicalInstruction)
    return physical_blob_objects
def _MakeInputOpConfAndRetLbi(arg_blob_def):
    """Build an input OperatorConf for `arg_blob_def` and the LogicalBlobId
    naming its single output blob ("out")."""
    assert isinstance(arg_blob_def, input_blob_def.ArgBlobDef)
    op_conf = op_conf_util.OperatorConf()
    # UniqueStr appends a fresh suffix so multiple inputs do not collide.
    op_conf.name = id_util.UniqueStr("Input_")
    op_conf.input_conf.out = "out"
    op_conf.input_conf.blob_conf.CopyFrom(arg_blob_def.ToInterfaceBlobConf())
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = op_conf.input_conf.out
    return op_conf, lbi
class FeedContext(object):
    """Holds the value being fed plus the parallel attributes of the target
    op argument, and yields the per-rank share of that value.

    One instance is shared across all physical blobs of a logical blob; the
    caller advances `rank` via set_rank() before each physical feed.
    """
    def __init__(self, op_arg_parallel_attr, arg_ndarray, rank=0):
        self.op_arg_parallel_attr_ = op_arg_parallel_attr
        self.arg_ndarray_ = arg_ndarray
        self.rank_ = rank
        # balanced_range is used in split_parallel; computed lazily.
        self.balanced_range_ = None
    def set_rank(self, rank):
        # Select which physical device's share is returned next.
        self.rank_ = rank
    def GetFixedTensor(self, logical_shape):
        """Return this rank's share of a fixed (consistent) tensor.

        broadcast (or single device): the whole array; split: a balanced
        slice along the split axis.
        """
        assert isinstance(self.arg_ndarray_, numpy.ndarray)
        assert self.arg_ndarray_.shape == logical_shape, "%s v.s. %s" % (
            self.arg_ndarray_.shape,
            logical_shape,
        )
        sbp_parallel = self.op_arg_parallel_attr_.sbp_parallel
        parallel_num = self.op_arg_parallel_attr_.parallel_desc_symbol.parallel_num
        if sbp_parallel.HasField("broadcast_parallel") or parallel_num == 1:
            return self._AsContiguousNdArray(self.arg_ndarray_)
        elif sbp_parallel.HasField("split_parallel"):
            axis = sbp_parallel.split_parallel.axis
            start, end = self._GetBalancedRanges(logical_shape[axis])[self.rank_]
            # Slice only along `axis`; every other dimension stays whole.
            slc = [slice(None)] * len(logical_shape)
            slc[axis] = slice(start, end)
            ndarray = self.arg_ndarray_[tuple(slc)]
            return self._AsContiguousNdArray(ndarray)
        else:
            # Any other sbp kind is unsupported for input feeding.
            raise NotImplementedError
    def _GetBalancedRanges(self, dim):
        # Lazily compute and cache per-rank (start, end) ranges for `dim`.
        parallel_num = self.op_arg_parallel_attr_.parallel_desc_symbol.parallel_num
        if self.balanced_range_ is None:
            self.balanced_range_ = balanced_splitter.BalancedRanges(dim, parallel_num)
        return self.balanced_range_
    def GetMirroredTensor(self, static_shape):
        """Return the ndarray destined for this rank's device; it must fit
        within `static_shape` (capacity check, not an exact-shape check)."""
        capacity = reduce(lambda x, y: x * y, static_shape, 1)
        assert isinstance(self.arg_ndarray_, (list, tuple))
        parallel_num = self.op_arg_parallel_attr_.parallel_desc_symbol.parallel_num
        assert len(self.arg_ndarray_) == parallel_num
        assert all(isinstance(a, numpy.ndarray) for a in self.arg_ndarray_)
        assert self.rank_ >= 0
        assert self.rank_ < parallel_num
        ndarray = self.arg_ndarray_[self.rank_]
        elem_cnt = reduce(lambda x, y: x * y, ndarray.shape, 1)
        assert elem_cnt <= capacity, "%s v.s. %s" % (ndarray.shape, static_shape)
        return self._AsContiguousNdArray(ndarray)
    def GetMirroredTensorList(self, static_shape):
        """Return this rank's list of ndarrays; each must fit within
        `static_shape`."""
        assert isinstance(self.arg_ndarray_, (list, tuple))
        parallel_num = self.op_arg_parallel_attr_.parallel_desc_symbol.parallel_num
        assert self.rank_ >= 0
        assert self.rank_ < parallel_num
        assert len(self.arg_ndarray_) == parallel_num
        assert all(isinstance(a, (list, tuple)) for a in self.arg_ndarray_)
        ndarray_list = self.arg_ndarray_[self.rank_]
        assert all(isinstance(arr, numpy.ndarray) for arr in ndarray_list)
        capacity = numpy.prod(static_shape)
        assert all(numpy.prod(arr.shape) <= capacity for arr in ndarray_list)
        return self._AsContiguousNdArray(ndarray_list)
    def _AsContiguousNdArray(self, ndarray):
        # Downstream copy presumably requires C-contiguous memory — the
        # conversion only happens when the array is not already contiguous.
        if isinstance(ndarray, numpy.ndarray):
            return (
                ndarray if ndarray.data.contiguous else numpy.ascontiguousarray(ndarray)
            )
        elif isinstance(ndarray, (tuple, list)):
            return type(ndarray)(self._AsContiguousNdArray(a) for a in ndarray)
        else:
            raise NotImplementedError
def _FeedValueToInputPhysicalBlob(feed_ctx, blob_def, blob_object):
    """Register a feed callback for one physical blob and run it via the
    physical VM; the callback is also scheduled for later removal."""
    assert isinstance(blob_def, input_blob_def.ArgBlobDef)
    assert isinstance(blob_object, object_util.BlobObject)
    FeedBlob = _MakeFeedBlobCallback(feed_ctx, blob_def, blob_object)
    assert callable(FeedBlob)
    def BuildFeedInstruction(builder):
        builder.FeedBlob(blob_object, FeedBlob)
        # Clean up the foreign callback once it has been consumed.
        builder.InsertRemoveForeignCallbackInstruction(blob_object.object_id, FeedBlob)
    vm_util.PhysicalRun(BuildFeedInstruction)
def _MakeFeedBlobCallback(feed_ctx, blob_def, blob_object):
    """Build a FeedBlob(ofblob) callback that copies this rank's value into
    the physical blob, choosing the copy strategy by blob-def kind.

    Each callback validates dtype (and, for fixed tensors, shape) before
    copying, and raises ValueError if the underlying copy reports failure.
    """
    if isinstance(blob_def, input_blob_def.FixedTensorDef):
        def FeedBlob(ofblob):
            ndarray = feed_ctx.GetFixedTensor(blob_def.shape)
            dtype = dtype_util.convert_oneflow_dtype_to_numpy_dtype(ofblob.dtype)
            assert ndarray.dtype == dtype, "%s v.s. %s" % (ndarray.dtype, dtype)
            # Fixed tensors must match the blob's static shape exactly.
            assert ndarray.shape == ofblob.static_shape, "%s v.s. %s" % (
                ndarray.shape,
                ofblob.static_shape,
            )
            if ofblob.CopyFromNdarray(ndarray) is False:
                raise ValueError
    elif isinstance(blob_def, input_blob_def.MirroredTensorDef):
        def FeedBlob(ofblob):
            # Mirrored tensors only need dtype agreement; the value may be
            # smaller than the static shape (capacity-checked upstream).
            ndarray = feed_ctx.GetMirroredTensor(ofblob.static_shape)
            assert isinstance(ndarray, numpy.ndarray)
            dtype = dtype_util.convert_oneflow_dtype_to_numpy_dtype(ofblob.dtype)
            assert ndarray.dtype == dtype, "%s v.s. %s" % (ndarray.dtype, dtype)
            if ofblob.CopyFromNdarray(ndarray) is False:
                raise ValueError
    elif isinstance(blob_def, input_blob_def.MirroredTensorListDef):
        def FeedBlob(ofblob):
            assert ofblob.is_tensor_list
            ndarray_list = feed_ctx.GetMirroredTensorList(ofblob.static_shape)
            assert isinstance(ndarray_list, (list, tuple))
            assert all(isinstance(ndarray, numpy.ndarray) for ndarray in ndarray_list)
            dtype = dtype_util.convert_oneflow_dtype_to_numpy_dtype(ofblob.dtype)
            assert all(ndarray.dtype == dtype for ndarray in ndarray_list)
            if ofblob.CopyFromNdarrayList(ndarray_list) is False:
                raise ValueError
    else:
        raise NotImplementedError
    return FeedBlob
| [
"oneflow.python.eager.vm_util.LogicalRun",
"oneflow.python.eager.vm_util.PhysicalRun",
"oneflow.identity",
"oneflow.python.eager.blob_register.GetDefaultBlobRegister",
"oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype",
"oneflow.current_scope",
"oneflow.core.operator.op_conf_pb2.Opera... | [((6181, 6222), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BuildInputInstruction'], {}), '(BuildInputInstruction)\n', (6199, 6222), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((6344, 6387), 'oneflow.python.eager.blob_register.GetDefaultBlobRegister', 'blob_register_util.GetDefaultBlobRegister', ([], {}), '()\n', (6385, 6387), True, 'import oneflow.python.eager.blob_register as blob_register_util\n'), ((6633, 6685), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BuildLogical2PhysicalInstruction'], {}), '(BuildLogical2PhysicalInstruction)\n', (6651, 6685), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((6843, 6870), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (6868, 6870), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((6890, 6917), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Input_"""'], {}), "('Input_')\n", (6907, 6917), True, 'import oneflow.python.framework.id_util as id_util\n'), ((7041, 7077), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (7075, 7077), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((11083, 11124), 'oneflow.python.eager.vm_util.PhysicalRun', 'vm_util.PhysicalRun', (['BuildFeedInstruction'], {}), '(BuildFeedInstruction)\n', (11102, 11124), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((5956, 5979), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (5977, 5979), False, 'import oneflow\n'), ((8878, 8921), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'static_shape', '(1)'], {}), '(lambda x, y: x * y, static_shape, 1)\n', (8884, 8921), False, 'from functools import reduce\n'), ((9335, 9379), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'ndarray.shape', '(1)'], {}), 
'(lambda x, y: x * y, ndarray.shape, 1)\n', (9341, 9379), False, 'from functools import reduce\n'), ((10057, 10081), 'numpy.prod', 'numpy.prod', (['static_shape'], {}), '(static_shape)\n', (10067, 10081), False, 'import numpy\n'), ((5192, 5246), 'oneflow.python.framework.remote_blob.EagerConsistentBlob', 'remote_blob_util.EagerConsistentBlob', (['lbi', 'blob_object'], {}), '(lbi, blob_object)\n', (5228, 5246), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((8723, 8774), 'oneflow.python.framework.balanced_splitter.BalancedRanges', 'balanced_splitter.BalancedRanges', (['dim', 'parallel_num'], {}), '(dim, parallel_num)\n', (8755, 8774), True, 'import oneflow.python.framework.balanced_splitter as balanced_splitter\n'), ((11360, 11421), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['ofblob.dtype'], {}), '(ofblob.dtype)\n', (11407, 11421), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((5264, 5295), 'oneflow.scope.consistent_view', 'oneflow.scope.consistent_view', ([], {}), '()\n', (5293, 5295), False, 'import oneflow\n'), ((5320, 5342), 'oneflow.identity', 'oneflow.identity', (['blob'], {}), '(blob)\n', (5336, 5342), False, 'import oneflow\n'), ((10385, 10417), 'numpy.ascontiguousarray', 'numpy.ascontiguousarray', (['ndarray'], {}), '(ndarray)\n', (10408, 10417), False, 'import numpy\n'), ((11990, 12051), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['ofblob.dtype'], {}), '(ofblob.dtype)\n', (12037, 12051), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((3804, 3823), 'numpy.prod', 'numpy.prod', (['v.shape'], {}), '(v.shape)\n', (3814, 3823), False, 'import numpy\n'), ((3827, 3857), 'numpy.prod', 'numpy.prod', (['arg_blob_def.shape'], {}), '(arg_blob_def.shape)\n', (3837, 3857), False, 'import numpy\n'), ((10101, 10122), 'numpy.prod', 'numpy.prod', 
(['arr.shape'], {}), '(arr.shape)\n', (10111, 10122), False, 'import numpy\n'), ((12610, 12671), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['ofblob.dtype'], {}), '(ofblob.dtype)\n', (12657, 12671), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((4213, 4238), 'numpy.prod', 'numpy.prod', (['ndarray.shape'], {}), '(ndarray.shape)\n', (4223, 4238), False, 'import numpy\n'), ((4242, 4272), 'numpy.prod', 'numpy.prod', (['arg_blob_def.shape'], {}), '(arg_blob_def.shape)\n', (4252, 4272), False, 'import numpy\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import os
import oneflow
import oneflow.experimental as flow
import oneflow.python.framework.session_context as session_ctx
import oneflow._oneflow_internal
from oneflow.python.framework.multi_client_session import MultiClientSession
import oneflow.python.framework.c_api_util as c_api_util
@flow.unittest.skip_unless_1n1d()
class TestUserOpGraph(unittest.TestCase):
    """Builds a tiny two-layer matmul+relu graph directly through the
    internal lazy-mode op-expression API and checks shapes and laziness."""
    def test_user_op_graph(test_case):
        test_case.assertTrue(oneflow.distributed.is_multi_client())
        test_case.assertTrue(
            oneflow.python.framework.env_util.HasAllMultiClientEnvVars()
        )
        x0 = flow.Tensor(20, 30)
        weight0 = flow.Tensor(30, 50)
        x1 = flow.Tensor(50, 70)
        # NOTE(chengcheng): this tiny net is:
        #    x0 * weight0 -> out0
        #    relu(out0) -> y0
        #    y0 * x1 -> out1
        #    relu(out1) -> y1
        flow.nn.init.uniform_(x0, a=-1.0, b=1.0)
        flow.nn.init.uniform_(x1, a=-1.0, b=1.0)
        flow.nn.init.uniform_(weight0, a=-1.0, b=1.0)
        session = session_ctx.GetDefaultSession()
        test_case.assertTrue(isinstance(session, MultiClientSession))
        session.TryInit()
        # NOTE(review): `gard` looks like a typo for `guard`, but it matches
        # the internal attribute name used here — confirm against the binding.
        with oneflow._oneflow_internal.lazy_mode.gard(True):
            # Open a job build-and-infer context so ops can be added lazily.
            oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
                "cc_test_user_op_expr_job"
            )
            job_conf = (
                oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto()
            )
            job_conf.set_job_name("cc_test_user_op_expr_job")
            job_conf.mutable_predict_conf()
            c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
        # input_conf.set_in_0("EagerTensorInput")
        # input_conf.set_out_0("out_0")
        # Feed ops bridge eager tensors into the lazy graph.
        x0_conf = (
            oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf()
        )
        x0_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_0", x0_conf, ["in_0"], ["out_0"]
        )
        x1_conf = (
            oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf()
        )
        x1_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
            "cc_Input_1", x1_conf, ["in_0"], ["out_0"]
        )
        weight0_conf = (
            oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedVariableOpConf()
        )
        weight0_op = oneflow._oneflow_internal.one.FeedVariableOpExpr(
            "cc_Variable_0", weight0_conf, ["in_0"], ["out_0"]
        )
        output_conf = (
            oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf()
        )
        output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
            "cc_Output_0", output_conf, ["in_0"], ["out_0"]
        )
        attrs = oneflow._oneflow_internal.MutableCfgAttrMap()
        # Materialize the underlying C tensors before feeding them.
        if not x0.is_determined:
            x0.determine()
        x0_tensor_in_c = x0._local_or_consistent_tensor
        if not x1.is_determined:
            x1.determine()
        x1_tensor_in_c = x1._local_or_consistent_tensor
        if not weight0.is_determined:
            weight0.determine()
        weight0_tensor_in_c = weight0._local_or_consistent_tensor
        x0_lazy_tensor = x0_op.apply([x0_tensor_in_c], attrs)[0]
        x1_lazy_tensor = x1_op.apply([x1_tensor_in_c], attrs)[0]
        weight0_lazy_tensor = weight0_op.apply([weight0_tensor_in_c], attrs)[0]
        test_case.assertEqual(x0_lazy_tensor.shape, (20, 30))
        test_case.assertTrue(x0_lazy_tensor.is_lazy)
        test_case.assertEqual(weight0_lazy_tensor.shape, (30, 50))
        test_case.assertTrue(weight0_lazy_tensor.is_lazy)
        test_case.assertEqual(x1_lazy_tensor.shape, (50, 70))
        test_case.assertTrue(x1_lazy_tensor.is_lazy)
        # First layer: (20, 30) x (30, 50) -> (20, 50), then relu.
        out0 = flow.F.matmul(x0_lazy_tensor, weight0_lazy_tensor)
        test_case.assertEqual(out0.shape, (20, 50))
        test_case.assertTrue(out0.is_lazy)
        y0 = flow.F.relu(out0)
        test_case.assertEqual(y0.shape, (20, 50))
        test_case.assertTrue(y0.is_lazy)
        # Second layer: (20, 50) x (50, 70) -> (20, 70), then relu.
        out1 = flow.F.matmul(y0, x1_lazy_tensor)
        test_case.assertEqual(out1.shape, (20, 70))
        test_case.assertTrue(out1.is_lazy)
        y1 = flow.F.relu(out1)
        test_case.assertEqual(y1.shape, (20, 70))
        test_case.assertTrue(y1.is_lazy)
        # Fetch converts the lazy result back to an eager tensor.
        eager_output = output_op.apply([y1], attrs)[0]
        test_case.assertEqual(eager_output.shape, (20, 70))
        test_case.assertTrue(not eager_output.is_lazy)
# Run the tests when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow._oneflow_internal.lazy_mode.gard",
"oneflow.experimental.nn.init.uniform_",
"oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetJobConf",
"oneflow._oneflow_internal.one.FeedVariableOpExpr",
"oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedVariableOpConf",
"oneflow._oneflow_... | [((921, 953), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (951, 953), True, 'import oneflow.experimental as flow\n'), ((5377, 5392), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5390, 5392), False, 'import unittest\n'), ((1230, 1249), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(20)', '(30)'], {}), '(20, 30)\n', (1241, 1249), True, 'import oneflow.experimental as flow\n'), ((1268, 1287), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(30)', '(50)'], {}), '(30, 50)\n', (1279, 1287), True, 'import oneflow.experimental as flow\n'), ((1301, 1320), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(50)', '(70)'], {}), '(50, 70)\n', (1312, 1320), True, 'import oneflow.experimental as flow\n'), ((1500, 1540), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x0'], {'a': '(-1.0)', 'b': '(1.0)'}), '(x0, a=-1.0, b=1.0)\n', (1521, 1540), True, 'import oneflow.experimental as flow\n'), ((1549, 1589), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x1'], {'a': '(-1.0)', 'b': '(1.0)'}), '(x1, a=-1.0, b=1.0)\n', (1570, 1589), True, 'import oneflow.experimental as flow\n'), ((1598, 1643), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['weight0'], {'a': '(-1.0)', 'b': '(1.0)'}), '(weight0, a=-1.0, b=1.0)\n', (1619, 1643), True, 'import oneflow.experimental as flow\n'), ((1663, 1694), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1692, 1694), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((1064, 1101), 'oneflow.distributed.is_multi_client', 'oneflow.distributed.is_multi_client', ([], {}), '()\n', (1099, 1101), False, 'import oneflow\n'), ((1145, 1205), 'oneflow.python.framework.env_util.HasAllMultiClientEnvVars', 'oneflow.python.framework.env_util.HasAllMultiClientEnvVars', ([], {}), '()\n', (1203, 1205), False, 'import oneflow\n'), 
((1805, 1851), 'oneflow._oneflow_internal.lazy_mode.gard', 'oneflow._oneflow_internal.lazy_mode.gard', (['(True)'], {}), '(True)\n', (1845, 1851), False, 'import oneflow\n'), ((1866, 1944), 'oneflow._oneflow_internal.JobBuildAndInferCtx_Open', 'oneflow._oneflow_internal.JobBuildAndInferCtx_Open', (['"""cc_test_user_op_expr_job"""'], {}), "('cc_test_user_op_expr_job')\n", (1916, 1944), False, 'import oneflow\n'), ((2016, 2084), 'oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto', 'oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto', ([], {}), '()\n', (2082, 2084), False, 'import oneflow\n'), ((2217, 2271), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetJobConf', 'c_api_util.CurJobBuildAndInferCtx_SetJobConf', (['job_conf'], {}), '(job_conf)\n', (2261, 2271), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((2412, 2485), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', ([], {}), '()\n', (2483, 2485), False, 'import oneflow\n'), ((2520, 2614), 'oneflow._oneflow_internal.one.FeedInputOpExpr', 'oneflow._oneflow_internal.one.FeedInputOpExpr', (['"""cc_Input_0"""', 'x0_conf', "['in_0']", "['out_0']"], {}), "('cc_Input_0', x0_conf, [\n 'in_0'], ['out_0'])\n", (2565, 2614), False, 'import oneflow\n'), ((2680, 2753), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', ([], {}), '()\n', (2751, 2753), False, 'import oneflow\n'), ((2788, 2882), 'oneflow._oneflow_internal.one.FeedInputOpExpr', 'oneflow._oneflow_internal.one.FeedInputOpExpr', (['"""cc_Input_1"""', 'x1_conf', "['in_0']", "['out_0']"], {}), "('cc_Input_1', x1_conf, [\n 'in_0'], ['out_0'])\n", (2833, 2882), False, 'import oneflow\n'), ((2953, 3029), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedVariableOpConf', 
'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedVariableOpConf', ([], {}), '()\n', (3027, 3029), False, 'import oneflow\n'), ((3069, 3173), 'oneflow._oneflow_internal.one.FeedVariableOpExpr', 'oneflow._oneflow_internal.one.FeedVariableOpExpr', (['"""cc_Variable_0"""', 'weight0_conf', "['in_0']", "['out_0']"], {}), "('cc_Variable_0',\n weight0_conf, ['in_0'], ['out_0'])\n", (3117, 3173), False, 'import oneflow\n'), ((3244, 3319), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf', 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf', ([], {}), '()\n', (3317, 3319), False, 'import oneflow\n'), ((3358, 3458), 'oneflow._oneflow_internal.one.FetchOutputOpExpr', 'oneflow._oneflow_internal.one.FetchOutputOpExpr', (['"""cc_Output_0"""', 'output_conf', "['in_0']", "['out_0']"], {}), "('cc_Output_0', output_conf,\n ['in_0'], ['out_0'])\n", (3405, 3458), False, 'import oneflow\n'), ((3506, 3551), 'oneflow._oneflow_internal.MutableCfgAttrMap', 'oneflow._oneflow_internal.MutableCfgAttrMap', ([], {}), '()\n', (3549, 3551), False, 'import oneflow\n'), ((4580, 4630), 'oneflow.experimental.F.matmul', 'flow.F.matmul', (['x0_lazy_tensor', 'weight0_lazy_tensor'], {}), '(x0_lazy_tensor, weight0_lazy_tensor)\n', (4593, 4630), True, 'import oneflow.experimental as flow\n'), ((4752, 4769), 'oneflow.experimental.F.relu', 'flow.F.relu', (['out0'], {}), '(out0)\n', (4763, 4769), True, 'import oneflow.experimental as flow\n'), ((4889, 4922), 'oneflow.experimental.F.matmul', 'flow.F.matmul', (['y0', 'x1_lazy_tensor'], {}), '(y0, x1_lazy_tensor)\n', (4902, 4922), True, 'import oneflow.experimental as flow\n'), ((5044, 5061), 'oneflow.experimental.F.relu', 'flow.F.relu', (['out1'], {}), '(out1)\n', (5055, 5061), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def test_2d_gpu_variable(test_case):
    """Smoke test: create and train a variable placed across two GPUs in
    eager mode, running the global function twice."""
    flow.enable_eager_execution()
    flow.config.gpu_device_num(2)
    function_config = flow.FunctionConfig()
    # Naive SGD update with a fixed primary learning rate.
    function_config.train.model_update_conf(dict(naive_conf={}))
    function_config.train.primary_lr(0.1)
    # Machine 0, devices 0-1.
    device_name = "0:0-1"
    @flow.global_function(function_config)
    def Foo():
        with flow.scope.placement("gpu", device_name):
            w = flow.get_variable(
                "w",
                shape=(10,),
                dtype=flow.float,
                initializer=flow.constant_initializer(0),
            )
            # Print device 0's shard of the variable.
            print(w.numpy(0))
        # Registering a loss makes the update actually run.
        flow.losses.add_loss(w)
    Foo()
    Foo()
| [
"oneflow.global_function",
"oneflow.constant_initializer",
"oneflow.scope.placement",
"oneflow.losses.add_loss",
"oneflow.enable_eager_execution",
"oneflow.config.gpu_device_num",
"oneflow.FunctionConfig"
] | [((656, 685), 'oneflow.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (683, 685), True, 'import oneflow as flow\n'), ((690, 719), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (716, 719), True, 'import oneflow as flow\n'), ((742, 763), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (761, 763), True, 'import oneflow as flow\n'), ((903, 940), 'oneflow.global_function', 'flow.global_function', (['function_config'], {}), '(function_config)\n', (923, 940), True, 'import oneflow as flow\n'), ((1240, 1263), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['w'], {}), '(w)\n', (1260, 1263), True, 'import oneflow as flow\n'), ((969, 1009), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', 'device_name'], {}), "('gpu', device_name)\n", (989, 1009), True, 'import oneflow as flow\n'), ((1158, 1186), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (1183, 1186), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
import numpy as np
from google.protobuf import text_format
import oneflow
import oneflow as flow
import oneflow._oneflow_internal
import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb
import oneflow.framework.dtype as dtype_util
import pickle
SNAPSHOT_DONE_FILENAME = "snapshot_done"
META_INFO_FILENAME = "meta"
DATA_FILENAME = "out"
class FileBackendVariableBlob:
    """A variable stored on disk as a raw data file (`out`) plus an optional
    text-protobuf meta file (`meta`) carrying shape and dtype.

    Shape/dtype may instead be supplied explicitly when the meta file is
    absent; without either, the blob cannot be read back as an ndarray.
    """
    def __init__(
        self,
        var_dir: str,
        dtype: Optional[oneflow.dtype] = None,
        shape: Optional[Sequence[int]] = None,
    ):
        data_path = os.path.join(var_dir, DATA_FILENAME)
        assert os.path.isfile(data_path)
        self.var_dir_ = var_dir
        meta_info_path = os.path.join(self.var_dir_, META_INFO_FILENAME)
        if os.path.exists(meta_info_path):
            # Meta file is a text-format VariableMetaInfo protobuf.
            meta_info = variable_meta_info_pb.VariableMetaInfo()
            with open(meta_info_path) as f:
                text_format.Parse(f.read(), meta_info)
            self.has_meta_info_ = True
        else:
            self.has_meta_info_ = False
        if self.has_meta_info_:
            # Meta info and explicit shape/dtype are mutually exclusive.
            assert dtype is None and shape is None
            self.shape_ = tuple(meta_info.shape.dim)
            self.dtype_ = dtype_util.convert_proto_dtype_to_oneflow_dtype(
                meta_info.data_type
            )
        elif shape is not None and dtype is not None:
            self.shape_ = shape
            self.dtype_ = dtype
            self.has_meta_info_ = True
        elif shape is not None or dtype is not None:
            raise RuntimeError("both or neither of shape and dtype should be None")
        else:
            pass
        if self.has_meta_info_:
            # Sanity check: data-file size must match shape * itemsize.
            itemsize = np.dtype(
                dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype_)
            ).itemsize
            assert os.path.getsize(data_path) == np.prod(self.shape).item() * itemsize
    @property
    def file_path(self) -> str:
        # Path of the raw data file backing this variable.
        return os.path.join(self.var_dir_, DATA_FILENAME)
    @property
    def shape(self) -> Tuple[int]:
        return self.shape_
    @property
    def quant_info(self):
        raise NotImplementedError()
    @property
    def dtype(self) -> oneflow.dtype:
        return self.dtype_
    def numpy(self) -> np.ndarray:
        """Read the on-disk data back as an ndarray of this blob's shape/dtype."""
        if not self.has_meta_info_:
            raise RuntimeError("This variable does not have meta info")
        return np.fromfile(
            self.file_path,
            dtype=dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype),
        ).reshape(self.shape)
# Anything that can carry a variable's value: an on-disk blob, a numpy
# array, or an oneflow Tensor.
ValueContainer = Union[FileBackendVariableBlob, np.ndarray, "oneflow.Tensor"]
def _ElemCnt(shape):
return np.prod(shape).astype(int).item()
def _LoadSingleVariable(
    path: Optional[str], consistent_src_rank: Optional[int] = None
) -> "flow.Tensor":
    """Load one variable from *path* as a tensor.

    With `consistent_src_rank` set, only that rank reads the file; the data
    is then made consistent (broadcast placement on cuda device 0). Other
    ranks pass an empty tensor and `path` may be None there. Without it,
    the file is loaded locally into a plain eager tensor.
    """
    if consistent_src_rank is not None:
        rank = flow.framework.distribute.get_rank()
        if rank == consistent_src_rank:
            assert isinstance(path, str)
            file_backed_blob = FileBackendVariableBlob(path)
            loaded = flow.tensor(
                file_backed_blob.numpy(), dtype=file_backed_blob.dtype
            ).to("cuda")
        else:
            # Non-source ranks contribute no data.
            loaded = flow.tensor([]).to("cuda")
        loaded = loaded.to_consistent(
            flow.placement("cuda", {0: [0]}), flow.sbp.broadcast
        )
        return loaded
    assert isinstance(path, str)
    return flow.tensor(FileBackendVariableBlob(path).numpy())
def _broadcast_py_object(obj, src: int = 0):
    """Broadcast a picklable Python object from rank *src* to all ranks and
    return the received object on every rank."""
    # Only the source rank serializes a payload; the others pass None and
    # receive the broadcast bytes back from cpu_broadcast.
    if flow.framework.distribute.get_rank() == src:
        payload = flow._oneflow_internal.cpu_broadcast(pickle.dumps(obj), src)
    else:
        payload = flow._oneflow_internal.cpu_broadcast(None, src)
    return pickle.loads(payload)
def Load(
    path: str, consistent_src_rank: Optional[int] = None,
) -> Dict[str, "flow.Tensor"]:
    """Load every variable saved under `path` into a name -> tensor dict.

    If `consistent_src_rank` is given, only that rank lists the snapshot
    directory; the file list is broadcast to the other ranks so all ranks
    call `_LoadSingleVariable` for the same set of variables.
    """
    assert os.path.isdir(path), "Directory {} doesn't exist!".format(path)
    rank = flow.framework.distribute.get_rank()
    var_dict = {}
    if consistent_src_rank is None or rank == consistent_src_rank:
        all_files = os.listdir(path)
        # A snapshot is only complete once the "done" marker was written.
        assert SNAPSHOT_DONE_FILENAME in all_files
        all_files.remove(SNAPSHOT_DONE_FILENAME)
        if consistent_src_rank is not None:
            # Share the file list with ranks that did not read the dir.
            _broadcast_py_object(all_files, consistent_src_rank)
    else:
        all_files = _broadcast_py_object(None, consistent_src_rank)
    for f in all_files:
        var_dir = os.path.join(path, f)
        var_dict[f] = _LoadSingleVariable(var_dir, consistent_src_rank)
    return var_dict
def save(
    var_dict: Dict[str, "flow.Tensor"],
    path: str,
    consistent_dst_rank: Optional[int] = None,
) -> None:
    """Save `var_dict` under `path`, one sub-directory per variable.

    Each variable gets a raw data file plus a text-format meta-info proto;
    a "done" marker file is written last so readers can detect a complete
    snapshot. In consistent mode, every rank must pass consistent tensors
    and only `consistent_dst_rank` writes to disk.
    """
    consistent_mode = consistent_dst_rank is not None
    for (name, var) in var_dict.items():
        if consistent_mode:
            assert (
                var.is_consistent
            ), f"consistent tensor is needed, but {name} is a local tensor"
            # Replacing an existing key while iterating a dict is safe
            # (the dict's size does not change).
            var_dict[name] = var.to_consistent(sbp=flow.sbp.broadcast).to_local()
        else:
            assert (
                not var.is_consistent
            ), f"local tensor is needed, but {name} is a consistent tensor"
    rank = flow.framework.distribute.get_rank()
    if consistent_mode and rank != consistent_dst_rank:
        # Only the destination rank performs the actual file I/O.
        return
    def IsFileOrNonEmptyDir(path):
        # Refuse to save into an existing file or a non-empty directory.
        if os.path.isfile(path):
            return True
        if os.path.isdir(path) and len(os.listdir(path)) != 0:
            return True
        return False
    assert not IsFileOrNonEmptyDir(
        path
    ), "{} is a file or non-empty directory! Note that flow.save is different from torch.save. It saves each weight as a separated file so that a directory instead of a file should be given.".format(
        path
    )
    os.makedirs(path, exist_ok=True)
    for (name, var) in var_dict.items():
        meta_info = variable_meta_info_pb.VariableMetaInfo()
        meta_info.shape.dim[:] = var.shape
        meta_info.data_type = oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(
            var.dtype
        )
        var_dir = os.path.join(path, name)
        param_path = os.path.join(var_dir, DATA_FILENAME)
        os.makedirs(os.path.dirname(param_path))
        with open(param_path, "wb") as f:
            f.write(var.numpy().tobytes())
        with open(os.path.join(var_dir, META_INFO_FILENAME), "w") as f:
            f.write(text_format.MessageToString(meta_info))
    # Write the snapshot-done marker last (see Load's completeness check).
    with open(os.path.join(path, SNAPSHOT_DONE_FILENAME), "w"):
        pass
def generate_values_by_initializer(initializer, shape, dtype):
    """Produce a numpy array of the given shape by running `initializer`
    for the required element count and casting to `dtype`'s numpy type."""
    numpy_dtype = np.dtype(dtype_util.convert_oneflow_dtype_to_numpy_dtype(dtype))
    num_elems = _ElemCnt(shape)
    values = np.array(initializer(num_elems))
    return values.astype(numpy_dtype).reshape(shape)
| [
"oneflow.tensor",
"oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype",
"oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype",
"oneflow.framework.dtype.convert_proto_dtype_to_oneflow_dtype",
"oneflow._oneflow_internal.cpu_broadcast",
"oneflow.placement",
"oneflow.core.framework.variable... | [((4205, 4241), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (4239, 4241), True, 'import oneflow as flow\n'), ((4581, 4600), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (4594, 4600), False, 'import os\n'), ((4656, 4692), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (4690, 4692), True, 'import oneflow as flow\n'), ((5880, 5916), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (5914, 5916), True, 'import oneflow as flow\n'), ((6462, 6494), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (6473, 6494), False, 'import os\n'), ((1258, 1294), 'os.path.join', 'os.path.join', (['var_dir', 'DATA_FILENAME'], {}), '(var_dir, DATA_FILENAME)\n', (1270, 1294), False, 'import os\n'), ((1310, 1335), 'os.path.isfile', 'os.path.isfile', (['data_path'], {}), '(data_path)\n', (1324, 1335), False, 'import os\n'), ((1393, 1440), 'os.path.join', 'os.path.join', (['self.var_dir_', 'META_INFO_FILENAME'], {}), '(self.var_dir_, META_INFO_FILENAME)\n', (1405, 1440), False, 'import os\n'), ((1452, 1482), 'os.path.exists', 'os.path.exists', (['meta_info_path'], {}), '(meta_info_path)\n', (1466, 1482), False, 'import os\n'), ((2641, 2683), 'os.path.join', 'os.path.join', (['self.var_dir_', 'DATA_FILENAME'], {}), '(self.var_dir_, DATA_FILENAME)\n', (2653, 2683), False, 'import os\n'), ((3544, 3580), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (3578, 3580), True, 'import oneflow as flow\n'), ((4282, 4299), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (4294, 4299), False, 'import pickle\n'), ((4798, 4814), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4808, 4814), False, 'import os\n'), ((5144, 5165), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (5156, 
5165), False, 'import os\n'), ((6035, 6055), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (6049, 6055), False, 'import os\n'), ((6557, 6597), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (6595, 6597), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((6671, 6740), 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', (['var.dtype'], {}), '(var.dtype)\n', (6729, 6740), False, 'import oneflow\n'), ((6781, 6805), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (6793, 6805), False, 'import os\n'), ((6827, 6863), 'os.path.join', 'os.path.join', (['var_dir', 'DATA_FILENAME'], {}), '(var_dir, DATA_FILENAME)\n', (6839, 6863), False, 'import os\n'), ((7297, 7351), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['dtype'], {}), '(dtype)\n', (7344, 7351), True, 'import oneflow.framework.dtype as dtype_util\n'), ((1508, 1548), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (1546, 1548), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((1903, 1971), 'oneflow.framework.dtype.convert_proto_dtype_to_oneflow_dtype', 'dtype_util.convert_proto_dtype_to_oneflow_dtype', (['meta_info.data_type'], {}), '(meta_info.data_type)\n', (1950, 1971), True, 'import oneflow.framework.dtype as dtype_util\n'), ((3966, 4000), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '{(0): [0]}'], {}), "('cuda', {(0): [0]})\n", (3980, 4000), True, 'import oneflow as flow\n'), ((4328, 4380), 'oneflow._oneflow_internal.cpu_broadcast', 'flow._oneflow_internal.cpu_broadcast', (['obj_bytes', 'src'], {}), '(obj_bytes, src)\n', (4364, 4380), True, 'import oneflow as flow\n'), ((4420, 4467), 
'oneflow._oneflow_internal.cpu_broadcast', 'flow._oneflow_internal.cpu_broadcast', (['None', 'src'], {}), '(None, src)\n', (4456, 4467), True, 'import oneflow as flow\n'), ((6092, 6111), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (6105, 6111), False, 'import os\n'), ((6884, 6911), 'os.path.dirname', 'os.path.dirname', (['param_path'], {}), '(param_path)\n', (6899, 6911), False, 'import os\n'), ((7145, 7187), 'os.path.join', 'os.path.join', (['path', 'SNAPSHOT_DONE_FILENAME'], {}), '(path, SNAPSHOT_DONE_FILENAME)\n', (7157, 7187), False, 'import os\n'), ((2511, 2537), 'os.path.getsize', 'os.path.getsize', (['data_path'], {}), '(data_path)\n', (2526, 2537), False, 'import os\n'), ((7017, 7058), 'os.path.join', 'os.path.join', (['var_dir', 'META_INFO_FILENAME'], {}), '(var_dir, META_INFO_FILENAME)\n', (7029, 7058), False, 'import os\n'), ((7091, 7129), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['meta_info'], {}), '(meta_info)\n', (7118, 7129), False, 'from google.protobuf import text_format\n'), ((2408, 2468), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype_'], {}), '(self.dtype_)\n', (2455, 2468), True, 'import oneflow.framework.dtype as dtype_util\n'), ((3341, 3355), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (3348, 3355), True, 'import numpy as np\n'), ((3888, 3903), 'oneflow.tensor', 'flow.tensor', (['[]'], {}), '([])\n', (3899, 3903), True, 'import oneflow as flow\n'), ((6120, 6136), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (6130, 6136), False, 'import os\n'), ((3136, 3195), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype'], {}), '(self.dtype)\n', (3183, 3195), True, 'import oneflow.framework.dtype as dtype_util\n'), ((2541, 2560), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (2548, 2560), True, 'import 
numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _check(test_case, x, y, value, dtype=None):
np_constant_like = np.full(x.shape, value)
test_case.assertTrue(np.array_equal(np_constant_like, y))
def _run_test(test_case, x, value, dtype=None, device="gpu"):
    """Build a one-shot oneflow job that runs constant_like on `x` and
    check its output against a numpy reference via `_check`.

    NOTE(review): the `device` argument is accepted but never used to place
    the job; placement presumably comes from the default scope — confirm.
    """
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.consistent_view())
    @flow.global_function(function_config=func_config)
    def ConstantLikeJob(x: oft.Numpy.Placeholder(x.shape)):
        return flow.constant_like(x, value=value, dtype=dtype)
    y = ConstantLikeJob(x).get()
    _check(test_case, x, y.numpy(), value, dtype=dtype)
def test_constant_like_gpu_float(test_case):
    """constant_like filled with 1.0 as flow.float on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 1.0, flow.float, "gpu")
def test_constant_like_cpu_float(test_case):
    """constant_like filled with 2.0 as flow.float on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 2.0, flow.float, "cpu")
def test_constant_like_gpu_double(test_case):
    """constant_like filled with 3.0 as flow.double on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 3.0, flow.double, "gpu")
def test_constant_like_cpu_double(test_case):
    """constant_like filled with 4.0 as flow.double on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 4.0, flow.double, "cpu")
def test_constant_like_gpu_int8(test_case):
    """constant_like filled with 5.0 as flow.int8 on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 5.0, flow.int8, "gpu")
def test_constant_like_cpu_int8(test_case):
    """constant_like filled with 6.0 as flow.int8 on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 6.0, flow.int8, "cpu")
def test_constant_like_gpu_int32(test_case):
    """constant_like filled with 7.0 as flow.int32 on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 7.0, flow.int32, "gpu")
def test_constant_like_cpu_int32(test_case):
    """constant_like filled with 8.0 as flow.int32 on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 8.0, flow.int32, "cpu")
def test_constant_like_gpu_int64(test_case):
    """constant_like filled with 9.0 as flow.int64 on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 9.0, flow.int64, "gpu")
def test_constant_like_cpu_int64(test_case):
    """constant_like filled with 10.0 as flow.int64 on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 10.0, flow.int64, "cpu")
def test_constant_like_gpu(test_case):
    """constant_like filled with 11.0, default dtype, on GPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 11.0, device="gpu")
def test_constant_like_cpu(test_case):
    """constant_like filled with 12.0, default dtype, on CPU."""
    values = np.random.rand(10, 3, 32, 1024).astype(np.float32)
    _run_test(test_case, values, 12.0, device="cpu")
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.constant_like",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((734, 757), 'numpy.full', 'np.full', (['x.shape', 'value'], {}), '(x.shape, value)\n', (741, 757), True, 'import numpy as np\n'), ((902, 923), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (921, 923), True, 'import oneflow as flow\n'), ((1043, 1092), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1063, 1092), True, 'import oneflow as flow\n'), ((783, 818), 'numpy.array_equal', 'np.array_equal', (['np_constant_like', 'y'], {}), '(np_constant_like, y)\n', (797, 818), True, 'import numpy as np\n'), ((1007, 1035), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1033, 1035), True, 'import oneflow as flow\n'), ((1168, 1215), 'oneflow.constant_like', 'flow.constant_like', (['x'], {'value': 'value', 'dtype': 'dtype'}), '(x, value=value, dtype=dtype)\n', (1186, 1215), True, 'import oneflow as flow\n'), ((1120, 1150), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {}), '(x.shape)\n', (1141, 1150), True, 'import oneflow.typing as oft\n'), ((1361, 1392), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (1375, 1392), True, 'import numpy as np\n'), ((1519, 1550), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (1533, 1550), True, 'import numpy as np\n'), ((1678, 1709), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (1692, 1709), True, 'import numpy as np\n'), ((1838, 1869), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (1852, 1869), True, 'import numpy as np\n'), ((1996, 2027), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2010, 2027), True, 'import numpy as np\n'), ((2152, 2183), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', 
'(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2166, 2183), True, 'import numpy as np\n'), ((2309, 2340), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2323, 2340), True, 'import numpy as np\n'), ((2467, 2498), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2481, 2498), True, 'import numpy as np\n'), ((2625, 2656), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2639, 2656), True, 'import numpy as np\n'), ((2783, 2814), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2797, 2814), True, 'import numpy as np\n'), ((2936, 2967), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2950, 2967), True, 'import numpy as np\n'), ((3084, 3115), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (3098, 3115), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.compatible import single_client as flow
import tensorflow as tf
from test_util import GenArgList
from oneflow.compatible.single_client import typing as oft
# Enable memory growth on every visible GPU so TensorFlow allocates GPU
# memory on demand instead of reserving it all up front (it must share
# the device with oneflow in this test process).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def _np_dtype_to_of_dtype(np_dtype):
if np_dtype == np.float32:
return flow.float
else:
raise NotImplementedError
def _of_clip_by_value(values, min, max, device_type="gpu", dynamic=False, grad_cb=None):
    """Run flow.clip_by_value on `values` and return the clipped numpy result.

    If `grad_cb` is callable, a "train" job with an SGD optimizer is built
    and `grad_cb` receives the gradient of the clip input via
    flow.watch_diff; otherwise a plain "predict" job is used. `dynamic`
    selects the mirrored (list-of-arrays) view instead of the consistent one.
    """
    data_type = _np_dtype_to_of_dtype(values.dtype)
    if callable(grad_cb):
        def clip(values_blob):
            with flow.scope.placement(device_type, "0:0"):
                # A zero-initialized variable is added to the input so there
                # is a trainable tensor for the optimizer to differentiate.
                x = flow.get_variable(
                    "values",
                    shape=values.shape,
                    dtype=data_type,
                    initializer=flow.constant_initializer(0),
                )
                x = flow.cast_to_current_logical_view(x)
                x = x + values_blob
                y = flow.clip_by_value(x, min, max)
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
                ).minimize(y)
            # Deliver d(y)/d(x) to the caller's callback.
            flow.watch_diff(x, grad_cb)
            return y
    else:
        def clip(values_blob):
            with flow.scope.placement(device_type, "0:0"):
                return flow.clip_by_value(values_blob, min, max, name="Clip")
    # A fresh session per call keeps jobs from different tests isolated.
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(data_type)
    if grad_cb is not None:
        func_config_type = "train"
    else:
        func_config_type = "predict"
    if dynamic:
        func_config.default_logical_view(flow.scope.mirrored_view())
        @flow.global_function(type=func_config_type, function_config=func_config)
        def clip_fn(
            values_def: oft.ListNumpy.Placeholder(values.shape, dtype=data_type)
        ):
            return clip(values_def)
        return clip_fn([values]).get().numpy_list()[0]
    else:
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(type=func_config_type, function_config=func_config)
        def clip_fn(values_def: oft.Numpy.Placeholder(values.shape, dtype=data_type)):
            return clip(values_def)
        return clip_fn(values).get().numpy()
def _compare_with_tf(test_case, values, min, max, device_type, dynamic):
    """Check flow.clip_by_value's forward and backward against TensorFlow."""
    with tf.GradientTape() as t:
        x = tf.Variable(values)
        y = tf.clip_by_value(x, min, max)
    dy = t.gradient(y, x)
    def compare_dy(dy_blob):
        # Invoked by flow.watch_diff with the oneflow gradient blob;
        # compares it against TensorFlow's gradient.
        test_case.assertTrue(
            np.array_equal(
                dy.numpy(), dy_blob.numpy_list()[0] if dynamic else dy_blob.numpy()
            )
        )
    of_y = _of_clip_by_value(
        values=values,
        min=min,
        max=max,
        device_type=device_type,
        dynamic=dynamic,
        grad_cb=compare_dy,
    )
    test_case.assertTrue(np.array_equal(y.numpy(), of_y))
@flow.unittest.skip_unless_1n1d()
class TestClipByValue(flow.unittest.TestCase):
    """Validate flow.clip_by_value against numpy and TensorFlow references."""

    def test_clip_by_value(test_case):
        values = np.random.randint(low=-100, high=100, size=(8, 512, 4)).astype(
            np.float32
        )
        expected = np.clip(values, -50, 50)
        arg_dict = OrderedDict(
            [("device_type", ["cpu", "gpu"]), ("dynamic", [True, False])]
        )
        for arg in GenArgList(arg_dict):
            actual = _of_clip_by_value(values, -50, 50, *arg)
            test_case.assertTrue(np.array_equal(expected, actual))

    def test_clip_by_min(test_case):
        values = np.random.standard_normal((100, 30)).astype(np.float32)
        expected = np.clip(values, a_min=0, a_max=None)
        arg_dict = OrderedDict(
            [("device_type", ["cpu", "gpu"]), ("dynamic", [True, False])]
        )
        for arg in GenArgList(arg_dict):
            actual = _of_clip_by_value(values, 0, None, *arg)
            test_case.assertTrue(np.array_equal(expected, actual))

    def test_clip_by_max(test_case):
        values = np.random.standard_normal((2, 64, 800, 1088)).astype(np.float32)
        expected = np.clip(values, a_min=None, a_max=0.2)
        arg_dict = OrderedDict(
            [("device_type", ["cpu", "gpu"]), ("dynamic", [True, False])]
        )
        for arg in GenArgList(arg_dict):
            actual = _of_clip_by_value(values, None, 0.2, *arg)
            test_case.assertTrue(np.allclose(expected, actual))

    def test_clip_by_value_grad(test_case):
        values = np.random.standard_normal(1024).astype(np.float32)
        arg_dict = OrderedDict(
            [("device_type", ["cpu", "gpu"]), ("dynamic", [True, False])]
        )
        for arg in GenArgList(arg_dict):
            _compare_with_tf(test_case, values, 0, 0.5, *arg)

    def test_clip_by_value_grad_case_1(test_case):
        values = np.random.standard_normal((128, 10, 27)).astype(np.float32)
        arg_dict = OrderedDict(
            [("device_type", ["cpu", "gpu"]), ("dynamic", [True, False])]
        )
        for arg in GenArgList(arg_dict):
            _compare_with_tf(test_case, values, -0.2, 0.2, *arg)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.scope.consistent_view",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.watch_diff",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_cli... | [((839, 890), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (883, 890), True, 'import tensorflow as tf\n'), ((3712, 3744), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3742, 3744), True, 'from oneflow.compatible import single_client as flow\n'), ((912, 963), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (952, 963), True, 'import tensorflow as tf\n'), ((2135, 2163), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2161, 2163), True, 'from oneflow.compatible import single_client as flow\n'), ((2182, 2203), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2201, 2203), True, 'from oneflow.compatible import single_client as flow\n'), ((5946, 5961), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5959, 5961), False, 'import unittest\n'), ((2455, 2527), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': 'func_config_type', 'function_config': 'func_config'}), '(type=func_config_type, function_config=func_config)\n', (2475, 2527), True, 'from oneflow.compatible import single_client as flow\n'), ((2825, 2897), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': 'func_config_type', 'function_config': 'func_config'}), '(type=func_config_type, function_config=func_config)\n', (2845, 2897), True, 'from oneflow.compatible import single_client as flow\n'), ((3151, 3168), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3166, 3168), True, 'import tensorflow as tf\n'), ((3187, 3206), 'tensorflow.Variable', 'tf.Variable', (['values'], {}), '(values)\n', (3198, 3206), True, 'import tensorflow as tf\n'), 
((3219, 3248), 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', 'min', 'max'], {}), '(x, min, max)\n', (3235, 3248), True, 'import tensorflow as tf\n'), ((3962, 3986), 'numpy.clip', 'np.clip', (['values', '(-50)', '(50)'], {}), '(values, -50, 50)\n', (3969, 3986), True, 'import numpy as np\n'), ((4007, 4020), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4018, 4020), False, 'from collections import OrderedDict\n'), ((4133, 4153), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4143, 4153), False, 'from test_util import GenArgList\n'), ((4410, 4446), 'numpy.clip', 'np.clip', (['values'], {'a_min': '(0)', 'a_max': 'None'}), '(values, a_min=0, a_max=None)\n', (4417, 4446), True, 'import numpy as np\n'), ((4466, 4479), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4477, 4479), False, 'from collections import OrderedDict\n'), ((4592, 4612), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4602, 4612), False, 'from test_util import GenArgList\n'), ((4878, 4916), 'numpy.clip', 'np.clip', (['values'], {'a_min': 'None', 'a_max': '(0.2)'}), '(values, a_min=None, a_max=0.2)\n', (4885, 4916), True, 'import numpy as np\n'), ((4936, 4949), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4947, 4949), False, 'from collections import OrderedDict\n'), ((5062, 5082), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5072, 5082), False, 'from test_util import GenArgList\n'), ((5342, 5355), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5353, 5355), False, 'from collections import OrderedDict\n'), ((5468, 5488), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5478, 5488), False, 'from test_util import GenArgList\n'), ((5700, 5713), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5711, 5713), False, 'from collections import OrderedDict\n'), ((5826, 5846), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], 
{}), '(arg_dict)\n', (5836, 5846), False, 'from test_util import GenArgList\n'), ((1901, 1928), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x', 'grad_cb'], {}), '(x, grad_cb)\n', (1916, 1928), True, 'from oneflow.compatible import single_client as flow\n'), ((2417, 2443), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2441, 2443), True, 'from oneflow.compatible import single_client as flow\n'), ((2785, 2813), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2811, 2813), True, 'from oneflow.compatible import single_client as flow\n'), ((1323, 1363), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1343, 1363), True, 'from oneflow.compatible import single_client as flow\n'), ((1611, 1647), 'oneflow.compatible.single_client.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x'], {}), '(x)\n', (1644, 1647), True, 'from oneflow.compatible import single_client as flow\n'), ((1704, 1735), 'oneflow.compatible.single_client.clip_by_value', 'flow.clip_by_value', (['x', 'min', 'max'], {}), '(x, min, max)\n', (1722, 1735), True, 'from oneflow.compatible import single_client as flow\n'), ((2010, 2050), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2030, 2050), True, 'from oneflow.compatible import single_client as flow\n'), ((2075, 2129), 'oneflow.compatible.single_client.clip_by_value', 'flow.clip_by_value', (['values_blob', 'min', 'max'], {'name': '"""Clip"""'}), "(values_blob, min, max, name='Clip')\n", (2093, 2129), True, 'from oneflow.compatible import single_client as flow\n'), ((2573, 2629), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['values.shape'], {'dtype': 'data_type'}), 
'(values.shape, dtype=data_type)\n', (2598, 2629), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2930, 2982), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['values.shape'], {'dtype': 'data_type'}), '(values.shape, dtype=data_type)\n', (2951, 2982), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3848, 3903), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(-100)', 'high': '(100)', 'size': '(8, 512, 4)'}), '(low=-100, high=100, size=(8, 512, 4))\n', (3865, 3903), True, 'import numpy as np\n'), ((4250, 4280), 'numpy.array_equal', 'np.array_equal', (['np_out', 'of_out'], {}), '(np_out, of_out)\n', (4264, 4280), True, 'import numpy as np\n'), ((4337, 4373), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(100, 30)'], {}), '((100, 30))\n', (4362, 4373), True, 'import numpy as np\n'), ((4709, 4739), 'numpy.array_equal', 'np.array_equal', (['np_out', 'of_out'], {}), '(np_out, of_out)\n', (4723, 4739), True, 'import numpy as np\n'), ((4796, 4841), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(2, 64, 800, 1088)'], {}), '((2, 64, 800, 1088))\n', (4821, 4841), True, 'import numpy as np\n'), ((5181, 5208), 'numpy.allclose', 'np.allclose', (['np_out', 'of_out'], {}), '(np_out, of_out)\n', (5192, 5208), True, 'import numpy as np\n'), ((5272, 5303), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(1024)'], {}), '(1024)\n', (5297, 5303), True, 'import numpy as np\n'), ((5621, 5661), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(128, 10, 27)'], {}), '((128, 10, 27))\n', (5646, 5661), True, 'import numpy as np\n'), ((1543, 1571), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (1568, 1571), True, 'from oneflow.compatible import single_client as flow\n'), ((1792, 1846), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 
'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (1833, 1846), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the reference docstring to oneflow.abs.
# Fix: the summary previously ran the prose into the reST role
# ("tensor:math:`...`"), which prevents the math from rendering.
add_docstr(
    oneflow.abs,
    r"""Return the absolute value of each element in input tensor :math:`y = |x|` element-wise.
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x = flow.tensor(np.array([-1, 2, -3, 4]).astype(np.float32))
        >>> flow.abs(x)
        tensor([1., 2., 3., 4.], dtype=oneflow.float32)
    """,
)
# Attach the reference docstring to oneflow.add.
# Fix: typo "promotation" -> "promotion" in the summary line.
add_docstr(
    oneflow.add,
    r"""Computes the addition of `input` by `other` for each element, scalar and broadcast promotion are supported.
    The formula is:
    .. math::
        out = input + other
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        # element-wise add
        >>> x = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.add(x, y).numpy()
        >>> out.shape
        (2, 3)
        # scalar add
        >>> x = 5
        >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.add(x, y).numpy()
        >>> out.shape
        (2, 3)
        # broadcast add
        >>> x = flow.tensor(np.random.randn(1,1), dtype=flow.float32)
        >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.add(x, y).numpy()
        >>> out.shape
        (2, 3)
    """,
)
# Attach the reference docstring to oneflow.floor.
# Fix: the summary said "arcsine" (copy-paste from asin); this documents
# floor, matching the formula and example outputs below.
add_docstr(
    oneflow.floor,
    """
    Returns a new tensor with the floor of the elements of :attr:`input`.
    .. math::
        \\text{out}_{i} = \\lfloor \\text{input}_{i} \\rfloor
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([-0.5, 1.5, 0, 0.8]), dtype=flow.float32)
        >>> output = flow.floor(input)
        >>> output.shape
        oneflow.Size([4])
        >>> output.numpy()
        array([-1.,  1.,  0.,  0.], dtype=float32)
        >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, 2.5]]), dtype=flow.float32)
        >>> output1 = input1.floor()
        >>> output1.shape
        oneflow.Size([2, 2])
        >>> output1.numpy()
        array([[ 0.,  1.],
               [-1.,  2.]], dtype=float32)
    """,
)
add_docstr(
oneflow.floor_,
r"""
In-place version of :func:`oneflow.floor`
""",
)
# Attach the reference docstring to oneflow.div.
# Fix: typo "promotation" -> "promotion" in the summary line.
add_docstr(
    oneflow.div,
    r"""Computes the division of input by other for each element, scalar and broadcast promotion are supported.
    The formula is:
    .. math::
        out = \frac{input}{other}
    Args:
        input (Union[int, float, oneflow.Tensor]): input.
        other (Union[int, float, oneflow.Tensor]): other.
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        # element-wise divide
        >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.div(input,other).numpy()
        >>> out.shape
        (2, 3)
        # scalar divide
        >>> input = 5
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.div(input,other).numpy()
        >>> out.shape
        (2, 3)
        # broadcast divide
        >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.div(input,other).numpy()
        >>> out.shape
        (2, 3)
    """,
)
# Docstring for oneflow.mul. Fixes: typos "promotation" -> "promotion" and
# "mutiply" -> "multiply" (twice, in the example comments).
add_docstr(
    oneflow.mul,
    r"""Computes the multiplication of input by other for each element, scalar and broadcast promotion are supported.
    The formula is:
    .. math::
        \text{out}_i = \text{input}_i \times \text{other}_i
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        # element-wise multiply
        >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.mul(input,other).numpy()
        >>> out.shape
        (2, 3)
        # scalar multiply
        >>> input = 5
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.mul(input,other).numpy()
        >>> out.shape
        (2, 3)
        # broadcast multiply
        >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.mul(input,other).numpy()
        >>> out.shape
        (2, 3)
    """,
)
add_docstr(
oneflow.reciprocal,
r"""Computes the safe reciprocal of x. If x is zero, the reciprocal will
be also set to zero.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.tensor(np.array([[1, 2, 3], [4, 5, 6]]), dtype=flow.float32)
>>> out = flow.reciprocal(x)
>>> out.numpy()
array([[1. , 0.5 , 0.33333334],
[0.25 , 0.2 , 0.16666667]], dtype=float32)
""",
)
# Docstring for oneflow.sub. Fix: typo "promotation" -> "promotion".
add_docstr(
    oneflow.sub,
    r"""Computes the subtraction of input by other for each element, scalar and broadcast promotion are supported.
    The formula is:
    .. math::
        out = input - other
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        # element-wise subtract
        >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.sub(input,other).numpy()
        >>> out.shape
        (2, 3)
        # scalar subtract
        >>> input = 5
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.sub(input,other).numpy()
        >>> out.shape
        (2, 3)
        # broadcast subtract
        >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)
        >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)
        >>> out = flow.sub(input,other).numpy()
        >>> out.shape
        (2, 3)
    """,
)
add_docstr(
oneflow.asin,
r"""
Returns a new tensor with the arcsine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sin^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)
>>> output = flow.asin(input)
>>> output.shape
oneflow.Size([4])
>>> output
tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)
>>> output1 = input1.asin()
>>> output1.shape
oneflow.Size([2, 2])
>>> output1
tensor([[ 0.9273, 1.5708],
[-0.6435, -1.5708]], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.asinh,
r"""
Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)
>>> output = flow.asinh(input)
>>> output.shape
oneflow.Size([3])
>>> output
tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)
>>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)
>>> output1 = input1.asinh()
>>> output1.shape
oneflow.Size([2, 3])
>>> output1
tensor([[-0.8814, 0.0000, -0.3900],
[ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.atan,
r"""
Returns a new tensor with the arctangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)
>>> output = flow.atan(input)
>>> output.shape
oneflow.Size([3])
""",
)
# Docstring for oneflow.ceil. Fix: dropped the claim that
# ceil(x) == floor(x) + 1, which is false for integer-valued inputs
# (e.g. ceil(2.0) == 2, not 3).
add_docstr(
    oneflow.ceil,
    r"""Returns a new tensor with the ceil of the elements of :attr:`input`,
    the smallest integer greater than or equal to each element.
    The equation is:
    .. math::
        \text{out}_{i} = \left\lceil \text{input}_{i} \right\rceil
    Args:
        input (oneflow.Tensor): A Tensor.
    Returns:
        oneflow.Tensor: The result Tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x = flow.tensor(np.array([0.1, -2, 3.4]).astype(np.float32))
        >>> y = flow.ceil(x)
        >>> y.shape
        oneflow.Size([3])
        >>> y
        tensor([ 1., -2., 4.], dtype=oneflow.float32)
        >>> x = flow.tensor(np.array([[2.5, 4.6, 0.6],[7.8, 8.3, 9.2]]).astype(np.float32))
        >>> y = x.ceil()
        >>> y.shape
        oneflow.Size([2, 3])
        >>> y
        tensor([[ 3., 5., 1.],
               [ 8., 9., 10.]], dtype=oneflow.float32)
        >>> x = flow.tensor(np.array([[[2.2, 4.4, 6.5],[7.1, 8.2, 9.3]],[[10.6,11.2,12.2],[13.5,14.8,15.9]]]).astype(np.float32))
        >>> y = flow.ceil(x)
        >>> y.shape
        oneflow.Size([2, 2, 3])
        >>> y
        tensor([[[ 3., 5., 7.],
               [ 8., 9., 10.]],
        <BLANKLINE>
               [[11., 12., 13.],
               [14., 15., 16.]]], dtype=oneflow.float32)
    """,
)
add_docstr(
oneflow.negative,
r"""This operator computes the negative value of Tensor.
Args:
input (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(
... np.array([1.0, -1.0, 2.3]).astype(np.float32), dtype=flow.float32
... )
>>> out = flow.negative(input)
>>> out
tensor([-1.0000, 1.0000, -2.3000], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.log1p,
r"""Returns a new tensor with the natural logarithm of (1 + input).
.. math::
\text{out}_{i}=\log_e(1+\text{input}_{i})
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.array([1.3, 1.5, 2.7]), dtype=flow.float32)
>>> out = flow.log1p(x)
>>> out
tensor([0.8329, 0.9163, 1.3083], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.exp,
r"""
This operator computes the exponential of Tensor.
The equation is:
.. math::
out = e^x
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32)
>>> y = flow.exp(x)
>>> y
tensor([ 2.7183, 7.3891, 20.0855], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.acos,
r"""
Returns a new tensor with the inverse cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \arccos(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([0.5, 0.6, 0.7])
>>> input = flow.tensor(arr, dtype=flow.float32)
>>> output = flow.acos(input)
>>> output
tensor([1.0472, 0.9273, 0.7954], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.acosh,
r"""
Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x1 = flow.tensor(np.array([2, 3, 4]).astype(np.float32))
>>> out1 = flow.acosh(x1)
>>> out1
tensor([1.3170, 1.7627, 2.0634], dtype=oneflow.float32)
>>> x2 = flow.tensor(np.array([1.5, 2.6, 3.7]).astype(np.float32),device=flow.device('cuda'))
>>> out2 = flow.acosh(x2)
>>> out2
tensor([0.9624, 1.6094, 1.9827], device='cuda:0', dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.atanh,
r"""Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tanh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)
>>> input = flow.tensor(np_arr, dtype=flow.float32)
>>> output = flow.atanh(input)
>>> output
tensor([0.5493, 0.6931, 0.8673], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.sign,
r"""Computes the sign of Tensor.
.. math::
\text{out}_{i} = \text{sgn}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x1 = flow.tensor(np.array([-2, 0, 2]).astype(np.float32))
>>> out1 = flow.sign(x1)
>>> out1.numpy()
array([-1., 0., 1.], dtype=float32)
>>> x2 = flow.tensor(np.array([-3.2, -4.5, 5.8]).astype(np.float32),device=flow.device('cuda'))
>>> out2 = flow.sign(x2)
>>> out2.numpy()
array([-1., -1., 1.], dtype=float32)
""",
)
add_docstr(
oneflow.sin,
r"""Returns a new tensor with the sine of the elements of :attr:`input`.
sin(x: Tensor) -> Tensor
.. math::
\text{y}_{i} = \sin(\text{x}_{i})
Args:
x (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x1 = flow.tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32))
>>> y1 = flow.sin(x1)
>>> y1
tensor([-0.5194, 0.1343, -0.4032, -0.2712], dtype=oneflow.float32)
>>> x2 = flow.tensor(np.array([-1.4, 2.6, 3.7]).astype(np.float32), device=flow.device('cuda'))
>>> y2 = flow.sin(x2)
>>> y2
tensor([-0.9854, 0.5155, -0.5298], device='cuda:0', dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.sin_,
r"""
In-place version of :func:`oneflow.sin`
""",
)
add_docstr(
oneflow.sinh,
r"""Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sinh(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x1 = flow.tensor(np.array([1, 2, 3]), dtype=flow.float32)
>>> x2 = flow.tensor(np.array([1.53123589,0.54242598,0.15117185]), dtype=flow.float32)
>>> x3 = flow.tensor(np.array([1,0,-1]), dtype=flow.float32)
>>> flow.sinh(x1).numpy()
array([ 1.1752012, 3.6268604, 10.017875 ], dtype=float32)
>>> flow.sinh(x2).numpy()
array([2.20381 , 0.5694193, 0.1517483], dtype=float32)
>>> flow.sinh(x3).numpy()
array([ 1.1752012, 0. , -1.1752012], dtype=float32)
""",
)
add_docstr(
oneflow.tan,
r"""Returns the tan value of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \tan(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> np_arr = np.array([-1/4*np.pi, 0, 1/4*np.pi]).astype(np.float32)
>>> input = flow.tensor(np_arr, dtype=flow.float32)
>>> output = flow.tan(input)
>>> output
tensor([-1., 0., 1.], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.cos,
r"""
Returns a new tensor with the cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cos(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([1.4309, 1.2706, -0.8562, 0.9796])
>>> input = flow.tensor(arr, dtype=flow.float32)
>>> output = flow.cos(input).numpy()
""",
)
add_docstr(
oneflow.cosh,
r"""
Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> arr = np.array([ 0.1632, 1.1835, -0.6979, -0.7325])
>>> input = flow.tensor(arr, dtype=flow.float32)
>>> output = flow.cosh(input).numpy()
>>> output
array([1.0133467, 1.7859949, 1.2535787, 1.2804903], dtype=float32)
""",
)
add_docstr(
oneflow.erf,
r"""Computes the error function of each element. The error function is defined as follows:
.. math::
\operatorname{erf}(x)=\frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^{2}} d t
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)
>>> out = flow.erf(x)
>>> out.shape
oneflow.Size([3])
>>> out.numpy()
array([ 0. , -0.8427008, 1. ], dtype=float32)
>>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)
>>> out = flow.erf(x)
>>> out.shape
oneflow.Size([2, 3])
>>> out.numpy()
array([[ 0. , -0.8427008 , 1. ],
[ 1. , 1. , 0.74210095]], dtype=float32)
>>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8], [2, 3, 4]]), dtype=flow.float32)
>>> out = x.erf()
>>> out.shape
oneflow.Size([3, 3])
>>> out.numpy()
array([[ 0. , -0.8427008 , 1. ],
[ 1. , 1. , 0.74210095],
[ 0.9953223 , 0.9999779 , 1. ]], dtype=float32)
""",
)
add_docstr(
oneflow.erfc,
r"""Computes the complementary error function of each element of input. The complementary error
function is defined as follows:
.. math::
\operatorname{erfc}(x)=1-\frac{2}{\sqrt{\pi}} \int_{0}^{x} e^{-t^{2}} d t
Args:
x (oneflow.Tensor): A Tensor
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)
>>> out = flow.erfc(x)
>>> out
tensor([1.0000e+00, 1.8427e+00, 2.8026e-45], dtype=oneflow.float32)
>>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)
>>> out = flow.erfc(x)
>>> out
tensor([[1.0000e+00, 1.8427e+00, 2.8026e-45],
[1.5375e-12, 4.1838e-23, 2.5790e-01]], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.expm1,
r"""Returns a new tensor with the exponential of the elements minus 1
of :attr:`input`.
The equation is:
.. math::
y_{i} = e^{x_{i}} - 1
Args:
input (oneflow.Tensor): A Tensor.
Returns:
oneflow.Tensor: The result Tensor
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32))
>>> y = flow.expm1(x)
>>> y.shape
oneflow.Size([3])
>>> y
tensor([ 1.7183, 6.3891, 19.0855], dtype=oneflow.float32)
>>> x = flow.tensor(np.array([[[2, 4, 6],[7, 8, 9]],[[10,11,12],[13,14,15]]]).astype(np.float32))
>>> y = flow.expm1(x)
>>> print(y.shape)
oneflow.Size([2, 2, 3])
>>> print(y.numpy())
[[[6.3890562e+00 5.3598152e+01 4.0242880e+02]
[1.0956332e+03 2.9799580e+03 8.1020840e+03]]
<BLANKLINE>
[[2.2025465e+04 5.9873141e+04 1.6275380e+05]
[4.4241238e+05 1.2026032e+06 3.2690165e+06]]]
""",
)
# Docstring for oneflow.fmod. Fix: garbled sentence "may contain both for
# integer and floating point numbers" -> "may contain both integer and
# floating point numbers".
add_docstr(
    oneflow.fmod,
    r"""
    fmod(input, other, *, out=None) -> Tensor
    Computes the element-wise remainder of division.
    The dividend and divisor may contain both integer and floating point
    numbers. The remainder has the same sign as the dividend :attr:`input`.
    Supports broadcasting to a common shape, integer and float inputs.
    Args:
        input (Tensor): the dividend
        other (Tensor or Scalar): the divisor
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> import oneflow as flow
        >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3], dtype=flow.float32), 2.)
        tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)
        >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.], dtype=flow.float32), 1.5)
        tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)
        >>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))
        tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)
    """,
)
add_docstr(
oneflow.log,
r"""
Returns a new tensor with the natural logarithm of the elements of :attr:`input`.
.. math::
y_{i} = \log_{e} (x_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.random.randn(2, 3, 4, 5)
>>> input = flow.tensor(arr, dtype=flow.float32)
>>> output = flow.log(input)
""",
)
# Docstring for oneflow.log2. Fixes: "natural logarithm to the base 2" was
# self-contradictory (natural log is base e), and the formula was the garbled
# \log2_{e}; it is now the base-2 logarithm \log_{2}.
add_docstr(
    oneflow.log2,
    """
    oneflow.log2(input) -> Tensor
    Returns a new tensor with the logarithm to the base 2 of the elements of :attr:`input`.
    .. math::
        y_{i} = \\log_{2} (x_{i})
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> arr = np.random.randn(2, 3, 4, 5)
        >>> input = flow.tensor(arr, dtype=flow.float32)
        >>> output = flow.log2(input)
    """,
)
add_docstr(
oneflow.minimum,
r"""Computes the element-wise minimum of x and y.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor((1, 2, -1), dtype=flow.float32)
>>> y = flow.tensor((3, 0, 4), dtype=flow.float32)
>>> flow.minimum(x, y)
tensor([ 1., 0., -1.], dtype=oneflow.float32)
>>> x = flow.tensor((1,), dtype=flow.float32)
>>> y = flow.tensor((3, 0, 4), dtype=flow.float32)
>>> flow.minimum(x, y)
tensor([1., 0., 1.], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.maximum,
r"""Computes the element-wise maximum of x and y.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor((1, 2, -1), dtype=flow.float32)
>>> y = flow.tensor((3, 0, 4), dtype=flow.float32)
>>> flow.maximum(x, y)
tensor([3., 2., 4.], dtype=oneflow.float32)
>>> x = flow.tensor((1,), dtype=flow.float32)
>>> y = flow.tensor((3, 0, 4), dtype=flow.float32)
>>> flow.maximum(x, y)
tensor([3., 1., 4.], dtype=oneflow.float32)
""",
)
# Docstring for oneflow.pow. Fixes: inside this raw string, "\\text" was a
# literal double backslash (renders as a LaTeX line break in Sphinx) -- now
# single-backslash \text as in the file's other raw docstrings; also removed
# a stray literal "\u200b" (zero-width-space escape) line.
add_docstr(
    oneflow.pow,
    r"""Takes the power of each element in input with exponent and returns a tensor with the result. Exponent can be either a single float number, a single int number, or a tensor with the same shape as input.
    When exponent is a scalar value, the operation applied is:
    .. math::
        \text{out}_i = x_i ^ \text{exponent}

    When exponent is a tensor, the operation applied is:
    .. math::
        \text{out}_i = x_i ^ {\text{exponent}_i}
    Args:
        - input (Tensor): the input tensor.
        - exponent (int, float, Tensor): the exponent.
    Returns:
        Tensor: The result of variance on the specified axis of input Tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), dtype=flow.float32)
        >>> out = flow.pow(x, 2)
        >>> out
        tensor([ 1., 4., 9., 16., 25., 36.], dtype=oneflow.float32)
        >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)
        >>> y = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)
        >>> out = flow.pow(x, y)
        >>> out
        tensor([ 1., 4., 27., 256.], dtype=oneflow.float32)
    """,
)
add_docstr(
oneflow.rsqrt,
r"""Returns a new tensor with the reciprocal of the square-root of each of
the elements of :attr:`input`.
.. math::
\text{out}_{i} = \frac{1}{\sqrt{\text{input}_{i}}}
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> a = flow.tensor(np.array([1.0, 2.0, 3.0]), dtype=flow.float32)
>>> out = flow.rsqrt(a).numpy()
>>> out
array([1. , 0.70710677, 0.57735026], dtype=float32)
""",
)
add_docstr(
oneflow.sqrt,
r"""Returns a new tensor with the square-root of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \sqrt{\text{input}_{i}}
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([1.0, 2.0, 3.0])
>>> input = flow.tensor(arr, dtype=flow.float32)
>>> output = flow.sqrt(input).numpy()
>>> output
array([1. , 1.4142135, 1.7320508], dtype=float32)
""",
)
# Docstring for oneflow.square. Fix: the formula said \sqrt{input}
# (copy-paste from sqrt); square is input^2, matching the doctest output
# [1., 4., 9.].
add_docstr(
    oneflow.square,
    r"""Returns a new tensor with the square of the elements of :attr:`input`.
    .. math::
        \text{out}_{i} = \text{input}_{i} ^ 2
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> arr = np.array([1.0, 2.0, 3.0])
        >>> input = flow.tensor(arr, dtype=flow.float32)
        >>> output = flow.square(input).numpy()
        >>> output
        array([1., 4., 9.], dtype=float32)
    """,
)
# Docstring for oneflow.matmul. Fix: the Args section documented parameters
# "a" and "b", which do not match the signature line matmul(input, other).
add_docstr(
    oneflow.matmul,
    r"""
    matmul(input, other) -> Tensor
    This operator applies matrix multiplication to two Tensor.
    Args:
        input (oneflow.Tensor): A Tensor
        other (oneflow.Tensor): A Tensor
    Returns:
        oneflow.Tensor: The result Tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input1 = flow.tensor(np.random.randn(2, 6), dtype=flow.float32)
        >>> input2 = flow.tensor(np.random.randn(6, 5), dtype=flow.float32)
        >>> of_out = flow.matmul(input1, input2)
        >>> of_out.shape
        oneflow.Size([2, 5])
    """,
)
# Docstring for oneflow.round. Fix: replaced the legacy term "Blob" with
# "input tensor" to match the documented parameter.
add_docstr(
    oneflow.round,
    r"""This operator rounds the value of the input tensor to the nearest integer.
    Args:
        input (oneflow.Tensor): A Tensor
    Returns:
        oneflow.Tensor: The result Tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> x1 = flow.tensor(np.array([1.49999, 1.500001, 2.7]).astype(np.float32))
        >>> out1 = flow.round(x1)
        >>> out1.numpy()
        array([1., 2., 3.], dtype=float32)
        >>> x2 = flow.tensor(np.array([2.499999, 7.5000001, 5.3, 6.8]).astype(np.float32))
        >>> out2 = flow.round(x2)
        >>> out2.numpy()
        array([2., 8., 5., 7.], dtype=float32)
    """,
)
add_docstr(
oneflow.std,
r"""
Returns the standard-deviation of each row of the :attr:`input` tensor in the
dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,
reduce over all of them.
If keepdim is True, the output tensor is of the same size as input except in
the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed,
resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).
If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated
via the biased estimator. Otherwise, Bessel's correction will be used.
Args:
input (Tensor): the input tensor.
dim (int or tuple of python:ints): the dimension or dimensions to reduce.
unbiased (bool): whether to use the unbiased estimation or not
keepdim (bool): whether the output tensor has `dim` retained or not.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([1.0, 2.0, 3.0])
>>> input = flow.tensor(arr)
>>> output = flow.std(input, dim=0).numpy()
>>> output
array(1.)
""",
)
add_docstr(
oneflow.var,
r"""Returns the variance of each row of the `input` tensor in the given dimension `dim`.
If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim`
where it is of size 1. Otherwise, dim is squeezed (see `flow.squeeze()`), resulting in the output
tensor having 1 (or `len(dim)`) fewer dimension(s).
Args:
input (Tensor): the input tensor.
dim (int or tuple of python:ints): the dimension or dimensions to reduce. Defaults to None.
unbiased (bool, optional): whether to use Bessel’s correction (:math:`\delta N = 1`). Defaults to True.
keepdim (bool, optional): whether the output tensor has dim retained or not. Defaults to False.
Returns:
Tensor: The result of variance on the specified axis of input Tensor
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> input = flow.tensor(np.random.randn(2, 3, 4, 5))
>>> output = flow.var(input, 1, True)
""",
)
# Docstring for oneflow.logical_not. Fix: removed the documented "other"
# parameter -- logical NOT is a unary op and the example passes a single
# tensor; the "other" entry was copied from a binary logical op.
add_docstr(
    oneflow.logical_not,
    r"""
    Computes the element-wise logical NOT of the given input tensor.
    Zeros are treated as False and nonzeros are treated as True.
    Args:
        input (oneflow.Tensor): The input Tensor
    Returns:
        oneflow.Tensor: The output Tensor
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.tensor([1, 0, -1], dtype=flow.float32)
        >>> out = flow.logical_not(input)
        >>> out
        tensor([False, True, False], dtype=oneflow.bool)
    """,
)
# Docstring for oneflow.dot. Fix: inside this raw string "\\sum" was a
# literal double backslash, which renders as a LaTeX line break; single
# backslash is correct in an r-string.
add_docstr(
    oneflow.dot,
    r"""This operator computes the dot product of tensor input and other.
    The equation is:
    $$
    \sum_{i=1}^{n}(x[i] * y[i])
    $$
    Args:
        input (Tensor): first tensor in the dot product.
        other (Tensor): second tensor in the dot product.
    Shape:
        - input: Input must be 1D.
        - other: Other must be 1D.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> flow.dot(flow.Tensor([2, 3]), flow.Tensor([2, 1]))
        tensor(7., dtype=oneflow.float32)
    """,
)
add_docstr(
oneflow.select,
r"""
Slices the self tensor along the selected dimension at the given index. This function returns
a view of the original tensor with the given dimension removed.
Args:
input (Tensor): the input tensor.
dim (int): the dimension to slice.
select (int): the index to select with.
Returns:
oneflow.Tensor: the output Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.rand(3, 4, 5)
>>> out = flow.select(input, 0, 1)
>>> out.size()
oneflow.Size([4, 5])
>>> out = flow.select(input, 1, 1)
>>> out.size()
oneflow.Size([3, 5])
""",
)
add_docstr(
oneflow.movedim,
r"""
Moves the dimension(s) of input at the position(s) in source to the position(s) in destination.
Other dimensions of input that are not explicitly moved remain in their original order and appear at the positions not specified in destination.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.movedim.html#torch.movedim.
Args:
input (Tensor): the input tensor.
source (int or a list): Original positions of the dims to move. These must be unique.
destination (int or a list): Destination positions for each of the original dims. These must also be unique.
Returns:
oneflow.Tensor: the output Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input = flow.tensor(np.random.randn(2, 3, 4, 5), dtype=flow.float32)
>>> output = flow.movedim(input, 1, 0)
>>> output.shape
oneflow.Size([3, 2, 4, 5])
>>> output = flow.movedim(input, (1, 2), (0, 1))
>>> output.shape
oneflow.Size([3, 4, 2, 5])
""",
)
add_docstr(
oneflow.as_strided,
r"""
Create a view of an existing oneflow.Tensor input with specified size, stride and storage_offset.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.as_strided.html#torch.as_strided.
Args:
input (Tensor): the input tensor.
size (tuple or ints): the shape of the output tensor.
stride (tuple or ints): the stride of the output tensor.
storage_offset (int): the offset in the underlying storage of the output tensor
Returns:
oneflow.Tensor: the output Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.rand(2,3,5)
>>> output = flow.as_strided(input, (2,3,3), (1,2,3), 1)
>>> output.size()
oneflow.Size([2, 3, 3])
""",
)
add_docstr(
oneflow.addcmul,
r"""
oneflow.addcmul(input, tensor1, tensor2, *, value=1) -> Tensor
Performs the element-wise multiplication of tensor1 by tensor2, multiply the result
by the scalar value and add it to input.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.addcmul.html
.. math::
\text{out}_i = \text{input}_i + value \times\ \text{tensor1}_i \times\ \text{tensor2}_i
Args:
input (Tensor): the tensor to be added.
tensor1 (Tensor): the tensor to be multiplied.
tensor2 (Tensor): the tensor to be multiplied.
Keyword args:
value (Number, optional): multiplier for :math:`tensor1 * tensor2`.
Returns:
oneflow.Tensor: the output Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.rand(2, 3, 4)
>>> tensor1 = flow.rand(2, 3, 4)
>>> tensor2 = flow.rand(2, 3, 4)
>>> out = flow.addcmul(input, tensor1, tensor2, value=2)
>>> out.size()
oneflow.Size([2, 3, 4])
""",
)
# Docstring for oneflow.eye. Fix: typo "colums" -> "columns".
add_docstr(
    oneflow.eye,
    """oneflow.eye(n, m, *, device=None, requires_grad=False, placement=None, sbp) -> Tensor
    This operator creates a 2-D Tensor with ones on the diagonal and zeros elsewhere.
    Args:
        n (int): the number of rows.
        m (int, optional): the number of columns with default being n. Defaults to None.
    Keyword args:
        device(Union[flow.device, str], optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
        placement(oneflow._oneflow_internal.placement, optional): The placement attribute allows you to specify which physical device the tensor is stored on.
        sbp(Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]], optional): When creating a global tensor, specify the SBP of the tensor.
    Returns:
        oneflow.Tensor: The result tensor with ones on the diagonal and zeros elsewhere.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> out = flow.eye(3, 3)
        >>> out
        tensor([[1., 0., 0.],
                [0., 1., 0.],
                [0., 0., 1.]], dtype=oneflow.float32)
        >>> out = flow.eye(3, 3, device="cuda")
        >>> out
        tensor([[1., 0., 0.],
                [0., 1., 0.],
                [0., 0., 1.]], device='cuda:0', dtype=oneflow.float32)
    """,
)
# Docstring for oneflow.tensor_split. Fixes in the indices_or_sections
# paragraph: missing spaces ("dim.If", "input[3:].If"), a stray period that
# split "int(input.size(dim) % n) sections" across a sentence boundary, and
# the spurious space in "input.size (dim)".
add_docstr(
    oneflow.tensor_split,
    r"""
    Splits a tensor into multiple sub-tensors, all of which are views of input, along dimension
    dim according to the indices or number of sections specified by indices_or_sections.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.tensor_split.html#torch.tensor_split
    Args:
        input (Tensor): the input tensor.
        indices_or_sections (int or a list): If indices_or_sections is an integer n, input is split into n sections
            along dimension dim. If input is divisible by n along dimension dim, each section will be of equal size,
            input.size(dim) / n. If input is not divisible by n, the first int(input.size(dim) % n)
            sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).
            If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in
            the list, tuple or tensor. For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors
            input[:2], input[2:3], and input[3:]. If indices_or_sections is a tensor, it must be a zero-dimensional or
            one-dimensional long tensor on the CPU.
        dim (int): dimension along which to split the tensor.
    Returns:
        oneflow.TensorTuple: the output TensorTuple.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.rand(3,4,5)
        >>> output = flow.tensor_split(input,(2,3),2)
        >>> output[0].size()
        oneflow.Size([3, 4, 2])
        >>> output[1].size()
        oneflow.Size([3, 4, 1])
        >>> output[2].size()
        oneflow.Size([3, 4, 2])
    """,
)
# Docstring for oneflow.hsplit. Fixes the same garbled
# indices_or_sections paragraph as tensor_split: missing spaces after
# periods, the stray "% n)." period, and "input.size (dim)".
add_docstr(
    oneflow.hsplit,
    r"""
    Splits input, a tensor with one or more dimensions, into multiple tensors horizontally according to indices_or_sections.
    Each split is a view of input.
    If input is one dimensional this is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0)
    (the split dimension is zero), and if input has two or more dimensions it’s equivalent to calling
    oneflow.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), except that if indices_or_sections
    is an integer it must evenly divide the split dimension or a runtime error will be thrown.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.hsplit.html#torch.hsplit
    Args:
        input (Tensor): the input tensor.
        indices_or_sections (int or a list): If indices_or_sections is an integer n, input is split into n sections
            along dimension dim. If input is divisible by n along dimension dim, each section will be of equal size,
            input.size(dim) / n. If input is not divisible by n, the first int(input.size(dim) % n)
            sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).
            If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in
            the list, tuple or tensor. For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors
            input[:2], input[2:3], and input[3:]. If indices_or_sections is a tensor, it must be a zero-dimensional or
            one-dimensional long tensor on the CPU.
    Returns:
        oneflow.TensorTuple: the output TensorTuple.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.rand(3,4,5,6)
        >>> output = flow.hsplit(input,(1,3))
        >>> output[0].size()
        oneflow.Size([3, 1, 5, 6])
        >>> output[1].size()
        oneflow.Size([3, 2, 5, 6])
        >>> output[2].size()
        oneflow.Size([3, 1, 5, 6])
    """,
)
# Docstring for oneflow.vsplit. Fixes the same garbled
# indices_or_sections paragraph as tensor_split/hsplit.
add_docstr(
    oneflow.vsplit,
    r"""
    Splits input, a tensor with two or more dimensions, into multiple tensors vertically according to indices_or_sections.
    Each split is a view of input.
    This is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0) (the split dimension is 0),
    except that if indices_or_sections is an integer it must evenly divide the split dimension or a runtime error will be thrown.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.vsplit.html#torch.vsplit
    Args:
        input (Tensor): the input tensor.
        indices_or_sections (int or a list): If indices_or_sections is an integer n, input is split into n sections
            along dimension dim. If input is divisible by n along dimension dim, each section will be of equal size,
            input.size(dim) / n. If input is not divisible by n, the first int(input.size(dim) % n)
            sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).
            If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in
            the list, tuple or tensor. For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors
            input[:2], input[2:3], and input[3:]. If indices_or_sections is a tensor, it must be a zero-dimensional or
            one-dimensional long tensor on the CPU.
    Returns:
        oneflow.TensorTuple: the output TensorTuple.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.rand(3,4,5,6)
        >>> output = flow.vsplit(input,(1,3))
        >>> output[0].size()
        oneflow.Size([1, 4, 5, 6])
        >>> output[1].size()
        oneflow.Size([2, 4, 5, 6])
        >>> output[2].size()
        oneflow.Size([1, 4, 5, 6])
    """,
)
# Register the reference documentation for oneflow.cumsum (documentation-only
# side effect: attaches the string below as the function's docstring).
# NOTE(review): "valid range is [-N, N-1)" uses half-open notation that would
# exclude dim = N-1 (the last dimension), which is presumably a valid value;
# the intended range is likely [-N, N-1] inclusive — confirm before fixing.
add_docstr(
oneflow.cumsum,
r"""oneflow.cumsum(input, dim) -> Tensor
This operator computes the cumulative sum of input elements in the given dimension.
The equation is:
$$
y_{i}=x_{0}+x_{1}+...+x_{i}
$$
Args:
input (Tensor): the input ND tensor.
dim (int): the dimension to do cumsum, valid range is [-N, N-1), N is tensor's dimensions
Returns:
oneflow.Tensor: The result tensor with cumsum result.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.ones(3, 3)
>>> dim = 1
>>> flow.cumsum(input, dim)
tensor([[1., 2., 3.],
[1., 2., 3.],
[1., 2., 3.]], dtype=oneflow.float32)
""",
)
# Register the reference documentation for oneflow.cumprod (documentation-only
# side effect: attaches the string below as the function's docstring).
# Fix: the dim description previously read "the dimension to do cumsum" — a
# copy-paste from the cumsum entry — and now correctly says "cumprod".
# NOTE(review): "valid range is [-N, N-1)" uses half-open notation that would
# exclude dim = N-1; the intended range is likely [-N, N-1] inclusive —
# confirm before changing (left as-is to match the sibling cumsum entry).
add_docstr(
oneflow.cumprod,
"""oneflow.cumprod(input, dim) -> Tensor
This operator computes the cumulative product of input elements in the given dimension.
The equation is:
$$
y_{i}=x_{0}*x_{1}*...*x_{i}
$$
Args:
input (Tensor): the input tensor.
dim (int): the dimension to do cumprod whose valid range is [-N, N-1), and the N is tensor's dimensions
Returns:
oneflow.Tensor: The result tensor with cumprod result.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input=flow.tensor([1, 2, 3])
>>> flow.cumprod(input, dim=0)
tensor([1, 2, 6], dtype=oneflow.int64)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1112), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.abs', '"""Return the absolute value of each element in input tensor:math:`y = |x|` element-wise.\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([-1, 2, -3, 4]).astype(np.float32))\n >>> flow.abs(x)\n tensor([1., 2., 3., 4.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.abs,\n """Return the absolute value of each element in input tensor:math:`y = |x|` element-wise.\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([-1, 2, -3, 4]).astype(np.float32))\n >>> flow.abs(x)\n tensor([1., 2., 3., 4.], dtype=oneflow.float32)\n\n """\n )\n', (670, 1112), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1117, 2125), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.add', '"""Computes the addition of `input` by `other` for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. math::\n out = input + other\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise add\n >>> x = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar add\n >>> x = 5\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast add\n >>> x = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n """'], {}), '(oneflow.add,\n """Computes the addition of `input` by `other` for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. math::\n out = input + other\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise add\n >>> x = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar add\n >>> x = 5\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast add\n >>> x = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> y = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.add(x, y).numpy()\n >>> out.shape\n (2, 3)\n\n """\n )\n', (1127, 2125), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2130, 3031), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.floor', '"""\n Returns a new tensor with the arcsine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\lfloor \\\\text{input}_{i} \\\\rfloor\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([-0.5, 1.5, 0, 0.8]), dtype=flow.float32)\n >>> output = flow.floor(input)\n >>> output.shape\n oneflow.Size([4])\n >>> output.numpy()\n array([-1., 1., 0., 0.], dtype=float32)\n\n >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, 2.5]]), dtype=flow.float32)\n >>> output1 = input1.floor()\n >>> output1.shape\n oneflow.Size([2, 2])\n >>> output1.numpy()\n array([[ 0., 1.],\n [-1., 2.]], dtype=float32)\n\n """'], {}), '(oneflow.floor,\n """\n Returns a new tensor with the arcsine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\lfloor \\\\text{input}_{i} \\\\rfloor\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([-0.5, 1.5, 0, 0.8]), dtype=flow.float32)\n >>> output = flow.floor(input)\n >>> output.shape\n oneflow.Size([4])\n >>> output.numpy()\n array([-1., 1., 0., 0.], dtype=float32)\n\n >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, 2.5]]), dtype=flow.float32)\n >>> output1 = input1.floor()\n >>> output1.shape\n oneflow.Size([2, 2])\n >>> output1.numpy()\n array([[ 0., 1.],\n [-1., 2.]], dtype=float32)\n\n """\n )\n', (2140, 3031), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3035, 3125), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.floor_', '"""\n In-place version of :func:`oneflow.floor`\n\n """'], {}), '(oneflow.floor_,\n """\n In-place version of :func:`oneflow.floor`\n\n """)\n', (3045, 3125), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3135, 4327), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.div', '"""Computes the division of input by other for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. 
math::\n out = \\\\frac{input}{other}\n\n Args:\n input (Union[int, float, oneflow.Tensor]): input.\n other (Union[int, float, oneflow.Tensor]): other.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise divide\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar divide\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast divide\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """'], {}), '(oneflow.div,\n """Computes the division of input by other for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. math::\n out = \\\\frac{input}{other}\n\n Args:\n input (Union[int, float, oneflow.Tensor]): input.\n other (Union[int, float, oneflow.Tensor]): other.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise divide\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar divide\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast divide\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.div(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """\n )\n', (3145, 4327), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4331, 5436), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.mul', '"""Computes the multiplication of input by other for each element, scalar and broadcast promotation are supported.\n\n The formula is:\n\n .. math::\n \\\\text{out}_i = \\\\text{input}_i \\\\times \\\\text{other}_i\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise multiply\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar mutiply\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast mutiply\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """'], {}), '(oneflow.mul,\n """Computes the multiplication of input by other for each element, scalar and broadcast promotation are supported.\n\n The formula is:\n\n .. math::\n \\\\text{out}_i = \\\\text{input}_i \\\\times \\\\text{other}_i\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise multiply\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar mutiply\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast mutiply\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.mul(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """\n )\n', (4341, 5436), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5437, 5961), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.reciprocal', '"""Computes the safe reciprocal of x. If x is zero, the reciprocal will\n be also set to zero.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.tensor(np.array([[1, 2, 3], [4, 5, 6]]), dtype=flow.float32)\n >>> out = flow.reciprocal(x)\n >>> out.numpy()\n array([[1. , 0.5 , 0.33333334],\n [0.25 , 0.2 , 0.16666667]], dtype=float32)\n """'], {}), '(oneflow.reciprocal,\n """Computes the safe reciprocal of x. If x is zero, the reciprocal will\n be also set to zero.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.tensor(np.array([[1, 2, 3], [4, 5, 6]]), dtype=flow.float32)\n >>> out = flow.reciprocal(x)\n >>> out.numpy()\n array([[1. , 0.5 , 0.33333334],\n [0.25 , 0.2 , 0.16666667]], dtype=float32)\n """\n )\n', (5447, 5961), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5966, 7033), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sub', '"""Computes the subtraction of input by other for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. math::\n out = input - other\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise subtract\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar subtract\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast subtract\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """'], {}), '(oneflow.sub,\n """Computes the subtraction of input by other for each element, scalar and broadcast promotation are supported.\n The formula is:\n\n .. 
math::\n out = input - other\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n # element-wise subtract\n >>> input = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # scalar subtract\n >>> input = 5\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n # broadcast subtract\n >>> input = flow.tensor(np.random.randn(1,1), dtype=flow.float32)\n >>> other = flow.tensor(np.random.randn(2,3), dtype=flow.float32)\n >>> out = flow.sub(input,other).numpy()\n >>> out.shape\n (2, 3)\n\n """\n )\n', (5976, 7033), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7038, 7967), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.asin', '"""\n Returns a new tensor with the arcsine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sin^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)\n >>> output = flow.asin(input)\n >>> output.shape\n oneflow.Size([4])\n >>> output\n tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)\n >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)\n >>> output1 = input1.asin()\n >>> output1.shape\n oneflow.Size([2, 2])\n >>> output1\n tensor([[ 0.9273, 1.5708],\n [-0.6435, -1.5708]], dtype=oneflow.float32)\n """'], {}), '(oneflow.asin,\n """\n Returns a new tensor with the arcsine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sin^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)\n >>> output = flow.asin(input)\n >>> output.shape\n oneflow.Size([4])\n >>> output\n tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)\n >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)\n >>> output1 = input1.asin()\n >>> output1.shape\n oneflow.Size([2, 2])\n >>> output1\n tensor([[ 0.9273, 1.5708],\n [-0.6435, -1.5708]], dtype=oneflow.float32)\n """\n )\n', (7048, 7967), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7969, 8913), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.asinh', '"""\n Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sinh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)\n >>> output = flow.asinh(input)\n >>> output.shape\n oneflow.Size([3])\n >>> output\n tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)\n\n >>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)\n >>> output1 = input1.asinh()\n >>> output1.shape\n oneflow.Size([2, 3])\n >>> output1\n tensor([[-0.8814, 0.0000, -0.3900],\n [ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.asinh,\n """\n Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sinh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)\n >>> output = flow.asinh(input)\n >>> output.shape\n oneflow.Size([3])\n >>> output\n tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)\n\n >>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)\n >>> output1 = input1.asinh()\n >>> output1.shape\n oneflow.Size([2, 3])\n >>> output1\n tensor([[-0.8814, 0.0000, -0.3900],\n [ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)\n\n """\n )\n', (7979, 8913), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8915, 9446), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.atan', '"""\n Returns a new tensor with the arctangent of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tan^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)\n >>> output = flow.atan(input)\n >>> output.shape\n oneflow.Size([3])\n\n """'], {}), '(oneflow.atan,\n """\n Returns a new tensor with the arctangent of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tan^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)\n >>> output = flow.atan(input)\n >>> output.shape\n oneflow.Size([3])\n\n """\n )\n', (8925, 9446), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9448, 10909), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.ceil', '"""Returns a new tensor with the ceil of the elements of :attr:`input`,\n the smallest integer greater than or equal to each element.\n\n The equation is:\n\n .. math::\n \\\\text{out}_{i} = \\\\left\\\\lceil \\\\text{input}_{i} \\\\right\\\\rceil = \\\\left\\\\lfloor \\\\text{input}_{i} \\\\right\\\\rfloor + 1\n\n Args:\n input (oneflow.Tensor): A Tensor.\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([0.1, -2, 3.4]).astype(np.float32))\n >>> y = flow.ceil(x)\n >>> y.shape\n oneflow.Size([3])\n >>> y\n tensor([ 1., -2., 4.], dtype=oneflow.float32)\n >>> x = flow.tensor(np.array([[2.5, 4.6, 0.6],[7.8, 8.3, 9.2]]).astype(np.float32))\n >>> y = x.ceil()\n >>> y.shape\n oneflow.Size([2, 3])\n >>> y\n tensor([[ 3., 5., 1.],\n [ 8., 9., 10.]], dtype=oneflow.float32)\n >>> x = flow.tensor(np.array([[[2.2, 4.4, 6.5],[7.1, 8.2, 9.3]],[[10.6,11.2,12.2],[13.5,14.8,15.9]]]).astype(np.float32))\n >>> y = flow.ceil(x)\n >>> y.shape\n oneflow.Size([2, 2, 3])\n >>> y\n tensor([[[ 3., 5., 7.],\n [ 8., 9., 10.]],\n <BLANKLINE>\n [[11., 12., 13.],\n [14., 15., 16.]]], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.ceil,\n """Returns a new tensor with the ceil of the elements of :attr:`input`,\n the smallest integer greater than or equal to each element.\n\n The equation is:\n\n .. 
math::\n \\\\text{out}_{i} = \\\\left\\\\lceil \\\\text{input}_{i} \\\\right\\\\rceil = \\\\left\\\\lfloor \\\\text{input}_{i} \\\\right\\\\rfloor + 1\n\n Args:\n input (oneflow.Tensor): A Tensor.\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([0.1, -2, 3.4]).astype(np.float32))\n >>> y = flow.ceil(x)\n >>> y.shape\n oneflow.Size([3])\n >>> y\n tensor([ 1., -2., 4.], dtype=oneflow.float32)\n >>> x = flow.tensor(np.array([[2.5, 4.6, 0.6],[7.8, 8.3, 9.2]]).astype(np.float32))\n >>> y = x.ceil()\n >>> y.shape\n oneflow.Size([2, 3])\n >>> y\n tensor([[ 3., 5., 1.],\n [ 8., 9., 10.]], dtype=oneflow.float32)\n >>> x = flow.tensor(np.array([[[2.2, 4.4, 6.5],[7.1, 8.2, 9.3]],[[10.6,11.2,12.2],[13.5,14.8,15.9]]]).astype(np.float32))\n >>> y = flow.ceil(x)\n >>> y.shape\n oneflow.Size([2, 2, 3])\n >>> y\n tensor([[[ 3., 5., 7.],\n [ 8., 9., 10.]],\n <BLANKLINE>\n [[11., 12., 13.],\n [14., 15., 16.]]], dtype=oneflow.float32)\n\n """\n )\n', (9458, 10909), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10903, 11478), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.negative', '"""This operator computes the negative value of Tensor.\n\n Args:\n input (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> input = flow.tensor(\n ... np.array([1.0, -1.0, 2.3]).astype(np.float32), dtype=flow.float32\n ... )\n >>> out = flow.negative(input)\n >>> out\n tensor([-1.0000, 1.0000, -2.3000], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.negative,\n """This operator computes the negative value of Tensor.\n\n Args:\n input (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> input = flow.tensor(\n ... np.array([1.0, -1.0, 2.3]).astype(np.float32), dtype=flow.float32\n ... )\n >>> out = flow.negative(input)\n >>> out\n tensor([-1.0000, 1.0000, -2.3000], dtype=oneflow.float32)\n\n """\n )\n', (10913, 11478), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11483, 11962), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.log1p', '"""Returns a new tensor with the natural logarithm of (1 + input).\n\n .. math::\n \\\\text{out}_{i}=\\\\log_e(1+\\\\text{input}_{i})\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([1.3, 1.5, 2.7]), dtype=flow.float32)\n >>> out = flow.log1p(x)\n >>> out\n tensor([0.8329, 0.9163, 1.3083], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.log1p,\n """Returns a new tensor with the natural logarithm of (1 + input).\n\n .. math::\n \\\\text{out}_{i}=\\\\log_e(1+\\\\text{input}_{i})\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([1.3, 1.5, 2.7]), dtype=flow.float32)\n >>> out = flow.log1p(x)\n >>> out\n tensor([0.8329, 0.9163, 1.3083], dtype=oneflow.float32)\n\n """\n )\n', (11493, 11962), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11964, 12536), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.exp', '"""\n\n This operator computes the exponential of Tensor.\n\n The equation is:\n\n .. math::\n\n out = e^x\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32)\n >>> y = flow.exp(x)\n >>> y\n tensor([ 2.7183, 7.3891, 20.0855], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.exp,\n """\n\n This operator computes the exponential of Tensor.\n\n The equation is:\n\n .. math::\n\n out = e^x\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32)\n >>> y = flow.exp(x)\n >>> y\n tensor([ 2.7183, 7.3891, 20.0855], dtype=oneflow.float32)\n\n """\n )\n', (11974, 12536), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((12541, 13128), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.acos', '"""\n Returns a new tensor with the inverse cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\arccos(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([0.5, 0.6, 0.7])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.acos(input)\n >>> output\n tensor([1.0472, 0.9273, 0.7954], dtype=oneflow.float32)\n """'], {}), '(oneflow.acos,\n """\n Returns a new tensor with the inverse cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\arccos(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([0.5, 0.6, 0.7])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.acos(input)\n >>> output\n tensor([1.0472, 0.9273, 0.7954], dtype=oneflow.float32)\n """\n )\n', (12551, 13128), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((13130, 13928), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.acosh', '"""\n Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.\n\n .. math::\n\n \\\\text{out}_{i} = \\\\cosh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([2, 3, 4]).astype(np.float32))\n >>> out1 = flow.acosh(x1)\n >>> out1\n tensor([1.3170, 1.7627, 2.0634], dtype=oneflow.float32)\n >>> x2 = flow.tensor(np.array([1.5, 2.6, 3.7]).astype(np.float32),device=flow.device(\'cuda\'))\n >>> out2 = flow.acosh(x2)\n >>> out2\n tensor([0.9624, 1.6094, 1.9827], device=\'cuda:0\', dtype=oneflow.float32)\n """'], {}), '(oneflow.acosh,\n """\n Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.\n\n .. math::\n\n \\\\text{out}_{i} = \\\\cosh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([2, 3, 4]).astype(np.float32))\n >>> out1 = flow.acosh(x1)\n >>> out1\n tensor([1.3170, 1.7627, 2.0634], dtype=oneflow.float32)\n >>> x2 = flow.tensor(np.array([1.5, 2.6, 3.7]).astype(np.float32),device=flow.device(\'cuda\'))\n >>> out2 = flow.acosh(x2)\n >>> out2\n tensor([0.9624, 1.6094, 1.9827], device=\'cuda:0\', dtype=oneflow.float32)\n """\n )\n', (13140, 13928), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((13930, 14554), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.atanh', '"""Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tanh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)\n >>> input = flow.tensor(np_arr, dtype=flow.float32)\n >>> output = flow.atanh(input)\n >>> output\n tensor([0.5493, 0.6931, 0.8673], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.atanh,\n """Returns a new tensor with the inverse hyperbolic tangent of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tanh^{-1}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> np_arr = np.array([0.5, 0.6, 0.7]).astype(np.float32)\n >>> input = flow.tensor(np_arr, dtype=flow.float32)\n >>> output = flow.atanh(input)\n >>> output\n tensor([0.5493, 0.6931, 0.8673], dtype=oneflow.float32)\n\n """\n )\n', (13940, 14554), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((14556, 15253), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sign', '"""Computes the sign of Tensor.\n\n .. 
math::\n\n \\\\text{out}_{i} = \\\\text{sgn}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([-2, 0, 2]).astype(np.float32))\n >>> out1 = flow.sign(x1)\n >>> out1.numpy()\n array([-1., 0., 1.], dtype=float32)\n >>> x2 = flow.tensor(np.array([-3.2, -4.5, 5.8]).astype(np.float32),device=flow.device(\'cuda\'))\n >>> out2 = flow.sign(x2)\n >>> out2.numpy()\n array([-1., -1., 1.], dtype=float32)\n\n """'], {}), '(oneflow.sign,\n """Computes the sign of Tensor.\n\n .. math::\n\n \\\\text{out}_{i} = \\\\text{sgn}(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([-2, 0, 2]).astype(np.float32))\n >>> out1 = flow.sign(x1)\n >>> out1.numpy()\n array([-1., 0., 1.], dtype=float32)\n >>> x2 = flow.tensor(np.array([-3.2, -4.5, 5.8]).astype(np.float32),device=flow.device(\'cuda\'))\n >>> out2 = flow.sign(x2)\n >>> out2.numpy()\n array([-1., -1., 1.], dtype=float32)\n\n """\n )\n', (14566, 15253), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((15255, 16071), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sin', '"""Returns a new tensor with the sine of the elements of :attr:`input`.\n\n sin(x: Tensor) -> Tensor\n\n .. math::\n \\\\text{y}_{i} = \\\\sin(\\\\text{x}_{i})\n\n Args:\n x (Tensor): the input tensor.\n\n For example:\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32))\n >>> y1 = flow.sin(x1)\n >>> y1\n tensor([-0.5194, 0.1343, -0.4032, -0.2712], dtype=oneflow.float32)\n\n >>> x2 = flow.tensor(np.array([-1.4, 2.6, 3.7]).astype(np.float32), device=flow.device(\'cuda\'))\n >>> y2 = flow.sin(x2)\n >>> y2\n tensor([-0.9854, 0.5155, -0.5298], device=\'cuda:0\', dtype=oneflow.float32)\n\n """'], {}), '(oneflow.sin,\n """Returns a new tensor with the sine of the elements of :attr:`input`.\n\n sin(x: Tensor) -> Tensor\n\n .. math::\n \\\\text{y}_{i} = \\\\sin(\\\\text{x}_{i})\n\n Args:\n x (Tensor): the input tensor.\n\n For example:\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([-0.5461, 0.1347, -2.7266, -0.2746]).astype(np.float32))\n >>> y1 = flow.sin(x1)\n >>> y1\n tensor([-0.5194, 0.1343, -0.4032, -0.2712], dtype=oneflow.float32)\n\n >>> x2 = flow.tensor(np.array([-1.4, 2.6, 3.7]).astype(np.float32), device=flow.device(\'cuda\'))\n >>> y2 = flow.sin(x2)\n >>> y2\n tensor([-0.9854, 0.5155, -0.5298], device=\'cuda:0\', dtype=oneflow.float32)\n\n """\n )\n', (15265, 16071), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((16073, 16159), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sin_', '"""\n In-place version of :func:`oneflow.sin`\n\n """'], {}), '(oneflow.sin_,\n """\n In-place version of :func:`oneflow.sin`\n\n """)\n', (16083, 16159), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((16169, 17064), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sinh', '"""Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sinh(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x1 = flow.tensor(np.array([1, 2, 3]), dtype=flow.float32)\n >>> x2 = flow.tensor(np.array([1.53123589,0.54242598,0.15117185]), dtype=flow.float32)\n >>> x3 = flow.tensor(np.array([1,0,-1]), dtype=flow.float32)\n\n >>> flow.sinh(x1).numpy()\n array([ 1.1752012, 3.6268604, 10.017875 ], dtype=float32)\n >>> flow.sinh(x2).numpy()\n array([2.20381 , 0.5694193, 0.1517483], dtype=float32)\n >>> flow.sinh(x3).numpy()\n array([ 1.1752012, 0. , -1.1752012], dtype=float32)\n\n """'], {}), '(oneflow.sinh,\n """Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sinh(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x1 = flow.tensor(np.array([1, 2, 3]), dtype=flow.float32)\n >>> x2 = flow.tensor(np.array([1.53123589,0.54242598,0.15117185]), dtype=flow.float32)\n >>> x3 = flow.tensor(np.array([1,0,-1]), dtype=flow.float32)\n\n >>> flow.sinh(x1).numpy()\n array([ 1.1752012, 3.6268604, 10.017875 ], dtype=float32)\n >>> flow.sinh(x2).numpy()\n array([2.20381 , 0.5694193, 0.1517483], dtype=float32)\n >>> flow.sinh(x3).numpy()\n array([ 1.1752012, 0. , -1.1752012], dtype=float32)\n\n """\n )\n', (16179, 17064), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((17066, 17648), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.tan', '"""Returns the tan value of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tan(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> np_arr = np.array([-1/4*np.pi, 0, 1/4*np.pi]).astype(np.float32)\n >>> input = flow.tensor(np_arr, dtype=flow.float32)\n >>> output = flow.tan(input)\n >>> output\n tensor([-1., 0., 1.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.tan,\n """Returns the tan value of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\tan(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> np_arr = np.array([-1/4*np.pi, 0, 1/4*np.pi]).astype(np.float32)\n >>> input = flow.tensor(np_arr, dtype=flow.float32)\n >>> output = flow.tan(input)\n >>> output\n tensor([-1., 0., 1.], dtype=oneflow.float32)\n\n """\n )\n', (17076, 17648), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((17650, 18170), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.cos', '"""\n Returns a new tensor with the cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\cos(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.array([1.4309, 1.2706, -0.8562, 0.9796])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.cos(input).numpy()\n\n """'], {}), '(oneflow.cos,\n """\n Returns a new tensor with the cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\cos(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.array([1.4309, 1.2706, -0.8562, 0.9796])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.cos(input).numpy()\n\n """\n )\n', (17660, 18170), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((18172, 18801), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.cosh', '"""\n Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\cosh(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> arr = np.array([ 0.1632, 1.1835, -0.6979, -0.7325])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.cosh(input).numpy()\n >>> output\n array([1.0133467, 1.7859949, 1.2535787, 1.2804903], dtype=float32)\n\n """'], {}), '(oneflow.cosh,\n """\n Returns a new tensor with the hyperbolic cosine of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\cosh(\\\\text{input}_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> arr = np.array([ 0.1632, 1.1835, -0.6979, -0.7325])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.cosh(input).numpy()\n >>> output\n array([1.0133467, 1.7859949, 1.2535787, 1.2804903], dtype=float32)\n\n """\n )\n', (18182, 18801), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((18803, 20205), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.erf', '"""Computes the error function of each element. The error function is defined as follows:\n\n .. 
math::\n \\\\operatorname{erf}(x)=\\\\frac{2}{\\\\sqrt{\\\\pi}} \\\\int_{0}^{x} e^{-t^{2}} d t\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)\n >>> out = flow.erf(x)\n >>> out.shape\n oneflow.Size([3])\n >>> out.numpy()\n array([ 0. , -0.8427008, 1. ], dtype=float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)\n >>> out = flow.erf(x)\n >>> out.shape\n oneflow.Size([2, 3])\n >>> out.numpy()\n array([[ 0. , -0.8427008 , 1. ],\n [ 1. , 1. , 0.74210095]], dtype=float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8], [2, 3, 4]]), dtype=flow.float32)\n >>> out = x.erf()\n >>> out.shape\n oneflow.Size([3, 3])\n >>> out.numpy()\n array([[ 0. , -0.8427008 , 1. ],\n [ 1. , 1. , 0.74210095],\n [ 0.9953223 , 0.9999779 , 1. ]], dtype=float32)\n\n """'], {}), '(oneflow.erf,\n """Computes the error function of each element. The error function is defined as follows:\n\n .. math::\n \\\\operatorname{erf}(x)=\\\\frac{2}{\\\\sqrt{\\\\pi}} \\\\int_{0}^{x} e^{-t^{2}} d t\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)\n >>> out = flow.erf(x)\n >>> out.shape\n oneflow.Size([3])\n >>> out.numpy()\n array([ 0. , -0.8427008, 1. ], dtype=float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)\n >>> out = flow.erf(x)\n >>> out.shape\n oneflow.Size([2, 3])\n >>> out.numpy()\n array([[ 0. , -0.8427008 , 1. ],\n [ 1. , 1. 
, 0.74210095]], dtype=float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8], [2, 3, 4]]), dtype=flow.float32)\n >>> out = x.erf()\n >>> out.shape\n oneflow.Size([3, 3])\n >>> out.numpy()\n array([[ 0. , -0.8427008 , 1. ],\n [ 1. , 1. , 0.74210095],\n [ 0.9953223 , 0.9999779 , 1. ]], dtype=float32)\n\n """\n )\n', (18813, 20205), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((20205, 21167), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.erfc', '"""Computes the complementary error function of each element of input. The complementary error\n function is defined as follows:\n\n .. math::\n \\\\operatorname{erfc}(x)=1-\\\\frac{2}{\\\\sqrt{\\\\pi}} \\\\int_{0}^{x} e^{-t^{2}} d t\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)\n >>> out = flow.erfc(x)\n >>> out\n tensor([1.0000e+00, 1.8427e+00, 2.8026e-45], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)\n >>> out = flow.erfc(x)\n >>> out\n tensor([[1.0000e+00, 1.8427e+00, 2.8026e-45],\n [1.5375e-12, 4.1838e-23, 2.5790e-01]], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.erfc,\n """Computes the complementary error function of each element of input. The complementary error\n function is defined as follows:\n\n .. math::\n \\\\operatorname{erfc}(x)=1-\\\\frac{2}{\\\\sqrt{\\\\pi}} \\\\int_{0}^{x} e^{-t^{2}} d t\n\n Args:\n x (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([0, -1., 10.]), dtype=flow.float32)\n >>> out = flow.erfc(x)\n >>> out\n tensor([1.0000e+00, 1.8427e+00, 2.8026e-45], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([[0, -1., 10.], [5, 7, 0.8]]), dtype=flow.float32)\n >>> out = flow.erfc(x)\n >>> out\n tensor([[1.0000e+00, 1.8427e+00, 2.8026e-45],\n [1.5375e-12, 4.1838e-23, 2.5790e-01]], dtype=oneflow.float32)\n\n """\n )\n', (20215, 21167), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((21167, 22282), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.expm1', '"""Returns a new tensor with the exponential of the elements minus 1\n of :attr:`input`.\n\n\n The equation is:\n\n .. math::\n y_{i} = e^{x_{i}} - 1\n\n Args:\n input (oneflow.Tensor): A Tensor.\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32))\n >>> y = flow.expm1(x)\n >>> y.shape\n oneflow.Size([3])\n >>> y\n tensor([ 1.7183, 6.3891, 19.0855], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([[[2, 4, 6],[7, 8, 9]],[[10,11,12],[13,14,15]]]).astype(np.float32))\n >>> y = flow.expm1(x)\n >>> print(y.shape)\n oneflow.Size([2, 2, 3])\n >>> print(y.numpy())\n [[[6.3890562e+00 5.3598152e+01 4.0242880e+02]\n [1.0956332e+03 2.9799580e+03 8.1020840e+03]]\n <BLANKLINE>\n [[2.2025465e+04 5.9873141e+04 1.6275380e+05]\n [4.4241238e+05 1.2026032e+06 3.2690165e+06]]]\n\n\n """'], {}), '(oneflow.expm1,\n """Returns a new tensor with the exponential of the elements minus 1\n of :attr:`input`.\n\n\n The equation is:\n\n .. math::\n y_{i} = e^{x_{i}} - 1\n\n Args:\n input (oneflow.Tensor): A Tensor.\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.array([1, 2, 3]).astype(np.float32))\n >>> y = flow.expm1(x)\n >>> y.shape\n oneflow.Size([3])\n >>> y\n tensor([ 1.7183, 6.3891, 19.0855], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([[[2, 4, 6],[7, 8, 9]],[[10,11,12],[13,14,15]]]).astype(np.float32))\n >>> y = flow.expm1(x)\n >>> print(y.shape)\n oneflow.Size([2, 2, 3])\n >>> print(y.numpy())\n [[[6.3890562e+00 5.3598152e+01 4.0242880e+02]\n [1.0956332e+03 2.9799580e+03 8.1020840e+03]]\n <BLANKLINE>\n [[2.2025465e+04 5.9873141e+04 1.6275380e+05]\n [4.4241238e+05 1.2026032e+06 3.2690165e+06]]]\n\n\n """\n )\n', (21177, 22282), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((22287, 23333), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.fmod', '"""\n fmod(input, other, *, out=None) -> Tensor\n\n Computes the element-wise remainder of division.\n\n The dividend and divisor may contain both for integer and floating point\n numbers. 
The remainder has the same sign as the dividend :attr:`input`.\n\n Supports broadcasting to a common shape, integer and float inputs.\n\n\n Args:\n input (Tensor): the dividend\n other (Tensor or Scalar): the divisor\n\n Keyword args:\n out (Tensor, optional): the output tensor.\n\n Example::\n\n >>> import oneflow as flow\n >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3], dtype=flow.float32), 2.)\n tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)\n >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.], dtype=flow.float32), 1.5)\n tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)\n >>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))\n tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.fmod,\n """\n fmod(input, other, *, out=None) -> Tensor\n\n Computes the element-wise remainder of division.\n\n The dividend and divisor may contain both for integer and floating point\n numbers. The remainder has the same sign as the dividend :attr:`input`.\n\n Supports broadcasting to a common shape, integer and float inputs.\n\n\n Args:\n input (Tensor): the dividend\n other (Tensor or Scalar): the divisor\n\n Keyword args:\n out (Tensor, optional): the output tensor.\n\n Example::\n\n >>> import oneflow as flow\n >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3], dtype=flow.float32), 2.)\n tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)\n >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.], dtype=flow.float32), 1.5)\n tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)\n >>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))\n tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)\n\n """\n )\n', (22297, 23333), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((23338, 23826), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.log', '"""\n Returns a new tensor with the natural logarithm of the elements of :attr:`input`.\n\n .. 
math::\n y_{i} = \\\\log_{e} (x_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.random.randn(2, 3, 4, 5)\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.log(input)\n\n\n """'], {}), '(oneflow.log,\n """\n Returns a new tensor with the natural logarithm of the elements of :attr:`input`.\n\n .. math::\n y_{i} = \\\\log_{e} (x_{i})\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.random.randn(2, 3, 4, 5)\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.log(input)\n\n\n """\n )\n', (23348, 23826), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((23830, 24378), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.log2', '"""\n oneflow.log2(input) -> Tensor\n\n Returns a new tensor with the natural logarithm to the base 2 of the elements of :attr:`input`.\n \n .. math::\n y_{i} = \\\\log2_{e} (x_{i})\n\n Args:\n input (Tensor): the input tensor.\n \n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.random.randn(2, 3, 4, 5)\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.log2(input)\n\n\n """'], {}), '(oneflow.log2,\n """\n oneflow.log2(input) -> Tensor\n\n Returns a new tensor with the natural logarithm to the base 2 of the elements of :attr:`input`.\n \n .. math::\n y_{i} = \\\\log2_{e} (x_{i})\n\n Args:\n input (Tensor): the input tensor.\n \n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.random.randn(2, 3, 4, 5)\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.log2(input)\n\n\n """\n )\n', (23840, 24378), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((24382, 24961), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.minimum', '"""Computes the element-wise minimum of x and y.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> x = flow.tensor((1, 2, -1), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.minimum(x, y)\n tensor([ 1., 0., -1.], dtype=oneflow.float32)\n\n >>> x = flow.tensor((1,), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.minimum(x, y)\n tensor([1., 0., 1.], dtype=oneflow.float32)\n """'], {}), '(oneflow.minimum,\n """Computes the element-wise minimum of x and y.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> x = flow.tensor((1, 2, -1), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.minimum(x, y)\n tensor([ 1., 0., -1.], dtype=oneflow.float32)\n\n >>> x = flow.tensor((1,), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.minimum(x, y)\n tensor([1., 0., 1.], dtype=oneflow.float32)\n """\n )\n', (24392, 24961), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((24966, 25542), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.maximum', '"""Computes the element-wise maximum of x and y.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> x = flow.tensor((1, 2, -1), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.maximum(x, y)\n tensor([3., 2., 4.], dtype=oneflow.float32)\n\n >>> x = flow.tensor((1,), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.maximum(x, y)\n tensor([3., 1., 4.], dtype=oneflow.float32)\n """'], {}), '(oneflow.maximum,\n """Computes the element-wise maximum of x and y.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> x = flow.tensor((1, 2, -1), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.maximum(x, y)\n tensor([3., 2., 4.], dtype=oneflow.float32)\n\n >>> x = flow.tensor((1,), dtype=flow.float32)\n >>> y = flow.tensor((3, 0, 4), dtype=flow.float32)\n >>> flow.maximum(x, y)\n tensor([3., 1., 4.], dtype=oneflow.float32)\n """\n )\n', (24976, 25542), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((25547, 26856), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.pow', '"""Takes the power of each element in input with exponent and returns a tensor with the result. Exponent can be either a single float number, a single int number, or a tensor with the same shape as input.\n When exponent is a scalar value, the operation applied is:\n\n .. math::\n \\\\\\\\text{out}_i = x_i ^ \\\\\\\\text{exponent}\n\\\\u200b\n When exponent is a tensor, the operation applied is:\n\n .. math::\n \\\\\\\\text{out}_i = x_i ^ {\\\\\\\\text{exponent}_i}\n\n Args:\n - input (Tensor): the input tensor.\n - exponent (int, float, Tensor): the exponent.\n\n Returns:\n Tensor: The result of variance on the specified axis of input Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), dtype=flow.float32)\n >>> out = flow.pow(x, 2)\n >>> out\n tensor([ 1., 4., 9., 16., 25., 36.], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)\n >>> y = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)\n >>> out = flow.pow(x, y)\n >>> out\n tensor([ 1., 4., 27., 256.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.pow,\n """Takes the power of each element in input with exponent and returns a tensor with the result. Exponent can be either a single float number, a single int number, or a tensor with the same shape as input.\n When exponent is a scalar value, the operation applied is:\n\n .. math::\n \\\\\\\\text{out}_i = x_i ^ \\\\\\\\text{exponent}\n\\\\u200b\n When exponent is a tensor, the operation applied is:\n\n .. math::\n \\\\\\\\text{out}_i = x_i ^ {\\\\\\\\text{exponent}_i}\n\n Args:\n - input (Tensor): the input tensor.\n - exponent (int, float, Tensor): the exponent.\n\n Returns:\n Tensor: The result of variance on the specified axis of input Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0]), dtype=flow.float32)\n >>> out = flow.pow(x, 2)\n >>> out\n tensor([ 1., 4., 9., 16., 25., 36.], dtype=oneflow.float32)\n\n >>> x = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)\n >>> y = flow.tensor(np.array([1.0, 2.0, 3.0, 4.0]), dtype=flow.float32)\n >>> out = flow.pow(x, y)\n >>> out\n tensor([ 1., 4., 27., 256.], dtype=oneflow.float32)\n\n """\n )\n', (25557, 26856), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((26852, 27500), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.rsqrt', '"""Returns a new tensor with the reciprocal of the square-root of each of\n the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\frac{1}{\\\\sqrt{\\\\text{input}_{i}}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> a = flow.tensor(np.array([1.0, 2.0, 3.0]), dtype=flow.float32)\n >>> out = flow.rsqrt(a).numpy()\n >>> out\n array([1. , 0.70710677, 0.57735026], dtype=float32)\n """'], {}), '(oneflow.rsqrt,\n """Returns a new tensor with the reciprocal of the square-root of each of\n the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\frac{1}{\\\\sqrt{\\\\text{input}_{i}}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> a = flow.tensor(np.array([1.0, 2.0, 3.0]), dtype=flow.float32)\n >>> out = flow.rsqrt(a).numpy()\n >>> out\n array([1. 
, 0.70710677, 0.57735026], dtype=float32)\n """\n )\n', (26862, 27500), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((27501, 28143), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sqrt', '"""Returns a new tensor with the square-root of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sqrt{\\\\text{input}_{i}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.sqrt(input).numpy()\n >>> output\n array([1. , 1.4142135, 1.7320508], dtype=float32)\n """'], {}), '(oneflow.sqrt,\n """Returns a new tensor with the square-root of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sqrt{\\\\text{input}_{i}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.sqrt(input).numpy()\n >>> output\n array([1. , 1.4142135, 1.7320508], dtype=float32)\n """\n )\n', (27511, 28143), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((28146, 28766), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.square', '"""Returns a new tensor with the square of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sqrt{\\\\text{input}_{i}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.square(input).numpy()\n >>> output\n array([1., 4., 9.], dtype=float32)\n """'], {}), '(oneflow.square,\n """Returns a new tensor with the square of the elements of :attr:`input`.\n\n .. math::\n \\\\text{out}_{i} = \\\\sqrt{\\\\text{input}_{i}}\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr, dtype=flow.float32)\n >>> output = flow.square(input).numpy()\n >>> output\n array([1., 4., 9.], dtype=float32)\n """\n )\n', (28156, 28766), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((28768, 29425), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.matmul', '"""\n matmul(input, other) -> Tensor\n\n This operator applies matrix multiplication to two Tensor.\n\n Args:\n a (oneflow.Tensor): A Tensor\n b (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input1 = flow.tensor(np.random.randn(2, 6), dtype=flow.float32)\n >>> input2 = flow.tensor(np.random.randn(6, 5), dtype=flow.float32)\n >>> of_out = flow.matmul(input1, input2)\n >>> of_out.shape\n oneflow.Size([2, 5])\n\n """'], {}), '(oneflow.matmul,\n """\n matmul(input, other) -> Tensor\n\n This operator applies matrix multiplication to two Tensor.\n\n Args:\n a (oneflow.Tensor): A Tensor\n b (oneflow.Tensor): A Tensor\n\n Returns:\n oneflow.Tensor: The result Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> input1 = flow.tensor(np.random.randn(2, 6), dtype=flow.float32)\n >>> input2 = flow.tensor(np.random.randn(6, 5), dtype=flow.float32)\n >>> of_out = flow.matmul(input1, input2)\n >>> of_out.shape\n oneflow.Size([2, 5])\n\n """\n )\n', (28778, 29425), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((29430, 30141), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.round', '"""This operator rounds the value of Blob to the nearest integer.\n Args:\n input (oneflow.Tensor): A Tensor\n Returns:\n oneflow.Tensor: The result Tensor\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([1.49999, 1.500001, 2.7]).astype(np.float32))\n >>> out1 = flow.round(x1)\n >>> out1.numpy()\n array([1., 2., 3.], dtype=float32)\n >>> x2 = flow.tensor(np.array([2.499999, 7.5000001, 5.3, 6.8]).astype(np.float32))\n >>> out2 = flow.round(x2)\n >>> out2.numpy()\n array([2., 8., 5., 7.], dtype=float32)\n\n """'], {}), '(oneflow.round,\n """This operator rounds the value of Blob to the nearest integer.\n Args:\n input (oneflow.Tensor): A Tensor\n Returns:\n oneflow.Tensor: The result Tensor\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x1 = flow.tensor(np.array([1.49999, 1.500001, 2.7]).astype(np.float32))\n >>> out1 = flow.round(x1)\n >>> out1.numpy()\n array([1., 2., 3.], dtype=float32)\n >>> x2 = flow.tensor(np.array([2.499999, 7.5000001, 5.3, 6.8]).astype(np.float32))\n >>> out2 = flow.round(x2)\n >>> out2.numpy()\n array([2., 8., 5., 7.], dtype=float32)\n\n """\n )\n', (29440, 30141), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((30146, 31334), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.std', '"""\n Returns the standard-deviation of each row of the :attr:`input` tensor in the\n dimension :attr:`dim`. If :attr:`dim` is a list of dimensions,\n reduce over all of them.\n\n If keepdim is True, the output tensor is of the same size as input except in\n the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed,\n resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n\n If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated\n via the biased estimator. Otherwise, Bessel\'s correction will be used.\n\n Args:\n input (Tensor): the input tensor.\n dim (int or tuple of python:ints): the dimension or dimensions to reduce.\n unbiased (bool): whether to use the unbiased estimation or not\n keepdim (bool): whether the output tensor has `dim` retained or not.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr)\n >>> output = flow.std(input, dim=0).numpy()\n >>> output\n array(1.)\n\n """'], {}), '(oneflow.std,\n """\n Returns the standard-deviation of each row of the :attr:`input` tensor in the\n dimension :attr:`dim`. 
If :attr:`dim` is a list of dimensions,\n reduce over all of them.\n\n If keepdim is True, the output tensor is of the same size as input except in\n the dimension(s) dim where it is of size 1. Otherwise, dim is squeezed,\n resulting in the output tensor having 1 (or len(dim)) fewer dimension(s).\n\n If :attr:`unbiased` is ``False``, then the standard-deviation will be calculated\n via the biased estimator. Otherwise, Bessel\'s correction will be used.\n\n Args:\n input (Tensor): the input tensor.\n dim (int or tuple of python:ints): the dimension or dimensions to reduce.\n unbiased (bool): whether to use the unbiased estimation or not\n keepdim (bool): whether the output tensor has `dim` retained or not.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> arr = np.array([1.0, 2.0, 3.0])\n >>> input = flow.tensor(arr)\n >>> output = flow.std(input, dim=0).numpy()\n >>> output\n array(1.)\n\n """\n )\n', (30156, 31334), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((31339, 32420), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.var', '"""Returns the variance of each row of the `input` tensor in the given dimension `dim`.\n\n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim`\n where it is of size 1. Otherwise, dim is squeezed (see `flow.squeeze()`), resulting in the output\n tensor having 1 (or `len(dim)`) fewer dimension(s).\n\n Args:\n input (Tensor): the input tensor.\n dim (int or tuple of python:ints): the dimension or dimensions to reduce. Defaults to None.\n unbiased (bool, optional): whether to use Bessel’s correction (:math:`\\\\delta N = 1`). Defaults to True.\n keepdim (bool, optional): whether the output tensor has dim retained or not. Defaults to False.\n\n Returns:\n Tensor: The result of variance on the specified axis of input Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> input = flow.tensor(np.random.randn(2, 3, 4, 5))\n >>> output = flow.var(input, 1, True)\n\n """'], {}), '(oneflow.var,\n """Returns the variance of each row of the `input` tensor in the given dimension `dim`.\n\n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim`\n where it is of size 1. Otherwise, dim is squeezed (see `flow.squeeze()`), resulting in the output\n tensor having 1 (or `len(dim)`) fewer dimension(s).\n\n Args:\n input (Tensor): the input tensor.\n dim (int or tuple of python:ints): the dimension or dimensions to reduce. Defaults to None.\n unbiased (bool, optional): whether to use Bessel’s correction (:math:`\\\\delta N = 1`). Defaults to True.\n keepdim (bool, optional): whether the output tensor has dim retained or not. Defaults to False.\n\n Returns:\n Tensor: The result of variance on the specified axis of input Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> input = flow.tensor(np.random.randn(2, 3, 4, 5))\n >>> output = flow.var(input, 1, True)\n\n """\n )\n', (31349, 32420), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((32424, 33054), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.logical_not', '"""\n Computes the element-wise logical NOT of the given input tensors.\n Zeros are treated as False and nonzeros are treated as True.\n Args:\n input (oneflow.Tensor): The input Tensor\n other (oneflow.Tensor): The Tensor to compute NOT with\n\n Returns:\n oneflow.Tensor: The output Tensor\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.tensor([1, 0, -1], dtype=flow.float32)\n >>> out = flow.logical_not(input)\n >>> out\n tensor([False, True, False], dtype=oneflow.bool)\n\n """'], {}), '(oneflow.logical_not,\n """\n Computes the element-wise logical NOT of the given input tensors.\n Zeros are treated as False and nonzeros are treated as True.\n Args:\n input (oneflow.Tensor): The input Tensor\n other (oneflow.Tensor): The Tensor to compute NOT with\n\n Returns:\n oneflow.Tensor: The output Tensor\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.tensor([1, 0, -1], dtype=flow.float32)\n >>> out = flow.logical_not(input)\n >>> out\n tensor([False, True, False], dtype=oneflow.bool)\n\n """\n )\n', (32434, 33054), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((33059, 33637), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.dot', '"""This operator computes the dot product of tensor input and other.\n\n The equation is:\n\n\t$$\n \\\\\\\\sum_{i=1}^{n}(x[i] * y[i])\n\t$$\n\n Args:\n input (Tensor): first tensor in the dot product.\n other (Tensor): second tensor in the dot product.\n\n Shape:\n - input: Input must be 1D.\n - other: Other must be 1D.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> flow.dot(flow.Tensor([2, 3]), flow.Tensor([2, 1]))\n tensor(7., dtype=oneflow.float32)\n\n """'], {}), '(oneflow.dot,\n """This operator computes the dot product of tensor input and other.\n\n The equation is:\n\n\t$$\n \\\\\\\\sum_{i=1}^{n}(x[i] * y[i])\n\t$$\n\n Args:\n input (Tensor): first tensor in the dot product.\n other (Tensor): second tensor in the dot product.\n\n Shape:\n - input: Input must be 1D.\n - other: Other must be 1D.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> flow.dot(flow.Tensor([2, 3]), flow.Tensor([2, 1]))\n tensor(7., dtype=oneflow.float32)\n\n """\n )\n', (33069, 33637), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((33640, 34377), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.select', '"""\n Slices the self tensor along the selected dimension at the given index. This function returns \n a view of the original tensor with the given dimension removed.\n\n Args:\n input (Tensor): the input tensor.\n dim (int): the dimension to slice.\n select (int): the index to select with.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n \n .. code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.rand(3, 4, 5)\n >>> out = flow.select(input, 0, 1)\n >>> out.size()\n oneflow.Size([4, 5])\n >>> out = flow.select(input, 1, 1)\n >>> out.size()\n oneflow.Size([3, 5])\n """'], {}), '(oneflow.select,\n """\n Slices the self tensor along the selected dimension at the given index. This function returns \n a view of the original tensor with the given dimension removed.\n\n Args:\n input (Tensor): the input tensor.\n dim (int): the dimension to slice.\n select (int): the index to select with.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n \n .. 
code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.rand(3, 4, 5)\n >>> out = flow.select(input, 0, 1)\n >>> out.size()\n oneflow.Size([4, 5])\n >>> out = flow.select(input, 1, 1)\n >>> out.size()\n oneflow.Size([3, 5])\n """\n )\n', (33650, 34377), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((34382, 35543), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.movedim', '"""\n Moves the dimension(s) of input at the position(s) in source to the position(s) in destination.\n Other dimensions of input that are not explicitly moved remain in their original order and appear at the positions not specified in destination.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.movedim.html#torch.movedim.\n\n Args:\n input (Tensor): the input tensor.\n source (int or a list): Original positions of the dims to move. These must be unique.\n destination (int or a list): Destination positions for each of the original dims. These must also be unique.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> input = flow.tensor(np.random.randn(2, 3, 4, 5), dtype=flow.float32)\n >>> output = flow.movedim(input, 1, 0)\n >>> output.shape\n oneflow.Size([3, 2, 4, 5])\n >>> output = flow.movedim(input, (1, 2), (0, 1))\n >>> output.shape\n oneflow.Size([3, 4, 2, 5])\n """'], {}), '(oneflow.movedim,\n """\n Moves the dimension(s) of input at the position(s) in source to the position(s) in destination.\n Other dimensions of input that are not explicitly moved remain in their original order and appear at the positions not specified in destination.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.movedim.html#torch.movedim.\n\n Args:\n input (Tensor): the input tensor.\n source (int or a list): Original positions of the dims to move. 
These must be unique.\n destination (int or a list): Destination positions for each of the original dims. These must also be unique.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> input = flow.tensor(np.random.randn(2, 3, 4, 5), dtype=flow.float32)\n >>> output = flow.movedim(input, 1, 0)\n >>> output.shape\n oneflow.Size([3, 2, 4, 5])\n >>> output = flow.movedim(input, (1, 2), (0, 1))\n >>> output.shape\n oneflow.Size([3, 4, 2, 5])\n """\n )\n', (34392, 35543), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((35548, 36398), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.as_strided', '"""\n Create a view of an existing oneflow.Tensor input with specified size, stride and storage_offset.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.as_strided.html#torch.as_strided.\n\n Args:\n input (Tensor): the input tensor.\n size (tuple or ints): the shape of the output tensor.\n stride (tuple or ints): the stride of the output tensor.\n storage_offset (int): the offset in the underlying storage of the output tensor\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(2,3,5)\n >>> output = flow.as_strided(input, (2,3,3), (1,2,3), 1)\n >>> output.size()\n oneflow.Size([2, 3, 3])\n """'], {}), '(oneflow.as_strided,\n """\n Create a view of an existing oneflow.Tensor input with specified size, stride and storage_offset.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.as_strided.html#torch.as_strided.\n\n Args:\n input (Tensor): the input tensor.\n size (tuple or ints): the shape of the output tensor.\n stride (tuple or ints): the stride of the output tensor.\n storage_offset (int): the offset in the underlying storage of the output tensor\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(2,3,5)\n >>> output = flow.as_strided(input, (2,3,3), (1,2,3), 1)\n >>> output.size()\n oneflow.Size([2, 3, 3])\n """\n )\n', (35558, 36398), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((36403, 37549), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.addcmul', '"""\n oneflow.addcmul(input, tensor1, tensor2, *, value=1) -> Tensor\n\n Performs the element-wise multiplication of tensor1 by tensor2, multiply the result\n by the scalar value and add it to input.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.addcmul.html\n \n .. math::\n \\\\text{out}_i = \\\\text{input}_i + value \\\\times\\\\ \\\\text{tensor1}_i \\\\times\\\\ \\\\text{tensor2}_i\n \n Args:\n input (Tensor): the tensor to be added.\n tensor1 (Tensor): the tensor to be multiplied.\n tensor2 (Tensor): the tensor to be multiplied.\n \n Keyword args:\n value (Number, optional): multiplier for :math:`tensor1 * tensor2`.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.rand(2, 3, 4)\n >>> tensor1 = flow.rand(2, 3, 4)\n >>> tensor2 = flow.rand(2, 3, 4)\n >>> out = flow.addcmul(input, tensor1, tensor2, value=2)\n >>> out.size()\n oneflow.Size([2, 3, 4])\n """'], {}), '(oneflow.addcmul,\n """\n oneflow.addcmul(input, tensor1, tensor2, *, value=1) -> Tensor\n\n Performs the element-wise multiplication of tensor1 by tensor2, multiply the result\n by the scalar value and add it to input.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.addcmul.html\n \n .. math::\n \\\\text{out}_i = \\\\text{input}_i + value \\\\times\\\\ \\\\text{tensor1}_i \\\\times\\\\ \\\\text{tensor2}_i\n \n Args:\n input (Tensor): the tensor to be added.\n tensor1 (Tensor): the tensor to be multiplied.\n tensor2 (Tensor): the tensor to be multiplied.\n \n Keyword args:\n value (Number, optional): multiplier for :math:`tensor1 * tensor2`.\n\n Returns:\n oneflow.Tensor: the output Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.rand(2, 3, 4)\n >>> tensor1 = flow.rand(2, 3, 4)\n >>> tensor2 = flow.rand(2, 3, 4)\n >>> out = flow.addcmul(input, tensor1, tensor2, value=2)\n >>> out.size()\n oneflow.Size([2, 3, 4])\n """\n )\n', (36413, 37549), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((37546, 39062), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.eye', '"""oneflow.eye(n, m, *, device=None, requires_grad=False, placement=None, sbp) -> Tensor\n\n This operator creates a 2-D Tensor with ones on the diagonal and zeros elsewhere.\n\n Args:\n n (int): the number of rows.\n m (int, optional): the number of colums with default being n. Defaults to None.\n\n Keyword args:\n device(Union[flow.device, str], optional): the desired device of returned tensor. 
Default: if None, uses the current device for the default tensor.\n requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.\n placement(oneflow._oneflow_internal.placement, optional): The placement attribute allows you to specify which physical device the tensor is stored on.\n sbp(Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]], optional): When creating a global tensor, specify the SBP of the tensor.\n\n Returns:\n oneflow.Tensor: The result tensor with ones on the diagonal and zeros elsewhere.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> out = flow.eye(3, 3)\n >>> out\n tensor([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=oneflow.float32)\n >>> out = flow.eye(3, 3, device="cuda")\n >>> out\n tensor([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], device=\'cuda:0\', dtype=oneflow.float32)\n """'], {}), '(oneflow.eye,\n """oneflow.eye(n, m, *, device=None, requires_grad=False, placement=None, sbp) -> Tensor\n\n This operator creates a 2-D Tensor with ones on the diagonal and zeros elsewhere.\n\n Args:\n n (int): the number of rows.\n m (int, optional): the number of colums with default being n. Defaults to None.\n\n Keyword args:\n device(Union[flow.device, str], optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.\n requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.\n placement(oneflow._oneflow_internal.placement, optional): The placement attribute allows you to specify which physical device the tensor is stored on.\n sbp(Union[oneflow._oneflow_internal.sbp.sbp, List[oneflow._oneflow_internal.sbp.sbp]], optional): When creating a global tensor, specify the SBP of the tensor.\n\n Returns:\n oneflow.Tensor: The result tensor with ones on the diagonal and zeros elsewhere.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> out = flow.eye(3, 3)\n >>> out\n tensor([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], dtype=oneflow.float32)\n >>> out = flow.eye(3, 3, device="cuda")\n >>> out\n tensor([[1., 0., 0.],\n [0., 1., 0.],\n [0., 0., 1.]], device=\'cuda:0\', dtype=oneflow.float32)\n """\n )\n', (37556, 39062), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((39066, 40865), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.tensor_split', '"""\n Splits a tensor into multiple sub-tensors, all of which are views of input, along dimension\n dim according to the indices or number of sections specified by indices_or_sections .\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.tensor_split.html#torch.tensor_split\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n dim (int): dimension along which to split the tensor.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5)\n >>> output = flow.tensor_split(input,(2,3),2)\n >>> output[0].size()\n oneflow.Size([3, 4, 2])\n >>> output[1].size()\n oneflow.Size([3, 4, 1])\n >>> output[2].size()\n oneflow.Size([3, 4, 2])\n """'], {}), '(oneflow.tensor_split,\n """\n Splits a tensor into multiple sub-tensors, all of which are views of input, along dimension\n dim according to the indices or number of sections specified by indices_or_sections .\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.tensor_split.html#torch.tensor_split\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n dim (int): dimension along which to split the tensor.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5)\n >>> output = flow.tensor_split(input,(2,3),2)\n >>> output[0].size()\n oneflow.Size([3, 4, 2])\n >>> output[1].size()\n oneflow.Size([3, 4, 1])\n >>> output[2].size()\n oneflow.Size([3, 4, 2])\n """\n )\n', (39076, 40865), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((40870, 43006), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.hsplit', '"""\n Splits input, a tensor with one or more dimensions, into multiple tensors horizontally according to indices_or_sections.\n Each split is a view of input.\n If input is one dimensional this is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0) \n (the split dimension is zero), and if input has two or more dimensions it’s equivalent to calling \n oneflow.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), except that if indices_or_sections\n is an integer it must evenly divide the split dimension or a runtime error will be thrown.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.hsplit.html#torch.hsplit\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. 
For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5,6)\n >>> output = flow.hsplit(input,(1,3))\n >>> output[0].size()\n oneflow.Size([3, 1, 5, 6])\n >>> output[1].size()\n oneflow.Size([3, 2, 5, 6])\n >>> output[2].size()\n oneflow.Size([3, 1, 5, 6])\n """'], {}), '(oneflow.hsplit,\n """\n Splits input, a tensor with one or more dimensions, into multiple tensors horizontally according to indices_or_sections.\n Each split is a view of input.\n If input is one dimensional this is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0) \n (the split dimension is zero), and if input has two or more dimensions it’s equivalent to calling \n oneflow.tensor_split(input, indices_or_sections, dim=1) (the split dimension is 1), except that if indices_or_sections\n is an integer it must evenly divide the split dimension or a runtime error will be thrown.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.hsplit.html#torch.hsplit\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. 
For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5,6)\n >>> output = flow.hsplit(input,(1,3))\n >>> output[0].size()\n oneflow.Size([3, 1, 5, 6])\n >>> output[1].size()\n oneflow.Size([3, 2, 5, 6])\n >>> output[2].size()\n oneflow.Size([3, 1, 5, 6])\n """\n )\n', (40880, 43006), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((43011, 44953), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.vsplit', '"""\n Splits input, a tensor with two or more dimensions, into multiple tensors vertically according to indices_or_sections.\n Each split is a view of input.\n This is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0) (the split dimension is 0),\n except that if indices_or_sections is an integer it must evenly divide the split dimension or a runtime error will be thrown.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.vsplit.html#torch.vsplit\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. 
For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5,6)\n >>> output = flow.vsplit(input,(1,3))\n >>> output[0].size()\n oneflow.Size([1, 4, 5, 6])\n >>> output[1].size()\n oneflow.Size([2, 4, 5, 6])\n >>> output[2].size()\n oneflow.Size([1, 4, 5, 6])\n """'], {}), '(oneflow.vsplit,\n """\n Splits input, a tensor with two or more dimensions, into multiple tensors vertically according to indices_or_sections.\n Each split is a view of input.\n This is equivalent to calling oneflow.tensor_split(input, indices_or_sections, dim=0) (the split dimension is 0),\n except that if indices_or_sections is an integer it must evenly divide the split dimension or a runtime error will be thrown.\n The documentation is referenced from:\n https://pytorch.org/docs/stable/generated/torch.vsplit.html#torch.vsplit\n\n Args:\n input (Tensor): the input tensor.\n indices_or_sections (int or a list): If indices_or_sections is an integer n , input is split into n sections \n along dimension dim.If input is divisible by n along dimension dim, each section will be of equal size, \n input.size (dim) / n. If input is not divisible by n, the sizes of the first int(input.size(dim) % n).\n sections will have size int(input.size(dim) / n) + 1, and the rest will have size int(input.size(dim) / n).\n If indices_or_sections is a list or tuple of ints, then input is split along dimension dim at each of the indices in \n the list, tuple or tensor. 
For instance, indices_or_sections=[2, 3] and dim=0 would result in the tensors \n input[:2], input[2:3], and input[3:].If indices_or_sections is a tensor, it must be a zero-dimensional or\n one-dimensional long tensor on the CPU.\n\n Returns:\n oneflow.TensorTuple: the output TensorTuple.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.rand(3,4,5,6)\n >>> output = flow.vsplit(input,(1,3))\n >>> output[0].size()\n oneflow.Size([1, 4, 5, 6])\n >>> output[1].size()\n oneflow.Size([2, 4, 5, 6])\n >>> output[2].size()\n oneflow.Size([1, 4, 5, 6])\n """\n )\n', (43021, 44953), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((44958, 45722), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.cumsum', '"""oneflow.cumsum(input, dim) -> Tensor\n \n This operator computes the cumulative sum of input elements in the given dimension.\n\n The equation is:\n\n\t$$\n y_{i}=x_{0}+x_{1}+...+x_{i}\n\t$$\n\n Args:\n input (Tensor): the input ND tensor.\n dim (int): the dimension to do cumsum, valid range is [-N, N-1), N is tensor\'s dimensions\n\n Returns:\n oneflow.Tensor: The result tensor with cumsum result.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> input = flow.ones(3, 3)\n >>> dim = 1\n >>> flow.cumsum(input, dim)\n tensor([[1., 2., 3.],\n [1., 2., 3.],\n [1., 2., 3.]], dtype=oneflow.float32)\n """'], {}), '(oneflow.cumsum,\n """oneflow.cumsum(input, dim) -> Tensor\n \n This operator computes the cumulative sum of input elements in the given dimension.\n\n The equation is:\n\n\t$$\n y_{i}=x_{0}+x_{1}+...+x_{i}\n\t$$\n\n Args:\n input (Tensor): the input ND tensor.\n dim (int): the dimension to do cumsum, valid range is [-N, N-1), N is tensor\'s dimensions\n\n Returns:\n oneflow.Tensor: The result tensor with cumsum result.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> input = flow.ones(3, 3)\n >>> dim = 1\n >>> flow.cumsum(input, dim)\n tensor([[1., 2., 3.],\n [1., 2., 3.],\n [1., 2., 3.]], dtype=oneflow.float32)\n """\n )\n', (44968, 45722), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((45727, 46425), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.cumprod', '"""oneflow.cumprod(input, dim) -> Tensor\n\n This operator computes the cumulative product of input elements in the given dimension.\n\n The equation is:\n\n\t$$\n y_{i}=x_{0}*x_{1}*...*x_{i}\n\t$$\n\n Args:\n input (Tensor): the input tensor.\n dim (int): the dimension to do cumsum whose valid range is [-N, N-1), and the N is tensor\'s dimensions\n\n Returns:\n oneflow.Tensor: The result tensor with cumprod result.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> input=flow.tensor([1, 2, 3])\n >>> flow.cumprod(input, dim=0)\n tensor([1, 2, 6], dtype=oneflow.int64)\n """'], {}), '(oneflow.cumprod,\n """oneflow.cumprod(input, dim) -> Tensor\n\n This operator computes the cumulative product of input elements in the given dimension.\n\n The equation is:\n\n\t$$\n y_{i}=x_{0}*x_{1}*...*x_{i}\n\t$$\n\n Args:\n input (Tensor): the input tensor.\n dim (int): the dimension to do cumsum whose valid range is [-N, N-1), and the N is tensor\'s dimensions\n\n Returns:\n oneflow.Tensor: The result tensor with cumprod result.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> input=flow.tensor([1, 2, 3])\n >>> flow.cumprod(input, dim=0)\n tensor([1, 2, 6], dtype=oneflow.int64)\n """\n )\n', (45737, 46425), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
import numpy as np
import oneflow as flow
class Accuracy(flow.nn.Module):
    """Top-1 accuracy metric.

    Given parallel sequences of prediction tensors and label tensors,
    counts how many argmax predictions match their labels and returns
    the fraction of correct predictions as a tensor.
    """

    def __init__(self):
        super().__init__()

    def forward(self, preds, labels):
        # Running count of correct top-1 predictions, kept as a tensor so
        # the final division produces a tensor result.
        correct = flow.zeros(1, dtype=flow.float32)
        total = 0
        for pred, label in zip(preds, labels):
            # Predicted class index along the last dimension, cast to the
            # labels' integer dtype before comparison.
            predicted = pred.argmax(dim=-1).to(flow.int32)
            hits = (predicted == label).sum()
            correct += hits.to(device=correct.device, dtype=correct.dtype)
            total += np.prod(label.shape).item()
        return correct / total
| [
"oneflow.zeros"
] | [((185, 218), 'oneflow.zeros', 'flow.zeros', (['(1)'], {'dtype': 'flow.float32'}), '(1, dtype=flow.float32)\n', (195, 218), True, 'import oneflow as flow\n'), ((528, 548), 'numpy.prod', 'np.prod', (['label.shape'], {}), '(label.shape)\n', (535, 548), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# ViT Model
# References:
# moco-v3: https://github.com/facebookresearch/moco-v3/blob/main/vits.py
# --------------------------------------------------------
import math
from functools import reduce
from operator import mul
import oneflow as flow
import oneflow.nn as nn
from flowvision.layers.weight_init import trunc_normal_
from utils.load_checkpoint import load_checkpoint
from libai.layers import Linear, PatchEmbedding
from libai.models import vision_transformer
class VisionTransformer(vision_transformer.VisionTransformer):
    """Vision Transformer for MOCO
    LiBai impl of: `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
        - https://arxiv.org/abs/2010.11929

    Differences from the base LiBai ViT (following moco-v3's vits.py):
      * fixed (non-learnable) 2D sin-cos position embedding,
      * xavier-uniform init for linear layers and the patch projection,
      * optional freezing of the patch-embedding conv (``stop_grad_conv1``),
      * optional checkpoint loading for linear probing (``linear_prob``).
    """

    def __init__(
        self,
        img_size=224,
        patch_size=16,
        in_chans=3,
        embed_dim=768,
        depth=12,
        num_heads=12,
        mlp_ratio=4.0,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        global_pool=False,
        num_classes=1000,
        loss_func=None,
        linear_prob=None,
        weight_style="pytorch",
        stop_grad_conv1=False,
    ):
        """
        Args:
            global_pool: if True, classify from the mean of the patch tokens
                instead of the class token (see ``forward_head``).
            linear_prob: optional checkpoint to load via ``load_checkpoint``;
                when given, only the head is re-initialized here.
            weight_style: checkpoint weight layout forwarded to
                ``load_checkpoint`` (e.g. "pytorch").
            stop_grad_conv1: if True, freeze the patch-embedding projection
                (its weight and bias get ``requires_grad = False``).

        The remaining arguments are forwarded unchanged to the base ViT.
        """
        super(VisionTransformer, self).__init__(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            depth=depth,
            num_heads=num_heads,
            mlp_ratio=mlp_ratio,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            num_classes=num_classes,
            loss_func=loss_func,
        )
        self.global_pool = global_pool
        # weight init
        if linear_prob:
            # Linear probing: restore a pretrained backbone, then reset only
            # the classification head.
            load_checkpoint(self, linear_prob, weight_style, num_heads, embed_dim)
            self.head.weight.data.normal_(mean=0.0, std=0.01)
            self.head.bias.data.zeros_()
        else:
            trunc_normal_(self.pos_embed, std=0.02)
            trunc_normal_(self.cls_token, std=0.02)
            self.apply(self._init_weights)
        self.stop_grad_conv1 = stop_grad_conv1
        self.embed_dim = embed_dim
        # NOTE(review): initialization() runs unconditionally and re-inits
        # Linear layers, cls_token, pos_embed and the patch projection — in the
        # linear_prob branch this appears to overwrite the just-loaded
        # checkpoint weights. Confirm this is the intended order.
        self.initialization()

    def initialization(self):
        """Apply moco-v3 style weight init and install the fixed pos embedding."""
        # Use fixed 2D sin-cos position embedding
        self.build_2d_sincos_position_embedding()
        # weight initialization
        for name, m in self.named_modules():
            if isinstance(m, Linear):
                if "query_key_value" in name:
                    # Fused qkv projection: use fan-in of one of the three
                    # stacked matrices (shape[0] // 3) for the uniform bound.
                    val = math.sqrt(6.0 / float(m.weight.shape[0] // 3 + m.weight.shape[1]))
                    nn.init.uniform_(m.weight, -val, val)
                else:
                    nn.init.xavier_uniform_(m.weight)
                nn.init.zeros_(m.bias)
        nn.init.normal_(self.cls_token, std=1e-6)
        if isinstance(self.patch_embed, PatchEmbedding):
            # xavier_uniform initialization
            # fan_in = in_chans(3) * patch_h * patch_w, fan_out = embed_dim.
            val = math.sqrt(
                6.0 / float(3 * reduce(mul, self.patch_embed.patch_size, 1) + self.embed_dim)
            )
            nn.init.uniform_(self.patch_embed.proj.weight, -val, val)
            nn.init.zeros_(self.patch_embed.proj.bias)
            if self.stop_grad_conv1:
                # Freeze the first conv (patch projection), per moco-v3.
                self.patch_embed.proj.weight.requires_grad = False
                self.patch_embed.proj.bias.requires_grad = False

    def build_2d_sincos_position_embedding(self, temperature=10000.0):
        """Replace ``self.pos_embed`` with a fixed 2D sin-cos embedding.

        The new embedding is built as a global tensor with the same SBP and
        placement as the parameter it replaces, and is frozen
        (``requires_grad = False``).
        """
        # Preserve the distributed layout of the parameter being replaced.
        sbp = self.pos_embed.sbp
        placement = self.pos_embed.placement
        h, w = self.patch_embed.grid_size
        grid_w = flow.arange(w, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
        grid_h = flow.arange(h, dtype=flow.float32).to_global(sbp=sbp, placement=placement)
        grid_w, grid_h = flow.meshgrid(grid_w, grid_h)
        assert (
            self.embed_dim % 4 == 0
        ), "Embed dimension must be divisible by 4 for 2D sin-cos position embedding"
        # One quarter of the channels each for sin/cos of the w/h coordinates.
        pos_dim = self.embed_dim // 4
        omega = (flow.arange(pos_dim, dtype=flow.float32) / pos_dim).to_global(
            sbp=sbp, placement=placement
        )
        # Inverse-frequency terms: 1 / temperature**(i / pos_dim).
        omega = 1.0 / flow.tensor(temperature).to_global(sbp=sbp, placement=placement) ** omega
        out_w = flow.einsum("m,d->md", grid_w.flatten(), omega)
        out_h = flow.einsum("m,d->md", grid_h.flatten(), omega)
        pos_emb = flow.cat(
            [flow.sin(out_w), flow.cos(out_w), flow.sin(out_h), flow.cos(out_h)], dim=1
        )[None, :, :]
        # Zero embedding for the class token, prepended to the patch grid.
        pe_token = flow.zeros([1, 1, self.embed_dim], dtype=flow.float32).to_global(
            sbp=sbp, placement=placement
        )
        self.pos_embed = nn.Parameter(flow.cat([pe_token, pos_emb], dim=1))
        self.pos_embed.requires_grad = False

    def forward_head(self, x):
        """Classify from either the pooled patch tokens or the class token."""
        if self.global_pool:
            x = x[:, 1:, :].mean(dim=1)  # global pool without cls token
            outcome = self.norm(x)
            outcome = self.head(outcome)
        else:
            x = self.norm(x)
            outcome = x[:, 0]
            outcome = self.head(outcome)
        return outcome
| [
"oneflow.tensor",
"oneflow.sin",
"oneflow.cat",
"oneflow.nn.init.xavier_uniform_",
"oneflow.nn.init.zeros_",
"oneflow.zeros",
"oneflow.meshgrid",
"oneflow.nn.init.normal_",
"oneflow.cos",
"oneflow.arange",
"oneflow.nn.init.uniform_"
] | [((3433, 3475), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['self.cls_token'], {'std': '(1e-06)'}), '(self.cls_token, std=1e-06)\n', (3448, 3475), True, 'import oneflow.nn as nn\n'), ((4411, 4440), 'oneflow.meshgrid', 'flow.meshgrid', (['grid_w', 'grid_h'], {}), '(grid_w, grid_h)\n', (4424, 4440), True, 'import oneflow as flow\n'), ((2404, 2474), 'utils.load_checkpoint.load_checkpoint', 'load_checkpoint', (['self', 'linear_prob', 'weight_style', 'num_heads', 'embed_dim'], {}), '(self, linear_prob, weight_style, num_heads, embed_dim)\n', (2419, 2474), False, 'from utils.load_checkpoint import load_checkpoint\n'), ((2604, 2643), 'flowvision.layers.weight_init.trunc_normal_', 'trunc_normal_', (['self.pos_embed'], {'std': '(0.02)'}), '(self.pos_embed, std=0.02)\n', (2617, 2643), False, 'from flowvision.layers.weight_init import trunc_normal_\n'), ((2656, 2695), 'flowvision.layers.weight_init.trunc_normal_', 'trunc_normal_', (['self.cls_token'], {'std': '(0.02)'}), '(self.cls_token, std=0.02)\n', (2669, 2695), False, 'from flowvision.layers.weight_init import trunc_normal_\n'), ((3726, 3783), 'oneflow.nn.init.uniform_', 'nn.init.uniform_', (['self.patch_embed.proj.weight', '(-val)', 'val'], {}), '(self.patch_embed.proj.weight, -val, val)\n', (3742, 3783), True, 'import oneflow.nn as nn\n'), ((3796, 3838), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['self.patch_embed.proj.bias'], {}), '(self.patch_embed.proj.bias)\n', (3810, 3838), True, 'import oneflow.nn as nn\n'), ((5285, 5321), 'oneflow.cat', 'flow.cat', (['[pe_token, pos_emb]'], {'dim': '(1)'}), '([pe_token, pos_emb], dim=1)\n', (5293, 5321), True, 'import oneflow as flow\n'), ((3402, 3424), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (3416, 3424), True, 'import oneflow.nn as nn\n'), ((4219, 4253), 'oneflow.arange', 'flow.arange', (['w'], {'dtype': 'flow.float32'}), '(w, dtype=flow.float32)\n', (4230, 4253), True, 'import oneflow as flow\n'), ((4311, 4345), 'oneflow.arange', 
'flow.arange', (['h'], {'dtype': 'flow.float32'}), '(h, dtype=flow.float32)\n', (4322, 4345), True, 'import oneflow as flow\n'), ((5130, 5184), 'oneflow.zeros', 'flow.zeros', (['[1, 1, self.embed_dim]'], {'dtype': 'flow.float32'}), '([1, 1, self.embed_dim], dtype=flow.float32)\n', (5140, 5184), True, 'import oneflow as flow\n'), ((3271, 3308), 'oneflow.nn.init.uniform_', 'nn.init.uniform_', (['m.weight', '(-val)', 'val'], {}), '(m.weight, -val, val)\n', (3287, 3308), True, 'import oneflow.nn as nn\n'), ((3351, 3384), 'oneflow.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (3374, 3384), True, 'import oneflow.nn as nn\n'), ((4635, 4675), 'oneflow.arange', 'flow.arange', (['pos_dim'], {'dtype': 'flow.float32'}), '(pos_dim, dtype=flow.float32)\n', (4646, 4675), True, 'import oneflow as flow\n'), ((5014, 5029), 'oneflow.sin', 'flow.sin', (['out_w'], {}), '(out_w)\n', (5022, 5029), True, 'import oneflow as flow\n'), ((5031, 5046), 'oneflow.cos', 'flow.cos', (['out_w'], {}), '(out_w)\n', (5039, 5046), True, 'import oneflow as flow\n'), ((5048, 5063), 'oneflow.sin', 'flow.sin', (['out_h'], {}), '(out_h)\n', (5056, 5063), True, 'import oneflow as flow\n'), ((5065, 5080), 'oneflow.cos', 'flow.cos', (['out_h'], {}), '(out_h)\n', (5073, 5080), True, 'import oneflow as flow\n'), ((4771, 4795), 'oneflow.tensor', 'flow.tensor', (['temperature'], {}), '(temperature)\n', (4782, 4795), True, 'import oneflow as flow\n'), ((3638, 3681), 'functools.reduce', 'reduce', (['mul', 'self.patch_embed.patch_size', '(1)'], {}), '(mul, self.patch_embed.patch_size, 1)\n', (3644, 3681), False, 'from functools import reduce\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
@flow.unittest.skip_unless_1n1d()
def compare_with_numpy_rmsprop(
    test_case,
    device,
    x_shape,
    learning_rate,
    momentum,
    train_iters,
    alpha,
    eps,
    weight_decay,
    centered,
):
    """Run graph-mode RMSprop for ``train_iters`` steps and compare the
    parameter trajectory against a NumPy reference implementation.

    Args:
        test_case: the unittest.TestCase driving the assertions.
        device: "cpu" or "cuda" — where the module and input tensors live.
        x_shape: shape of the single trainable parameter.
        learning_rate, momentum, alpha, eps, weight_decay, centered:
            RMSprop hyper-parameters forwarded to ``flow.optim.RMSprop``.
        train_iters: number of optimizer steps to run.
    """
    # One random "mask" per iteration.  The loss is sum(param * mask), so the
    # gradient w.r.t. the parameter at step i is exactly random_grad_seq[i].
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)
    class CustomModel(flow.nn.Module):
        # Single-parameter module: forward() just scales the parameter by `mask`.
        def __init__(self):
            super().__init__()
            self.param0 = flow.nn.Parameter(
                flow.Tensor(init_value, device=flow.device(device))
            )
        def forward(self, mask):
            return self.param0 * mask
    simp_module = CustomModel()
    simp_module.to(flow.device(device))
    simp_module.train()
    rmsprop0 = flow.optim.RMSprop(
        [
            {
                "params": simp_module.parameters(),
                "lr": learning_rate,
                "alpha": alpha,
                "eps": eps,
                "weight_decay": weight_decay,
                "momentum": momentum,
                "centered": centered,
            }
        ]
    )
    class CustomRMSpropGraph(flow.nn.Graph):
        # Static graph wrapping the module forward pass plus the optimizer update.
        def __init__(self):
            super().__init__()
            self.m = simp_module
            self.add_optimizer(rmsprop0)
        def build(self, mask_tensor):
            loss = flow.sum(self.m(mask_tensor))
            loss.backward()
            return loss
    of_res_list = []
    rmsprop_graph = CustomRMSpropGraph()
    for i in range(train_iters):
        mask_tensor = flow.Tensor(
            random_grad_seq[i], requires_grad=False, device=flow.device(device)
        )
        rmsprop_x = rmsprop_graph(mask_tensor)
        # Snapshot the parameter after each optimizer step.
        of_res_list.append(simp_module.param0.numpy())
    np_res_list = []
    def train_by_numpy():
        # NumPy reference: replay the RMSprop update rule on the same gradients.
        x = init_value
        r = np.zeros_like(x)  # running average of squared gradients
        v = np.zeros_like(x)  # momentum buffer
        g = np.zeros_like(x)  # running average of gradients (centered variant only)
        def np_train_one_iter(grad):
            # ref to: ../modules/test_optim_rmsprop.py -> train_by_numpy()
            # weight decay is equivalent to l2 penalty
            grad = grad + weight_decay * x
            r_ = alpha * r + (1 - alpha) * grad * grad
            if centered:
                g_ = alpha * g + (1 - alpha) * grad
                v_ = momentum * v + learning_rate / np.sqrt(r_ - g_ * g_ + eps) * grad
            else:
                g_ = g
                v_ = momentum * v + learning_rate / np.sqrt(r_ + eps) * grad
            param = x - v_
            return (param, r_, g_, v_)
        for i in range(train_iters):
            (x, r, g, v) = np_train_one_iter(random_grad_seq[i])
            np_res_list.append(x)
        return x
    train_by_numpy()
    test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=1e-3, atol=1e-3))
@flow.unittest.skip_unless_1n1d()
def compare_with_numpy_rmsprop_clip_grad(
    test_case,
    device,
    x_shape,
    learning_rate,
    momentum,
    train_iters,
    alpha,
    eps,
    weight_decay,
    centered,
    clip_grad_max_norm,
    clip_grad_norm_type,
):
    """Same as ``compare_with_numpy_rmsprop`` but with gradient clipping:
    ``clip_grad_max_norm`` / ``clip_grad_norm_type`` are passed to the
    optimizer, and the NumPy reference clips each gradient with
    ``clip_grad_norm_np`` before applying the RMSprop update.
    """
    # Pre-generate one gradient (mask) per iteration; loss = sum(param * mask).
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)
    class CustomModel(flow.nn.Module):
        # Single trainable parameter, scaled elementwise by the input mask.
        def __init__(self):
            super().__init__()
            self.param0 = flow.nn.Parameter(
                flow.Tensor(init_value, device=flow.device(device))
            )
        def forward(self, mask):
            return self.param0 * mask
    simp_module = CustomModel()
    simp_module.to(flow.device(device))
    simp_module.train()
    rmsprop0 = flow.optim.RMSprop(
        [
            {
                "params": simp_module.parameters(),
                "lr": learning_rate,
                "alpha": alpha,
                "eps": eps,
                "weight_decay": weight_decay,
                "momentum": momentum,
                "centered": centered,
                "clip_grad_max_norm": clip_grad_max_norm,
                "clip_grad_norm_type": clip_grad_norm_type,
            }
        ]
    )
    class CustomRMSpropGraph(flow.nn.Graph):
        # Static graph: forward, backward, and the clipped optimizer step.
        def __init__(self):
            super().__init__()
            self.m = simp_module
            self.add_optimizer(rmsprop0)
        def build(self, mask_tensor):
            loss = flow.sum(self.m(mask_tensor))
            loss.backward()
            return loss
    of_res_list = []
    rmsprop_graph = CustomRMSpropGraph()
    for i in range(train_iters):
        mask_tensor = flow.Tensor(
            random_grad_seq[i], requires_grad=False, device=flow.device(device)
        )
        rmsprop_x = rmsprop_graph(mask_tensor)
        # Record the parameter value after every step.
        of_res_list.append(simp_module.param0.numpy())
    np_res_list = []
    def train_by_numpy():
        # NumPy reference with the same clipping applied before each update.
        x = init_value
        r = np.zeros_like(x)  # running average of squared gradients
        v = np.zeros_like(x)  # momentum buffer
        g = np.zeros_like(x)  # running average of gradients (centered variant only)
        def np_train_one_iter(grad):
            norm, grad = clip_grad_norm_np(
                grad, clip_grad_max_norm, clip_grad_norm_type
            )
            # weight decay is equivalent to l2 penalty
            grad = grad + weight_decay * x
            r_ = alpha * r + (1 - alpha) * grad * grad
            if centered:
                g_ = alpha * g + (1 - alpha) * grad
                v_ = momentum * v + learning_rate / np.sqrt(r_ - g_ * g_ + eps) * grad
            else:
                g_ = g
                v_ = momentum * v + learning_rate / np.sqrt(r_ + eps) * grad
            param = x - v_
            return (param, r_, g_, v_)
        for i in range(train_iters):
            (x, r, g, v) = np_train_one_iter(random_grad_seq[i])
            np_res_list.append(x)
        return x
    train_by_numpy()
    test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=1e-3, atol=1e-3))
@flow.unittest.skip_unless_1n1d()
class TestRMSprop(flow.unittest.TestCase):
    """Drive the graph-mode RMSprop comparisons over a grid of hyper-parameters."""

    def test_rmsprop(test_case):
        # Insertion order of the keys fixes the positional argument order
        # expected by compare_with_numpy_rmsprop.
        grid = OrderedDict(
            [
                ("device", ["cpu", "cuda"]),
                ("x_shape", [(1,), (10,)]),
                ("learning_rate", [1, 10]),
                ("momentum", [0.0]),  # not supported momentum > 0
                ("train_iters", [10]),
                ("alpha", [0.9, 0.99]),
                ("eps", [1e-8, 1e-5]),
                ("weight_decay", [0.1, 0.9]),
                ("centered", [False, True]),
            ]
        )
        for combo in GenArgList(grid):
            compare_with_numpy_rmsprop(test_case, *combo)

    def test_rmsprop_clip_grad(test_case):
        grid = OrderedDict(
            [
                ("device", ["cpu", "cuda"]),
                ("x_shape", [(10,)]),
                ("learning_rate", [1]),
                ("momentum", [0.0]),
                ("train_iters", [10]),
                ("alpha", [0.9, 0.99]),
                ("eps", [1e-08, 1e-05]),
                # NOTE(<NAME>): test will fail when weight_decay > 0
                ("weight_decay", [0.0]),
                ("centered", [False, True]),
                ("clip_grad_max_norm", [1.0]),
                ("clip_grad_norm_type", [2.0]),
            ]
        )
        for combo in GenArgList(grid):
            compare_with_numpy_rmsprop_clip_grad(test_case, *combo)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device"
] | [((773, 805), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (803, 805), True, 'import oneflow as flow\n'), ((3632, 3664), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3662, 3664), True, 'import oneflow as flow\n'), ((6712, 6744), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6742, 6744), True, 'import oneflow as flow\n'), ((8128, 8143), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8141, 8143), False, 'import unittest\n'), ((1542, 1561), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1553, 1561), True, 'import oneflow as flow\n'), ((2676, 2692), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2689, 2692), True, 'import numpy as np\n'), ((2705, 2721), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2718, 2721), True, 'import numpy as np\n'), ((2734, 2750), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2747, 2750), True, 'import numpy as np\n'), ((3568, 3629), 'numpy.allclose', 'np.allclose', (['of_res_list', 'np_res_list'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(of_res_list, np_res_list, rtol=0.001, atol=0.001)\n', (3579, 3629), True, 'import numpy as np\n'), ((4460, 4479), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4471, 4479), True, 'import oneflow as flow\n'), ((5712, 5728), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5725, 5728), True, 'import numpy as np\n'), ((5741, 5757), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5754, 5757), True, 'import numpy as np\n'), ((5770, 5786), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5783, 5786), True, 'import numpy as np\n'), ((6648, 6709), 'numpy.allclose', 'np.allclose', (['of_res_list', 'np_res_list'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(of_res_list, np_res_list, rtol=0.001, atol=0.001)\n', (6659, 6709), True, 'import numpy as np\n'), ((6841, 
6854), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6852, 6854), False, 'from collections import OrderedDict\n'), ((7294, 7315), 'test_util.GenArgList', 'GenArgList', (['args_dict'], {}), '(args_dict)\n', (7304, 7315), False, 'from test_util import GenArgList\n'), ((7437, 7450), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7448, 7450), False, 'from collections import OrderedDict\n'), ((8007, 8027), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8017, 8027), False, 'from test_util import GenArgList\n'), ((1141, 1172), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1158, 1172), True, 'import numpy as np\n'), ((4059, 4090), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4076, 4090), True, 'import numpy as np\n'), ((5850, 5914), 'optimizer_test_util.clip_grad_norm_np', 'clip_grad_norm_np', (['grad', 'clip_grad_max_norm', 'clip_grad_norm_type'], {}), '(grad, clip_grad_max_norm, clip_grad_norm_type)\n', (5867, 5914), False, 'from optimizer_test_util import clip_grad_norm_np\n'), ((2459, 2478), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2470, 2478), True, 'import oneflow as flow\n'), ((5495, 5514), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5506, 5514), True, 'import oneflow as flow\n'), ((1072, 1103), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1089, 1103), True, 'import numpy as np\n'), ((3990, 4021), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4007, 4021), True, 'import numpy as np\n'), ((1383, 1402), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1394, 1402), True, 'import oneflow as flow\n'), ((4301, 4320), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4312, 4320), True, 'import oneflow as flow\n'), ((3147, 3174), 'numpy.sqrt', 'np.sqrt', 
(['(r_ - g_ * g_ + eps)'], {}), '(r_ - g_ * g_ + eps)\n', (3154, 3174), True, 'import numpy as np\n'), ((3275, 3292), 'numpy.sqrt', 'np.sqrt', (['(r_ + eps)'], {}), '(r_ + eps)\n', (3282, 3292), True, 'import numpy as np\n'), ((6227, 6254), 'numpy.sqrt', 'np.sqrt', (['(r_ - g_ * g_ + eps)'], {}), '(r_ - g_ * g_ + eps)\n', (6234, 6254), True, 'import numpy as np\n'), ((6355, 6372), 'numpy.sqrt', 'np.sqrt', (['(r_ + eps)'], {}), '(r_ + eps)\n', (6362, 6372), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
config = flow.function_config()
def make_job(a_shape, b_shape, axis, dtype=flow.float32):
    """Build a baseline OneFlow job (XLA and TensorRT both disabled) that
    concatenates two fixed-shape tensors along ``axis``."""
    config.use_xla_jit(False)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def concat_job(
        x=flow.FixedTensorDef(a_shape, dtype=dtype),
        y=flow.FixedTensorDef(b_shape, dtype=dtype),
    ):
        return flow.concat([x, y], axis=axis)
    return concat_job
def make_trt_job(a_shape, b_shape, axis, dtype=flow.float32):
    """Build the same concat job as ``make_job`` but compiled through
    TensorRT, so results of the two backends can be compared."""
    config.use_xla_jit(False)
    config.use_tensorrt(True)
    @flow.global_function(config)
    def trt_concat_job(
        x=flow.FixedTensorDef(a_shape, dtype=dtype),
        y=flow.FixedTensorDef(b_shape, dtype=dtype),
    ):
        return flow.concat([x, y], axis=axis)
    return trt_concat_job
class Testconcat(unittest.TestCase):
    """Check that the TensorRT-compiled concat matches the plain OneFlow concat."""

    def _test_body(self, x, y, axis, dtype=np.float32):
        # Run the same concat twice: once on the default backend, once via TensorRT.
        baseline_fn = make_job(x.shape, y.shape, axis, dtype=flow.float32)
        trt_fn = make_trt_job(x.shape, y.shape, axis, dtype=flow.float32)
        baseline = baseline_fn(x, y).get()
        trt_out = trt_fn(x, y).get()
        print("without xla: ", baseline)
        print("with tensorrt: ", trt_out)
        self.assertTrue(
            np.allclose(baseline.numpy(), trt_out.numpy(), rtol=0.001, atol=1e-05)
        )
        flow.clear_default_session()

    def _test_ones_body(self, a_shape, b_shape, axis, dtype=np.float32):
        # All-ones inputs give a deterministic, easy-to-inspect case.
        self._test_body(
            np.ones(a_shape, dtype=dtype), np.ones(b_shape, dtype=dtype), axis, dtype=dtype
        )

    def _test_random_body(self, a_shape, b_shape, axis, dtype=np.float32):
        lhs = np.random.random(a_shape).astype(dtype)
        rhs = np.random.random(b_shape).astype(dtype)
        self._test_body(lhs, rhs, axis, dtype=dtype)

    def test_ones_input(self):
        for lhs_shape, rhs_shape, axis in (
            ((5, 2), (5, 3), 1),
            ((5, 2), (5, 3), -1),
            ((5, 1, 2), (5, 1, 2), 1),
            ((5, 1, 2), (5, 1, 2), 2),
        ):
            self._test_ones_body(lhs_shape, rhs_shape, axis=axis)

    def test_random_input(self):
        for lhs_shape, rhs_shape, axis in (
            ((5, 2), (5, 3), 1),
            ((5, 2), (5, 3), -1),
            ((5, 1, 2), (5, 1, 2), 1),
            ((5, 3, 2), (5, 3, 2), 2),
        ):
            self._test_random_body(lhs_shape, rhs_shape, axis=axis)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.function_config",
"oneflow.compatible.single_client.FixedTensorDef",
"oneflow.compatible.single_client.concat",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.clear_default_session"
] | [((740, 762), 'oneflow.compatible.single_client.function_config', 'flow.function_config', ([], {}), '()\n', (760, 762), True, 'from oneflow.compatible import single_client as flow\n'), ((890, 918), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (910, 918), True, 'from oneflow.compatible import single_client as flow\n'), ((1251, 1279), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1271, 1279), True, 'from oneflow.compatible import single_client as flow\n'), ((2958, 2973), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2971, 2973), False, 'import unittest\n'), ((949, 990), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (968, 990), True, 'from oneflow.compatible import single_client as flow\n'), ((1002, 1043), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1021, 1043), True, 'from oneflow.compatible import single_client as flow\n'), ((1067, 1097), 'oneflow.compatible.single_client.concat', 'flow.concat', (['[x, y]'], {'axis': 'axis'}), '([x, y], axis=axis)\n', (1078, 1097), True, 'from oneflow.compatible import single_client as flow\n'), ((1314, 1355), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (1333, 1355), True, 'from oneflow.compatible import single_client as flow\n'), ((1367, 1408), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1386, 1408), True, 'from oneflow.compatible import single_client as flow\n'), ((1432, 1462), 'oneflow.compatible.single_client.concat', 'flow.concat', (['[x, y]'], {'axis': 'axis'}), '([x, y], axis=axis)\n', (1443, 1462), True, 'from 
oneflow.compatible import single_client as flow\n'), ((1936, 1964), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1962, 1964), True, 'from oneflow.compatible import single_client as flow\n'), ((2051, 2080), 'numpy.ones', 'np.ones', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (2058, 2080), True, 'import numpy as np\n'), ((2093, 2122), 'numpy.ones', 'np.ones', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (2100, 2122), True, 'import numpy as np\n'), ((2260, 2285), 'numpy.random.random', 'np.random.random', (['a_shape'], {}), '(a_shape)\n', (2276, 2285), True, 'import numpy as np\n'), ((2312, 2337), 'numpy.random.random', 'np.random.random', (['b_shape'], {}), '(b_shape)\n', (2328, 2337), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.triplet_margin_loss,
r"""
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.triplet_margin_loss.html?highlight=triplet_margin_loss
Creates a criterion that measures the triplet loss given an input
tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.
This is used for measuring a relative similarity between samples. A triplet
is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative
examples` respectively). The shapes of all input tensors should be
:math:`(N, D)`.
The distance swap is described in detail in the paper `Learning shallow
convolutional feature descriptors with triplet losses <http://www.bmva.org/bmvc/2016/papers/paper119/index.html>`__ by
<NAME>, <NAME> et al.
The loss function for each sample in the mini-batch is:
.. math::
L(a, p, n) = \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}
where
.. math::
d(x_i, y_i) = \left\lVert {\bf x}_i - {\bf y}_i \right\rVert_p
Args:
margin (float, optional): Default: :math:`1`.
p (float, optional): The norm degree for pairwise distance. Default: :math:`2.0`.
swap (bool, optional): The distance swap is described in detail in the paper
`Learning shallow convolutional feature descriptors with triplet losses` by
<NAME>, <NAME> et al. Default: ``False``.
reduction (string, optional): Specifies the reduction to apply to the output:
``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will be applied,
``'mean'``: the sum of the output will be divided by the number of
elements in the output, ``'sum'``: the output will be summed. Note: :attr:`size_average`
and :attr:`reduce` are in the process of being deprecated, and in the meantime,
specifying either of those two args will override :attr:`reduction`. Default: ``'mean'``
Shape:
- Input: :math:`(N, D)` where :math:`D` is the vector dimension.
- Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``'none'``, or a scalar
otherwise.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> triplet_loss = flow.nn.TripletMarginLoss(margin=1.0, p=2)
>>> anchor = np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]])
>>> positive = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> negative = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])
>>> output = triplet_loss(flow.Tensor(anchor), flow.Tensor(positive), flow.Tensor(negative))
>>> output
tensor(6.2971, dtype=oneflow.float32)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 3486), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.triplet_margin_loss', '""" \n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.triplet_margin_loss.html?highlight=triplet_margin_loss\n\n Creates a criterion that measures the triplet loss given an input\n tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative\n examples` respectively). The shapes of all input tensors should be\n :math:`(N, D)`.\n\n The distance swap is described in detail in the paper `Learning shallow\n convolutional feature descriptors with triplet losses <http://www.bmva.org/bmvc/2016/papers/paper119/index.html>`__ by\n <NAME>, <NAME> et al.\n\n The loss function for each sample in the mini-batch is:\n\n .. math::\n L(a, p, n) = \\\\max \\\\{d(a_i, p_i) - d(a_i, n_i) + {\\\\rm margin}, 0\\\\}\n\n\n where\n\n .. math::\n d(x_i, y_i) = \\\\left\\\\lVert {\\\\bf x}_i - {\\\\bf y}_i \\\\right\\\\rVert_p\n\n Args:\n margin (float, optional): Default: :math:`1`.\n p (float, optional): The norm degree for pairwise distance. Default: :math:`2.0`.\n swap (bool, optional): The distance swap is described in detail in the paper\n `Learning shallow convolutional feature descriptors with triplet losses` by\n <NAME>, <NAME> et al. Default: ``False``.\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``\'none\'`` | ``\'mean\'`` | ``\'sum\'``. ``\'none\'``: no reduction will be applied,\n ``\'mean\'``: the sum of the output will be divided by the number of\n elements in the output, ``\'sum\'``: the output will be summed. 
Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``\'mean\'``\n\n Shape:\n - Input: :math:`(N, D)` where :math:`D` is the vector dimension.\n - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``\'none\'``, or a scalar\n otherwise.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> triplet_loss = flow.nn.TripletMarginLoss(margin=1.0, p=2)\n >>> anchor = np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]])\n >>> positive = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> negative = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])\n >>> output = triplet_loss(flow.Tensor(anchor), flow.Tensor(positive), flow.Tensor(negative))\n >>> output\n tensor(6.2971, dtype=oneflow.float32)\n \n """'], {}), '(oneflow._C.triplet_margin_loss,\n """ \n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.triplet_margin_loss.html?highlight=triplet_margin_loss\n\n Creates a criterion that measures the triplet loss given an input\n tensors :math:`x1`, :math:`x2`, :math:`x3` and a margin with a value greater than :math:`0`.\n This is used for measuring a relative similarity between samples. A triplet\n is composed by `a`, `p` and `n` (i.e., `anchor`, `positive examples` and `negative\n examples` respectively). The shapes of all input tensors should be\n :math:`(N, D)`.\n\n The distance swap is described in detail in the paper `Learning shallow\n convolutional feature descriptors with triplet losses <http://www.bmva.org/bmvc/2016/papers/paper119/index.html>`__ by\n <NAME>, <NAME> et al.\n\n The loss function for each sample in the mini-batch is:\n\n .. math::\n L(a, p, n) = \\\\max \\\\{d(a_i, p_i) - d(a_i, n_i) + {\\\\rm margin}, 0\\\\}\n\n\n where\n\n .. 
math::\n d(x_i, y_i) = \\\\left\\\\lVert {\\\\bf x}_i - {\\\\bf y}_i \\\\right\\\\rVert_p\n\n Args:\n margin (float, optional): Default: :math:`1`.\n p (float, optional): The norm degree for pairwise distance. Default: :math:`2.0`.\n swap (bool, optional): The distance swap is described in detail in the paper\n `Learning shallow convolutional feature descriptors with triplet losses` by\n <NAME>, <NAME> et al. Default: ``False``.\n reduction (string, optional): Specifies the reduction to apply to the output:\n ``\'none\'`` | ``\'mean\'`` | ``\'sum\'``. ``\'none\'``: no reduction will be applied,\n ``\'mean\'``: the sum of the output will be divided by the number of\n elements in the output, ``\'sum\'``: the output will be summed. Note: :attr:`size_average`\n and :attr:`reduce` are in the process of being deprecated, and in the meantime,\n specifying either of those two args will override :attr:`reduction`. Default: ``\'mean\'``\n\n Shape:\n - Input: :math:`(N, D)` where :math:`D` is the vector dimension.\n - Output: A Tensor of shape :math:`(N)` if :attr:`reduction` is ``\'none\'``, or a scalar\n otherwise.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> triplet_loss = flow.nn.TripletMarginLoss(margin=1.0, p=2)\n >>> anchor = np.array([[1, -1, 1],[-1, 1, -1], [1, 1, 1]])\n >>> positive = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])\n >>> negative = np.array([[2, 2, 2], [2, 2, 2], [2, 2, 2]])\n >>> output = triplet_loss(flow.Tensor(anchor), flow.Tensor(positive), flow.Tensor(negative))\n >>> output\n tensor(6.2971, dtype=oneflow.float32)\n \n """\n )\n', (670, 3486), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
# oneflow.python.onnx.graph_helper - class to help building graph, such as helping to make complex node
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import logging
from oneflow.python.framework import id_util
from oneflow.python.onnx import util
# pylint: disable=missing-docstring
logger = logging.getLogger(__name__)
class GraphBuilder(object):
    """Helper that builds composite ONNX nodes on top of a target graph."""
    def __init__(self, graph):
        self._g = graph
    @property
    def graph(self):
        # The underlying graph object this builder appends nodes to.
        return self._g
    def MakeSlice(self, kwargs, name=None, shapes=None, dtypes=None):
        """
        Slice changed its schema at opset 10: it treats some attributes as dynamic
        inputs, so this function has to process inputs according to the graph's
        opset version to get "inputs" and "attr" to feed "MakeNode".
        kwargs: key could be ["data", "starts", "ends", "axes", "steps", "outputs"].
        Returns the name of the new Slice node's first output.
        """
        outputs = kwargs.pop("outputs", None)
        if self.graph.opset < 10:
            # "data" is a string (an upstream output name);
            # "starts", "ends" and "axes" are attributes, and "axes" is optional.
            inputs = [kwargs.pop("data")]
            starts = self.ConvertToAttribute(kwargs.pop("starts"))
            ends = self.ConvertToAttribute(kwargs.pop("ends"))
            axes = self.ConvertToAttribute(kwargs.pop("axes", None), is_optional=True)
            attr = {"starts": starts, "ends": ends, "axes": axes}
        else:
            # Slice-10 has 3 required inputs "data", "starts", "ends"
            # and 2 optional inputs "axes", "steps".
            # Input sequence must be "data", "starts", "ends", "axes", "steps".
            attr = {}
            data = self.ConvertToInput(kwargs.pop("data"))
            starts = self.ConvertToInput(kwargs.pop("starts"), dtype=np.int64)
            ends = self.ConvertToInput(kwargs.pop("ends"), dtype=np.int64)
            axes = self.ConvertToInput(
                kwargs.pop("axes", None), is_optional=True, dtype=np.int64
            )
            steps = self.ConvertToInput(
                kwargs.pop("steps", None), is_optional=True, dtype=np.int64
            )
            inputs = [data, starts, ends, axes, steps]
        # Post-process inputs and attr: anything left in kwargs was not consumed.
        if kwargs:
            logger.warning("kwargs contains un-used key")
        new_attr = {}
        for key, val in attr.items():
            if val is not None:
                new_attr[key] = val
        attr = new_attr
        for ind, val in enumerate(inputs):
            if val is None:
                # empty string means no connection in ONNX
                inputs[ind] = util.ONNX_EMPTY_INPUT
        # Remove trailing "" placeholders — optional inputs may be omitted at the tail.
        while inputs[-1] == util.ONNX_EMPTY_INPUT:
            inputs = inputs[:-1]
        if self.graph.opset >= 10:
            # All dynamic index inputs (starts/ends/axes/steps) must share one dtype.
            dtype = self.graph.get_dtype(inputs[1])
            for input_data in inputs[1:]:
                if input_data != util.ONNX_EMPTY_INPUT:
                    util.MakeSure(
                        dtype == self.graph.get_dtype(input_data),
                        "dtype should be same",
                    )
        return self.graph.MakeNode(
            op_type="Slice",
            inputs=inputs,
            attr=attr,
            name=name,
            outputs=outputs,
            shapes=shapes,
            dtypes=dtypes,
        ).output[0]
    def ConvertToInput(self, tensor, is_optional=False, dtype=None):
        """In ONNX an input must come from a node, so the result is a string
        (an output name); a Python list is materialized as a new Const node."""
        if is_optional and tensor is None:
            return None
        util.MakeSure(tensor is not None, "input is required so it couldn't be None")
        res = tensor
        if isinstance(tensor, list):
            res = self.graph.MakeConst(
                id_util.UniqueStr("const_slice"), np.array(tensor, dtype)
            ).output[0]
        util.MakeSure(
            isinstance(res, str), "input is a dynamic input, so a str is needed"
        )
        return res
    def ConvertToAttribute(self, tensor, is_optional=False):
        # Attributes must be Python lists; a string (node output name) is
        # resolved to the constant value it carries.
        if is_optional and tensor is None:
            return None
        util.MakeSure(tensor is not None, "input is required so it couldn't be None")
        res = tensor
        if isinstance(tensor, str):
            const_node = self.graph.get_node_by_output(tensor)
            res = const_node.get_tensor_value(as_list=True)
        util.MakeSure(isinstance(res, list), "input is an attr, so a list is needed")
        return res
| [
"oneflow.python.onnx.util.MakeSure",
"oneflow.python.framework.id_util.UniqueStr"
] | [((1105, 1132), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1122, 1132), False, 'import logging\n'), ((4384, 4461), 'oneflow.python.onnx.util.MakeSure', 'util.MakeSure', (['(tensor is not None)', '"""input is required so it couldn\'t be None"""'], {}), '(tensor is not None, "input is required so it couldn\'t be None")\n', (4397, 4461), False, 'from oneflow.python.onnx import util\n'), ((4932, 5009), 'oneflow.python.onnx.util.MakeSure', 'util.MakeSure', (['(tensor is not None)', '"""input is required so it couldn\'t be None"""'], {}), '(tensor is not None, "input is required so it couldn\'t be None")\n', (4945, 5009), False, 'from oneflow.python.onnx import util\n'), ((4577, 4609), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""const_slice"""'], {}), "('const_slice')\n", (4594, 4609), False, 'from oneflow.python.framework import id_util\n'), ((4611, 4634), 'numpy.array', 'np.array', (['tensor', 'dtype'], {}), '(tensor, dtype)\n', (4619, 4634), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def ccrelu(x, name):
    """Apply the custom ``ccrelu`` user op to ``x`` under the given op name
    and return its single output blob."""
    builder = flow.user_op_builder(name).Op("ccrelu")
    builder = builder.Input("in", [x]).Output("out")
    op = builder.Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestCopyCommNetPassEmpty(flow.unittest.TestCase):
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_multi_node_comm_net(test_case):
        """Chain three `ccrelu` user ops across alternating GPU placements;
        the assertions expect ReLU semantics (positive constants pass
        through, non-positive become zero)."""
        func_config = flow.FunctionConfig()
        func_config.default_logical_view(flow.scope.consistent_view())
        func_config.default_data_type(flow.float)
        flow.config.gpu_device_num(1)
        @flow.global_function(function_config=func_config)
        def ReluJob(x: oft.Numpy.Placeholder((10, 2))):
            # Alternate placements between device 0 and device 1 so the
            # inter-device communication path is exercised.
            with flow.scope.placement("gpu", "0:0"):
                out0 = ccrelu(x, "my_op_0_0")
            with flow.scope.placement("gpu", "1:0"):
                out1 = ccrelu(out0, "my_op_1_0")
            with flow.scope.placement("gpu", "0:0"):
                out2 = ccrelu(out1, "my_op_print")
            return out2
        index = [-2, -1, 0, 1, 2]
        data = []
        for i in index:
            # Constant (10, 2) tensors filled with each test value.
            data.append(np.ones((10, 2), dtype=np.float32) * i)
        for i in range(5):
            ret = ReluJob(data[i]).get().numpy()
            print(ret)
            if index[i] > 0:
                test_case.assertTrue(
                    np.array_equal(ret, np.ones((10, 2), dtype=np.float32) * index[i])
                )
            else:
                test_case.assertTrue(
                    np.array_equal(ret, np.zeros((10, 2), dtype=np.float32))
                )
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_multi_node_comm_net_dynamic(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_placement_scope(flow.scope.placement("gpu", "0:0"))
func_config.default_data_type(flow.float)
flow.config.machine_num(2)
flow.config.gpu_device_num(1)
@flow.global_function(function_config=func_config)
def ReluJob(x: oft.ListNumpy.Placeholder((10, 2))):
with flow.scope.placement("gpu", "0:0"):
out0 = flow.math.relu(x)
with flow.scope.placement("gpu", "1:0"):
out1 = flow.math.relu(out0)
with flow.scope.placement("gpu", "0:0"):
out2 = flow.math.relu(out1)
return out2
index = [-2, -1, 0, 1, 2]
data = []
for i in index:
data.append(np.ones((5, 2), dtype=np.float32) * i)
for i in range(5):
ret = ReluJob([data[i]]).get().numpy_list()[0]
print(ret)
if index[i] > 0:
test_case.assertTrue(
np.array_equal(ret, np.ones((5, 2), dtype=np.float32) * index[i])
)
else:
test_case.assertTrue(
np.array_equal(ret, np.zeros((5, 2), dtype=np.float32))
)
def test_multi_node_comm_net_dynamic_empty(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
func_config.default_data_type(flow.float)
flow.config.machine_num(2)
flow.config.gpu_device_num(1)
@flow.global_function(function_config=func_config)
def ReluJob(x: oft.ListNumpy.Placeholder((10, 2))):
with flow.scope.placement("cpu", "0:0"):
out0 = flow.math.relu(x)
with flow.scope.placement("cpu", "1:0"):
out1 = flow.math.relu(out0)
with flow.scope.placement("cpu", "0:0"):
out2 = flow.math.relu(out1)
return out2
index = [-2, -1, 0, 1, 2]
data = []
for i in index:
data.append(np.ones((0, 0), dtype=np.float32) * i)
for i in range(5):
ret = ReluJob([data[i]]).get().numpy_list()[0]
print(ret)
if index[i] > 0:
test_case.assertTrue(
np.array_equal(ret, np.ones((0, 0), dtype=np.float32) * index[i])
)
else:
test_case.assertTrue(
np.array_equal(ret, np.zeros((0, 0), dtype=np.float32))
)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.config.gpu_device_num",
"oneflow.compatible.single_client.scope.consistent_view",
"oneflow.compatible.single_client.math.relu",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.scope.mirrored_view",
"oneflow.compatible.single_client.us... | [((1022, 1108), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (1037, 1108), False, 'import unittest\n'), ((5360, 5375), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5373, 5375), False, 'import unittest\n'), ((1308, 1329), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1327, 1329), True, 'from oneflow.compatible import single_client as flow\n'), ((1459, 1488), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1485, 1488), True, 'from oneflow.compatible import single_client as flow\n'), ((1499, 1548), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1519, 1548), True, 'from oneflow.compatible import single_client as flow\n'), ((1182, 1216), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1191, 1216), False, 'import os\n'), ((2653, 2674), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2672, 2674), True, 'from oneflow.compatible import single_client as flow\n'), ((2882, 2908), 'oneflow.compatible.single_client.config.machine_num', 'flow.config.machine_num', (['(2)'], {}), '(2)\n', (2905, 2908), True, 'from oneflow.compatible import single_client as flow\n'), ((2917, 2946), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (2943, 2946), True, 'from oneflow.compatible import single_client as flow\n'), ((2957, 3006), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2977, 3006), True, 'from 
oneflow.compatible import single_client as flow\n'), ((2519, 2553), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2528, 2553), False, 'import os\n'), ((4031, 4052), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4050, 4052), True, 'from oneflow.compatible import single_client as flow\n'), ((4260, 4286), 'oneflow.compatible.single_client.config.machine_num', 'flow.config.machine_num', (['(2)'], {}), '(2)\n', (4283, 4286), True, 'from oneflow.compatible import single_client as flow\n'), ((4295, 4324), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (4321, 4324), True, 'from oneflow.compatible import single_client as flow\n'), ((4335, 4384), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4355, 4384), True, 'from oneflow.compatible import single_client as flow\n'), ((1371, 1399), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1397, 1399), True, 'from oneflow.compatible import single_client as flow\n'), ((2716, 2742), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2740, 2742), True, 'from oneflow.compatible import single_client as flow\n'), ((2788, 2822), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (2808, 2822), True, 'from oneflow.compatible import single_client as flow\n'), ((4094, 4120), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (4118, 4120), True, 'from oneflow.compatible import single_client as flow\n'), ((4166, 4200), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', 
'0:0')\n", (4186, 4200), True, 'from oneflow.compatible import single_client as flow\n'), ((1572, 1602), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10, 2)'], {}), '((10, 2))\n', (1593, 1602), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1622, 1656), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (1642, 1656), True, 'from oneflow.compatible import single_client as flow\n'), ((1721, 1755), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""1:0"""'], {}), "('gpu', '1:0')\n", (1741, 1755), True, 'from oneflow.compatible import single_client as flow\n'), ((1823, 1857), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (1843, 1857), True, 'from oneflow.compatible import single_client as flow\n'), ((3030, 3064), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10, 2)'], {}), '((10, 2))\n', (3055, 3064), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3084, 3118), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (3104, 3118), True, 'from oneflow.compatible import single_client as flow\n'), ((3143, 3160), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (3157, 3160), True, 'from oneflow.compatible import single_client as flow\n'), ((3178, 3212), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""1:0"""'], {}), "('gpu', '1:0')\n", (3198, 3212), True, 'from oneflow.compatible import single_client as flow\n'), ((3237, 3257), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['out0'], {}), '(out0)\n', (3251, 3257), True, 'from oneflow.compatible import 
single_client as flow\n'), ((3275, 3309), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (3295, 3309), True, 'from oneflow.compatible import single_client as flow\n'), ((3334, 3354), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['out1'], {}), '(out1)\n', (3348, 3354), True, 'from oneflow.compatible import single_client as flow\n'), ((4408, 4442), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(10, 2)'], {}), '((10, 2))\n', (4433, 4442), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((4462, 4496), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (4482, 4496), True, 'from oneflow.compatible import single_client as flow\n'), ((4521, 4538), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (4535, 4538), True, 'from oneflow.compatible import single_client as flow\n'), ((4556, 4590), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""1:0"""'], {}), "('cpu', '1:0')\n", (4576, 4590), True, 'from oneflow.compatible import single_client as flow\n'), ((4615, 4635), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['out0'], {}), '(out0)\n', (4629, 4635), True, 'from oneflow.compatible import single_client as flow\n'), ((4653, 4687), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (4673, 4687), True, 'from oneflow.compatible import single_client as flow\n'), ((4712, 4732), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['out1'], {}), '(out1)\n', (4726, 4732), True, 'from oneflow.compatible import single_client as flow\n'), ((2035, 2069), 'numpy.ones', 'np.ones', (['(10, 2)'], {'dtype': 'np.float32'}), '((10, 2), 
dtype=np.float32)\n', (2042, 2069), True, 'import numpy as np\n'), ((3480, 3513), 'numpy.ones', 'np.ones', (['(5, 2)'], {'dtype': 'np.float32'}), '((5, 2), dtype=np.float32)\n', (3487, 3513), True, 'import numpy as np\n'), ((4858, 4891), 'numpy.ones', 'np.ones', (['(0, 0)'], {'dtype': 'np.float32'}), '((0, 0), dtype=np.float32)\n', (4865, 4891), True, 'import numpy as np\n'), ((2442, 2477), 'numpy.zeros', 'np.zeros', (['(10, 2)'], {'dtype': 'np.float32'}), '((10, 2), dtype=np.float32)\n', (2450, 2477), True, 'import numpy as np\n'), ((3895, 3929), 'numpy.zeros', 'np.zeros', (['(5, 2)'], {'dtype': 'np.float32'}), '((5, 2), dtype=np.float32)\n', (3903, 3929), True, 'import numpy as np\n'), ((5273, 5307), 'numpy.zeros', 'np.zeros', (['(0, 0)'], {'dtype': 'np.float32'}), '((0, 0), dtype=np.float32)\n', (5281, 5307), True, 'import numpy as np\n'), ((2281, 2315), 'numpy.ones', 'np.ones', (['(10, 2)'], {'dtype': 'np.float32'}), '((10, 2), dtype=np.float32)\n', (2288, 2315), True, 'import numpy as np\n'), ((3735, 3768), 'numpy.ones', 'np.ones', (['(5, 2)'], {'dtype': 'np.float32'}), '((5, 2), dtype=np.float32)\n', (3742, 3768), True, 'import numpy as np\n'), ((5113, 5146), 'numpy.ones', 'np.ones', (['(0, 0)'], {'dtype': 'np.float32'}), '((0, 0), dtype=np.float32)\n', (5120, 5146), True, 'import numpy as np\n'), ((843, 869), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (863, 869), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLess(flow.unittest.TestCase):
    """Checks flow.lt, Tensor.lt, and the `<` operator against numpy.less."""

    @staticmethod
    def _check(test_case, of_out, np_out):
        # Shared verification step: the oneflow result must match numpy exactly.
        test_case.assertTrue(np.array_equal(of_out.numpy(), np_out))

    def test_less_v1(test_case):
        lhs = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        rhs = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        of_out = flow.lt(lhs, rhs)
        TestLess._check(test_case, of_out, np.less(lhs.numpy(), rhs.numpy()))

    def test_tensor_less(test_case):
        lhs = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        rhs = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        of_out = lhs.lt(rhs)
        TestLess._check(test_case, of_out, np.less(lhs.numpy(), rhs.numpy()))

    def test_less_symbol(test_case):
        lhs = flow.Tensor(np.array([1, 1, 4]).astype(np.float32), dtype=flow.float32)
        rhs = flow.Tensor(np.array([1, 2, 3]).astype(np.float32), dtype=flow.float32)
        of_out = lhs < rhs
        TestLess._check(test_case, of_out, np.less(lhs.numpy(), rhs.numpy()))

    def test_less_int_scalar(test_case):
        arr = np.random.randn(2, 3, 4, 5)
        lhs = flow.Tensor(arr, dtype=flow.float32)
        of_out = lhs < 1
        TestLess._check(test_case, of_out, np.less(arr, 1))

    def test_less_float_scalar(test_case):
        arr = np.random.randn(3, 2, 5, 7)
        lhs = flow.Tensor(arr, dtype=flow.float32)
        of_out = lhs < 2.3
        TestLess._check(test_case, of_out, np.less(arr, 2.3))
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.Tensor",
"oneflow.experimental.lt"
] | [((2554, 2569), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2567, 2569), False, 'import unittest\n'), ((1025, 1048), 'oneflow.experimental.lt', 'flow.lt', (['input1', 'input2'], {}), '(input1, input2)\n', (1032, 1048), True, 'import oneflow.experimental as flow\n'), ((1964, 1991), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (1979, 1991), True, 'import numpy as np\n'), ((2009, 2048), 'oneflow.experimental.Tensor', 'flow.Tensor', (['np_arr'], {'dtype': 'flow.float32'}), '(np_arr, dtype=flow.float32)\n', (2020, 2048), True, 'import oneflow.experimental as flow\n'), ((2118, 2141), 'numpy.less', 'np.less', (['np_arr', 'input2'], {}), '(np_arr, input2)\n', (2125, 2141), True, 'import numpy as np\n'), ((2272, 2299), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)', '(5)', '(7)'], {}), '(3, 2, 5, 7)\n', (2287, 2299), True, 'import numpy as np\n'), ((2317, 2356), 'oneflow.experimental.Tensor', 'flow.Tensor', (['np_arr'], {'dtype': 'flow.float32'}), '(np_arr, dtype=flow.float32)\n', (2328, 2356), True, 'import oneflow.experimental as flow\n'), ((2428, 2451), 'numpy.less', 'np.less', (['np_arr', 'input2'], {}), '(np_arr, input2)\n', (2435, 2451), True, 'import numpy as np\n'), ((690, 733), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (731, 733), True, 'import oneflow.experimental as flow\n'), ((881, 908), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (896, 908), True, 'import numpy as np\n'), ((959, 986), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (974, 986), True, 'import numpy as np\n'), ((1242, 1269), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1257, 1269), True, 'import numpy as np\n'), ((1320, 1347), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], 
{}), '(2, 6, 5, 3)\n', (1335, 1347), True, 'import numpy as np\n'), ((1597, 1616), 'numpy.array', 'np.array', (['[1, 1, 4]'], {}), '([1, 1, 4])\n', (1605, 1616), True, 'import numpy as np\n'), ((1686, 1705), 'numpy.array', 'np.array', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1694, 1705), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import inspect
import threading
import traceback
from contextlib import contextmanager
from typing import Callable
from google.protobuf import text_format
import oneflow
import oneflow._oneflow_internal
import oneflow.core.job.job_set_pb2 as job_set_util
import oneflow.eager.op_executor as op_executor
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.check_point_v2 as check_point_v2
import oneflow.framework.compiler as compiler
import oneflow.framework.config_util as config_util
import oneflow.framework.env_util as env_util
import oneflow.framework.hob as hob
import oneflow.framework.job_instance as job_instance_util
import oneflow.framework.module as module_util
import oneflow.framework.push_util as push_util
import oneflow.framework.session_context as session_ctx
import oneflow.framework.typing_util as oft_util
import oneflow.support.enable_if as enable_if
from oneflow import oneflow_deprecate
from oneflow.core.job.job_set_pb2 import ConfigProto
from oneflow.experimental import interface_op_read_and_write
from oneflow.framework.check_point import SnapshotManager
from oneflow.framework.function_desc import FunctionDesc
from oneflow.framework.pull_util import EagerFutureRemoteBlobs, LazyFutureRemoteBlobs
from oneflow.framework.session_context import SessionStatus
def api_find_or_create_module(
    module_name: str, create: Callable[[], None], reuse: bool = False
):
    """Look up `module_name` in the current job, creating it via `create` if absent."""
    dispatch = enable_if.unique([find_or_create_module])
    return dispatch(module_name, create, reuse)
@enable_if.condition(hob.in_global_mode)
def find_or_create_module(module_name, create, reuse=False):
    """Return the module registered under `module_name` for the current job.

    When absent, the module is built by calling `create()` (which must return a
    `module_util.Module`). Unless `reuse` is True, registering the same name a
    second time within one global function is treated as an error.
    """
    assert callable(create)
    sess = session_ctx.GetDefaultSession()
    job_name = oneflow.current_global_function_desc().job_config_proto.job_name()
    module_map = sess.job_name2module_name2module_.setdefault(job_name, {})
    if module_name in module_map:
        if not reuse:
            assert module_name not in sess.existed_module_names_, (
                "duplicated module_name `%s' in global_function `%s'"
                % (module_name, job_name)
            )
    else:
        new_module = create()
        assert isinstance(new_module, module_util.Module)
        module_map[module_name] = new_module
    sess.existed_module_names_.add(module_name)
    return module_map[module_name]
def api_clear_default_session() -> None:
    """Clear the default session. All compiled OneFlow functions will be deleted."""
    return enable_if.unique([clear_default_session])()
@enable_if.condition(hob.in_normal_mode)
def clear_default_session():
    """Replace the default session with a freshly-created one.

    In single-client mode the existing default session is closed first; the new
    session is opened unconditionally (matching the original control flow).
    """
    if not oneflow._oneflow_internal.IsMultiClient():
        session_ctx.TryCloseDefaultSession()
    fresh_session = Session(oneflow._oneflow_internal.NewSessionId())
    session_ctx.OpenDefaultSession(fresh_session)
def api_sync_default_session() -> None:
    """Synchronize the default session. Block until every synchronous OneFlow function and its callback finishes running."""
    return enable_if.unique([sync_default_session])()
@enable_if.condition(hob.in_normal_mode)
def sync_default_session() -> None:
    """Block until the default session has drained its pending work."""
    default_sess = session_ctx.GetDefaultSession()
    default_sess.Sync()
def _TryCompleteConfigProto(config_proto):
    """Fill in `resource.machine_num` from the environment when it is left at 0."""
    if config_proto.resource.machine_num != 0:
        return
    config_proto.resource.machine_num = oneflow._oneflow_internal.GetNodeSize()
def _GetDefaultConfigProto():
    """Build a baseline ConfigProto: machine_num unset (0), a single device
    (GPU when CUDA is compiled in, else CPU), and the current session id."""
    proto = job_set_util.ConfigProto()
    proto.resource.machine_num = 0
    cuda_enabled = oneflow._oneflow_internal.flags.with_cuda()
    if cuda_enabled:
        proto.resource.gpu_device_num = 1
    else:
        proto.resource.gpu_device_num = 0
        proto.resource.cpu_device_num = 1
    proto.session_id = session_ctx.GetDefaultSession().id
    return proto
| [
"oneflow._oneflow_internal.NewSessionId",
"oneflow.current_global_function_desc",
"oneflow._oneflow_internal.IsMultiClient",
"oneflow.framework.session_context.TryCloseDefaultSession",
"oneflow.support.enable_if.condition",
"oneflow._oneflow_internal.flags.with_cuda",
"oneflow.framework.session_context.... | [((2107, 2146), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['hob.in_global_mode'], {}), '(hob.in_global_mode)\n', (2126, 2146), True, 'import oneflow.support.enable_if as enable_if\n'), ((3239, 3278), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['hob.in_normal_mode'], {}), '(hob.in_normal_mode)\n', (3258, 3278), True, 'import oneflow.support.enable_if as enable_if\n'), ((3802, 3841), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['hob.in_normal_mode'], {}), '(hob.in_normal_mode)\n', (3821, 3841), True, 'import oneflow.support.enable_if as enable_if\n'), ((2018, 2059), 'oneflow.support.enable_if.unique', 'enable_if.unique', (['[find_or_create_module]'], {}), '([find_or_create_module])\n', (2034, 2059), True, 'import oneflow.support.enable_if as enable_if\n'), ((2247, 2278), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2276, 2278), True, 'import oneflow.framework.session_context as session_ctx\n'), ((3176, 3217), 'oneflow.support.enable_if.unique', 'enable_if.unique', (['[clear_default_session]'], {}), '([clear_default_session])\n', (3192, 3217), True, 'import oneflow.support.enable_if as enable_if\n'), ((3330, 3371), 'oneflow._oneflow_internal.IsMultiClient', 'oneflow._oneflow_internal.IsMultiClient', ([], {}), '()\n', (3369, 3371), False, 'import oneflow\n'), ((3740, 3780), 'oneflow.support.enable_if.unique', 'enable_if.unique', (['[sync_default_session]'], {}), '([sync_default_session])\n', (3756, 3780), True, 'import oneflow.support.enable_if as enable_if\n'), ((4148, 4174), 'oneflow.core.job.job_set_pb2.ConfigProto', 'job_set_util.ConfigProto', ([], {}), '()\n', (4172, 4174), True, 'import oneflow.core.job.job_set_pb2 as job_set_util\n'), ((4224, 4267), 'oneflow._oneflow_internal.flags.with_cuda', 'oneflow._oneflow_internal.flags.with_cuda', ([], {}), '()\n', (4265, 4267), False, 'import oneflow\n'), 
((3408, 3444), 'oneflow.framework.session_context.TryCloseDefaultSession', 'session_ctx.TryCloseDefaultSession', ([], {}), '()\n', (3442, 3444), True, 'import oneflow.framework.session_context as session_ctx\n'), ((4057, 4096), 'oneflow._oneflow_internal.GetNodeSize', 'oneflow._oneflow_internal.GetNodeSize', ([], {}), '()\n', (4094, 4096), False, 'import oneflow\n'), ((4456, 4487), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4485, 4487), True, 'import oneflow.framework.session_context as session_ctx\n'), ((3882, 3913), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (3911, 3913), True, 'import oneflow.framework.session_context as session_ctx\n'), ((2294, 2332), 'oneflow.current_global_function_desc', 'oneflow.current_global_function_desc', ([], {}), '()\n', (2330, 2332), False, 'import oneflow\n'), ((3505, 3545), 'oneflow._oneflow_internal.NewSessionId', 'oneflow._oneflow_internal.NewSessionId', ([], {}), '()\n', (3543, 3545), False, 'import oneflow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
# Maps a numpy scalar type to the matching oneflow dtype; used by as_tensor to
# check whether a numpy array's dtype already agrees with the requested one.
numpy_dtype_to_oneflow_dtype_dict = {
    np.int32: flow.int32,
    np.int64: flow.int64,
    np.int8: flow.int8,
    np.uint8: flow.uint8,
    # `np.bool` was merely an alias of the builtin `bool` and the alias was
    # removed in NumPy 1.24; using `bool` directly is the same key on all
    # NumPy versions and avoids an AttributeError on modern ones.
    bool: flow.bool,
    np.float64: flow.float64,
    np.float32: flow.float32,
    np.float16: flow.float16,
}
def as_tensor(data, dtype=None, device=None):
    """Convert `data` (tensor, numpy array, or python scalar/sequence) to a tensor.

    Existing tensors are returned as-is when dtype and device already match;
    cpu-bound numpy arrays with a matching dtype go through `flow.from_numpy`
    (presumably sharing memory — the list/scalar path below explicitly does
    not); everything else is materialized with `flow.tensor` and then moved /
    cast as requested.
    """
    if flow.is_tensor(data):
        target_dtype = data.dtype if dtype is None else dtype
        target_device = data.device if device is None else device
        if data.dtype is target_dtype and data.device is target_device:
            return data
        return data.to(dtype=target_dtype, device=target_device)
    if isinstance(data, np.ndarray):
        cpu_target = device is None or device.type == "cpu"
        if dtype is None:
            if cpu_target:
                return flow.from_numpy(data)
            return flow.tensor(data, device=device)
        if data.dtype not in numpy_dtype_to_oneflow_dtype_dict:
            raise TypeError("numpy-ndarray holds elements of unsupported datatype")
        inferred = numpy_dtype_to_oneflow_dtype_dict[data.dtype]
        if inferred is dtype and cpu_target:
            return flow.from_numpy(data)
        if inferred is dtype or not cpu_target:
            return flow.tensor(data, dtype=dtype, device=device)
        return flow.tensor(data, dtype=dtype)
    # Tuples, lists, and scalars are first materialized as a numpy array;
    # memory is never shared in this case.
    result = flow.tensor(np.array(data))
    if device is not None:
        result = result.to(device)
    if dtype is not None:
        result = result.to(dtype)
    return result
| [
"oneflow.is_tensor",
"oneflow.from_numpy",
"oneflow.tensor"
] | [((944, 964), 'oneflow.is_tensor', 'flow.is_tensor', (['data'], {}), '(data)\n', (958, 964), True, 'import oneflow as flow\n'), ((2297, 2311), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (2305, 2311), True, 'import numpy as np\n'), ((2368, 2385), 'oneflow.tensor', 'flow.tensor', (['data'], {}), '(data)\n', (2379, 2385), True, 'import oneflow as flow\n'), ((1383, 1404), 'oneflow.from_numpy', 'flow.from_numpy', (['data'], {}), '(data)\n', (1398, 1404), True, 'import oneflow as flow\n'), ((1446, 1478), 'oneflow.tensor', 'flow.tensor', (['data'], {'device': 'device'}), '(data, device=device)\n', (1457, 1478), True, 'import oneflow as flow\n'), ((1884, 1905), 'oneflow.from_numpy', 'flow.from_numpy', (['data'], {}), '(data)\n', (1899, 1905), True, 'import oneflow as flow\n'), ((1955, 2000), 'oneflow.tensor', 'flow.tensor', (['data'], {'dtype': 'dtype', 'device': 'device'}), '(data, dtype=dtype, device=device)\n', (1966, 2000), True, 'import oneflow as flow\n'), ((2109, 2139), 'oneflow.tensor', 'flow.tensor', (['data'], {'dtype': 'dtype'}), '(data, dtype=dtype)\n', (2120, 2139), True, 'import oneflow as flow\n'), ((2189, 2234), 'oneflow.tensor', 'flow.tensor', (['data'], {'dtype': 'dtype', 'device': 'device'}), '(data, dtype=dtype, device=device)\n', (2200, 2234), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Union
import oneflow as flow
from oneflow.nn.module import Module
from oneflow.nn.common_types import _size_4_t
from oneflow.nn.modules.utils import _quadruple
class ReplicationPad2d(Module):
"""The interface is consistent with PyTorch.
The documentation is referenced from:
https://pytorch.org/docs/stable/generated/torch.nn.ReplicationPad2d.html?highlight=replicationpad2d#torch.nn.ReplicationPad2d
Pads the input tensor using the replication of the input boundary.
Args:
padding (Union[int, tuple, list]): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\\mathrm{padding_{left}}`, :math:`\\mathrm{padding_{right}}`, :math:`\\mathrm{padding_{top}}`, :math:`\\mathrm{padding_{bottom}}`)
Shape:
- Input: :math:`(N, C, H_{in}, W_{in})`
- Output: :math:`(N, C, H_{out}, W_{out})` where
:math:`H_{out} = H_{in} + \\mathrm{padding_{top}} + \\mathrm{padding_{bottom}}`
:math:`W_{out} = W_{in} + \\mathrm{padding_{left}} + \\mathrm{padding_{right}}`
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> replicationpad_layer_0 = flow.nn.ReplicationPad2d((2, 2, 1, 1))
>>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32))
>>> input_int = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.int32))
>>> output = replicationpad_layer_0(input)
>>> output.shape
flow.Size([1, 2, 5, 7])
>>> output
tensor([[[[ 0., 0., 0., 1., 2., 2., 2.],
[ 0., 0., 0., 1., 2., 2., 2.],
[ 3., 3., 3., 4., 5., 5., 5.],
[ 6., 6., 6., 7., 8., 8., 8.],
[ 6., 6., 6., 7., 8., 8., 8.]],
<BLANKLINE>
[[ 9., 9., 9., 10., 11., 11., 11.],
[ 9., 9., 9., 10., 11., 11., 11.],
[12., 12., 12., 13., 14., 14., 14.],
[15., 15., 15., 16., 17., 17., 17.],
[15., 15., 15., 16., 17., 17., 17.]]]], dtype=oneflow.float32)
>>> output_int = replicationpad_layer_0(input_int)
>>> output_int
tensor([[[[ 0., 0., 0., 1., 2., 2., 2.],
[ 0., 0., 0., 1., 2., 2., 2.],
[ 3., 3., 3., 4., 5., 5., 5.],
[ 6., 6., 6., 7., 8., 8., 8.],
[ 6., 6., 6., 7., 8., 8., 8.]],
<BLANKLINE>
[[ 9., 9., 9., 10., 11., 11., 11.],
[ 9., 9., 9., 10., 11., 11., 11.],
[12., 12., 12., 13., 14., 14., 14.],
[15., 15., 15., 16., 17., 17., 17.],
[15., 15., 15., 16., 17., 17., 17.]]]], dtype=oneflow.float32)
"""
def __init__(self, padding: _size_4_t):
super().__init__()
if isinstance(padding, (tuple, list)):
assert len(padding) == 4, ValueError("Length of padding must be 4")
boundary = [*padding]
elif isinstance(padding, int):
boundary = _quadruple(padding)
else:
raise ValueError("padding must be int or list or tuple!")
self.padding = boundary
def forward(self, x):
return flow._C.pad(x, pad=self.padding, mode="replicate")
def extra_repr(self) -> str:
return "{}".format(self.padding)
class ReflectionPad2d(Module):
    """The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ReflectionPad2d.html
    This operator pads the input tensor using the reflection of the input boundary.
    Args:
        padding (Union[int,tuple,list]): The size or boundary of padding, if is `int` uses the same padding in all dimension; if 4-dims `tuple`, uses :math:`(\\text{padding}_{\\text{left}}, \\text{padding}_{\\text{right}}, \\text{padding}_{\\text{top}}, \\text{padding}_{\\text{bottom}} )`
    Returns:
        Tensor: Returns a new tensor which is result of the reflection padding of the input tensor.
    Shape:
        - Input: :math:`(N, C, H_{\\text{in}}, W_{\\text{in}})`
        - Output: :math:`(N, C, H_{\\text{out}}, W_{\\text{out}})` where
          :math:`H_{\\text{out}} = H_{\\text{in}} + \\text{padding}_{\\text{top}} + \\text{padding}_{\\text{bottom}}`
          :math:`W_{\\text{out}} = W_{\\text{in}} + \\text{padding}_{\\text{left}} + \\text{padding}_{\\text{right}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)), dtype=flow.float32)
        >>> m = flow.nn.ReflectionPad2d((2, 2, 1, 1))
        >>> out = m(input)
        >>> out
        tensor([[[[ 5.,  4.,  3.,  4.,  5.,  4.,  3.],
                  [ 2.,  1.,  0.,  1.,  2.,  1.,  0.],
                  [ 5.,  4.,  3.,  4.,  5.,  4.,  3.],
                  [ 8.,  7.,  6.,  7.,  8.,  7.,  6.],
                  [ 5.,  4.,  3.,  4.,  5.,  4.,  3.]],
        <BLANKLINE>
                 [[14., 13., 12., 13., 14., 13., 12.],
                  [11., 10.,  9., 10., 11., 10.,  9.],
                  [14., 13., 12., 13., 14., 13., 12.],
                  [17., 16., 15., 16., 17., 16., 15.],
                  [14., 13., 12., 13., 14., 13., 12.]]]], dtype=oneflow.float32)
    """
    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        # Generalized: accept list as well as tuple, matching the other
        # padding modules in this file (ReplicationPad2d, ConstantPad2d).
        if isinstance(padding, (tuple, list)):
            assert len(padding) == 4, ValueError("Padding length must be 4")
            boundary = [*padding]
        elif isinstance(padding, int):
            boundary = _quadruple(padding)
        else:
            # Fixed typo in the original message ("must be in" -> "must be int").
            raise ValueError("padding must be int or list or tuple!")
        self.padding = boundary
    def forward(self, x):
        # Reflection is only defined when each pad size is strictly smaller
        # than the corresponding input dimension.
        (H, W) = (x.shape[2], x.shape[3])
        if (
            self.padding[2] < H
            and self.padding[3] < H
            and (self.padding[0] < W)
            and (self.padding[1] < W)
        ):
            return flow._C.pad(x, pad=self.padding, mode="reflect")
        else:
            raise ValueError(
                "padding size should be less than the corresponding input dimension!"
            )
    def extra_repr(self) -> str:
        return "{}".format(self.padding)
class ConstantPad1d(Module):
    """Pads the input tensor boundaries with a constant value.
    The interface is consistent with PyTorch, and referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ConstantPad1d.html?highlight=constantpad1d#torch.nn.ConstantPad1d
    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.
    Args:
        padding (int, list, tuple): the size of the padding. If is `int`, uses the same
            padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\\text{padding_left}`, :math:`\\text{padding_right}`)
        value (int, float): The constant value used for padding. Defaults to 0.
    Shape:
        - Input: :math:`(N, C, W_{in})`
        - Output: :math:`(N, C, W_{out})` where
          :math:`W_{out} = W_{in} + \\text{padding\\_left} + \\text{padding\\_right}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.arange(8).reshape(2,2,2).astype(np.float32))
        >>> m = flow.nn.ConstantPad1d(padding=[1, 2], value=9.9999)
        >>> output = m(input)
        >>> output
        tensor([[[9.9999, 0.0000, 1.0000, 9.9999, 9.9999],
                 [9.9999, 2.0000, 3.0000, 9.9999, 9.9999]],
        <BLANKLINE>
                [[9.9999, 4.0000, 5.0000, 9.9999, 9.9999],
                 [9.9999, 6.0000, 7.0000, 9.9999, 9.9999]]], dtype=oneflow.float32)
    """
    def __init__(self, padding: Union[int, tuple, list], value: Union[int, float] = 0):
        super().__init__()
        if isinstance(padding, (tuple, list)):
            # Fixed: the message previously said "must be 4" although the
            # 1-D module checks for exactly 2 entries (left, right).
            assert len(padding) == 2, ValueError("Length of padding must be 2")
            boundary = [padding[0], padding[1]]
        elif isinstance(padding, int):
            boundary = [padding, padding]
        else:
            raise ValueError("padding must be int or list or tuple!")
        self.padding = boundary
        self.value = value
    def forward(self, x):
        # Coerce the fill value to the tensor's dtype family; note this
        # mutates self.value (kept for parity with the sibling modules).
        if x.dtype in (flow.float32, flow.float16, flow.float64):
            self.value = float(self.value)
        else:
            self.value = int(self.value)
        return flow._C.pad(x, pad=self.padding, mode="constant", value=self.value)
class ConstantPad2d(Module):
    """The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ConstantPad2d.html?highlight=constantpad2d#torch.nn.ConstantPad2d
    This operator pads the input with constant value that user specifies.
    User can set the amount of padding by setting the parameter `paddings`.
    Args:
        padding (int, tuple, list):  the size of the padding.
            If is `int`, uses the same padding in all boundaries.
            If a 4-`tuple`, uses
            (:math:`\\mathrm{padding_{left}}`, :math:`\\mathrm{padding_{right}}`, :math:`\\mathrm{padding_{top}}`, :math:`\\mathrm{padding_{bottom}}`)
        value (int, float): The constant value used for padding. Defaults to 0.
    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
            :math:`H_{out} = H_{in} + \\mathrm{padding_{top}} + \\mathrm{padding_{bottom}}`
            :math:`W_{out} = W_{in} + \\mathrm{padding_{left}} + \\mathrm{padding_{right}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> constantpad_layer_0 = flow.nn.ConstantPad2d((2, 2, 1, 1), 1)
        >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32))
        >>> input_int = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.int32))
        >>> output = constantpad_layer_0(input)
        >>> output.shape
        flow.Size([1, 2, 5, 7])
        >>> output
        tensor([[[[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  0.,  1.,  2.,  1.,  1.],
                  [ 1.,  1.,  3.,  4.,  5.,  1.,  1.],
                  [ 1.,  1.,  6.,  7.,  8.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]],
        <BLANKLINE>
                 [[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  9., 10., 11.,  1.,  1.],
                  [ 1.,  1., 12., 13., 14.,  1.,  1.],
                  [ 1.,  1., 15., 16., 17.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]]]], dtype=oneflow.float32)
        >>> output_int = constantpad_layer_0(input_int)
        >>> output_int
        tensor([[[[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  0.,  1.,  2.,  1.,  1.],
                  [ 1.,  1.,  3.,  4.,  5.,  1.,  1.],
                  [ 1.,  1.,  6.,  7.,  8.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]],
        <BLANKLINE>
                 [[ 1.,  1.,  1.,  1.,  1.,  1.,  1.],
                  [ 1.,  1.,  9., 10., 11.,  1.,  1.],
                  [ 1.,  1., 12., 13., 14.,  1.,  1.],
                  [ 1.,  1., 15., 16., 17.,  1.,  1.],
                  [ 1.,  1.,  1.,  1.,  1.,  1.,  1.]]]], dtype=oneflow.float32)
    """
    def __init__(self, padding: Union[int, tuple, list], value: Union[int, float] = 0):
        """Normalize ``padding`` to [left, right, top, bottom] and store the fill value."""
        super().__init__()
        if isinstance(padding, int):
            pad_sides = [padding] * 4
        elif isinstance(padding, (tuple, list)):
            assert len(padding) == 4, ValueError("Length of padding must be 4")
            pad_sides = list(padding)
        else:
            raise ValueError("padding must be int or list or tuple!")
        self.padding = pad_sides
        self.value = value
    def forward(self, x):
        """Pad ``x`` on every side with the constant fill value."""
        # The fill value must match the tensor's dtype family; this mutates
        # self.value, exactly as the original implementation did.
        is_floating = x.dtype in (flow.float32, flow.float16, flow.float64)
        self.value = float(self.value) if is_floating else int(self.value)
        return flow._C.pad(x, mode="constant", pad=self.padding, value=self.value)
class ConstantPad3d(Module):
    """Pads the input tensor boundaries with a constant value.
    The interface is consistent with PyTorch, and referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ConstantPad3d.html?highlight=constantpad3d#torch.nn.ConstantPad3d
    For `N`-dimensional padding, use :func:`flow.nn.functional.pad()`.
    Args:
        padding (int, list, tuple): the size of the padding. If is `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\\text{padding_left}`, :math:`\\text{padding_right}`,
            :math:`\\text{padding_top}`, :math:`\\text{padding_bottom}`,
            :math:`\\text{padding_front}`, :math:`\\text{padding_back}`)
        value (int, float): The constant value used for padding. Defaults to 0.
    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` where
          :math:`D_{out} = D_{in} + \\text{padding_front} + \\text{padding_back}`
          :math:`H_{out} = H_{in} + \\text{padding_top} + \\text{padding_bottom}`
          :math:`W_{out} = W_{in} + \\text{padding_left} + \\text{padding_right}`
    Examples::
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.arange(8).reshape(1,1,2,2,2).astype(np.int32))
        >>> m = flow.nn.ConstantPad3d(padding=1, value=9)
        >>> output = m(input)
        >>> output
        tensor([[[[[9, 9, 9, 9],
                   [9, 9, 9, 9],
                   [9, 9, 9, 9],
                   [9, 9, 9, 9]],
        <BLANKLINE>
                  [[9, 9, 9, 9],
                   [9, 0, 1, 9],
                   [9, 2, 3, 9],
                   [9, 9, 9, 9]],
        <BLANKLINE>
                  [[9, 9, 9, 9],
                   [9, 4, 5, 9],
                   [9, 6, 7, 9],
                   [9, 9, 9, 9]],
        <BLANKLINE>
                  [[9, 9, 9, 9],
                   [9, 9, 9, 9],
                   [9, 9, 9, 9],
                   [9, 9, 9, 9]]]]], dtype=oneflow.int32)
    """
    def __init__(self, padding: Union[int, tuple, list], value: Union[int, float] = 0):
        """Normalize ``padding`` to the six-element order oneflow expects."""
        super().__init__()
        if isinstance(padding, int):
            pad_sides = [padding] * 6
        elif isinstance(padding, (tuple, list)):
            assert len(padding) == 6, ValueError("Length of padding must be 6")
            pad_sides = list(padding)
        else:
            raise ValueError("padding must be int or list or tuple!")
        self.padding = pad_sides
        self.value = value
    def forward(self, x):
        """Pad ``x`` on all six boundaries with the constant fill value."""
        # Match fill-value type to the tensor dtype family; mutating
        # self.value mirrors the original behavior.
        is_floating = x.dtype in (flow.float32, flow.float16, flow.float64)
        self.value = float(self.value) if is_floating else int(self.value)
        return flow._C.pad(x, mode="constant", pad=self.padding, value=self.value)
class ZeroPad2d(Module):
    """The interface is consistent with PyTorch.
    The documentation is referenced from:
    https://pytorch.org/docs/stable/generated/torch.nn.ZeroPad2d.html
    Pads the input tensor boundaries with zero. User can set the amount of padding by setting the parameter `paddings`.
    Args:
        padding (Union[int, tuple]): the size of the padding. If is `int`, uses the same padding in all boundaries. If a 4-`tuple`, uses (:math:`\\mathrm{padding_{left}}`, :math:`\\mathrm{padding_{right}}`, :math:`\\mathrm{padding_{top}}`, :math:`\\mathrm{padding_{bottom}}`)
    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})`
        - Output: :math:`(N, C, H_{out}, W_{out})` where
            :math:`H_{out} = H_{in} + \\mathrm{padding_{top}} + \\mathrm{padding_{bottom}}`
            :math:`W_{out} = W_{in} + \\mathrm{padding_{left}} + \\mathrm{padding_{right}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> zeropad_layer_int = flow.nn.ZeroPad2d(2)
        >>> zeropad_layer_tuple = flow.nn.ZeroPad2d((1,2,2,0))
        >>> input = flow.Tensor(np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32))
        >>> output_int = zeropad_layer_int(input)
        >>> output_int.shape
        flow.Size([1, 2, 7, 7])
        >>> output_int
        tensor([[[[ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  1.,  2.,  0.,  0.],
                  [ 0.,  0.,  3.,  4.,  5.,  0.,  0.],
                  [ 0.,  0.,  6.,  7.,  8.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.]],
        <BLANKLINE>
                 [[ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  9., 10., 11.,  0.,  0.],
                  [ 0.,  0., 12., 13., 14.,  0.,  0.],
                  [ 0.,  0., 15., 16., 17.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.,  0.]]]], dtype=oneflow.float32)
        >>> output_tuple = zeropad_layer_tuple(input)
        >>> output_tuple
        tensor([[[[ 0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  1.,  2.,  0.],
                  [ 0.,  3.,  4.,  5.,  0.,  0.],
                  [ 0.,  6.,  7.,  8.,  0.,  0.]],
        <BLANKLINE>
                 [[ 0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  0.,  0.,  0.,  0.,  0.],
                  [ 0.,  9., 10., 11.,  0.,  0.],
                  [ 0., 12., 13., 14.,  0.,  0.],
                  [ 0., 15., 16., 17.,  0.,  0.]]]], dtype=oneflow.float32)
    """
    def __init__(self, padding: Union[int, tuple]):
        """Normalize ``padding`` into a [left, right, top, bottom] list; fill is zero."""
        super().__init__()
        if isinstance(padding, int):
            sides = [padding] * 4
        elif isinstance(padding, tuple):
            assert len(padding) == 4, ValueError("Length of padding must be 4")
            sides = list(padding)
        else:
            raise ValueError("padding must be int or tuple!")
        self.padding = sides
        self.value = 0.0  # constant fill; split into float/int attrs in forward()
    def forward(self, x):
        """Zero-pad a 4-D tensor via the constant_pad2d builtin op."""
        # The unpack implicitly asserts x is 4-D (kept from the original).
        (_, _, h, w) = x.shape
        if x.dtype in [flow.float32, flow.float16, flow.float64]:
            float_fill = float(self.value)
            int_fill = int(0)
        else:
            float_fill = float(0)
            int_fill = int(self.value)
        op = (
            flow.builtin_op("constant_pad2d")
            .Input("x")
            .Output("y")
            .Attr("padding", self.padding)
            .Attr("floating_value", float_fill)
            .Attr("integral_value", int_fill)
            .Build()
        )
        self._op = op  # cached on the instance, as in the original
        return op(x)[0]
if __name__ == "__main__":
    import doctest
    # Run the doctest examples embedded in the padding-module docstrings;
    # raise_on_error aborts on the first failing example.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.nn.modules.utils._quadruple",
"oneflow.builtin_op",
"oneflow._C.pad"
] | [((20039, 20075), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (20054, 20075), False, 'import doctest\n'), ((3946, 3996), 'oneflow._C.pad', 'flow._C.pad', (['x'], {'pad': 'self.padding', 'mode': '"""replicate"""'}), "(x, pad=self.padding, mode='replicate')\n", (3957, 3996), True, 'import oneflow as flow\n'), ((9190, 9257), 'oneflow._C.pad', 'flow._C.pad', (['x'], {'pad': 'self.padding', 'mode': '"""constant"""', 'value': 'self.value'}), "(x, pad=self.padding, mode='constant', value=self.value)\n", (9201, 9257), True, 'import oneflow as flow\n'), ((12888, 12955), 'oneflow._C.pad', 'flow._C.pad', (['x'], {'pad': 'self.padding', 'mode': '"""constant"""', 'value': 'self.value'}), "(x, pad=self.padding, mode='constant', value=self.value)\n", (12899, 12955), True, 'import oneflow as flow\n'), ((15966, 16033), 'oneflow._C.pad', 'flow._C.pad', (['x'], {'pad': 'self.padding', 'mode': '"""constant"""', 'value': 'self.value'}), "(x, pad=self.padding, mode='constant', value=self.value)\n", (15977, 16033), True, 'import oneflow as flow\n'), ((6742, 6790), 'oneflow._C.pad', 'flow._C.pad', (['x'], {'pad': 'self.padding', 'mode': '"""reflect"""'}), "(x, pad=self.padding, mode='reflect')\n", (6753, 6790), True, 'import oneflow as flow\n'), ((3768, 3787), 'oneflow.nn.modules.utils._quadruple', '_quadruple', (['padding'], {}), '(padding)\n', (3778, 3787), False, 'from oneflow.nn.modules.utils import _quadruple\n'), ((6351, 6370), 'oneflow.nn.modules.utils._quadruple', '_quadruple', (['padding'], {}), '(padding)\n', (6361, 6370), False, 'from oneflow.nn.modules.utils import _quadruple\n'), ((19677, 19710), 'oneflow.builtin_op', 'flow.builtin_op', (['"""constant_pad2d"""'], {}), "('constant_pad2d')\n", (19692, 19710), True, 'import oneflow as flow\n')] |
# Copyright TODO
""" BNN with VI.ELBO example code """
import sys
import os
import math
import numpy as np
import oneflow.experimental as flow
flow.enable_eager_execution()  # switch oneflow into eager (imperative) mode
sys.path.append('..')  # make sibling packages (conf, zhusuan_of, utils) importable
sys.path.append('../..')
import conf
from zhusuan_of.framework.bn import BayesianNet
from zhusuan_of.variational.elbo import ELBO
from utils import load_uci_boston_housing, standardize
class Net(BayesianNet):
    """Bayesian MLP generative model: Normal priors over per-layer weights,
    a Normal likelihood over targets, and an RMSE cache for monitoring."""
    def __init__(self, layer_sizes, n_particles):
        # layer_sizes: e.g. [x_dim, 50, 1]; n_particles: Monte-Carlo samples
        # drawn per weight to approximate the expectation.
        super().__init__()
        self.layer_sizes = layer_sizes
        self.n_particles = n_particles
        # Learnable log standard deviation of the observation noise.
        self.y_logstd = flow.nn.Parameter(flow.Tensor(1))
        flow.nn.init.uniform_(self.y_logstd)
    def forward(self, observed):
        """Run the sampled-weight MLP; registers stochastic nodes and caches RMSE."""
        self.observe(observed)
        x = self.observed['x']
        # Replicate the input once per particle: (n_particles, batch, x_dim)
        # assuming x is (batch, x_dim) — TODO confirm against caller.
        h = flow.tile(x, reps=(self.n_particles, *len(x.shape)*[1]))
        batch_size = x.shape[0]
        for i, (n_in, n_out) in enumerate(zip(self.layer_sizes[:-1], self.layer_sizes[1:])):
            # Standard-normal prior over layer i's weights; the extra input
            # column (n_in + 1) absorbs the bias term appended to h below.
            w = self.sn('Normal',
                        name="w" + str(i),
                        mean=flow.zeros((n_out, n_in + 1), dtype=flow.float32),
                        std=flow.ones((n_out, n_in +1), dtype=flow.float32),
                        group_ndims=2,
                        n_samples=self.n_particles,
                        reduce_mean_dims=[0])
            w = flow.unsqueeze(w, dim=1)
            w = flow.tile(w, reps=(1, batch_size, 1,1))
            # Append a constant-1 feature so the bias is part of the matmul.
            h = flow.cat([h, flow.ones((*(list(h.shape)[:-1]), 1), dtype=flow.float32).to("cuda")], -1)
            h = flow.reshape(h, list(h.shape) + [1])
            # Scale by sqrt(fan_in) to keep activations well-conditioned.
            p = flow.sqrt(flow.Tensor([h.shape[2]], dtype=flow.float32).to("cuda"))
            # Flatten (particle, batch) into one batch axis for bmm.
            wd1, wd2, wd3, wd4 = w.shape
            w = flow.reshape(w, [wd1*wd2, wd3, wd4])
            hd1, hd2, hd3, hd4 = h.shape
            h = flow.reshape(h, [hd1*hd2, hd3, hd4])
            h = flow.bmm(w, h) / p
            h = flow.reshape(h, [hd1, hd2, wd3, hd4])
            h = flow.squeeze(h, [-1])
            # ReLU on all but the final (output) layer.
            if i < len(self.layer_sizes) - 2:
                h = flow.nn.ReLU()(h)
        y_mean = flow.squeeze(h, [2])
        y = self.observed['y']
        # Average the predictions over particles for the RMSE diagnostic.
        y_pred = flow.mean(y_mean, dim=[0])
        self.cache['rmse'] = flow.sqrt(flow.mean((y - y_pred)**2))
        # Observation likelihood; multiplier rescales the minibatch
        # log-likelihood to the full dataset.
        self.sn('Normal',
                name='y',
                mean=y_mean,
                logstd=self.y_logstd,
                reparameterize=True,
                reduce_mean_dims=[0,1],
                multiplier=456,) ## training data size
        return self
class Variational(BayesianNet):
    """Mean-field Gaussian variational posterior over the BNN weights:
    one learnable (mean, logstd) pair per layer."""
    def __init__(self, layer_sizes, n_particles):
        super().__init__()
        self.layer_sizes = layer_sizes
        self.n_particles = n_particles
        self.w_means = []
        self.w_logstds = []
        # One variational parameter pair per weight matrix; the +1 column
        # mirrors the bias column used by Net.
        for i, (n_in, n_out) in enumerate(zip(self.layer_sizes[:-1], self.layer_sizes[1:])):
            w_mean_ = flow.nn.Parameter(flow.Tensor(n_out, n_in + 1))
            self.w_means.append(w_mean_)
            w_logstd_ = flow.nn.Parameter(flow.Tensor(n_out, n_in + 1))
            self.w_logstds.append(w_logstd_)
        self.w_means = flow.nn.ParameterList(self.w_means)
        self.w_logstds = flow.nn.ParameterList(self.w_logstds)
    def forward(self, observed):
        """Register the variational Normal node for each layer's weights."""
        self.observe(observed)
        for i, (n_in, n_out) in enumerate(zip(self.layer_sizes[:-1], self.layer_sizes[1:])):
            # NOTE(review): this passes `reparametrize` while Net passes
            # `reparameterize` — verify which spelling the zhusuan_of `sn`
            # API actually consumes; a misspelled kwarg may be ignored.
            self.sn('Normal',
                    name='w' + str(i),
                    mean=self.w_means[i],
                    logstd=self.w_logstds[i],
                    group_ndims=2,
                    n_samples=self.n_particles,
                    reparametrize=True,
                    reduce_mean_dims=[0])
        return self
def main():
    """Train the Bayesian NN on UCI Boston housing with VI/ELBO and
    periodically report the (de-standardized) test RMSE."""
    # Load UCI Boston housing data
    data_path = os.path.join(conf.data_dir, "housing.data")
    x_train, y_train, x_valid, y_valid, x_test, y_test = \
        load_uci_boston_housing(data_path)
    # Fold the validation split into training.
    x_train = np.vstack([x_train, x_valid])
    y_train = np.hstack([y_train, y_valid])
    n_train, x_dim = x_train.shape
    # Standardize data
    x_train, x_test, _, _ = standardize(x_train, x_test)
    y_train, y_test, mean_y_train, std_y_train = standardize(
        y_train, y_test)
    print('data size: ', len(x_train))
    # Define model parameters
    lb_samples = 512
    epoch_size = 5000
    batch_size = 114
    n_hiddens = [50]
    layer_sizes = [x_dim] + n_hiddens + [1]
    print('layer size: ', layer_sizes)
    # create the network
    net = Net(layer_sizes, lb_samples).to("cuda")
    variational = Variational(layer_sizes, lb_samples).to("cuda")
    model = ELBO(net, variational).to("cuda")
    lr = 0.001
    optimizer = flow.optim.Adam(parameters=model.parameters(), lr=lr)
    # do train
    len_ = len(x_train)
    num_batches = math.floor(len_ / batch_size)
    # Define training/evaluation parameters
    test_freq = 20
    for epoch in range(epoch_size):
        # Reshuffle the training set each epoch.
        perm = np.random.permutation(x_train.shape[0])
        x_train = x_train[perm, :]
        y_train = y_train[perm]
        for step in range(num_batches):
            x = flow.Tensor(x_train[step*batch_size:(step+1)*batch_size]).to("cuda")
            y = flow.Tensor(y_train[step*batch_size:(step+1)*batch_size]).to("cuda")
            # model(...) returns the negative evidence lower bound to minimize.
            lbs = model({'x':x, 'y':y})
            lbs.backward()
            optimizer.step()
            optimizer.zero_grad()
            # Log once per epoch, on the final minibatch; RMSE is rescaled
            # back to the original target units via std_y_train.
            if (step + 1) % num_batches == 0:
                rmse = net.cache['rmse'].numpy()
                print("Epoch[{}/{}], Step [{}/{}], Lower bound: {:.4f}, RMSE: {:.4f}"
                      .format(epoch + 1, epoch_size, step + 1, num_batches, float(lbs.numpy()), float(rmse )* std_y_train))
        # eval
        if epoch % test_freq == 0:
            x_t = flow.Tensor(x_test).to("cuda")
            y_t = flow.Tensor(y_test).to("cuda")
            lbs = model({'x':x_t, 'y':y_t})
            rmse = net.cache['rmse'].numpy()
            print('>> TEST')
            print('>> Test Lower bound: {:.4f}, RMSE: {:.4f}'.format(float(lbs.numpy()), float(rmse) * std_y_train))
if __name__ == '__main__':
    # Script entry point: train and evaluate the BNN example.
    main()
| [
"oneflow.experimental.nn.ParameterList",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.squeeze",
"oneflow.experimental.nn.init.uniform_",
"oneflow.experimental.zeros",
"oneflow.experimental.reshape",
"oneflow.experimental.unsqueeze",
"oneflow.experimental.mean",
"oneflow.exper... | [((147, 176), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (174, 176), True, 'import oneflow.experimental as flow\n'), ((178, 199), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (193, 199), False, 'import sys\n'), ((200, 224), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (215, 224), False, 'import sys\n'), ((3798, 3841), 'os.path.join', 'os.path.join', (['conf.data_dir', '"""housing.data"""'], {}), "(conf.data_dir, 'housing.data')\n", (3810, 3841), False, 'import os\n'), ((3909, 3943), 'utils.load_uci_boston_housing', 'load_uci_boston_housing', (['data_path'], {}), '(data_path)\n', (3932, 3943), False, 'from utils import load_uci_boston_housing, standardize\n'), ((3958, 3987), 'numpy.vstack', 'np.vstack', (['[x_train, x_valid]'], {}), '([x_train, x_valid])\n', (3967, 3987), True, 'import numpy as np\n'), ((4002, 4031), 'numpy.hstack', 'np.hstack', (['[y_train, y_valid]'], {}), '([y_train, y_valid])\n', (4011, 4031), True, 'import numpy as np\n'), ((4119, 4147), 'utils.standardize', 'standardize', (['x_train', 'x_test'], {}), '(x_train, x_test)\n', (4130, 4147), False, 'from utils import load_uci_boston_housing, standardize\n'), ((4197, 4225), 'utils.standardize', 'standardize', (['y_train', 'y_test'], {}), '(y_train, y_test)\n', (4208, 4225), False, 'from utils import load_uci_boston_housing, standardize\n'), ((4815, 4844), 'math.floor', 'math.floor', (['(len_ / batch_size)'], {}), '(len_ / batch_size)\n', (4825, 4844), False, 'import math\n'), ((634, 670), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['self.y_logstd'], {}), '(self.y_logstd)\n', (655, 670), True, 'import oneflow.experimental as flow\n'), ((2106, 2126), 'oneflow.experimental.squeeze', 'flow.squeeze', (['h', '[2]'], {}), '(h, [2])\n', (2118, 2126), True, 'import oneflow.experimental as flow\n'), ((2176, 2202), 'oneflow.experimental.mean', 'flow.mean', 
(['y_mean'], {'dim': '[0]'}), '(y_mean, dim=[0])\n', (2185, 2202), True, 'import oneflow.experimental as flow\n'), ((3135, 3170), 'oneflow.experimental.nn.ParameterList', 'flow.nn.ParameterList', (['self.w_means'], {}), '(self.w_means)\n', (3156, 3170), True, 'import oneflow.experimental as flow\n'), ((3196, 3233), 'oneflow.experimental.nn.ParameterList', 'flow.nn.ParameterList', (['self.w_logstds'], {}), '(self.w_logstds)\n', (3217, 3233), True, 'import oneflow.experimental as flow\n'), ((4961, 5000), 'numpy.random.permutation', 'np.random.permutation', (['x_train.shape[0]'], {}), '(x_train.shape[0])\n', (4982, 5000), True, 'import numpy as np\n'), ((610, 624), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)'], {}), '(1)\n', (621, 624), True, 'import oneflow.experimental as flow\n'), ((1362, 1386), 'oneflow.experimental.unsqueeze', 'flow.unsqueeze', (['w'], {'dim': '(1)'}), '(w, dim=1)\n', (1376, 1386), True, 'import oneflow.experimental as flow\n'), ((1403, 1443), 'oneflow.experimental.tile', 'flow.tile', (['w'], {'reps': '(1, batch_size, 1, 1)'}), '(w, reps=(1, batch_size, 1, 1))\n', (1412, 1443), True, 'import oneflow.experimental as flow\n'), ((1743, 1781), 'oneflow.experimental.reshape', 'flow.reshape', (['w', '[wd1 * wd2, wd3, wd4]'], {}), '(w, [wd1 * wd2, wd3, wd4])\n', (1755, 1781), True, 'import oneflow.experimental as flow\n'), ((1837, 1875), 'oneflow.experimental.reshape', 'flow.reshape', (['h', '[hd1 * hd2, hd3, hd4]'], {}), '(h, [hd1 * hd2, hd3, hd4])\n', (1849, 1875), True, 'import oneflow.experimental as flow\n'), ((1926, 1963), 'oneflow.experimental.reshape', 'flow.reshape', (['h', '[hd1, hd2, wd3, hd4]'], {}), '(h, [hd1, hd2, wd3, hd4])\n', (1938, 1963), True, 'import oneflow.experimental as flow\n'), ((1981, 2002), 'oneflow.experimental.squeeze', 'flow.squeeze', (['h', '[-1]'], {}), '(h, [-1])\n', (1993, 2002), True, 'import oneflow.experimental as flow\n'), ((2242, 2270), 'oneflow.experimental.mean', 'flow.mean', (['((y - y_pred) ** 2)'], 
{}), '((y - y_pred) ** 2)\n', (2251, 2270), True, 'import oneflow.experimental as flow\n'), ((4634, 4656), 'zhusuan_of.variational.elbo.ELBO', 'ELBO', (['net', 'variational'], {}), '(net, variational)\n', (4638, 4656), False, 'from zhusuan_of.variational.elbo import ELBO\n'), ((1890, 1904), 'oneflow.experimental.bmm', 'flow.bmm', (['w', 'h'], {}), '(w, h)\n', (1898, 1904), True, 'import oneflow.experimental as flow\n'), ((2923, 2951), 'oneflow.experimental.Tensor', 'flow.Tensor', (['n_out', '(n_in + 1)'], {}), '(n_out, n_in + 1)\n', (2934, 2951), True, 'import oneflow.experimental as flow\n'), ((3036, 3064), 'oneflow.experimental.Tensor', 'flow.Tensor', (['n_out', '(n_in + 1)'], {}), '(n_out, n_in + 1)\n', (3047, 3064), True, 'import oneflow.experimental as flow\n'), ((1079, 1128), 'oneflow.experimental.zeros', 'flow.zeros', (['(n_out, n_in + 1)'], {'dtype': 'flow.float32'}), '((n_out, n_in + 1), dtype=flow.float32)\n', (1089, 1128), True, 'import oneflow.experimental as flow\n'), ((1159, 1207), 'oneflow.experimental.ones', 'flow.ones', (['(n_out, n_in + 1)'], {'dtype': 'flow.float32'}), '((n_out, n_in + 1), dtype=flow.float32)\n', (1168, 1207), True, 'import oneflow.experimental as flow\n'), ((2070, 2084), 'oneflow.experimental.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (2082, 2084), True, 'import oneflow.experimental as flow\n'), ((5125, 5188), 'oneflow.experimental.Tensor', 'flow.Tensor', (['x_train[step * batch_size:(step + 1) * batch_size]'], {}), '(x_train[step * batch_size:(step + 1) * batch_size])\n', (5136, 5188), True, 'import oneflow.experimental as flow\n'), ((5210, 5273), 'oneflow.experimental.Tensor', 'flow.Tensor', (['y_train[step * batch_size:(step + 1) * batch_size]'], {}), '(y_train[step * batch_size:(step + 1) * batch_size])\n', (5221, 5273), True, 'import oneflow.experimental as flow\n'), ((5785, 5804), 'oneflow.experimental.Tensor', 'flow.Tensor', (['x_test'], {}), '(x_test)\n', (5796, 5804), True, 'import oneflow.experimental as flow\n'), 
((5834, 5853), 'oneflow.experimental.Tensor', 'flow.Tensor', (['y_test'], {}), '(y_test)\n', (5845, 5853), True, 'import oneflow.experimental as flow\n'), ((1626, 1671), 'oneflow.experimental.Tensor', 'flow.Tensor', (['[h.shape[2]]'], {'dtype': 'flow.float32'}), '([h.shape[2]], dtype=flow.float32)\n', (1637, 1671), True, 'import oneflow.experimental as flow\n')] |
# !/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=============================================================
"""
import json
import re
import six
import numpy as np
from typing import Tuple
# import requests # 在 nfs 没有挂载 时使用 url 访问
import sys
import oneflow as flow
import oneflow.typing as tp
BATCH_SIZE = 16
class TextCNN:
    """Kim-style text-classification CNN built from oneflow lazy ops:
    embedding lookup -> parallel conv branches of different window sizes ->
    max-pool over time -> two dense layers producing class logits."""
    def __init__(self, emb_sz, emb_dim, ksize_list, n_filters_list, n_classes, dropout):
        # emb_sz: vocabulary size; emb_dim: embedding width.
        # ksize_list[i]: conv window height of branch i, which emits
        # n_filters_list[i] feature maps; dropout applies only in training.
        self.initializer = flow.random_normal_initializer(stddev=0.1)
        self.emb_sz = emb_sz
        self.emb_dim = emb_dim
        self.ksize_list = ksize_list
        self.n_filters_list = n_filters_list
        self.n_classes = n_classes
        self.dropout = dropout
        self.total_n_filters = sum(self.n_filters_list)
    def get_logits(self, inputs, is_train):
        """Build the logits graph for `inputs` (token-id matrix; presumably
        (batch, seq_len) int32 as in predict_job's placeholder — TODO confirm)."""
        emb_weight = flow.get_variable(
            'embedding-weight',
            shape=(self.emb_sz, self.emb_dim),
            dtype=flow.float32,
            trainable=is_train,
            reuse=False,
            initializer=self.initializer,
        )
        data = flow.gather(emb_weight, inputs, axis=0)
        data = flow.transpose(data, [0, 2, 1]) # BLH -> BHL
        # Add a trailing width-1 axis so conv2d can treat the sequence as NCHW.
        data = flow.reshape(data, list(data.shape) + [1])
        seq_length = data.shape[2]
        pooled_list = []
        for i in range(len(self.n_filters_list)):
            ksz = self.ksize_list[i]
            n_filters = self.n_filters_list[i]
            conv = flow.layers.conv2d(data, n_filters, [ksz, 1], data_format="NCHW",
                                      kernel_initializer=self.initializer, name='conv-{}'.format(i)) # NCHW
            # conv = flow.layers.layer_norm(conv, name='ln-{}'.format(i))
            conv = flow.nn.relu(conv)
            # Max over every valid window position => one value per filter.
            pooled = flow.nn.max_pool2d(conv, [seq_length - ksz + 1, 1], strides=1, padding='VALID', data_format="NCHW")
            pooled_list.append(pooled)
        # Concatenate branch outputs, then flatten to (batch, total_n_filters).
        pooled = flow.concat(pooled_list, 3)
        pooled = flow.reshape(pooled, [-1, self.total_n_filters])
        if is_train:
            pooled = flow.nn.dropout(pooled, rate=self.dropout)
        pooled = flow.layers.dense(pooled, self.total_n_filters, use_bias=True,
                                  kernel_initializer=self.initializer, name='dense-1')
        pooled = flow.nn.relu(pooled)
        logits = flow.layers.dense(pooled, self.n_classes, use_bias=True,
                                  kernel_initializer=self.initializer, name='dense-2')
        return logits
def get_eval_config():
    """Return the oneflow function config used for inference (float default)."""
    eval_config = flow.function_config()
    eval_config.default_data_type(flow.float)
    return eval_config
def pad_sequences(sequences, maxlen=None, dtype='int32',
                  padding='pre', truncating='pre', value=0.):
    """Pads sequences to the same length.
    This function transforms a list of
    `num_samples` sequences (lists of integers)
    into a 2D Numpy array of shape `(num_samples, num_timesteps)`.
    `num_timesteps` is either the `maxlen` argument if provided,
    or the length of the longest sequence otherwise.
    Sequences that are shorter than `num_timesteps`
    are padded with `value` at the beginning or the end
    if padding='post.
    Sequences longer than `num_timesteps` are truncated
    so that they fit the desired length.
    The position where padding or truncation happens is determined by
    the arguments `padding` and `truncating`, respectively.
    Pre-padding is the default.
    # Arguments
        sequences: List of lists, where each element is a sequence.
        maxlen: Int, maximum length of all sequences.
        dtype: Type of the output sequences.
            To pad sequences with variable length strings, you can use `object`.
        padding: String, 'pre' or 'post':
            pad either before or after each sequence.
        truncating: String, 'pre' or 'post':
            remove values from sequences larger than
            `maxlen`, either at the beginning or at the end of the sequences.
        value: Float or String, padding value.
    # Returns
        x: Numpy array with shape `(len(sequences), maxlen)`
    # Raises
        ValueError: In case of invalid values for `truncating` or `padding`,
            or in case of invalid shape for a `sequences` entry.
    """
    if not hasattr(sequences, '__len__'):
        raise ValueError('`sequences` must be iterable.')
    num_samples = len(sequences)
    lengths = []
    sample_shape = ()
    flag = True
    # take the sample shape from the first non empty sequence
    # checking for consistency in the main loop below.
    for x in sequences:
        try:
            lengths.append(len(x))
            if flag and len(x):
                sample_shape = np.asarray(x).shape[1:]
                flag = False
        except TypeError:
            raise ValueError('`sequences` must be a list of iterables. '
                             'Found non-iterable: ' + str(x))
    if maxlen is None:
        # max(..., default=0) makes an empty `sequences` return an empty
        # (0, 0) array instead of crashing inside np.max.
        maxlen = max(lengths, default=0)
    # `six` dependency dropped: on Python 3, six.string_types == (str,).
    # np.unicode_ was removed in NumPy 2.0; np.str_ covers unicode dtypes.
    is_dtype_str = np.issubdtype(dtype, np.str_)
    if isinstance(value, str) and dtype != object and not is_dtype_str:
        raise ValueError("`dtype` {} is not compatible with `value`'s type: {}\n"
                         "You should set `dtype=object` for variable length strings."
                         .format(dtype, type(value)))
    x = np.full((num_samples, maxlen) + sample_shape, value, dtype=dtype)
    for idx, s in enumerate(sequences):
        if not len(s):
            continue  # empty list/array was found
        if truncating == 'pre':
            trunc = s[-maxlen:]
        elif truncating == 'post':
            trunc = s[:maxlen]
        else:
            raise ValueError('Truncating type "%s" '
                             'not understood' % truncating)
        # check `trunc` has expected shape
        trunc = np.asarray(trunc, dtype=dtype)
        if trunc.shape[1:] != sample_shape:
            raise ValueError('Shape of sample %s of sequence at position %s '
                             'is different from expected shape %s' %
                             (trunc.shape[1:], idx, sample_shape))
        if padding == 'post':
            x[idx, :len(trunc)] = trunc
        elif padding == 'pre':
            x[idx, -len(trunc):] = trunc
        else:
            raise ValueError('Padding type "%s" not understood' % padding)
    return x
@flow.global_function('predict', get_eval_config())
def predict_job(text: tp.Numpy.Placeholder((BATCH_SIZE, 150), dtype=flow.int32),
                ) -> Tuple[tp.Numpy, tp.Numpy]:
    """Lazy oneflow graph: classify a (BATCH_SIZE, 150) batch of token ids.
    Returns (argmax labels, softmax probabilities). The graph is traced at
    import time by the decorator, so the input shape is fixed."""
    with flow.scope.placement("gpu", "0:0"):
        model = TextCNN(50000, 100, ksize_list=[2, 3, 4, 5], n_filters_list=[100] * 4, n_classes=2, dropout=0.5)
        logits = model.get_logits(text, is_train=False)
        # Per-row class probabilities and the index of the best class.
        logits = flow.nn.softmax(logits)
        label = flow.math.argmax(logits)
        return label, logits
class TextCNNClassifier:
    """Sentiment-classification service: loads a trained TextCNN checkpoint
    plus the IMDB word index, then classifies text files in batches."""
    def __init__(self):
        model_load_dir = "of_model/textcnn_imdb_of_best_model/"
        word_index_dir = "of_model/imdb_word_index/imdb_word_index.json"
        checkpoint = flow.train.CheckPoint()
        checkpoint.init()
        checkpoint.load(model_load_dir)
        with open(word_index_dir) as f:
            word_index = json.load(f)
        # Shift all indices by 2 to reserve slots for the special tokens.
        word_index = {k: (v + 2) for k, v in word_index.items()}
        word_index["<PAD>"] = 0
        word_index["<START>"] = 1
        word_index["<UNK>"] = 2
        self.word_index = word_index
    def _encode(self, text):
        """Tokenize raw text into word-index ids, prefixed with <START>;
        out-of-vocabulary words map to <UNK>."""
        text = re.sub("[^a-zA-Z']", " ", text)
        unk = self.word_index["<UNK>"]
        ids = [self.word_index["<START>"]]
        ids.extend(self.word_index.get(w.lower(), unk) for w in text.split())
        return ids
    def _predict_batch(self, batch_text, batch_ids, label_list, classifications):
        """Run one batch through predict_job and append one result per real
        (non-padding) sample to `classifications`."""
        n_real = len(batch_ids)
        # predict_job's placeholder is fixed at BATCH_SIZE rows, so pad a
        # partial batch with <PAD>-only rows and discard their predictions.
        while len(batch_text) < BATCH_SIZE:
            batch_text.append([self.word_index["<PAD>"]])
        text = pad_sequences(batch_text, value=self.word_index["<PAD>"], padding='post', maxlen=150)
        text = np.array(text, dtype=np.int32)
        label, logits = predict_job(text)
        label = label.tolist()
        logits = logits.tolist()
        for k in range(n_real):
            classifications.append({
                'id': batch_ids[k],
                'annotation': json.dumps(
                    [{'category_id': label_list[label[k]], 'score': round(logits[k][label[k]], 4)}])
            })
    def inference(self, text_path_list, id_list, label_list):
        """Classify every file in `text_path_list` (read from /nfs/), pairing
        result `i` with id_list[i]; label_list maps class index -> category id."""
        print("infer")
        classifications = []
        batch_text, batch_ids = [], []
        for i, text_path in enumerate(text_path_list):
            # `with` closes the handle (the original leaked it).
            with open('/nfs/' + text_path, "r") as fh:
                text = fh.read()
            batch_text.append(self._encode(text))
            batch_ids.append(id_list[i])
            if len(batch_text) == BATCH_SIZE:
                self._predict_batch(batch_text, batch_ids, label_list, classifications)
                batch_text, batch_ids = [], []
        # Bug fix: the original silently dropped the trailing partial batch
        # whenever len(text_path_list) was not a multiple of BATCH_SIZE.
        if batch_text:
            self._predict_batch(batch_text, batch_ids, label_list, classifications)
        return classifications
| [
"oneflow.function_config",
"oneflow.nn.dropout",
"oneflow.transpose",
"oneflow.gather",
"oneflow.scope.placement",
"oneflow.concat",
"oneflow.nn.max_pool2d",
"oneflow.train.CheckPoint",
"oneflow.typing.Numpy.Placeholder",
"oneflow.nn.softmax",
"oneflow.nn.relu",
"oneflow.layers.dense",
"onef... | [((3123, 3145), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (3143, 3145), True, 'import oneflow as flow\n'), ((5998, 6063), 'numpy.full', 'np.full', (['((num_samples, maxlen) + sample_shape)', 'value'], {'dtype': 'dtype'}), '((num_samples, maxlen) + sample_shape, value, dtype=dtype)\n', (6005, 6063), True, 'import numpy as np\n'), ((1031, 1073), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.1)'}), '(stddev=0.1)\n', (1061, 1073), True, 'import oneflow as flow\n'), ((1408, 1572), 'oneflow.get_variable', 'flow.get_variable', (['"""embedding-weight"""'], {'shape': '(self.emb_sz, self.emb_dim)', 'dtype': 'flow.float32', 'trainable': 'is_train', 'reuse': '(False)', 'initializer': 'self.initializer'}), "('embedding-weight', shape=(self.emb_sz, self.emb_dim),\n dtype=flow.float32, trainable=is_train, reuse=False, initializer=self.\n initializer)\n", (1425, 1572), True, 'import oneflow as flow\n'), ((1662, 1701), 'oneflow.gather', 'flow.gather', (['emb_weight', 'inputs'], {'axis': '(0)'}), '(emb_weight, inputs, axis=0)\n', (1673, 1701), True, 'import oneflow as flow\n'), ((1717, 1748), 'oneflow.transpose', 'flow.transpose', (['data', '[0, 2, 1]'], {}), '(data, [0, 2, 1])\n', (1731, 1748), True, 'import oneflow as flow\n'), ((2498, 2525), 'oneflow.concat', 'flow.concat', (['pooled_list', '(3)'], {}), '(pooled_list, 3)\n', (2509, 2525), True, 'import oneflow as flow\n'), ((2543, 2591), 'oneflow.reshape', 'flow.reshape', (['pooled', '[-1, self.total_n_filters]'], {}), '(pooled, [-1, self.total_n_filters])\n', (2555, 2591), True, 'import oneflow as flow\n'), ((2712, 2831), 'oneflow.layers.dense', 'flow.layers.dense', (['pooled', 'self.total_n_filters'], {'use_bias': '(True)', 'kernel_initializer': 'self.initializer', 'name': '"""dense-1"""'}), "(pooled, self.total_n_filters, use_bias=True,\n kernel_initializer=self.initializer, name='dense-1')\n", (2729, 2831), True, 'import oneflow as flow\n'), 
((2880, 2900), 'oneflow.nn.relu', 'flow.nn.relu', (['pooled'], {}), '(pooled)\n', (2892, 2900), True, 'import oneflow as flow\n'), ((2918, 3032), 'oneflow.layers.dense', 'flow.layers.dense', (['pooled', 'self.n_classes'], {'use_bias': '(True)', 'kernel_initializer': 'self.initializer', 'name': '"""dense-2"""'}), "(pooled, self.n_classes, use_bias=True, kernel_initializer\n =self.initializer, name='dense-2')\n", (2935, 3032), True, 'import oneflow as flow\n'), ((5571, 5586), 'numpy.max', 'np.max', (['lengths'], {}), '(lengths)\n', (5577, 5586), True, 'import numpy as np\n'), ((5611, 5640), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.str_'], {}), '(dtype, np.str_)\n', (5624, 5640), True, 'import numpy as np\n'), ((5644, 5677), 'numpy.issubdtype', 'np.issubdtype', (['dtype', 'np.unicode_'], {}), '(dtype, np.unicode_)\n', (5657, 5677), True, 'import numpy as np\n'), ((6503, 6533), 'numpy.asarray', 'np.asarray', (['trunc'], {'dtype': 'dtype'}), '(trunc, dtype=dtype)\n', (6513, 6533), True, 'import numpy as np\n'), ((7121, 7178), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 150)'], {'dtype': 'flow.int32'}), '((BATCH_SIZE, 150), dtype=flow.int32)\n', (7141, 7178), True, 'import oneflow.typing as tp\n'), ((7237, 7271), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (7257, 7271), True, 'import oneflow as flow\n'), ((7459, 7482), 'oneflow.nn.softmax', 'flow.nn.softmax', (['logits'], {}), '(logits)\n', (7474, 7482), True, 'import oneflow as flow\n'), ((7499, 7523), 'oneflow.math.argmax', 'flow.math.argmax', (['logits'], {}), '(logits)\n', (7515, 7523), True, 'import oneflow as flow\n'), ((7776, 7799), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (7797, 7799), True, 'import oneflow as flow\n'), ((2302, 2320), 'oneflow.nn.relu', 'flow.nn.relu', (['conv'], {}), '(conv)\n', (2314, 2320), True, 'import oneflow as flow\n'), ((2342, 2446), 
'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv', '[seq_length - ksz + 1, 1]'], {'strides': '(1)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""'}), "(conv, [seq_length - ksz + 1, 1], strides=1, padding=\n 'VALID', data_format='NCHW')\n", (2360, 2446), True, 'import oneflow as flow\n'), ((2643, 2685), 'oneflow.nn.dropout', 'flow.nn.dropout', (['pooled'], {'rate': 'self.dropout'}), '(pooled, rate=self.dropout)\n', (2658, 2685), True, 'import oneflow as flow\n'), ((7940, 7952), 'json.load', 'json.load', (['f'], {}), '(f)\n', (7949, 7952), False, 'import json\n'), ((8664, 8695), 're.sub', 're.sub', (['"""[^a-zA-Z\']"""', '""" """', 'text'], {}), '("[^a-zA-Z\']", \' \', text)\n', (8670, 8695), False, 'import re\n'), ((9153, 9183), 'numpy.array', 'np.array', (['text'], {'dtype': 'np.int32'}), '(text, dtype=np.int32)\n', (9161, 9183), True, 'import numpy as np\n'), ((5312, 5325), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5322, 5325), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import math
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import GenArgDict, RunOneflowOp
# Switch TensorFlow to on-demand GPU memory allocation so it can share the
# devices with OneFlow instead of grabbing all memory up front.
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def tf_gelu(x):
    """Reference GELU computed with TensorFlow.

    Returns a tuple ``(y, dy/dx)`` as numpy arrays for input `x`, using the
    exact erf-based formulation 0.5 * x * (1 + erf(x / sqrt(2))).
    """
    scale = math.sqrt(0.5)
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(x)
        erf_term = tf.math.erf(scale * x)
        y = 0.5 * x * (1 + erf_term)
    grad = tape.gradient(y, x)
    return y.numpy(), grad.numpy()
@flow.unittest.skip_unless_1n1d()
class TestGelu(flow.unittest.TestCase):
    def test_gelu(test_case):
        """Compare OneFlow's gelu forward/backward against the TF reference."""
        sample = np.random.uniform(low=-100, high=100, size=(10, 20, 30, 40)).astype(
            np.float32
        )
        arg_dict = OrderedDict(
            [
                ("device_type", ["gpu"]),
                ("flow_op", [flow.math.gelu]),
                ("flow_args", [[]]),
                ("x", [sample]),
            ]
        )
        for arg in GenArgDict(arg_dict):
            of_y, of_x_diff = RunOneflowOp(**arg)
            tf_y, tf_x_diff = tf_gelu(arg["x"])
            # Loose float32 tolerances: both sides accumulate rounding error.
            assert np.allclose(of_y, tf_y, rtol=1e-5, atol=1e-5)
            assert np.allclose(of_x_diff, tf_x_diff, rtol=1e-5, atol=1e-5)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((786, 837), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (830, 837), True, 'import tensorflow as tf\n'), ((1166, 1198), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1196, 1198), True, 'import oneflow as flow\n'), ((859, 910), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (899, 910), True, 'import tensorflow as tf\n'), ((945, 959), 'math.sqrt', 'math.sqrt', (['(0.5)'], {}), '(0.5)\n', (954, 959), False, 'import math\n'), ((1899, 1914), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1912, 1914), False, 'import unittest\n'), ((969, 1001), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (984, 1001), True, 'import tensorflow as tf\n'), ((1023, 1037), 'tensorflow.Variable', 'tf.Variable', (['x'], {}), '(x)\n', (1034, 1037), True, 'import tensorflow as tf\n'), ((1288, 1301), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1299, 1301), False, 'from collections import OrderedDict\n'), ((1605, 1625), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (1615, 1625), False, 'from test_util import GenArgDict, RunOneflowOp\n'), ((1657, 1676), 'test_util.RunOneflowOp', 'RunOneflowOp', ([], {}), '(**arg)\n', (1669, 1676), False, 'from test_util import GenArgDict, RunOneflowOp\n'), ((1745, 1792), 'numpy.allclose', 'np.allclose', (['of_y', 'tf_y'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(of_y, tf_y, rtol=1e-05, atol=1e-05)\n', (1756, 1792), True, 'import numpy as np\n'), ((1810, 1867), 'numpy.allclose', 'np.allclose', (['of_x_diff', 'tf_x_diff'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(of_x_diff, tf_x_diff, rtol=1e-05, atol=1e-05)\n', (1821, 1867), True, 'import numpy as np\n'), ((1065, 1091), 'tensorflow.math.erf', 'tf.math.erf', (['(inv_sqrt2 * x)'], 
{}), '(inv_sqrt2 * x)\n', (1076, 1091), True, 'import tensorflow as tf\n'), ((1466, 1526), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100)', 'high': '(100)', 'size': '(10, 20, 30, 40)'}), '(low=-100, high=100, size=(10, 20, 30, 40))\n', (1483, 1526), True, 'import numpy as np\n')] |
import tqdm
from typing import Optional
import oneflow as flow
import oneflow.nn as nn
from optimization import get_scheduler
class AverageMeter(object):
    """Tracks the most recent value and a running average of a metric."""

    def __init__(self, name, fmt=':f'):
        self.name = name
        self.fmt = fmt
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.sum += n * val
        self.count += n
        self.avg = self.sum / self.count

    def __str__(self):
        template = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
        return template.format(**self.__dict__)
class Trainer:
    """ Trainer for GPT model. """

    def __init__(
        self,
        model,
        train_dataloader=None,
        test_dataloader=None,
        epoch: int = 1,
        lr: float = 1e-4,
        betas=(0.9, 0.999),
        weight_decay: float = 0.01,
        warmup_steps: Optional[int] = None,
        accumulate_gradient_steps: int = 1,
        output_path=None,
    ):
        """
        :param model: model
        :param train_dataloader: train dataset data loader
        :param test_dataloader: test dataset data loader [can be None]
        :param epoch: number of training epochs
        :param lr: learning rate of optimizer
        :param betas: Adam optimizer betas
        :param weight_decay: Adam optimizer weight decay param
        :param warmup_steps: linear LR warmup steps for the scheduler
        :param accumulate_gradient_steps: micro-batches to accumulate before
            each optimizer step (1 disables accumulation)
        :param output_path: directory the final model weights are saved to
        """
        self.model = model.cuda()
        self.output_path = output_path
        # Setting the train and test data loader
        self.train_dataloader = train_dataloader
        self.test_dataloader = test_dataloader
        self.epoch = epoch
        self.accumulate_gradient_steps = accumulate_gradient_steps
        # Adam optimizer with the given hyper-parameters
        self.optimizer = flow.optim.Adam(self.model.parameters(), lr=lr, betas=betas, weight_decay=weight_decay)
        total_train_steps = len(self.train_dataloader) * self.epoch
        self.lr_scheduler = get_scheduler('linear', self.optimizer, num_warmup_steps=warmup_steps, num_training_steps=total_train_steps)
        print("Total Parameters:", sum([p.nelement() for p in self.model.parameters()]))

    def train(self):
        """Alternate train/eval for `self.epoch` epochs, checkpointing after
        every epoch and saving the final weights to `self.output_path`."""
        for i in range(self.epoch):
            self.train_single_epoch(self.train_dataloader, i)
            self.evaluate(self.test_dataloader, i)
            self.save(i + 1, file_path="checkpoints/")
        flow.save(self.model.state_dict(), self.output_path)

    def test(self):
        """Run a single evaluation pass over the test dataloader."""
        self.evaluate(self.test_dataloader)

    def train_single_epoch(self, data_loader, epoch):
        """Run one optimization epoch over `data_loader`."""
        self.model.train()
        losses = AverageMeter("loss")
        self.optimizer.zero_grad()
        data_iter = tqdm.tqdm(data_loader, desc="Training: %0d" % (epoch), total=len(data_loader))
        for step, batch in enumerate(data_iter):
            # Trained as a language model: the inputs double as labels.
            inputs, labels = (batch, batch)
            inputs = inputs.cuda()
            labels = labels.cuda()
            outputs = self.model(inputs, labels=labels)
            loss = outputs[0]
            losses.update(loss.numpy().item())
            # BUGFIX: backward() must run on every micro-batch; the original
            # only called it on accumulation boundaries, silently discarding
            # the gradients of every other micro-batch. The loss is scaled so
            # the accumulated gradient equals the mean over the window
            # (a no-op when accumulate_gradient_steps == 1).
            (loss / self.accumulate_gradient_steps).backward()
            if (step + 1) % self.accumulate_gradient_steps == 0:
                self.optimizer.step()
                self.lr_scheduler.step()
                self.optimizer.zero_grad()
            logging = {
                'epoch': epoch,
                'step': step,
                'avg_loss': losses.avg,
                'loss': losses.val,
                'lr': self.lr_scheduler.get_lr()[0]
            }
            data_iter.set_postfix(logging)
        print("Training:%0d, avg_loss:%.4f" % (epoch, losses.avg))

    def evaluate(self, data_loader, epoch=0):
        """Compute the average loss over `data_loader` without gradients."""
        self.model.eval()
        losses = AverageMeter("loss")
        data_iter = tqdm.tqdm(data_loader, desc="Evaluate: ", total=len(data_loader))
        for step, batch in enumerate(data_iter):
            with flow.no_grad():
                inputs, labels = (batch, batch)
                inputs = inputs.cuda()
                labels = labels.cuda()
                outputs = self.model(inputs, labels=labels)
                loss = outputs[0]
                loss_item = loss.numpy().item()
                losses.update(loss_item)
                logging = {
                    'epoch': epoch,
                    'step': step,
                    'avg_loss': losses.avg,
                    'loss': losses.val,
                }
                data_iter.set_postfix(logging)
        print("Evaluating:%0d, avg_loss:%.4f" % (epoch, losses.avg))

    def save(self, epoch, file_path="checkpoints/"):
        """
        Saving the current model on file_path
        :param epoch: current epoch number
        :param file_path: model output path which gonna be file_path+"ep%d" % epoch
        :return: final_output_path
        """
        output_path = file_path + "epoch%d" % epoch
        flow.save(self.model.state_dict(), output_path)
        print("EP:%d Model Saved on:" % epoch, output_path)
        return output_path
| [
"oneflow.no_grad"
] | [((2040, 2152), 'optimization.get_scheduler', 'get_scheduler', (['"""linear"""', 'self.optimizer'], {'num_warmup_steps': 'warmup_steps', 'num_training_steps': 'total_train_steps'}), "('linear', self.optimizer, num_warmup_steps=warmup_steps,\n num_training_steps=total_train_steps)\n", (2053, 2152), False, 'from optimization import get_scheduler\n'), ((3961, 3975), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3973, 3975), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
class MemoryZoneOutOfMemoryException(Exception):
    """Raised when a job fails because a memory zone ran out of memory."""

    def __init__(self, err="memory_zone_out_of_memory"):
        super().__init__(err)
def constant(device_type):
    """Run a job that allocates an absurdly large constant on `device_type`.

    Intended to provoke an out-of-memory failure; when the error message
    contains "memory_zone_out_of_memory" it is re-raised as
    MemoryZoneOutOfMemoryException.

    :param device_type: "gpu" or "cpu"
    :raises MemoryZoneOutOfMemoryException: when the expected OOM occurs
    """
    flow.env.init()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    assert device_type in ["gpu", "cpu"]
    # Each call rebuilds the job, so drop any previously compiled session.
    flow.clear_default_session()
    @flow.global_function(function_config=func_config)
    def ConstantJob():
        with flow.scope.placement(device_type, "0:0"):
            # 2^30 x 2^30 floats -- far beyond any real device's capacity.
            x = flow.constant(
                6, dtype=flow.float, shape=(1024 * 1024 * 1024, 1024 * 1024 * 1024)
            )
            return x
    try:
        ConstantJob().get()
    except Exception as e:
        # NOTE(review): exceptions whose message lacks the OOM marker are
        # silently swallowed here -- confirm that this is intended.
        if "memory_zone_out_of_memory" in str(e):
            print(e)
            raise MemoryZoneOutOfMemoryException()
def memory_zone_out_of_memory_of_gpu():
    """Run the oversized-constant job on GPU (expected to raise
    MemoryZoneOutOfMemoryException)."""
    device = "gpu"
    return constant(device)
def memory_zone_out_of_memory_of_cpu():
    """Run the oversized-constant job on CPU (expected to raise
    MemoryZoneOutOfMemoryException)."""
    device = "cpu"
    return constant(device)
| [
"oneflow.compatible.single_client.constant",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.env.init",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.clear_default_session"
] | [((939, 954), 'oneflow.compatible.single_client.env.init', 'flow.env.init', ([], {}), '()\n', (952, 954), True, 'from oneflow.compatible import single_client as flow\n'), ((973, 994), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (992, 994), True, 'from oneflow.compatible import single_client as flow\n'), ((1086, 1114), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1112, 1114), True, 'from oneflow.compatible import single_client as flow\n'), ((1121, 1170), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1141, 1170), True, 'from oneflow.compatible import single_client as flow\n'), ((1207, 1247), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1227, 1247), True, 'from oneflow.compatible import single_client as flow\n'), ((1265, 1351), 'oneflow.compatible.single_client.constant', 'flow.constant', (['(6)'], {'dtype': 'flow.float', 'shape': '(1024 * 1024 * 1024, 1024 * 1024 * 1024)'}), '(6, dtype=flow.float, shape=(1024 * 1024 * 1024, 1024 * 1024 *\n 1024))\n', (1278, 1351), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import cv2
import numpy as np
import typing as tp
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as otp
import image_test_util
def _of_image_target_resize(
    images, target_size, max_size, image_static_shape, aspect_ratio_list
):
    """Resize a batch of images with OneFlow's image_target_resize op.

    Builds a fresh single-use OneFlow job (mirrored view, float dtype) and
    runs it once on `images`.

    :param images: list of arrays matching `image_static_shape` (3 channels)
    :param target_size: target length of the shorter side
    :param max_size: cap on the longer side after resizing
    :param image_static_shape: static placeholder shape for the job
    :param aspect_ratio_list: per-image aspect ratios, used to infer the
        static output shape
    :return: (resized images, new sizes, scales) for the first mirrored entry
    """
    assert image_static_shape[-1] == 3
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())
    @flow.global_function(function_config=func_config)
    def image_target_resize_job(
        image: otp.ListListNumpy.Placeholder(shape=image_static_shape, dtype=flow.float)
    ) -> tp.Tuple[otp.ListListNumpy, otp.ListNumpy, otp.ListNumpy]:
        # Dynamic images travel through the graph as tensor buffers.
        image_buffer = flow.tensor_list_to_tensor_buffer(image)
        res_image_buffer, new_size, scale = flow.image_target_resize(
            image_buffer,
            target_size=target_size,
            max_size=max_size,
            resize_side="shorter",
        )
        # Static output shape derived from the known aspect ratios.
        out_shape = image_test_util.infer_keep_aspect_ratio_resized_images_static_shape(
            target_size=target_size,
            min_size=None,
            max_size=max_size,
            aspect_ratio_list=aspect_ratio_list,
            resize_side="shorter",
            channels=3,
        )
        res_image = flow.tensor_buffer_to_tensor_list(
            res_image_buffer, shape=out_shape, dtype=flow.float,
        )
        return res_image, new_size, scale
    res_image, new_size, scale = image_target_resize_job([images])
    return res_image[0], new_size[0], scale[0]
def _target_resize_by_cv(images, target_size, max_size):
    """OpenCV reference implementation of target-resize.

    Resizes every image so its shorter side approaches `target_size` while
    the longer side stays within `max_size`.

    :return: (resized images, (w, h) sizes, (w_scale, h_scale) scales)
    """
    res_images = []
    res_sizes = []
    res_scales = []
    for image in images:
        height, width = image.shape[0:2]
        size, scale = _get_target_resize_size(width, height, target_size, max_size)
        # cv2.resize takes the destination size as (width, height).
        res_images.append(cv2.resize(image, size))
        res_sizes.append(size)
        res_scales.append(scale)
    return res_images, res_sizes, res_scales
def _get_target_resize_size(w, h, target_size, max_size):
    """Compute the resized (width, height) and per-axis scale factors.

    Delegates the keep-aspect-ratio arithmetic to image_test_util and then
    assigns the short/long results back to the matching axes.
    """
    aspect_ratio = float(min(w, h)) / float(max(w, h))
    min_res_size, max_res_size = image_test_util.compute_keep_aspect_ratio_resized_size(
        target_size, None, max_size, aspect_ratio, "shorter"
    )
    # The smaller resized extent goes to whichever input axis was shorter.
    if w < h:
        res_w, res_h = min_res_size, max_res_size
    else:
        res_w, res_h = max_res_size, min_res_size
    return (res_w, res_h), (res_w / w, res_h / h)
def _compare_image_target_resize_with_cv(
    test_case, image_files, target_size, max_size, print_debug_info=False
):
    """Assert that OneFlow's image_target_resize matches the OpenCV reference.

    Loads `image_files`, resizes them through both paths and compares the
    pixel data, the reported sizes and the reported scale factors.
    """
    images = image_test_util.read_images_by_cv(image_files, flow.float)
    static_shape, aspect_ratios = image_test_util.infer_images_static_shape(images)
    # OneFlow's job expects each image with a leading batch-1 axis.
    batched = [np.expand_dims(image, axis=0) for image in images]
    of_images, of_sizes, of_scales = _of_image_target_resize(
        batched, target_size, max_size, static_shape, aspect_ratios
    )
    cv_images, cv_sizes, cv_scales = _target_resize_by_cv(images, target_size, max_size)
    comparisons = zip(of_images, cv_images, of_sizes, of_scales, cv_sizes, cv_scales)
    for of_img, cv_img, of_size, of_scale, cv_size, cv_scale in comparisons:
        if print_debug_info:
            print("resized_image shape:", of_img.shape)
            print("cv_resized_image shape:", cv_img.shape)
            print("resized w & h:", of_size, cv_size)
            print("resize w_scale & h_scale:", of_scale, cv_scale)
        test_case.assertTrue(np.allclose(of_img, cv_img))
        test_case.assertTrue(np.allclose(of_size, cv_size))
        test_case.assertTrue(np.allclose(of_scale, cv_scale))
# @flow.unittest.skip_unless_1n1d()
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestImageTargetResize(flow.unittest.TestCase):
    def test_image_target_resize(test_case):
        """End-to-end check of image_target_resize on two COCO val images."""
        image_files = [
            "/dataset/mscoco_2017/val2017/000000000139.jpg",
            "/dataset/mscoco_2017/val2017/000000000632.jpg",
        ]
        target_size = 800
        max_size = 1333
        _compare_image_target_resize_with_cv(
            test_case, image_files, target_size, max_size
        )
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.tensor_buffer_to_tensor_list",
"oneflow.compatible.single_client.tensor_list_to_tensor_buffer",
"oneflow.compatible.single_client.scope.mirrored_view",
"oneflow.compatible.single_client.typing.ListListNumpy.Placeholder",
"oneflow.compatible.single_client.FunctionConfig",
... | [((4757, 4843), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (4772, 4843), False, 'import unittest\n'), ((942, 970), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (968, 970), True, 'from oneflow.compatible import single_client as flow\n'), ((989, 1010), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1008, 1010), True, 'from oneflow.compatible import single_client as flow\n'), ((1128, 1177), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1148, 1177), True, 'from oneflow.compatible import single_client as flow\n'), ((2838, 2950), 'image_test_util.compute_keep_aspect_ratio_resized_size', 'image_test_util.compute_keep_aspect_ratio_resized_size', (['target_size', 'None', 'max_size', 'aspect_ratio', '"""shorter"""'], {}), "(target_size, None,\n max_size, aspect_ratio, 'shorter')\n", (2892, 2950), False, 'import image_test_util\n'), ((3331, 3389), 'image_test_util.read_images_by_cv', 'image_test_util.read_images_by_cv', (['image_files', 'flow.float'], {}), '(image_files, flow.float)\n', (3364, 3389), False, 'import image_test_util\n'), ((3434, 3483), 'image_test_util.infer_images_static_shape', 'image_test_util.infer_images_static_shape', (['images'], {}), '(images)\n', (3475, 3483), False, 'import image_test_util\n'), ((5264, 5279), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5277, 5279), False, 'import unittest\n'), ((1094, 1120), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1118, 1120), True, 'from oneflow.compatible import single_client as flow\n'), ((1391, 1431), 'oneflow.compatible.single_client.tensor_list_to_tensor_buffer', 
'flow.tensor_list_to_tensor_buffer', (['image'], {}), '(image)\n', (1424, 1431), True, 'from oneflow.compatible import single_client as flow\n'), ((1476, 1586), 'oneflow.compatible.single_client.image_target_resize', 'flow.image_target_resize', (['image_buffer'], {'target_size': 'target_size', 'max_size': 'max_size', 'resize_side': '"""shorter"""'}), "(image_buffer, target_size=target_size, max_size=\n max_size, resize_side='shorter')\n", (1500, 1586), True, 'from oneflow.compatible import single_client as flow\n'), ((1661, 1869), 'image_test_util.infer_keep_aspect_ratio_resized_images_static_shape', 'image_test_util.infer_keep_aspect_ratio_resized_images_static_shape', ([], {'target_size': 'target_size', 'min_size': 'None', 'max_size': 'max_size', 'aspect_ratio_list': 'aspect_ratio_list', 'resize_side': '"""shorter"""', 'channels': '(3)'}), "(target_size\n =target_size, min_size=None, max_size=max_size, aspect_ratio_list=\n aspect_ratio_list, resize_side='shorter', channels=3)\n", (1728, 1869), False, 'import image_test_util\n'), ((1963, 2054), 'oneflow.compatible.single_client.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['res_image_buffer'], {'shape': 'out_shape', 'dtype': 'flow.float'}), '(res_image_buffer, shape=out_shape, dtype=\n flow.float)\n', (1996, 2054), True, 'from oneflow.compatible import single_client as flow\n'), ((3519, 3548), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (3533, 3548), True, 'import numpy as np\n'), ((1226, 1299), 'oneflow.compatible.single_client.typing.ListListNumpy.Placeholder', 'otp.ListListNumpy.Placeholder', ([], {'shape': 'image_static_shape', 'dtype': 'flow.float'}), '(shape=image_static_shape, dtype=flow.float)\n', (1255, 1299), True, 'from oneflow.compatible.single_client import typing as otp\n'), ((2514, 2541), 'cv2.resize', 'cv2.resize', (['image', 'res_size'], {}), '(image, res_size)\n', (2524, 2541), False, 'import cv2\n'), ((4476, 4520), 
'numpy.allclose', 'np.allclose', (['resized_image', 'cv_resized_image'], {}), '(resized_image, cv_resized_image)\n', (4487, 4520), True, 'import numpy as np\n'), ((4551, 4588), 'numpy.allclose', 'np.allclose', (['image_size', 'resized_size'], {}), '(image_size, resized_size)\n', (4562, 4588), True, 'import numpy as np\n'), ((4619, 4658), 'numpy.allclose', 'np.allclose', (['image_scale', 'resized_scale'], {}), '(image_scale, resized_scale)\n', (4630, 4658), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import jsonlines
import oneflow as flow
from oneflow.utils.data import Dataset
from libai.data.structures import DistTensorData, Instance
def load_data(name, path):
    """Load a dataset identified by `name` from `path`.

    Supported names: snli-sup / snli-unsup (jsonlines), lqcmc, eng_sts,
    cnsd_sts, wiki, and "add" (extra STS training sentences, path may be
    None). The element type of the returned list depends on the loader.
    """
    assert name in ["snli-sup", "snli-unsup", "lqcmc", "eng_sts", "cnsd_sts", "wiki", "add"]

    def load_snli_data_unsup(path):
        # One raw sentence per jsonline, stored under "origin".
        with jsonlines.open(path, "r") as f:
            return [line.get("origin") for line in f]

    def load_snli_data_sup(path):
        # (anchor, entailment, contradiction) triplets.
        with jsonlines.open(path, "r") as f:
            return [(line["origin"], line["entailment"], line["contradiction"]) for line in f]

    def load_lqcmc_data(path):
        # Tab-separated; only the first column (the sentence) is kept.
        with open(path, "r", encoding="utf8") as f:
            return [line.strip().split("\t")[0] for line in f]

    def load_cnsd_sts_data(path):
        # "||"-separated records: fields 1 and 2 are sentences, 3 the score.
        with open(path, "r", encoding="utf8") as f:
            return [(line.split("||")[1], line.split("||")[2], line.split("||")[3]) for line in f]

    def load_wiki_data(path):
        # Collapse runs of whitespace inside each line.
        sentences = []
        with open(path, "r", encoding="utf8") as file:
            for raw in file:
                sentences.append(" ".join(raw.strip().split()))
        return sentences

    def load_eng_sts_data(path):
        with open(path, "r", encoding="utf8") as file:
            return [raw.strip().split("\t") for raw in file]

    def load_sts_to_train(path):
        # Optional extra sentences; a missing path yields no data.
        if path is None:
            return []
        with open(path, "r", encoding="utf8") as f:
            return [line.split("||")[1] for line in f]

    loaders = {
        "snli-unsup": load_snli_data_unsup,
        "snli-sup": load_snli_data_sup,
        "wiki": load_wiki_data,
        "cnsd_sts": load_cnsd_sts_data,
        "eng_sts": load_eng_sts_data,
        "lqcmc": load_lqcmc_data,
    }
    # "add" (and anything not listed above) falls through to the STS loader.
    return loaders.get(name, load_sts_to_train)(path)
def padding_for_ids(data, pad_id=0, max_len=64):
    """Right-pad ids/mask to `max_len`, duplicate both into two identical
    views (SimCSE uses dropout as the augmentation) and wrap them in an
    Instance of DistTensorData tensors. Mutates `data` in place.
    """
    ids = data["input_ids"] + [pad_id] * (max_len - len(data["input_ids"]))
    mask = data["attention_mask"] + [0] * (max_len - len(data["attention_mask"]))
    data["input_ids"] = [ids, ids]
    data["attention_mask"] = [mask, mask]
    return Instance(
        input_ids=DistTensorData(flow.tensor(data["input_ids"], dtype=flow.long)),
        attention_mask=DistTensorData(flow.tensor(data["attention_mask"], dtype=flow.long)),
    )
class TrainDataset_unsup(Dataset):
    """Unsupervised SimCSE training set.

    Each sample is a single sentence encoded into two identical views
    (via padding_for_ids); `path2` may supply extra sentences.
    """

    def __init__(self, name, path, tokenizer, max_len, path2=None):
        self.name = name
        self.data = load_data(name, path) + load_data("add", path2)
        random.shuffle(self.data)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.pad_id = self.tokenizer.pad_token_id
        self.cls_id = self.tokenizer.cls_token_id
        self.sep_id = self.tokenizer.sep_token_id

    def __len__(self):
        return len(self.data)

    def text2id(self, text):
        """Tokenize `text`, truncate to max_len - 2, wrap with [CLS]/[SEP]
        and delegate padding/duplication to padding_for_ids."""
        token_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        token_ids = [self.cls_id] + token_ids[: self.max_len - 2] + [self.sep_id]
        return padding_for_ids(
            data={
                "input_ids": token_ids,
                "attention_mask": [1] * len(token_ids),
            },
            pad_id=self.pad_id,
            max_len=self.max_len,
        )

    def __getitem__(self, index):
        return self.text2id(self.data[index])
class TestDataset_unsup(Dataset):
    """STS evaluation set for the unsupervised model.

    Each item is (sent1, sent2, score); sentences are encoded to a fixed
    length of 64 tokens.
    """

    def __init__(self, name, path, tokenizer):
        self.data = load_data(name, path)
        self.tokenizer = tokenizer
        self.max_len = 64
        self.pad_id = self.tokenizer.pad_token_id
        self.cls_id = self.tokenizer.cls_token_id
        self.sep_id = self.tokenizer.sep_token_id

    def __len__(self):
        return len(self.data)

    def text2id(self, text):
        """Encode one sentence into fixed-length ids and attention mask."""
        token_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        token_ids = [self.cls_id] + token_ids[: self.max_len - 2] + [self.sep_id]
        n_real = len(token_ids)
        n_pad = self.max_len - n_real
        return {
            "input_ids": token_ids + [self.pad_id] * n_pad,
            "attention_mask": [1] * n_real + [0] * n_pad,
        }

    def __getitem__(self, index):
        # Items are (sentence 1, sentence 2, similarity score).
        first_text, second_text, score = self.data[index]
        first = self.text2id(first_text)
        second = self.text2id(second_text)
        return Instance(
            input_ids=DistTensorData(
                flow.tensor([first["input_ids"], second["input_ids"]], dtype=flow.long)
            ),
            attention_mask=DistTensorData(
                flow.tensor([first["attention_mask"], second["attention_mask"]], dtype=flow.long)
            ),
            labels=DistTensorData(flow.tensor(int(score), dtype=flow.int)),
        )
class TrainDataset_sup(Dataset):
    """Supervised training dataset: each item stacks (anchor, positive, negative)."""

    def __init__(self, name, path, tokenizer, max_len=64):
        self.data = load_data(name, path)
        self.tokenizer = tokenizer
        self.max_len = max_len
        # Special-token ids used when encoding.
        self.pad_id = tokenizer.pad_token_id
        self.cls_id = tokenizer.cls_token_id
        self.sep_id = tokenizer.sep_token_id

    def __len__(self):
        return len(self.data)

    def pad_text(self, ids):
        """Right-pad `ids` to max_len; return (padded_ids, attention_mask)."""
        n_pad = self.max_len - len(ids)
        attention_mask = [1] * len(ids) + [0] * n_pad
        return ids + [self.pad_id] * n_pad, attention_mask

    def text2id(self, text):
        """Tokenize, truncate, add [CLS]/[SEP], and pad; return (ids, mask)."""
        token_ids = self.tokenizer.convert_tokens_to_ids(self.tokenizer.tokenize(text))
        ids = [self.cls_id] + token_ids[: self.max_len - 2] + [self.sep_id]
        return self.pad_text(ids)

    def __getitem__(self, index):
        row = self.data[index]
        encoded = [self.text2id(row[i]) for i in range(3)]
        all_ids = [ids for ids, _ in encoded]
        all_masks = [mask for _, mask in encoded]
        return Instance(
            input_ids=DistTensorData(flow.tensor(all_ids, dtype=flow.long)),
            attention_mask=DistTensorData(flow.tensor(all_masks, dtype=flow.long)),
        )
class TestDataset_sup(TrainDataset_sup):
    """Supervised evaluation dataset: a sentence pair plus an integer label."""

    def __getitem__(self, index):
        row = self.data[index]
        # row is (sentence_a, sentence_b, label).
        label = int(row[2])
        ids_a, mask_a = self.text2id(row[0])
        ids_b, mask_b = self.text2id(row[1])
        return Instance(
            input_ids=DistTensorData(flow.tensor([ids_a, ids_b], dtype=flow.long)),
            attention_mask=DistTensorData(flow.tensor([mask_a, mask_b], dtype=flow.long)),
            labels=DistTensorData(flow.tensor(label, dtype=flow.int)),
        )
| [
"oneflow.tensor"
] | [((3556, 3581), 'random.shuffle', 'random.shuffle', (['self.data'], {}), '(self.data)\n', (3570, 3581), False, 'import random\n'), ((947, 972), 'jsonlines.open', 'jsonlines.open', (['path', '"""r"""'], {}), "(path, 'r')\n", (961, 972), False, 'import jsonlines\n'), ((1081, 1106), 'jsonlines.open', 'jsonlines.open', (['path', '"""r"""'], {}), "(path, 'r')\n", (1095, 1106), False, 'import jsonlines\n'), ((3189, 3236), 'oneflow.tensor', 'flow.tensor', (["data['input_ids']"], {'dtype': 'flow.long'}), "(data['input_ids'], dtype=flow.long)\n", (3200, 3236), True, 'import oneflow as flow\n'), ((3277, 3329), 'oneflow.tensor', 'flow.tensor', (["data['attention_mask']"], {'dtype': 'flow.long'}), "(data['attention_mask'], dtype=flow.long)\n", (3288, 3329), True, 'import oneflow as flow\n'), ((5612, 5682), 'oneflow.tensor', 'flow.tensor', (["[sent1['input_ids'], sent2['input_ids']]"], {'dtype': 'flow.long'}), "([sent1['input_ids'], sent2['input_ids']], dtype=flow.long)\n", (5623, 5682), True, 'import oneflow as flow\n'), ((5757, 5842), 'oneflow.tensor', 'flow.tensor', (["[sent1['attention_mask'], sent2['attention_mask']]"], {'dtype': 'flow.long'}), "([sent1['attention_mask'], sent2['attention_mask']], dtype=flow.long\n )\n", (5768, 5842), True, 'import oneflow as flow\n'), ((5887, 5921), 'oneflow.tensor', 'flow.tensor', (['score'], {'dtype': 'flow.int'}), '(score, dtype=flow.int)\n', (5898, 5921), True, 'import oneflow as flow\n'), ((7165, 7213), 'oneflow.tensor', 'flow.tensor', (['[ids0, ids1, ids2]'], {'dtype': 'flow.long'}), '([ids0, ids1, ids2], dtype=flow.long)\n', (7176, 7213), True, 'import oneflow as flow\n'), ((7258, 7309), 'oneflow.tensor', 'flow.tensor', (['[mask0, mask1, mask2]'], {'dtype': 'flow.long'}), '([mask0, mask1, mask2], dtype=flow.long)\n', (7269, 7309), True, 'import oneflow as flow\n'), ((7614, 7656), 'oneflow.tensor', 'flow.tensor', (['[ids0, ids1]'], {'dtype': 'flow.long'}), '([ids0, ids1], dtype=flow.long)\n', (7625, 7656), True, 'import oneflow 
as flow\n'), ((7701, 7745), 'oneflow.tensor', 'flow.tensor', (['[mask0, mask1]'], {'dtype': 'flow.long'}), '([mask0, mask1], dtype=flow.long)\n', (7712, 7745), True, 'import oneflow as flow\n'), ((7782, 7816), 'oneflow.tensor', 'flow.tensor', (['label'], {'dtype': 'flow.int'}), '(label, dtype=flow.int)\n', (7793, 7816), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestParitalFC(flow.unittest.TestCase):
    """Shape checks for flow.distributed_partial_fc_sample on CUDA.

    The original test repeated the same block twice, differing only in the
    SBP of the weight tensor (split(0) vs broadcast); the duplication is
    factored into a local helper.
    """

    @globaltest
    def test_parital_fc(test_case):
        placement = flow.env.all_device_placement("cuda")
        num_sample = 500

        def _run_and_check(weight_sbp):
            # Outputs: (mapped labels, sampled indices, sampled weight rows).
            w = flow.randn(5000, 128, placement=placement, sbp=weight_sbp)
            label = flow.randint(
                0, 5000, (512,), placement=placement, sbp=flow.sbp.split(0)
            )
            out = flow.distributed_partial_fc_sample(w, label, num_sample)
            test_case.assertTrue(out[0].shape == flow.Size([512]))
            test_case.assertTrue(out[1].shape == flow.Size([500]))
            test_case.assertTrue(out[2].shape == flow.Size([500, 128]))

        # Run once per weight layout, exactly as the original two copies did.
        _run_and_check(flow.sbp.split(0))
        _run_and_check(flow.sbp.broadcast)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.randn",
"oneflow.env.all_device_placement",
"oneflow.sbp.split",
"oneflow.distributed_partial_fc_sample",
"oneflow.Size"
] | [((1955, 1970), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1968, 1970), False, 'import unittest\n'), ((922, 959), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (951, 959), True, 'import oneflow as flow\n'), ((1189, 1245), 'oneflow.distributed_partial_fc_sample', 'flow.distributed_partial_fc_sample', (['w', 'label', 'num_sample'], {}), '(w, label, num_sample)\n', (1223, 1245), True, 'import oneflow as flow\n'), ((1453, 1519), 'oneflow.randn', 'flow.randn', (['(5000)', '(128)'], {'placement': 'placement', 'sbp': 'flow.sbp.broadcast'}), '(5000, 128, placement=placement, sbp=flow.sbp.broadcast)\n', (1463, 1519), True, 'import oneflow as flow\n'), ((1671, 1727), 'oneflow.distributed_partial_fc_sample', 'flow.distributed_partial_fc_sample', (['w', 'label', 'num_sample'], {}), '(w, label, num_sample)\n', (1705, 1727), True, 'import oneflow as flow\n'), ((1019, 1036), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1033, 1036), True, 'import oneflow as flow\n'), ((1122, 1139), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1136, 1139), True, 'import oneflow as flow\n'), ((1291, 1307), 'oneflow.Size', 'flow.Size', (['[512]'], {}), '([512])\n', (1300, 1307), True, 'import oneflow as flow\n'), ((1354, 1370), 'oneflow.Size', 'flow.Size', (['[500]'], {}), '([500])\n', (1363, 1370), True, 'import oneflow as flow\n'), ((1417, 1438), 'oneflow.Size', 'flow.Size', (['[500, 128]'], {}), '([500, 128])\n', (1426, 1438), True, 'import oneflow as flow\n'), ((1604, 1621), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1618, 1621), True, 'import oneflow as flow\n'), ((1773, 1789), 'oneflow.Size', 'flow.Size', (['[512]'], {}), '([512])\n', (1782, 1789), True, 'import oneflow as flow\n'), ((1836, 1852), 'oneflow.Size', 'flow.Size', (['[500]'], {}), '([500])\n', (1845, 1852), True, 'import oneflow as flow\n'), ((1899, 1920), 'oneflow.Size', 'flow.Size', (['[500, 128]'], 
{}), '([500, 128])\n', (1908, 1920), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
from google.protobuf import text_format
import oneflow
import oneflow_api
import oneflow.python.eager.blob_register as blob_register_util
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.python.framework.config_util as config_util
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.ops.initializer_util as initializer_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.lib.core.async_util as async_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.op_infer_util as op_infer_util
import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb
import oneflow.core.framework.user_op_attr_pb2 as attr_value_pb
from oneflow.python.experimental import interface_op_read_and_write
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.ops.get_variable as get_variable
from oneflow.python.oneflow_export import oneflow_export
import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util
from oneflow_api import EagerBlobTrait
from typing import Any, Callable, Dict, List, Union, Sequence, Optional, Iterable, Tuple
# On-disk layout of one saved variable directory:
#   <var_dir>/meta -- text-format VariableMetaInfo proto (shape + data type)
#   <var_dir>/out  -- raw tensor bytes
META_INFO_FILENAME = "meta"
DATA_FILENAME = "out"
# Name under which the internal checkpoint ops run; they belong to no user job.
FAKE_JOB_NAME = "system_checkpoint"
OP_PREFIX = "system_checkpoint"
# Shared blob register used when wrapping sliced results as eager blobs.
blob_register = blob_register_util.GetDefaultBlobRegister()
class FileBackendVariableBlob:
    """A variable whose value lives on disk in a snapshot directory.

    The directory must contain a DATA_FILENAME file with the raw tensor
    bytes.  A META_INFO_FILENAME file (text-format VariableMetaInfo proto)
    carrying shape and dtype is optional; when absent, shape and dtype may
    instead be supplied explicitly by the caller (both or neither).
    """
    def __init__(
        self,
        var_dir: str,
        dtype: Optional[dtype_util.dtype] = None,
        shape: Optional[Sequence[int]] = None,
    ):
        data_path = os.path.join(var_dir, DATA_FILENAME)
        assert os.path.isfile(data_path)
        self.var_dir_ = var_dir
        meta_info_path = os.path.join(self.var_dir_, META_INFO_FILENAME)
        if os.path.exists(meta_info_path):
            # Parse the text-format proto holding shape and data type.
            meta_info = variable_meta_info_pb.VariableMetaInfo()
            with open(meta_info_path) as f:
                text_format.Parse(f.read(), meta_info)
            self.has_meta_info_ = True
        else:
            self.has_meta_info_ = False
        if self.has_meta_info_:
            # Meta info on disk wins; the caller must not also pass shape/dtype.
            assert dtype is None and shape is None
            self.shape_ = tuple(meta_info.shape.dim)
            self.dtype_ = dtype_util.convert_proto_dtype_to_oneflow_dtype(
                meta_info.data_type
            )
        else:
            if shape is not None and dtype is not None:
                # No meta file, but the caller supplied both -> fully specified.
                self.shape_ = shape
                self.dtype_ = dtype
                self.has_meta_info_ = True
            elif shape is not None or dtype is not None:
                raise RuntimeError("both or neither of shape and dtype should be None")
            else:
                # Neither given: the blob stays shape/dtype-less; numpy() will
                # raise until meta info is available.
                pass
        if self.has_meta_info_:
            # Sanity check: file size must equal element count * element size.
            itemsize = np.dtype(
                dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype_)
            ).itemsize
            assert os.path.getsize(data_path) == np.prod(self.shape).item() * itemsize
    @property
    def file_path(self) -> str:
        # Path of the raw data file inside the variable directory.
        return os.path.join(self.var_dir_, DATA_FILENAME)
    @property
    def shape(self) -> Tuple[int]:
        return self.shape_
    @property
    def quant_info(self):
        # Quantization info is not supported for file-backed variables.
        raise NotImplementedError()
    @property
    def dtype(self) -> dtype_util.dtype:
        return self.dtype_
    def numpy(self) -> np.ndarray:
        """Read the whole variable from disk into a numpy array."""
        if not self.has_meta_info_:
            raise RuntimeError("This variable does not have meta info")
        return np.fromfile(
            self.file_path,
            dtype=dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype),
        ).reshape(self.shape)
# Anything this module accepts as a source of variable values.
ValueContainer = Union[EagerBlobTrait, FileBackendVariableBlob, np.ndarray]
def _ElemCnt(shape):
return np.prod(shape).astype(np.int).item()
@oneflow_export("get_all_variables")
@session_ctx.try_init_default_session
def GetAllVariables() -> Dict[str, oneflow_api.EagerConsistentBlob]:
    """
    Get all variables of all jobs as a dict.
    """
    oneflow.sync_default_session()
    sess = session_ctx.GetDefaultSession()

    def _IsVariableOp(op_name):
        # Interface ops include more than variables; keep only variable ops.
        op_attr = sess.OpAttribute4InterfaceOpName(op_name)
        return op_attr.op_conf.WhichOneof("op_type") == "variable_conf"

    return {
        op_name: interface_op_read_and_write.GetEagerInterfaceBlob(op_name)
        for op_name in sess.interface_ops
        if _IsVariableOp(op_name)
    }
def _LoadSingleVariable(path: str) -> Optional[FileBackendVariableBlob]:
    """Interpret `path` as one variable directory; None when no data file exists."""
    has_data_file = os.path.isfile(os.path.join(path, DATA_FILENAME))
    return FileBackendVariableBlob(path) if has_data_file else None
@oneflow_export("checkpoint.get")
@session_ctx.try_init_default_session
def GetCheckpoint(
    path: str,
) -> Union[Dict[str, FileBackendVariableBlob], FileBackendVariableBlob]:
    """
    Load variable(s) from file system.

    If `path` itself is a single variable directory, return that blob;
    otherwise treat each child entry as a variable directory and return a
    dict keyed by variable name.
    """
    assert os.path.isdir(path), "Directory {} doesn't exist!".format(path)
    single_var = _LoadSingleVariable(path)
    if single_var is not None:
        return single_var
    candidates = (
        (name, _LoadSingleVariable(os.path.join(path, name)))
        for name in os.listdir(path)
    )
    return {name: blob for name, blob in candidates if blob is not None}
def _GetOpNameFromLbn(lbn):
return lbn.split("/")[0]
def _GetScopeSymbolIdFromEagerBlob(blob):
    """Look up the scope symbol id of the interface op that produced `blob`."""
    op_name = _GetOpNameFromLbn(blob.logical_blob_name)
    sess = session_ctx.GetDefaultSession()
    return sess.OpAttribute4InterfaceOpName(op_name).op_conf.scope_symbol_id
def _ReadSlice(
    container: ValueContainer,
) -> Iterable[Tuple[Sequence[int], Sequence[int], np.ndarray]]:
    """
    Return a generator which iterates over the input blob or array and yields
    (start_nd_idx, stop_nd_idx, slice_np_array)
    """
    if isinstance(container, EagerBlobTrait):
        # Eager blob: fetch each bounded region through a logical_slice op.
        def ReadFromEagerBlob(eager_blob, start_nd_idx, stop_nd_idx):
            return _LogicalSlice(eager_blob, start_nd_idx, stop_nd_idx)
        yield from _ForEachSlice(container, ReadFromEagerBlob)
    elif isinstance(container, FileBackendVariableBlob):
        np_dtype = np.dtype(
            dtype_util.convert_oneflow_dtype_to_numpy_dtype(container.dtype)
        )
        with open(container.file_path, "rb") as f:
            # _ForEachSlice yields regions that are contiguous in row-major
            # flat order, so plain sequential reads line up with the regions.
            def ReadFromFile(_, start_nd_idx, stop_nd_idx):
                length = _ElemCnt(np.array(stop_nd_idx) - np.array(start_nd_idx))
                # NOTE(review): `slice` shadows the builtin of the same name.
                slice = f.read(length * np_dtype.itemsize)
                return np.frombuffer(slice, dtype=np_dtype,).reshape(
                    np.array(stop_nd_idx) - np.array(start_nd_idx)
                )
            yield from _ForEachSlice(container, ReadFromFile)
    elif isinstance(container, np.ndarray):
        def ReadFromNpArray(array, start_nd_idx, stop_nd_idx):
            # Build one slice object per dimension and index in a single step.
            slice_objs = []
            for start, stop in zip(start_nd_idx, stop_nd_idx):
                slice_objs.append(slice(start, stop))
            return array[tuple(slice_objs)]
        yield from _ForEachSlice(container, ReadFromNpArray)
    else:
        raise RuntimeError("Unknown type: {}".format(type(container).__name__))
@oneflow_export("checkpoint.save")
@session_ctx.try_init_default_session
def SaveVarDict(
    path: str,
    var_dict: Optional[
        Dict[str, Union[FileBackendVariableBlob, EagerBlobTrait]]
    ] = None,
) -> None:
    """
    Save `var_dict` to `path`

    When `var_dict` is None, every variable of every job is saved.  Each
    variable gets its own sub-directory holding a text-format meta proto
    plus the raw tensor bytes; an empty `snapshot_done` file marks a
    completed save.
    """
    oneflow.sync_default_session()
    if var_dict is None:
        var_dict = GetAllVariables()
    def IsFileOrNonEmptyDir(path):
        if os.path.isfile(path):
            return True
        if os.path.isdir(path) and len(os.listdir(path)) != 0:
            return True
        return False
    # Refuse to clobber an existing file or non-empty directory.
    assert not IsFileOrNonEmptyDir(
        path
    ), "Non-empty directory {} already exists!".format(path)
    os.makedirs(path, exist_ok=True)
    for name, var in var_dict.items():
        meta_info = variable_meta_info_pb.VariableMetaInfo()
        meta_info.shape.dim[:] = var.shape
        meta_info.data_type = var.dtype.oneflow_proto_dtype
        var_dir = os.path.join(path, name)
        param_path = os.path.join(var_dir, DATA_FILENAME)
        os.makedirs(os.path.dirname(param_path))
        with open(param_path, "wb") as f:
            # Stream the value slice by slice to bound peak memory.
            for _, _, slice in _ReadSlice(var):
                f.write(slice.tobytes())
        with open(os.path.join(var_dir, META_INFO_FILENAME), "w") as f:
            f.write(text_format.MessageToString(meta_info))
    # Write an empty file 'snapshot_done', indicating that
    # the save process finished normally.
    with open(os.path.join(path, "snapshot_done"), "w"):
        pass
def _LogicalSlice(
    input_blob: EagerBlobTrait, start: Sequence[int], stop: Sequence[int]
) -> np.ndarray:
    """
    Construct a logical_slice op and run it by oneflow eager,
    return the sliced result as a numpy ndarray

    The region read is [start, stop) on every axis with unit step.
    """
    op_name = id_util.UniqueStr(OP_PREFIX)
    def AsyncSlice(Yield):
        def build(builder):
            op_conf = op_conf_pb.OperatorConf()
            # device_tag doesn't matter for logical_slice op
            device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
            op_conf.device_tag = device_tag
            op_conf.name = op_name
            op_conf.user_conf.op_type_name = "logical_slice"
            op_conf.user_conf.input["x"].s.append("{}/x_0".format(op_name))
            op_conf.user_conf.output["y"].s.append("{}/y_0".format(op_name))
            input_blob_object = input_blob.blob_object
            # Run on the same placement as the input blob.
            parallel_conf = input_blob_object.parallel_desc_symbol.parallel_conf
            op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
            op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
            op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
            op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
            bn_in_op2blob_object = {"x_0": input_blob_object}
            scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(input_blob)
            op_attribute = op_infer_util.Infer(
                op_conf, bn_in_op2blob_object, scope_symbol_id
            )
            builder.StatelessCall(
                op_attribute,
                parallel_conf=parallel_conf,
                bn_in_op2blob_object=bn_in_op2blob_object,
            )
            # Hand the produced output blob object back to the awaiting caller.
            Yield(bn_in_op2blob_object["y_0"])
        vm_util.LogicalRun(build)
    lbi = lbi_util.LogicalBlobId()
    lbi.set_op_name(op_name)
    lbi.set_blob_name(op_name)
    blob_object = async_util.Await(1, AsyncSlice)[0]
    # Wrap the raw blob object as an eager blob so .numpy() can fetch it.
    blob = oneflow_api.EagerConsistentBlob(
        lbi,
        blob_object=blob_object,
        blob_register=blob_register,
        job_name=FAKE_JOB_NAME,
    )
    return blob.numpy()
def _GetCpu0VariableBlobFromNumpy(
    np_array: np.ndarray, dtype: dtype_util.dtype
) -> oneflow_api.EagerConsistentBlob:
    """
    Add a variable on cpu 0, and feed the value of `np_array`
    Note: dtype argument cannot be eliminated by
    convert_numpy_dtype_to_oneflow_dtype(np_array.dtype),
    because np.int8 == np.char and
    numpy_dtype_to_oneflow_dtype(oneflow_dtype_to_numpy_dtype(flow.int8))
    may be flow.char
    """
    with oneflow.scope.placement("cpu", "0:0"):
        op_name = id_util.UniqueStr(OP_PREFIX)
        # Build a throwaway, non-trainable variable op matching the array shape.
        op_conf = get_variable.GenerateVariableOpConf(
            name=op_name,
            shape=np_array.shape,
            dtype=dtype,
            initializer=initializer_util.zeros_initializer(dtype=dtype),
            trainable=False,
        )
        current_parallel_desc_sym = oneflow.current_scope().device_parallel_desc_symbol
        device_tag = current_parallel_desc_sym.device_tag
        op_conf.device_tag = device_tag
        op_attribute = op_infer_util.Infer(op_conf, {})
        var_blob = get_variable.CreateEagerVariableBlob(
            op_attribute, job_name=FAKE_JOB_NAME
        )
        # Overwrite the zero-initialized contents with the numpy values.
        interface_op_read_and_write.FeedValueToInterfaceBlobObject(
            var_blob.blob_object, np_array
        )
        return var_blob
def _LogicalSliceAssign(
    ref_blob: EagerBlobTrait,
    value_blob: EagerBlobTrait,
    start: Sequence[int],
    stop: Sequence[int],
) -> None:
    """
    Construct a logical_slice_assign op and run it by oneflow eager

    Writes `value_blob` into the [start, stop) region of `ref_blob` with
    unit step on every axis.
    """
    ref_blob_object = ref_blob.blob_object
    value_blob_object = value_blob.blob_object
    def BuildAssignInstruction(builder):
        op_conf = op_conf_pb.OperatorConf()
        # device_tag doesn't matter for logical_slice_assign op
        device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
        op_conf.device_tag = device_tag
        op_name = id_util.UniqueStr(OP_PREFIX)
        op_conf.name = op_name
        op_conf.user_conf.op_type_name = "logical_slice_assign"
        op_conf.user_conf.input["value"].s.append("{}/value_0".format(op_name))
        op_conf.user_conf.input["ref"].s.append("{}/ref_0".format(op_name))
        # Run on the placement of the blob being assigned into.
        parallel_conf = ref_blob_object.parallel_desc_symbol.parallel_conf
        op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
        op_conf.user_conf.attr["start"].at_list_int64.val[:] = start
        op_conf.user_conf.attr["stop"].at_list_int64.val[:] = stop
        op_conf.user_conf.attr["step"].at_list_int64.val[:] = [1] * len(start)
        bn_in_op2blob_object = {"ref_0": ref_blob_object, "value_0": value_blob_object}
        scope_symbol_id = _GetScopeSymbolIdFromEagerBlob(ref_blob)
        op_attribute = op_infer_util.Infer(
            op_conf, bn_in_op2blob_object, scope_symbol_id
        )
        builder.StatelessCall(
            op_attribute,
            parallel_conf=parallel_conf,
            bn_in_op2blob_object=bn_in_op2blob_object,
        )
    vm_util.LogicalRun(BuildAssignInstruction)
    # NOTE(review): presumably drops the ref blob's cached value so later
    # reads observe the in-place assignment -- confirm against blob_cache.
    blob_cache_util.TryDisableBlobCache(ref_blob_object)
def _FeedValueToVariable(
    var_blob: oneflow_api.EagerConsistentBlob, value: ValueContainer
) -> None:
    """Copy the contents of `value` into the variable `var_blob`, slice by slice."""
    assert isinstance(
        value, (EagerBlobTrait, FileBackendVariableBlob, np.ndarray)
    ), "Unknown value type: {}".format(type(value).__name__)
    # A file-backed blob saved without metadata borrows shape/dtype from the target.
    if isinstance(value, FileBackendVariableBlob) and not value.has_meta_info_:
        value = FileBackendVariableBlob(value.var_dir_, var_blob.dtype, var_blob.shape)
    assert var_blob.shape == value.shape, "{} vs {}".format(var_blob.shape, value.shape)
    value_flow_dtype = (
        dtype_util.convert_numpy_dtype_to_oneflow_dtype(value.dtype)
        if isinstance(value, np.ndarray)
        else value.dtype
    )
    assert var_blob.dtype == value_flow_dtype, "{} vs {}".format(
        var_blob.dtype, value_flow_dtype
    )
    # Transfer one bounded region at a time via a temporary cpu-0 variable.
    for start_nd_idx, stop_nd_idx, np_slice in _ReadSlice(value):
        tmp_blob = _GetCpu0VariableBlobFromNumpy(np_slice, var_blob.dtype)
        _LogicalSliceAssign(var_blob, tmp_blob, start_nd_idx, stop_nd_idx)
@oneflow_export("load_variables")
@session_ctx.try_init_default_session
def LoadVariables(
    value_dict: Dict[str, ValueContainer], ignore_mismatch: bool = True,
):
    """
    Load value in `value_dict` into oneflow variables.
    For example, if `value_dict` is {'x', np.ones(x_shape)},
    the value of variable "x" will all ones.
    If `ignore_mismatch` is False, an exception will be raised when
    there is a name in `value_dict` not belonging to any variable.
    """
    oneflow.sync_default_session()
    all_vars = GetAllVariables()
    for name, value in value_dict.items():
        # Guard clause: skip (or reject) names that match no variable.
        if name not in all_vars:
            if not ignore_mismatch:
                raise RuntimeError('"{}" is not a variable name'.format(name))
            continue
        var_blob = interface_op_read_and_write.GetEagerInterfaceBlob(name)
        _FeedValueToVariable(var_blob, value)
    # Block until all the eager assignments above have completed.
    oneflow_api.eager.Sync()
def _ForEachSlice(
    container: ValueContainer,
    f: Union[
        Callable[[EagerBlobTrait, Sequence[int], Sequence[int]], Any],
        Callable[[FileBackendVariableBlob, Sequence[int], Sequence[int]], Any],
        Callable[[np.ndarray, Sequence[int], Sequence[int]], Any],
    ],
):
    """
    Slice container into slices whose size < SLICE_BYTES. For every slice,
    yield start_nd_idx, stop_nd_idx and f(slice)

    Slices are contiguous runs in row-major flat order, so a consumer that
    reads them sequentially reconstructs the full tensor.
    """
    assert isinstance(
        container, (EagerBlobTrait, FileBackendVariableBlob, np.ndarray)
    ), "Unknown type: {}".format(type(container).__name__)
    assert container.shape is not None
    # For current implementation (transport data by grpc), SLICE_BYTES must be lower than 64M
    SLICE_BYTES = 32 * 1024 * 1024
    if isinstance(container, np.ndarray):
        np_dtype = container.dtype
    else:
        np_dtype = np.dtype(
            dtype_util.convert_oneflow_dtype_to_numpy_dtype(container.dtype)
        )
    SLICE_LEN = SLICE_BYTES // np_dtype.itemsize
    start_idx = 0
    size = _ElemCnt(container.shape)
    cnt = 1
    # Find the outermost axis whose trailing sub-tensor exceeds SLICE_LEN
    # elements; slicing will then advance along that axis.
    for axis in reversed(range(len(container.shape))):
        cnt *= container.shape[axis]
        if cnt > SLICE_LEN:
            break
    # unit_size: elements per single index step along `axis`.
    unit_size = _ElemCnt(container.shape[axis + 1 :])
    max_unit_num = SLICE_LEN // unit_size
    while start_idx < size:
        remainder = container.shape[axis]
        while remainder > 0:
            # Take as many whole units as fit into one slice.
            unit_num = max_unit_num if remainder >= max_unit_num else remainder
            length = unit_num * unit_size
            remainder -= unit_num
            stop_idx = start_idx + length
            start_nd_idx = np.unravel_index(start_idx, container.shape)
            # stop is exclusive: unravel the last covered element, then add 1.
            stop_nd_idx = np.unravel_index(stop_idx - 1, container.shape)
            stop_nd_idx = tuple([x + 1 for x in stop_nd_idx])
            yield start_nd_idx, stop_nd_idx, f(container, start_nd_idx, stop_nd_idx)
            start_idx = stop_idx
def Init() -> None:
    """Initialize every variable of the default session.

    Variables configured with an ``initializer`` are filled slice by slice
    from the configured generator; variables configured with
    ``initialize_with_snapshot`` are loaded from the snapshot directory.
    """
    oneflow.sync_default_session()
    sess = session_ctx.GetDefaultSession()
    for op_name, var_blob in GetAllVariables().items():
        var_conf = sess.OpAttribute4InterfaceOpName(op_name).op_conf.variable_conf
        if not (
            var_conf.HasField("initializer")
            or var_conf.HasField("initialize_with_snapshot")
        ):
            continue
        if var_conf.HasField("initialize_with_snapshot"):
            initialize_with_snapshot_conf = var_conf.initialize_with_snapshot
            # Bug fix: these branches were swapped.  An explicitly configured
            # key must be used when present; otherwise fall back to the op
            # name.  (Reading `.key` of an unset proto field yields "" and
            # produced a wrong snapshot path.)
            if initialize_with_snapshot_conf.HasField("key"):
                snapshot_key = initialize_with_snapshot_conf.key
            else:
                snapshot_key = op_name
            var_dir = os.path.dirname(
                os.path.join(initialize_with_snapshot_conf.path, snapshot_key,)
            )
            LoadVariables({op_name: GetCheckpoint(var_dir)})
            continue
        g = initializer_util.GetInitializer(
            var_conf.initializer, var_conf.random_seed, var_blob.shape
        )

        def GenerateValueAndAssign(var_blob, start_nd_idx, stop_nd_idx):
            # Draw `length` values from the generator, cast to the variable's
            # numpy dtype, and shape them to the slice's extents.
            np_dtype = np.dtype(
                dtype_util.convert_oneflow_dtype_to_numpy_dtype(var_blob.dtype)
            )
            length = _ElemCnt(np.array(stop_nd_idx) - np.array(start_nd_idx))
            vals = (
                np.array(g(length))
                .astype(np_dtype)
                .reshape(np.array(stop_nd_idx) - np.array(start_nd_idx))
            )
            slice_value_blob = _GetCpu0VariableBlobFromNumpy(vals, var_blob.dtype)
            _LogicalSliceAssign(
                var_blob, slice_value_blob, start_nd_idx, stop_nd_idx,
            )

        # We just want to run f on every slice without caring about the
        # return value.
        for _ in _ForEachSlice(var_blob, GenerateValueAndAssign):
            pass
    oneflow_api.eager.Sync()
| [
"oneflow.python.eager.blob_cache.TryDisableBlobCache",
"oneflow.python.ops.initializer_util.GetInitializer",
"oneflow.sync_default_session",
"oneflow.python.eager.op_infer_util.Infer",
"oneflow.core.operator.op_conf_pb2.OperatorConf",
"oneflow.python.ops.get_variable.CreateEagerVariableBlob",
"oneflow.p... | [((2117, 2160), 'oneflow.python.eager.blob_register.GetDefaultBlobRegister', 'blob_register_util.GetDefaultBlobRegister', ([], {}), '()\n', (2158, 2160), True, 'import oneflow.python.eager.blob_register as blob_register_util\n'), ((4542, 4577), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""get_all_variables"""'], {}), "('get_all_variables')\n", (4556, 4577), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5353, 5385), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""checkpoint.get"""'], {}), "('checkpoint.get')\n", (5367, 5385), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((7908, 7941), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""checkpoint.save"""'], {}), "('checkpoint.save')\n", (7922, 7941), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15783, 15815), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""load_variables"""'], {}), "('load_variables')\n", (15797, 15815), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4750, 4780), 'oneflow.sync_default_session', 'oneflow.sync_default_session', ([], {}), '()\n', (4778, 4780), False, 'import oneflow\n'), ((4793, 4824), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4822, 4824), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((5597, 5616), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (5610, 5616), False, 'import os\n'), ((5792, 5808), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (5802, 5808), False, 'import os\n'), ((6138, 6169), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (6167, 6169), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((8177, 8207), 'oneflow.sync_default_session', 
'oneflow.sync_default_session', ([], {}), '()\n', (8205, 8207), False, 'import oneflow\n'), ((8587, 8619), 'os.makedirs', 'os.makedirs', (['path'], {'exist_ok': '(True)'}), '(path, exist_ok=True)\n', (8598, 8619), False, 'import os\n'), ((9657, 9685), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['OP_PREFIX'], {}), '(OP_PREFIX)\n', (9674, 9685), True, 'import oneflow.python.framework.id_util as id_util\n'), ((11214, 11238), 'oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (11236, 11238), True, 'import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util\n'), ((11365, 11484), 'oneflow_api.EagerConsistentBlob', 'oneflow_api.EagerConsistentBlob', (['lbi'], {'blob_object': 'blob_object', 'blob_register': 'blob_register', 'job_name': 'FAKE_JOB_NAME'}), '(lbi, blob_object=blob_object, blob_register\n =blob_register, job_name=FAKE_JOB_NAME)\n', (11396, 11484), False, 'import oneflow_api\n'), ((14537, 14579), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BuildAssignInstruction'], {}), '(BuildAssignInstruction)\n', (14555, 14579), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((14584, 14636), 'oneflow.python.eager.blob_cache.TryDisableBlobCache', 'blob_cache_util.TryDisableBlobCache', (['ref_blob_object'], {}), '(ref_blob_object)\n', (14619, 14636), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((16265, 16295), 'oneflow.sync_default_session', 'oneflow.sync_default_session', ([], {}), '()\n', (16293, 16295), False, 'import oneflow\n'), ((16664, 16688), 'oneflow_api.eager.Sync', 'oneflow_api.eager.Sync', ([], {}), '()\n', (16686, 16688), False, 'import oneflow_api\n'), ((18648, 18678), 'oneflow.sync_default_session', 'oneflow.sync_default_session', ([], {}), '()\n', (18676, 18678), False, 'import oneflow\n'), ((18691, 18722), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], 
{}), '()\n', (18720, 18722), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((20510, 20534), 'oneflow_api.eager.Sync', 'oneflow_api.eager.Sync', ([], {}), '()\n', (20532, 20534), False, 'import oneflow_api\n'), ((2372, 2408), 'os.path.join', 'os.path.join', (['var_dir', 'DATA_FILENAME'], {}), '(var_dir, DATA_FILENAME)\n', (2384, 2408), False, 'import os\n'), ((2424, 2449), 'os.path.isfile', 'os.path.isfile', (['data_path'], {}), '(data_path)\n', (2438, 2449), False, 'import os\n'), ((2507, 2554), 'os.path.join', 'os.path.join', (['self.var_dir_', 'META_INFO_FILENAME'], {}), '(self.var_dir_, META_INFO_FILENAME)\n', (2519, 2554), False, 'import os\n'), ((2566, 2596), 'os.path.exists', 'os.path.exists', (['meta_info_path'], {}), '(meta_info_path)\n', (2580, 2596), False, 'import os\n'), ((3801, 3843), 'os.path.join', 'os.path.join', (['self.var_dir_', 'DATA_FILENAME'], {}), '(self.var_dir_, DATA_FILENAME)\n', (3813, 3843), False, 'import os\n'), ((5081, 5134), 'oneflow.python.experimental.interface_op_read_and_write.GetEagerInterfaceBlob', 'interface_op_read_and_write.GetEagerInterfaceBlob', (['op'], {}), '(op)\n', (5130, 5134), False, 'from oneflow.python.experimental import interface_op_read_and_write\n'), ((5253, 5286), 'os.path.join', 'os.path.join', (['path', 'DATA_FILENAME'], {}), '(path, DATA_FILENAME)\n', (5265, 5286), False, 'import os\n'), ((5828, 5849), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (5840, 5849), False, 'import os\n'), ((8318, 8338), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (8332, 8338), False, 'import os\n'), ((8679, 8719), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (8717, 8719), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((8841, 8865), 'os.path.join', 'os.path.join', (['path', 'name'], {}), '(path, name)\n', (8853, 8865), False, 'import 
os\n'), ((8887, 8923), 'os.path.join', 'os.path.join', (['var_dir', 'DATA_FILENAME'], {}), '(var_dir, DATA_FILENAME)\n', (8899, 8923), False, 'import os\n'), ((11177, 11202), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['build'], {}), '(build)\n', (11195, 11202), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((11318, 11349), 'oneflow.python.lib.core.async_util.Await', 'async_util.Await', (['(1)', 'AsyncSlice'], {}), '(1, AsyncSlice)\n', (11334, 11349), True, 'import oneflow.python.lib.core.async_util as async_util\n'), ((11993, 12030), 'oneflow.scope.placement', 'oneflow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (12016, 12030), False, 'import oneflow\n'), ((12050, 12078), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['OP_PREFIX'], {}), '(OP_PREFIX)\n', (12067, 12078), True, 'import oneflow.python.framework.id_util as id_util\n'), ((12540, 12572), 'oneflow.python.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', '{}'], {}), '(op_conf, {})\n', (12559, 12572), True, 'import oneflow.python.eager.op_infer_util as op_infer_util\n'), ((12592, 12666), 'oneflow.python.ops.get_variable.CreateEagerVariableBlob', 'get_variable.CreateEagerVariableBlob', (['op_attribute'], {'job_name': 'FAKE_JOB_NAME'}), '(op_attribute, job_name=FAKE_JOB_NAME)\n', (12628, 12666), True, 'import oneflow.python.ops.get_variable as get_variable\n'), ((12698, 12793), 'oneflow.python.experimental.interface_op_read_and_write.FeedValueToInterfaceBlobObject', 'interface_op_read_and_write.FeedValueToInterfaceBlobObject', (['var_blob.blob_object', 'np_array'], {}), '(var_blob.\n blob_object, np_array)\n', (12756, 12793), False, 'from oneflow.python.experimental import interface_op_read_and_write\n'), ((13220, 13245), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (13243, 13245), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((13452, 
13480), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['OP_PREFIX'], {}), '(OP_PREFIX)\n', (13469, 13480), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14279, 14346), 'oneflow.python.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'bn_in_op2blob_object', 'scope_symbol_id'], {}), '(op_conf, bn_in_op2blob_object, scope_symbol_id)\n', (14298, 14346), True, 'import oneflow.python.eager.op_infer_util as op_infer_util\n'), ((15336, 15396), 'oneflow.python.framework.dtype.convert_numpy_dtype_to_oneflow_dtype', 'dtype_util.convert_numpy_dtype_to_oneflow_dtype', (['value.dtype'], {}), '(value.dtype)\n', (15383, 15396), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((19564, 19659), 'oneflow.python.ops.initializer_util.GetInitializer', 'initializer_util.GetInitializer', (['var_conf.initializer', 'var_conf.random_seed', 'var_blob.shape'], {}), '(var_conf.initializer, var_conf.random_seed,\n var_blob.shape)\n', (19595, 19659), True, 'import oneflow.python.ops.initializer_util as initializer_util\n'), ((2622, 2662), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (2660, 2662), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((3018, 3086), 'oneflow.python.framework.dtype.convert_proto_dtype_to_oneflow_dtype', 'dtype_util.convert_proto_dtype_to_oneflow_dtype', (['meta_info.data_type'], {}), '(meta_info.data_type)\n', (3065, 3086), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((8375, 8394), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8388, 8394), False, 'import os\n'), ((8944, 8971), 'os.path.dirname', 'os.path.dirname', (['param_path'], {}), '(param_path)\n', (8959, 8971), False, 'import os\n'), ((9349, 9384), 'os.path.join', 'os.path.join', (['path', '"""snapshot_done"""'], {}), "(path, 'snapshot_done')\n", (9361, 9384), False, 'import os\n'), 
((9764, 9789), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (9787, 9789), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((10840, 10907), 'oneflow.python.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'bn_in_op2blob_object', 'scope_symbol_id'], {}), '(op_conf, bn_in_op2blob_object, scope_symbol_id)\n', (10859, 10907), True, 'import oneflow.python.eager.op_infer_util as op_infer_util\n'), ((12367, 12390), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (12388, 12390), False, 'import oneflow\n'), ((16425, 16480), 'oneflow.python.experimental.interface_op_read_and_write.GetEagerInterfaceBlob', 'interface_op_read_and_write.GetEagerInterfaceBlob', (['name'], {}), '(name)\n', (16474, 16480), False, 'from oneflow.python.experimental import interface_op_read_and_write\n'), ((17574, 17638), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['container.dtype'], {}), '(container.dtype)\n', (17621, 17638), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((18323, 18367), 'numpy.unravel_index', 'np.unravel_index', (['start_idx', 'container.shape'], {}), '(start_idx, container.shape)\n', (18339, 18367), True, 'import numpy as np\n'), ((18394, 18441), 'numpy.unravel_index', 'np.unravel_index', (['(stop_idx - 1)', 'container.shape'], {}), '(stop_idx - 1, container.shape)\n', (18410, 18441), True, 'import numpy as np\n'), ((3671, 3697), 'os.path.getsize', 'os.path.getsize', (['data_path'], {}), '(data_path)\n', (3686, 3697), False, 'import os\n'), ((6910, 6974), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['container.dtype'], {}), '(container.dtype)\n', (6957, 6974), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((9122, 9163), 'os.path.join', 'os.path.join', (['var_dir', 'META_INFO_FILENAME'], 
{}), '(var_dir, META_INFO_FILENAME)\n', (9134, 9163), False, 'import os\n'), ((9196, 9234), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['meta_info'], {}), '(meta_info)\n', (9223, 9234), False, 'from google.protobuf import text_format\n'), ((12243, 12290), 'oneflow.python.ops.initializer_util.zeros_initializer', 'initializer_util.zeros_initializer', ([], {'dtype': 'dtype'}), '(dtype=dtype)\n', (12277, 12290), True, 'import oneflow.python.ops.initializer_util as initializer_util\n'), ((13331, 13354), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (13352, 13354), False, 'import oneflow\n'), ((19392, 19454), 'os.path.join', 'os.path.join', (['initialize_with_snapshot_conf.path', 'snapshot_key'], {}), '(initialize_with_snapshot_conf.path, snapshot_key)\n', (19404, 19454), False, 'import os\n'), ((19801, 19864), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['var_blob.dtype'], {}), '(var_blob.dtype)\n', (19848, 19864), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((3568, 3628), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype_'], {}), '(self.dtype_)\n', (3615, 3628), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((4502, 4516), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (4509, 4516), True, 'import numpy as np\n'), ((8403, 8419), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (8413, 8419), False, 'import os\n'), ((9876, 9899), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (9897, 9899), False, 'import oneflow\n'), ((19909, 19930), 'numpy.array', 'np.array', (['stop_nd_idx'], {}), '(stop_nd_idx)\n', (19917, 19930), True, 'import numpy as np\n'), ((19933, 19955), 'numpy.array', 'np.array', (['start_nd_idx'], {}), '(start_nd_idx)\n', (19941, 19955), True, 'import numpy as np\n'), 
((20073, 20094), 'numpy.array', 'np.array', (['stop_nd_idx'], {}), '(stop_nd_idx)\n', (20081, 20094), True, 'import numpy as np\n'), ((20097, 20119), 'numpy.array', 'np.array', (['start_nd_idx'], {}), '(start_nd_idx)\n', (20105, 20119), True, 'import numpy as np\n'), ((4299, 4358), 'oneflow.python.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype'], {}), '(self.dtype)\n', (4346, 4358), True, 'import oneflow.python.framework.dtype as dtype_util\n'), ((3701, 3720), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (3708, 3720), True, 'import numpy as np\n'), ((7131, 7152), 'numpy.array', 'np.array', (['stop_nd_idx'], {}), '(stop_nd_idx)\n', (7139, 7152), True, 'import numpy as np\n'), ((7155, 7177), 'numpy.array', 'np.array', (['start_nd_idx'], {}), '(start_nd_idx)\n', (7163, 7177), True, 'import numpy as np\n'), ((7261, 7297), 'numpy.frombuffer', 'np.frombuffer', (['slice'], {'dtype': 'np_dtype'}), '(slice, dtype=np_dtype)\n', (7274, 7297), True, 'import numpy as np\n'), ((7328, 7349), 'numpy.array', 'np.array', (['stop_nd_idx'], {}), '(stop_nd_idx)\n', (7336, 7349), True, 'import numpy as np\n'), ((7352, 7374), 'numpy.array', 'np.array', (['start_nd_idx'], {}), '(start_nd_idx)\n', (7360, 7374), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import sys
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.framework.tensor import Tensor, TensorTuple
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestGraphCheck(flow.unittest.TestCase):
def test_non_tensor_types_of_module(test_case):
class CustomModuleIOCheck(flow.nn.Module):
def __init__(self):
super().__init__()
def forward(self, t, tp, lt, n, i, s):
return t, tp, lt, n, i, s
class CustomGraphIOCheck(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModuleIOCheck()
def build(self, t, tp, lt, n):
rt, rtp, rlt, n, ri, rs = self.m(t, tp, lt, n, 1, "2")
return t, tp, lt, n
g = CustomGraphIOCheck()
g.debug()
x = np.ones((10, 10))
x = flow.tensor(x, dtype=flow.float32)
t0 = np.ones((10, 10))
t0 = flow.tensor(t0, dtype=flow.float32)
t1 = np.ones((10, 10))
t1 = flow.tensor(t1, dtype=flow.float32)
tp0 = TensorTuple()
tp0.append(t0)
tp0.append(t1)
t2 = np.ones((10, 10))
t2 = flow.tensor(t2, dtype=flow.float32)
t3 = np.ones((10, 10))
t3 = flow.tensor(t3, dtype=flow.float32)
lt0 = list()
lt0.append(t2)
lt0.append(t3)
ot, otp, olt, on = g(x, tp0, lt0, None)
test_case.assertTrue(np.array_equal(x.numpy(), ot.numpy()))
test_case.assertTrue(isinstance(otp, TensorTuple))
test_case.assertTrue(isinstance(otp[0], Tensor))
test_case.assertTrue(np.array_equal(otp[0].numpy(), tp0[0].numpy()))
test_case.assertTrue(isinstance(otp[1], Tensor))
test_case.assertTrue(np.array_equal(otp[1].numpy(), tp0[1].numpy()))
test_case.assertTrue(isinstance(olt, list))
test_case.assertTrue(isinstance(olt[0], Tensor))
test_case.assertTrue(np.array_equal(olt[0].numpy(), lt0[0].numpy()))
test_case.assertTrue(isinstance(olt[1], Tensor))
test_case.assertTrue(np.array_equal(olt[1].numpy(), lt0[1].numpy()))
test_case.assertTrue(on is None)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor",
"oneflow.framework.tensor.TensorTuple"
] | [((831, 863), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (861, 863), True, 'import oneflow as flow\n'), ((771, 805), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (780, 805), False, 'import os\n'), ((2923, 2938), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2936, 2938), False, 'import unittest\n'), ((1554, 1571), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1561, 1571), True, 'import numpy as np\n'), ((1584, 1618), 'oneflow.tensor', 'flow.tensor', (['x'], {'dtype': 'flow.float32'}), '(x, dtype=flow.float32)\n', (1595, 1618), True, 'import oneflow as flow\n'), ((1633, 1650), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1640, 1650), True, 'import numpy as np\n'), ((1664, 1699), 'oneflow.tensor', 'flow.tensor', (['t0'], {'dtype': 'flow.float32'}), '(t0, dtype=flow.float32)\n', (1675, 1699), True, 'import oneflow as flow\n'), ((1713, 1730), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1720, 1730), True, 'import numpy as np\n'), ((1744, 1779), 'oneflow.tensor', 'flow.tensor', (['t1'], {'dtype': 'flow.float32'}), '(t1, dtype=flow.float32)\n', (1755, 1779), True, 'import oneflow as flow\n'), ((1794, 1807), 'oneflow.framework.tensor.TensorTuple', 'TensorTuple', ([], {}), '()\n', (1805, 1807), False, 'from oneflow.framework.tensor import Tensor, TensorTuple\n'), ((1868, 1885), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1875, 1885), True, 'import numpy as np\n'), ((1899, 1934), 'oneflow.tensor', 'flow.tensor', (['t2'], {'dtype': 'flow.float32'}), '(t2, dtype=flow.float32)\n', (1910, 1934), True, 'import oneflow as flow\n'), ((1948, 1965), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1955, 1965), True, 'import numpy as np\n'), ((1979, 2014), 'oneflow.tensor', 'flow.tensor', (['t3'], {'dtype': 'flow.float32'}), '(t3, dtype=flow.float32)\n', (1990, 2014), True, 'import oneflow as 
flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
BLOCK_COUNTS = [3, 4, 6, 3]
BLOCK_FILTERS = [256, 512, 1024, 2048]
BLOCK_FILTERS_INNER = [64, 128, 256, 512]
class ResnetBuilder(object):
def __init__(
self, weight_regularizer, trainable=True, training=True, channel_last=False
):
self.data_format = "NHWC" if channel_last else "NCHW"
self.weight_initializer = flow.variance_scaling_initializer(
2, "fan_in", "random_normal", data_format=self.data_format
)
self.weight_regularizer = weight_regularizer
self.trainable = trainable
self.training = training
def _conv2d(
self, name, input, filters, kernel_size, strides=1, padding="SAME", dilations=1,
):
# There are different shapes of weight metric between 'NCHW' and 'NHWC' mode
if self.data_format == "NHWC":
shape = (filters, kernel_size, kernel_size, input.shape[3])
else:
shape = (filters, input.shape[1], kernel_size, kernel_size)
weight = flow.get_variable(
name + "-weight",
shape=shape,
dtype=input.dtype,
initializer=self.weight_initializer,
regularizer=self.weight_regularizer,
model_name="weight",
trainable=self.trainable,
)
return flow.nn.conv2d(
input,
weight,
strides,
padding,
None,
self.data_format,
dilations,
name=name,
)
def _batch_norm(self, inputs, name=None, last=False):
initializer = flow.zeros_initializer() if last else flow.ones_initializer()
axis = 1
if self.data_format == "NHWC":
axis = 3
return flow.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=0.9, # 97,
epsilon=1e-5,
center=True,
scale=True,
trainable=self.trainable,
training=self.training,
gamma_initializer=initializer,
moving_variance_initializer=initializer,
gamma_regularizer=self.weight_regularizer,
beta_regularizer=self.weight_regularizer,
name=name,
)
def conv2d_affine(
self, input, name, filters, kernel_size, strides, activation=None, last=False
):
# input data_format must be NCHW, cannot check now
padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID"
output = self._conv2d(name, input, filters, kernel_size, strides, padding)
output = self._batch_norm(output, name + "_bn", last=last)
if activation == "Relu":
output = flow.nn.relu(output)
return output
def bottleneck_transformation(
self, input, block_name, filters, filters_inner, strides
):
a = self.conv2d_affine(
input, block_name + "_branch2a", filters_inner, 1, 1, activation="Relu"
)
b = self.conv2d_affine(
a, block_name + "_branch2b", filters_inner, 3, strides, activation="Relu"
)
c = self.conv2d_affine(b, block_name + "_branch2c", filters, 1, 1, last=True)
return c
def residual_block(self, input, block_name, filters, filters_inner, strides_init):
if strides_init != 1 or block_name == "res2_0":
shortcut = self.conv2d_affine(
input, block_name + "_branch1", filters, 1, strides_init
)
else:
shortcut = input
bottleneck = self.bottleneck_transformation(
input, block_name, filters, filters_inner, strides_init,
)
return flow.nn.relu(bottleneck + shortcut)
def residual_stage(
self, input, stage_name, counts, filters, filters_inner, stride_init=2
):
output = input
for i in range(counts):
block_name = "%s_%d" % (stage_name, i)
output = self.residual_block(
output, block_name, filters, filters_inner, stride_init if i == 0 else 1
)
return output
def resnet_conv_x_body(self, input):
output = input
for i, (counts, filters, filters_inner) in enumerate(
zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER)
):
stage_name = "res%d" % (i + 2)
output = self.residual_stage(
output, stage_name, counts, filters, filters_inner, 1 if i == 0 else 2
)
return output
def resnet_stem(self, input):
conv1 = self._conv2d("conv1", input, 64, 7, 2)
conv1_bn = flow.nn.relu(self._batch_norm(conv1, "conv1_bn"))
pool1 = flow.nn.max_pool2d(
conv1_bn,
ksize=3,
strides=2,
padding="SAME",
data_format=self.data_format,
name="pool1",
)
return pool1
def resnet50(
images,
trainable=True,
need_transpose=False,
training=True,
wd=1.0 / 32768,
channel_last=False,
):
weight_regularizer = flow.regularizers.l2(wd) if wd > 0.0 and wd < 1.0 else None
builder = ResnetBuilder(weight_regularizer, trainable, training, channel_last)
# note: images.shape = (N C H W) in cc's new dataloader, transpose is not needed anymore
if need_transpose:
images = flow.transpose(images, name="transpose", perm=[0, 3, 1, 2])
if channel_last:
# if channel_last=True, then change mode from 'nchw' to 'nhwc'
images = flow.transpose(images, name="transpose", perm=[0, 2, 3, 1])
with flow.scope.namespace("Resnet"):
stem = builder.resnet_stem(images)
body = builder.resnet_conv_x_body(stem)
pool5 = flow.nn.avg_pool2d(
body,
ksize=7,
strides=1,
padding="VALID",
data_format=builder.data_format,
name="pool5",
)
fc1001 = flow.layers.dense(
flow.reshape(pool5, (pool5.shape[0], -1)),
units=1000,
use_bias=True,
kernel_initializer=flow.variance_scaling_initializer(
2, "fan_in", "random_normal"
),
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=weight_regularizer,
bias_regularizer=weight_regularizer,
trainable=trainable,
name="fc1001",
)
return fc1001
| [
"oneflow.nn.conv2d",
"oneflow.scope.namespace",
"oneflow.zeros_initializer",
"oneflow.layers.batch_normalization",
"oneflow.variance_scaling_initializer",
"oneflow.nn.max_pool2d",
"oneflow.nn.relu",
"oneflow.ones_initializer",
"oneflow.reshape",
"oneflow.transpose",
"oneflow.regularizers.l2",
... | [((960, 1058), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {'data_format': 'self.data_format'}), "(2, 'fan_in', 'random_normal', data_format\n =self.data_format)\n", (993, 1058), True, 'import oneflow as flow\n'), ((1610, 1807), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'shape', 'dtype': 'input.dtype', 'initializer': 'self.weight_initializer', 'regularizer': 'self.weight_regularizer', 'model_name': '"""weight"""', 'trainable': 'self.trainable'}), "(name + '-weight', shape=shape, dtype=input.dtype,\n initializer=self.weight_initializer, regularizer=self.\n weight_regularizer, model_name='weight', trainable=self.trainable)\n", (1627, 1807), True, 'import oneflow as flow\n'), ((1910, 2007), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'None', 'self.data_format', 'dilations'], {'name': 'name'}), '(input, weight, strides, padding, None, self.data_format,\n dilations, name=name)\n', (1924, 2007), True, 'import oneflow as flow\n'), ((2346, 2692), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'self.trainable', 'training': 'self.training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'self.weight_regularizer', 'beta_regularizer': 'self.weight_regularizer', 'name': 'name'}), '(inputs=inputs, axis=axis, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=self.trainable,\n training=self.training, gamma_initializer=initializer,\n moving_variance_initializer=initializer, gamma_regularizer=self.\n weight_regularizer, beta_regularizer=self.weight_regularizer, name=name)\n', (2377, 2692), True, 'import oneflow as flow\n'), ((4277, 4312), 'oneflow.nn.relu', 'flow.nn.relu', (['(bottleneck + 
shortcut)'], {}), '(bottleneck + shortcut)\n', (4289, 4312), True, 'import oneflow as flow\n'), ((5286, 5398), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv1_bn'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""SAME"""', 'data_format': 'self.data_format', 'name': '"""pool1"""'}), "(conv1_bn, ksize=3, strides=2, padding='SAME',\n data_format=self.data_format, name='pool1')\n", (5304, 5398), True, 'import oneflow as flow\n'), ((5664, 5688), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['wd'], {}), '(wd)\n', (5684, 5688), True, 'import oneflow as flow\n'), ((5940, 5999), 'oneflow.transpose', 'flow.transpose', (['images'], {'name': '"""transpose"""', 'perm': '[0, 3, 1, 2]'}), "(images, name='transpose', perm=[0, 3, 1, 2])\n", (5954, 5999), True, 'import oneflow as flow\n'), ((6109, 6168), 'oneflow.transpose', 'flow.transpose', (['images'], {'name': '"""transpose"""', 'perm': '[0, 2, 3, 1]'}), "(images, name='transpose', perm=[0, 2, 3, 1])\n", (6123, 6168), True, 'import oneflow as flow\n'), ((6178, 6208), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""Resnet"""'], {}), "('Resnet')\n", (6198, 6208), True, 'import oneflow as flow\n'), ((6317, 6430), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['body'], {'ksize': '(7)', 'strides': '(1)', 'padding': '"""VALID"""', 'data_format': 'builder.data_format', 'name': '"""pool5"""'}), "(body, ksize=7, strides=1, padding='VALID', data_format=\n builder.data_format, name='pool5')\n", (6335, 6430), True, 'import oneflow as flow\n'), ((2192, 2216), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2214, 2216), True, 'import oneflow as flow\n'), ((2230, 2253), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (2251, 2253), True, 'import oneflow as flow\n'), ((3301, 3321), 'oneflow.nn.relu', 'flow.nn.relu', (['output'], {}), '(output)\n', (3313, 3321), True, 'import oneflow as flow\n'), ((6557, 6598), 'oneflow.reshape', 'flow.reshape', (['pool5', 
'(pool5.shape[0], -1)'], {}), '(pool5, (pool5.shape[0], -1))\n', (6569, 6598), True, 'import oneflow as flow\n'), ((6682, 6745), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {}), "(2, 'fan_in', 'random_normal')\n", (6715, 6745), True, 'import oneflow as flow\n'), ((6806, 6830), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (6828, 6830), True, 'import oneflow as flow\n')] |
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from oneflow.test.modules.test_util import GenArgList
from ops import nms, lib_path
flow.config.load_library_now(lib_path())
def box_area(boxes):
return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
def _box_inter_union_np(boxes1, boxes2):
area1 = box_area(boxes1)
area2 = box_area(boxes2)
lt = np.maximum(boxes1[:, np.newaxis, :2], boxes2[:, :2])
rb = np.minimum(boxes1[:, np.newaxis, 2:], boxes2[:, 2:])
wh = np.clip(rb - lt, a_min=0, a_max=np.inf)
inter = wh[:, :, 0] * wh[:, :, 1]
union = area1[:, np.newaxis] + area2 - inter
return inter, union
def box_iou_np(boxes1, boxes2):
inter, union = _box_inter_union_np(boxes1, boxes2)
iou = inter / union
return iou
def nms_np(boxes, scores, iou_threshold):
picked = []
indexes = np.argsort(-scores)
while len(indexes) > 0:
current = indexes[0]
picked.append(current.item())
if len(indexes) == 1:
break
current_box = boxes[current, :]
indexes = indexes[1:]
rest_boxes = boxes[indexes, :]
iou = np.squeeze(box_iou_np(rest_boxes, current_box[np.newaxis]), axis=1)
indexes = indexes[iou <= iou_threshold]
return np.asarray(picked)
def create_tensors_with_iou(N, iou_thresh):
boxes = np.random.rand(N, 4) * 100
boxes[:, 2:] += boxes[:, :2]
boxes[-1, :] = boxes[0, :]
x0, y0, x1, y1 = boxes[-1].tolist()
iou_thresh += 1e-5
boxes[-1, 2] += (x1 - x0) * (1 - iou_thresh) / iou_thresh
scores = np.random.rand(N)
return boxes, scores
def _test_nms(test_case, device):
iou = 0.5
boxes, scores = create_tensors_with_iou(1000, iou)
boxes = flow.Tensor(boxes, dtype=flow.float32, device=flow.device(device))
scores = flow.Tensor(scores, dtype=flow.float32, device=flow.device(device))
keep_np = nms_np(boxes.numpy(), scores.numpy(), iou)
keep = nms(boxes, scores, iou)
test_case.assertTrue(np.allclose(keep.numpy(), keep_np))
class TestNMS(flow.unittest.TestCase):
def test_nms(test_case):
arg_dict = OrderedDict()
arg_dict["test_fun"] = [_test_nms]
arg_dict["device"] = ["cuda"]
for arg in GenArgList(arg_dict):
arg[0](test_case, *arg[1:])
if __name__ == "__main__":
unittest.main()
| [
"oneflow.device",
"oneflow.test.modules.test_util.GenArgList"
] | [((210, 220), 'ops.lib_path', 'lib_path', ([], {}), '()\n', (218, 220), False, 'from ops import nms, lib_path\n'), ((425, 477), 'numpy.maximum', 'np.maximum', (['boxes1[:, np.newaxis, :2]', 'boxes2[:, :2]'], {}), '(boxes1[:, np.newaxis, :2], boxes2[:, :2])\n', (435, 477), True, 'import numpy as np\n'), ((487, 539), 'numpy.minimum', 'np.minimum', (['boxes1[:, np.newaxis, 2:]', 'boxes2[:, 2:]'], {}), '(boxes1[:, np.newaxis, 2:], boxes2[:, 2:])\n', (497, 539), True, 'import numpy as np\n'), ((550, 589), 'numpy.clip', 'np.clip', (['(rb - lt)'], {'a_min': '(0)', 'a_max': 'np.inf'}), '(rb - lt, a_min=0, a_max=np.inf)\n', (557, 589), True, 'import numpy as np\n'), ((905, 924), 'numpy.argsort', 'np.argsort', (['(-scores)'], {}), '(-scores)\n', (915, 924), True, 'import numpy as np\n'), ((1319, 1337), 'numpy.asarray', 'np.asarray', (['picked'], {}), '(picked)\n', (1329, 1337), True, 'import numpy as np\n'), ((1625, 1642), 'numpy.random.rand', 'np.random.rand', (['N'], {}), '(N)\n', (1639, 1642), True, 'import numpy as np\n'), ((2001, 2024), 'ops.nms', 'nms', (['boxes', 'scores', 'iou'], {}), '(boxes, scores, iou)\n', (2004, 2024), False, 'from ops import nms, lib_path\n'), ((2384, 2399), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2397, 2399), False, 'import unittest\n'), ((1396, 1416), 'numpy.random.rand', 'np.random.rand', (['N', '(4)'], {}), '(N, 4)\n', (1410, 1416), True, 'import numpy as np\n'), ((2175, 2188), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2186, 2188), False, 'from collections import OrderedDict\n'), ((2289, 2309), 'oneflow.test.modules.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2299, 2309), False, 'from oneflow.test.modules.test_util import GenArgList\n'), ((1831, 1850), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1842, 1850), True, 'import oneflow as flow\n'), ((1912, 1931), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1923, 1931), True, 'import 
oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
from oneflow.test_utils.automated_test_util.generators import random
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestVar(flow.unittest.TestCase):
def test_flow_var_all_dim_with_random_data(test_case):
device = random_device()
x = random_tensor().to(device)
y = torch.var(x)
return y
@autotest(check_graph=True)
def test_flow_var_one_dim_with_random_data(test_case):
device = random_device()
x = random_tensor(ndim=4).to(device)
y = torch.var(
x,
dim=random(low=-4, high=4).to(int),
unbiased=random().to(bool),
keepdim=random().to(bool),
)
return y
@autotest(auto_backward=False, check_graph=True)
def test_flow_var_0_size_data_with_random_data(test_case):
device = random_device()
x = random_tensor(4, 2, 3, 0, 4).to(device)
y = torch.var(
x,
dim=random(low=-4, high=4).to(int),
unbiased=random().to(bool),
keepdim=random().to(bool),
)
return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.test_utils.automated_test_util.generators.random"
] | [((780, 812), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (810, 812), True, 'import oneflow as flow\n'), ((1814, 1829), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1827, 1829), False, 'import unittest\n'), ((1249, 1271), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {'low': '(-4)', 'high': '(4)'}), '(low=-4, high=4)\n', (1255, 1271), False, 'from oneflow.test_utils.automated_test_util.generators import random\n'), ((1302, 1310), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {}), '()\n', (1308, 1310), False, 'from oneflow.test_utils.automated_test_util.generators import random\n'), ((1341, 1349), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {}), '()\n', (1347, 1349), False, 'from oneflow.test_utils.automated_test_util.generators import random\n'), ((1643, 1665), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {'low': '(-4)', 'high': '(4)'}), '(low=-4, high=4)\n', (1649, 1665), False, 'from oneflow.test_utils.automated_test_util.generators import random\n'), ((1696, 1704), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {}), '()\n', (1702, 1704), False, 'from oneflow.test_utils.automated_test_util.generators import random\n'), ((1735, 1743), 'oneflow.test_utils.automated_test_util.generators.random', 'random', ([], {}), '()\n', (1741, 1743), False, 'from oneflow.test_utils.automated_test_util.generators import random\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import cv2
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _of_image_batch_align(images, input_shape, output_shape, alignment):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_batch_align_job(
images_def: oft.ListListNumpy.Placeholder(shape=input_shape, dtype=flow.float)
):
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
image = flow.image_batch_align(
images_buffer, shape=output_shape[1:], dtype=flow.float, alignment=alignment
)
return image
image = image_batch_align_job([images]).get()
return image.numpy_list()[0]
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _roundup(x, n):
return int((x + n - 1) / n) * n
def _compare_image_batch_align(
test_case, image_files, alignment, print_debug_info=False
):
images = _read_images_by_cv(image_files)
image_shape = _get_images_static_shape(images)
assert len(image_shape) == 4
aligned_image_shape = [
image_shape[0],
_roundup(image_shape[1], alignment),
_roundup(image_shape[2], alignment),
image_shape[3],
]
if print_debug_info:
print("image_shape:", image_shape)
print("aligned_image_shape:", aligned_image_shape)
image_tensor = _of_image_batch_align(
images, tuple(image_shape), tuple(aligned_image_shape), alignment
)
test_case.assertTrue(np.array_equal(aligned_image_shape, image_tensor.shape))
empty_image_array = np.zeros(aligned_image_shape, np.single)
for empty_image, image in zip(empty_image_array, images):
image = image.squeeze()
empty_image[0 : image.shape[0], 0 : image.shape[1], :] = image
test_case.assertTrue(np.array_equal(image_tensor, empty_image_array))
# @flow.unittest.skip_unless_1n1d()
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestImageBatchAlign(flow.unittest.TestCase):
def test_image_batch_align(test_case):
_compare_image_batch_align(
test_case,
[
"/dataset/mscoco_2017/val2017/000000000139.jpg",
"/dataset/mscoco_2017/val2017/000000000632.jpg",
"/dataset/mscoco_2017/val2017/000000000785.jpg",
"/dataset/mscoco_2017/val2017/000000001000.jpg",
],
16,
# True,
)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.tensor_list_to_tensor_buffer",
"oneflow.compatible.single_client.image_batch_align",
"oneflow.compatible.single_client.scope.mirrored_view",
"oneflow.compatible.single_client.typing.ListListNumpy.Placeholder",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.co... | [((3401, 3487), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (3416, 3487), False, 'import unittest\n'), ((827, 855), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (853, 855), True, 'from oneflow.compatible import single_client as flow\n'), ((874, 895), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (893, 895), True, 'from oneflow.compatible import single_client as flow\n'), ((1013, 1062), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1033, 1062), True, 'from oneflow.compatible import single_client as flow\n'), ((1808, 1837), 'numpy.amax', 'np.amax', (['image_shapes'], {'axis': '(0)'}), '(image_shapes, axis=0)\n', (1815, 1837), True, 'import numpy as np\n'), ((3023, 3063), 'numpy.zeros', 'np.zeros', (['aligned_image_shape', 'np.single'], {}), '(aligned_image_shape, np.single)\n', (3031, 3063), True, 'import numpy as np\n'), ((4005, 4020), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4018, 4020), False, 'import unittest\n'), ((979, 1005), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1003, 1005), True, 'from oneflow.compatible import single_client as flow\n'), ((1212, 1257), 'oneflow.compatible.single_client.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['images_def'], {}), '(images_def)\n', (1245, 1257), True, 'from oneflow.compatible import single_client as flow\n'), ((1274, 1379), 'oneflow.compatible.single_client.image_batch_align', 'flow.image_batch_align', (['images_buffer'], {'shape': 'output_shape[1:]', 'dtype': 'flow.float', 'alignment': 'alignment'}), '(images_buffer, shape=output_shape[1:], 
dtype=flow.\n float, alignment=alignment)\n', (1296, 1379), True, 'from oneflow.compatible import single_client as flow\n'), ((1639, 1668), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1653, 1668), True, 'import numpy as np\n'), ((2941, 2996), 'numpy.array_equal', 'np.array_equal', (['aligned_image_shape', 'image_tensor.shape'], {}), '(aligned_image_shape, image_tensor.shape)\n', (2955, 2996), True, 'import numpy as np\n'), ((3255, 3302), 'numpy.array_equal', 'np.array_equal', (['image_tensor', 'empty_image_array'], {}), '(image_tensor, empty_image_array)\n', (3269, 3302), True, 'import numpy as np\n'), ((1114, 1180), 'oneflow.compatible.single_client.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', ([], {'shape': 'input_shape', 'dtype': 'flow.float'}), '(shape=input_shape, dtype=flow.float)\n', (1143, 1180), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1555, 1577), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (1565, 1577), False, 'import cv2\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
import os
from collections import OrderedDict
from test_util import GenArgList
from test_util import type_name_to_flow_type
from test_util import type_name_to_np_type
import test_global_storage
def _compare_with_numpy(test_case, np_func, x, y, axis, keepdims=True):
x = test_global_storage.Get("x")
dx = test_global_storage.Get("x_diff")
np_y = np_func(x, axis=axis, keepdims=True)
test_case.assertTrue(np.allclose(y, np_y, rtol=1e-5, atol=1e-5))
mask = np.where(x == y, 1, 0)
count = np.add.reduce(mask, axis=axis, keepdims=True)
np_dx = np.where(x == y, 1 / count, 0)
test_case.assertTrue(np.allclose(dx, np_dx, rtol=1e-5, atol=1e-5))
def _test_two_stage_reduce(
test_case, flow_func, np_func, device_type, axis, split_axis
):
flow.clear_default_session()
flow.config.gpu_device_num(4)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
@flow.global_function(type="train", function_config=func_config)
def two_stage_reduce_job(x: oft.Numpy.Placeholder((4, 20, 20, 20))):
with flow.scope.placement(device_type, "0:0"):
x += flow.get_variable(
name="v1",
shape=(1,),
dtype=flow.float,
initializer=flow.zeros_initializer(),
)
with flow.scope.placement(device_type, "0:0-3"):
loss = flow_func(
x.with_distribute(flow.distribute.split(split_axis)),
axis=axis,
keepdims=True,
)
loss = flow.identity(loss)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
).minimize(loss)
flow.watch(x, test_global_storage.Setter("x"))
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
return loss
x = np.random.randint(low=0, high=10, size=(4, 20, 20, 20)).astype(np.float32)
y = two_stage_reduce_job(x).get().numpy()
_compare_with_numpy(test_case, np_func, x, y, axis=tuple(axis))
@flow.unittest.skip_unless_1n4d()
class TestTwoStageReduce(flow.unittest.TestCase):
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_two_stage_reduce_max(test_case):
arg_dict = OrderedDict()
arg_dict["flow_func"] = [flow.math.two_stage_reduce_max]
arg_dict["np_func"] = [np.maximum.reduce]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["axis"] = [[1], [1, 2], [1, 2, 3]]
arg_dict["split_axis"] = [1]
for arg in GenArgList(arg_dict):
_test_two_stage_reduce(test_case, *arg)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
def test_two_stage_reduce_min(test_case):
arg_dict = OrderedDict()
arg_dict["flow_func"] = [flow.math.two_stage_reduce_min]
arg_dict["np_func"] = [np.minimum.reduce]
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["axis"] = [[1], [1, 2], [1, 2, 3]]
arg_dict["split_axis"] = [1]
for arg in GenArgList(arg_dict):
_test_two_stage_reduce(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.unittest.skip_unless_1n4d",
"oneflow.compatible.single_client.scope.consistent_view",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_c... | [((2885, 2917), 'oneflow.compatible.single_client.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (2915, 2917), True, 'from oneflow.compatible import single_client as flow\n'), ((1016, 1044), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (1039, 1044), False, 'import test_global_storage\n'), ((1054, 1087), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (1077, 1087), False, 'import test_global_storage\n'), ((1216, 1238), 'numpy.where', 'np.where', (['(x == y)', '(1)', '(0)'], {}), '(x == y, 1, 0)\n', (1224, 1238), True, 'import numpy as np\n'), ((1251, 1296), 'numpy.add.reduce', 'np.add.reduce', (['mask'], {'axis': 'axis', 'keepdims': '(True)'}), '(mask, axis=axis, keepdims=True)\n', (1264, 1296), True, 'import numpy as np\n'), ((1309, 1339), 'numpy.where', 'np.where', (['(x == y)', '(1 / count)', '(0)'], {}), '(x == y, 1 / count, 0)\n', (1317, 1339), True, 'import numpy as np\n'), ((1513, 1541), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1539, 1541), True, 'from oneflow.compatible import single_client as flow\n'), ((1546, 1575), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(4)'], {}), '(4)\n', (1572, 1575), True, 'from oneflow.compatible import single_client as flow\n'), ((1594, 1615), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1613, 1615), True, 'from oneflow.compatible import single_client as flow\n'), ((1735, 1798), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1755, 1798), True, 'from oneflow.compatible import single_client as flow\n'), ((4014, 4029), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4027, 
4029), False, 'import unittest\n'), ((1161, 1205), 'numpy.allclose', 'np.allclose', (['y', 'np_y'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(y, np_y, rtol=1e-05, atol=1e-05)\n', (1172, 1205), True, 'import numpy as np\n'), ((1365, 1411), 'numpy.allclose', 'np.allclose', (['dx', 'np_dx'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(dx, np_dx, rtol=1e-05, atol=1e-05)\n', (1376, 1411), True, 'import numpy as np\n'), ((1699, 1727), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1725, 1727), True, 'from oneflow.compatible import single_client as flow\n'), ((3113, 3126), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3124, 3126), False, 'from collections import OrderedDict\n'), ((3400, 3420), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3410, 3420), False, 'from test_util import GenArgList\n'), ((2989, 3023), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2998, 3023), False, 'import os\n'), ((3620, 3633), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3631, 3633), False, 'from collections import OrderedDict\n'), ((3907, 3927), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3917, 3927), False, 'from test_util import GenArgList\n'), ((3496, 3530), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3505, 3530), False, 'import os\n'), ((1831, 1869), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(4, 20, 20, 20)'], {}), '((4, 20, 20, 20))\n', (1852, 1869), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1885, 1925), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1905, 1925), True, 'from oneflow.compatible import single_client as flow\n'), ((2133, 2175), 
'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-3"""'], {}), "(device_type, '0:0-3')\n", (2153, 2175), True, 'from oneflow.compatible import single_client as flow\n'), ((2368, 2387), 'oneflow.compatible.single_client.identity', 'flow.identity', (['loss'], {}), '(loss)\n', (2381, 2387), True, 'from oneflow.compatible import single_client as flow\n'), ((2693, 2748), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': '(4, 20, 20, 20)'}), '(low=0, high=10, size=(4, 20, 20, 20))\n', (2710, 2748), True, 'import numpy as np\n'), ((2558, 2589), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (2584, 2589), False, 'import test_global_storage\n'), ((2622, 2658), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2648, 2658), False, 'import test_global_storage\n'), ((2080, 2104), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2102, 2104), True, 'from oneflow.compatible import single_client as flow\n'), ((2241, 2274), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['split_axis'], {}), '(split_axis)\n', (2262, 2274), True, 'from oneflow.compatible import single_client as flow\n'), ((2436, 2491), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (2477, 2491), True, 'from oneflow.compatible import single_client as flow\n')] |
from collections import OrderedDict
import oneflow as flow
import oneflow.nn as nn
from typing import Any
__all__ = ["make_wide_and_deep_module"]
class Dense(nn.Module):
def __init__(
self, in_features: int, out_features: int, dropout_rate: float = 0.5
) -> None:
super(Dense, self).__init__()
self.features = nn.Sequential(
nn.Linear(in_features, out_features),
nn.ReLU(inplace=True),
nn.Dropout(p=dropout_rate),
)
for name, param in self.named_parameters():
if name.endswith("weight"):
nn.init.xavier_uniform_(param)
elif name.endswith("bias"):
nn.init.zeros_(param)
def forward(self, x: flow.Tensor) -> flow.Tensor:
x = self.features(x)
return x
class Embedding(nn.Embedding):
def __init__(self, vocab_size, embed_size):
super(Embedding, self).__init__(vocab_size, embed_size, padding_idx=0)
for param in self.parameters():
nn.init.uniform_(param, a=-0.05, b=0.05)
class GlobalWideAndDeep(nn.Module):
def __init__(
self,
wide_vocab_size: int,
deep_vocab_size: int,
deep_embedding_vec_size: int = 16,
num_deep_sparse_fields: int = 26,
num_dense_fields: int = 13,
hidden_size: int = 1024,
hidden_units_num: int = 7,
deep_dropout_rate: float = 0.5,
):
super(GlobalWideAndDeep, self).__init__()
self.wide_embedding = Embedding(wide_vocab_size // flow.env.get_world_size(), 1)
self.wide_embedding.to_global(
flow.env.all_device_placement("cuda"), flow.sbp.split(0)
)
self.deep_embedding = Embedding(
deep_vocab_size, deep_embedding_vec_size // flow.env.get_world_size()
)
self.deep_embedding.to_global(
flow.env.all_device_placement("cuda"), flow.sbp.split(1)
)
deep_feature_size = (
deep_embedding_vec_size * num_deep_sparse_fields + num_dense_fields
)
self.linear_layers = nn.Sequential(
OrderedDict(
[
(
f"fc{i}",
Dense(
deep_feature_size if i == 0 else hidden_size,
hidden_size,
deep_dropout_rate,
),
)
for i in range(hidden_units_num)
]
)
)
self.linear_layers.to_global(
flow.env.all_device_placement("cuda"), flow.sbp.broadcast
)
self.deep_scores = nn.Linear(hidden_size, 1)
self.deep_scores.to_global(
flow.env.all_device_placement("cuda"), flow.sbp.broadcast
)
self.sigmoid = nn.Sigmoid()
self.sigmoid.to_global(
flow.env.all_device_placement("cuda"), flow.sbp.broadcast
)
def forward(
self, dense_fields, wide_sparse_fields, deep_sparse_fields
) -> flow.Tensor:
wide_sparse_fields = wide_sparse_fields.to_global(sbp=flow.sbp.broadcast)
wide_embedding = self.wide_embedding(wide_sparse_fields)
wide_embedding = wide_embedding.view(
-1, wide_embedding.shape[-1] * wide_embedding.shape[-2]
)
wide_scores = flow.sum(wide_embedding, dim=1, keepdim=True)
wide_scores = wide_scores.to_global(
sbp=flow.sbp.split(0), grad_sbp=flow.sbp.broadcast
)
deep_sparse_fields = deep_sparse_fields.to_global(sbp=flow.sbp.broadcast)
deep_embedding = self.deep_embedding(deep_sparse_fields)
deep_embedding = deep_embedding.to_global(
sbp=flow.sbp.split(0), grad_sbp=flow.sbp.split(2)
)
deep_embedding = deep_embedding.view(
-1, deep_embedding.shape[-1] * deep_embedding.shape[-2]
)
deep_features = flow.cat([deep_embedding, dense_fields], dim=1)
deep_features = self.linear_layers(deep_features)
deep_scores = self.deep_scores(deep_features)
return self.sigmoid(wide_scores + deep_scores)
class LocalWideAndDeep(nn.Module):
def __init__(
self,
wide_vocab_size: int,
deep_vocab_size: int,
deep_embedding_vec_size: int = 16,
num_deep_sparse_fields: int = 26,
num_dense_fields: int = 13,
hidden_size: int = 1024,
hidden_units_num: int = 7,
deep_dropout_rate: float = 0.5,
):
super(LocalWideAndDeep, self).__init__()
self.wide_embedding = Embedding(wide_vocab_size, 1,)
self.deep_embedding = Embedding(deep_vocab_size, deep_embedding_vec_size)
deep_feature_size = (
deep_embedding_vec_size * num_deep_sparse_fields + num_dense_fields
)
self.linear_layers = nn.Sequential(
OrderedDict(
[
(
f"fc{i}",
Dense(
deep_feature_size if i == 0 else hidden_size,
hidden_size,
deep_dropout_rate,
),
)
for i in range(hidden_units_num)
]
)
)
self.deep_scores = nn.Linear(hidden_size, 1)
self.sigmoid = nn.Sigmoid()
def forward(
self, dense_fields, wide_sparse_fields, deep_sparse_fields
) -> flow.Tensor:
wide_embedding = self.wide_embedding(wide_sparse_fields)
wide_embedding = wide_embedding.view(
-1, wide_embedding.shape[-1] * wide_embedding.shape[-2]
)
wide_scores = flow.sum(wide_embedding, dim=1, keepdim=True)
deep_embedding = self.deep_embedding(deep_sparse_fields)
deep_embedding = deep_embedding.view(
-1, deep_embedding.shape[-1] * deep_embedding.shape[-2]
)
deep_features = flow.cat([deep_embedding, dense_fields], dim=1)
deep_features = self.linear_layers(deep_features)
deep_scores = self.deep_scores(deep_features)
return self.sigmoid(wide_scores + deep_scores)
def make_wide_and_deep_module(args, is_global):
if is_global:
model = GlobalWideAndDeep(
wide_vocab_size=args.wide_vocab_size,
deep_vocab_size=args.deep_vocab_size,
deep_embedding_vec_size=args.deep_embedding_vec_size,
num_deep_sparse_fields=args.num_deep_sparse_fields,
num_dense_fields=args.num_dense_fields,
hidden_size=args.hidden_size,
hidden_units_num=args.hidden_units_num,
deep_dropout_rate=args.deep_dropout_rate,
)
else:
model = LocalWideAndDeep(
wide_vocab_size=args.wide_vocab_size,
deep_vocab_size=args.deep_vocab_size,
deep_embedding_vec_size=args.deep_embedding_vec_size,
num_deep_sparse_fields=args.num_deep_sparse_fields,
num_dense_fields=args.num_dense_fields,
hidden_size=args.hidden_size,
hidden_units_num=args.hidden_units_num,
deep_dropout_rate=args.deep_dropout_rate,
)
model = model.to("cuda")
return model
| [
"oneflow.nn.Linear",
"oneflow.sum",
"oneflow.env.get_world_size",
"oneflow.env.all_device_placement",
"oneflow.sbp.split",
"oneflow.nn.Sigmoid",
"oneflow.nn.init.zeros_",
"oneflow.nn.init.xavier_uniform_",
"oneflow.nn.Dropout",
"oneflow.cat",
"oneflow.nn.ReLU",
"oneflow.nn.init.uniform_"
] | [((2687, 2712), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (2696, 2712), True, 'import oneflow.nn as nn\n'), ((2852, 2864), 'oneflow.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2862, 2864), True, 'import oneflow.nn as nn\n'), ((3377, 3422), 'oneflow.sum', 'flow.sum', (['wide_embedding'], {'dim': '(1)', 'keepdim': '(True)'}), '(wide_embedding, dim=1, keepdim=True)\n', (3385, 3422), True, 'import oneflow as flow\n'), ((3959, 4006), 'oneflow.cat', 'flow.cat', (['[deep_embedding, dense_fields]'], {'dim': '(1)'}), '([deep_embedding, dense_fields], dim=1)\n', (3967, 4006), True, 'import oneflow as flow\n'), ((5358, 5383), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_size', '(1)'], {}), '(hidden_size, 1)\n', (5367, 5383), True, 'import oneflow.nn as nn\n'), ((5407, 5419), 'oneflow.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (5417, 5419), True, 'import oneflow.nn as nn\n'), ((5738, 5783), 'oneflow.sum', 'flow.sum', (['wide_embedding'], {'dim': '(1)', 'keepdim': '(True)'}), '(wide_embedding, dim=1, keepdim=True)\n', (5746, 5783), True, 'import oneflow as flow\n'), ((5997, 6044), 'oneflow.cat', 'flow.cat', (['[deep_embedding, dense_fields]'], {'dim': '(1)'}), '([deep_embedding, dense_fields], dim=1)\n', (6005, 6044), True, 'import oneflow as flow\n'), ((373, 409), 'oneflow.nn.Linear', 'nn.Linear', (['in_features', 'out_features'], {}), '(in_features, out_features)\n', (382, 409), True, 'import oneflow.nn as nn\n'), ((423, 444), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (430, 444), True, 'import oneflow.nn as nn\n'), ((458, 484), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout_rate'}), '(p=dropout_rate)\n', (468, 484), True, 'import oneflow.nn as nn\n'), ((1026, 1066), 'oneflow.nn.init.uniform_', 'nn.init.uniform_', (['param'], {'a': '(-0.05)', 'b': '(0.05)'}), '(param, a=-0.05, b=0.05)\n', (1042, 1066), True, 'import oneflow.nn as nn\n'), ((1624, 1661), 
'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (1653, 1661), True, 'import oneflow as flow\n'), ((1663, 1680), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1677, 1680), True, 'import oneflow as flow\n'), ((1875, 1912), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (1904, 1912), True, 'import oneflow as flow\n'), ((1914, 1931), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (1928, 1931), True, 'import oneflow as flow\n'), ((2592, 2629), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (2621, 2629), True, 'import oneflow as flow\n'), ((2761, 2798), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (2790, 2798), True, 'import oneflow as flow\n'), ((2909, 2946), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (2938, 2946), True, 'import oneflow as flow\n'), ((604, 634), 'oneflow.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['param'], {}), '(param)\n', (627, 634), True, 'import oneflow.nn as nn\n'), ((1543, 1568), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (1566, 1568), True, 'import oneflow as flow\n'), ((1788, 1813), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (1811, 1813), True, 'import oneflow as flow\n'), ((3484, 3501), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (3498, 3501), True, 'import oneflow as flow\n'), ((3755, 3772), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (3769, 3772), True, 'import oneflow as flow\n'), ((3783, 3800), 'oneflow.sbp.split', 'flow.sbp.split', (['(2)'], {}), '(2)\n', (3797, 3800), True, 'import oneflow as flow\n'), ((691, 712), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['param'], {}), '(param)\n', (705, 712), 
True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_sum_impl(test_case, device):
input = flow.tensor(
np.random.randn(2, 3), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.sum(input, dim=0)
np_out = np.sum(input.numpy(), axis=0)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
input = flow.tensor(
np.random.randn(2, 3), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.sum(input, dim=0)
np_out = np.sum(input.numpy(), axis=0)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
input = flow.tensor(
np.random.randn(2, 3), dtype=flow.float32, device=flow.device(device)
)
of_out = flow.sum(input, dim=1)
of_out2 = input.sum(dim=1)
np_out = np.sum(input.numpy(), axis=1)
test_case.assertTrue(np.allclose(of_out2.numpy(), of_out.numpy(), 1e-05, 1e-05))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
input = flow.tensor(
np.random.randn(4, 5, 6),
dtype=flow.float32,
device=flow.device(device),
requires_grad=True,
)
of_out = flow.sum(input, dim=(2, 1))
np_out = np.sum(input.numpy(), axis=(2, 1))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
of_out = of_out.sum()
of_out.backward()
np_grad = np.ones((4, 5, 6))
test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestSumModule(flow.unittest.TestCase):
def test_sum(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
_test_sum_impl(test_case, *arg)
@autotest()
def test_sum_against_pytorch(test_case):
device = random_device()
x = random_pytorch_tensor(4, random(0, 5), 2).to(device)
y = torch.sum(x)
return y
@autotest(auto_backward=False)
def test_sum_with_0shape_tensor(test_case):
device = random_device()
x = random_pytorch_tensor(4, 4, 3, 0, 2).to(device)
y = torch.sum(x, dim=np.random.randint(0, 3))
return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.sum",
"oneflow.device"
] | [((2234, 2266), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2264, 2266), True, 'import oneflow as flow\n'), ((961, 983), 'oneflow.sum', 'flow.sum', (['input'], {'dim': '(0)'}), '(input, dim=0)\n', (969, 983), True, 'import oneflow as flow\n'), ((1225, 1247), 'oneflow.sum', 'flow.sum', (['input'], {'dim': '(0)'}), '(input, dim=0)\n', (1233, 1247), True, 'import oneflow as flow\n'), ((1489, 1511), 'oneflow.sum', 'flow.sum', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (1497, 1511), True, 'import oneflow as flow\n'), ((1917, 1944), 'oneflow.sum', 'flow.sum', (['input'], {'dim': '(2, 1)'}), '(input, dim=(2, 1))\n', (1925, 1944), True, 'import oneflow as flow\n'), ((2131, 2149), 'numpy.ones', 'np.ones', (['(4, 5, 6)'], {}), '((4, 5, 6))\n', (2138, 2149), True, 'import numpy as np\n'), ((2987, 3002), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3000, 3002), False, 'import unittest\n'), ((872, 893), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (887, 893), True, 'import numpy as np\n'), ((1136, 1157), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1151, 1157), True, 'import numpy as np\n'), ((1400, 1421), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1415, 1421), True, 'import numpy as np\n'), ((1780, 1804), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)', '(6)'], {}), '(4, 5, 6)\n', (1795, 1804), True, 'import numpy as np\n'), ((2360, 2373), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2371, 2373), False, 'from collections import OrderedDict\n'), ((2438, 2458), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2448, 2458), False, 'from test_util import GenArgList\n'), ((922, 941), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (933, 941), True, 'import oneflow as flow\n'), ((1186, 1205), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1197, 
1205), True, 'import oneflow as flow\n'), ((1450, 1469), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1461, 1469), True, 'import oneflow as flow\n'), ((1849, 1868), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1860, 1868), True, 'import oneflow as flow\n'), ((2912, 2935), 'numpy.random.randint', 'np.random.randint', (['(0)', '(3)'], {}), '(0, 3)\n', (2929, 2935), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import numpy as np
import oneflow as flow
import oneflow.typing as oft
def _of_object_bbox_flip(bbox_list, image_size, flip_code):
bbox_shape = _get_bbox_static_shape(bbox_list)
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(func_config)
def object_bbox_flip_job(
bbox_def: oft.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
image_size_def: oft.ListNumpy.Placeholder(
shape=image_size.shape, dtype=flow.int32
),
):
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
flip_bbox = flow.object_bbox_flip(bbox_buffer, image_size_def, flip_code)
return flow.tensor_buffer_to_tensor_list(
flip_bbox, shape=bbox_shape[1:], dtype=flow.float
)
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
bbox_tensor = object_bbox_flip_job([input_bbox_list], [image_size]).get()
return bbox_tensor.numpy_lists()[0]
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _compare_bbox_flip(
test_case, anno_file, batch_size, flip_code, print_debug_info=False
):
from pycocotools.coco import COCO
coco = COCO(anno_file)
img_ids = coco.getImgIds()
bbox_list = []
image_size_list = []
sample_cnt = 0
while sample_cnt < batch_size:
rand_img_id = random.choice(img_ids)
anno_ids = coco.getAnnIds(imgIds=[rand_img_id])
if len(anno_ids) == 0:
continue
bbox_array = np.array(
[coco.anns[anno_id]["bbox"] for anno_id in anno_ids], dtype=np.single
)
bbox_list.append(bbox_array)
image_size_list.append(
[coco.imgs[rand_img_id]["height"], coco.imgs[rand_img_id]["width"]]
)
sample_cnt += 1
image_size_array = np.array(image_size_list, dtype=np.int32)
of_bbox_list = _of_object_bbox_flip(bbox_list, image_size_array, flip_code)
for of_bbox, bbox, image_size in zip(of_bbox_list, bbox_list, image_size_list):
h, w = image_size
if flip_code == 1:
xmin = bbox[:, 0].copy()
xmax = bbox[:, 2].copy()
bbox[:, 0] = w - xmax - 1
bbox[:, 2] = w - xmin - 1
else:
raise NotImplementedError
if print_debug_info:
print("-" * 20)
print("ret_bbox:\n", of_bbox.squeeze())
print("bbox:\n", bbox)
test_case.assertTrue(np.allclose(of_bbox.squeeze(), bbox))
def test_object_bbox_flip(test_case):
    """Check horizontal (flip_code=1) bbox flipping on a 4-image COCO batch."""
    anno_file = "/dataset/mscoco_2017/annotations/instances_val2017.json"
    _compare_bbox_flip(test_case, anno_file, 4, 1)
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.scope.mirrored_view",
"oneflow.object_bbox_flip",
"oneflow.tensor_buffer_to_tensor_list",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.tensor_list_to_tensor_buffer"
] | [((794, 822), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (820, 822), True, 'import oneflow as flow\n'), ((841, 862), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (860, 862), True, 'import oneflow as flow\n'), ((980, 1013), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1000, 1013), True, 'import oneflow as flow\n'), ((1862, 1890), 'numpy.amax', 'np.amax', (['bbox_shapes'], {'axis': '(0)'}), '(bbox_shapes, axis=0)\n', (1869, 1890), True, 'import numpy as np\n'), ((2342, 2357), 'pycocotools.coco.COCO', 'COCO', (['anno_file'], {}), '(anno_file)\n', (2346, 2357), False, 'from pycocotools.coco import COCO\n'), ((2971, 3012), 'numpy.array', 'np.array', (['image_size_list'], {'dtype': 'np.int32'}), '(image_size_list, dtype=np.int32)\n', (2979, 3012), True, 'import numpy as np\n'), ((946, 972), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (970, 972), True, 'import oneflow as flow\n'), ((1302, 1345), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['bbox_def'], {}), '(bbox_def)\n', (1335, 1345), True, 'import oneflow as flow\n'), ((1366, 1427), 'oneflow.object_bbox_flip', 'flow.object_bbox_flip', (['bbox_buffer', 'image_size_def', 'flip_code'], {}), '(bbox_buffer, image_size_def, flip_code)\n', (1387, 1427), True, 'import oneflow as flow\n'), ((1443, 1532), 'oneflow.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['flip_bbox'], {'shape': 'bbox_shape[1:]', 'dtype': 'flow.float'}), '(flip_bbox, shape=bbox_shape[1:], dtype=\n flow.float)\n', (1476, 1532), True, 'import oneflow as flow\n'), ((1574, 1602), 'numpy.expand_dims', 'np.expand_dims', (['bbox'], {'axis': '(0)'}), '(bbox, axis=0)\n', (1588, 1602), True, 'import numpy as np\n'), ((2510, 2532), 'random.choice', 'random.choice', (['img_ids'], {}), '(img_ids)\n', (2523, 2532), False, 'import random\n'), ((2662, 2741), 'numpy.array', 
'np.array', (["[coco.anns[anno_id]['bbox'] for anno_id in anno_ids]"], {'dtype': 'np.single'}), "([coco.anns[anno_id]['bbox'] for anno_id in anno_ids], dtype=np.single)\n", (2670, 2741), True, 'import numpy as np\n'), ((1182, 1249), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', ([], {'shape': 'image_size.shape', 'dtype': 'flow.int32'}), '(shape=image_size.shape, dtype=flow.int32)\n', (1207, 1249), True, 'import oneflow.typing as oft\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow._oneflow_internal
from oneflow.compatible.single_client.eager import gradient_util as gradient_util
from oneflow.compatible.single_client.framework import c_api_util as c_api_util
from oneflow.compatible.single_client.framework import hob as hob
from oneflow.compatible.single_client.framework import remote_blob as remote_blob_util
from oneflow.compatible.single_client.support import enable_if as enable_if
def api_add_loss(loss: oneflow._oneflow_internal.BlobDesc) -> None:
    """Mark a `Blob` as a loss. Auto grad starts at every loss blob. It does not have to be the product of a typical "loss" operator like softmax loss; it can also be a `Blob` produced by any operator.

    Args:
        loss: A `Blob`.
    """
    # enable_if.unique picks whichever of the two implementations matches the
    # current execution mode (lazy graph mode vs. eager mode).
    return enable_if.unique([lazy_add_loss, eager_add_loss])(loss)
@enable_if.condition(
    hob.in_global_mode & hob.is_trainable & ~hob.eager_execution_enabled
)
def lazy_add_loss(loss):
    # Lazy (graph) mode: register the loss blob's name with the current
    # job-build-and-infer context so autograd can start from it.
    c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName(loss.unique_name)
@enable_if.condition(
    hob.in_global_mode & hob.is_trainable & hob.eager_execution_enabled
)
def eager_add_loss(loss):
    # Eager mode: register the loss name like lazy mode does, and additionally
    # keep the blob object reachable from the default backward blob register.
    c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName(loss.unique_name)
    gradient_util.GetDefaultBackwardBlobRegister().TrySetObject4BlobName(
        loss.logical_blob_name, loss.blob_object
    )
| [
"oneflow.compatible.single_client.support.enable_if.condition",
"oneflow.compatible.single_client.framework.c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName",
"oneflow.compatible.single_client.eager.gradient_util.GetDefaultBackwardBlobRegister",
"oneflow.compatible.single_client.support.enable_if.uniq... | [((1393, 1487), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.is_trainable & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.is_trainable & ~hob.\n eager_execution_enabled)\n', (1412, 1487), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1596, 1689), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.is_trainable & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.is_trainable & hob.\n eager_execution_enabled)\n', (1615, 1689), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1518, 1592), 'oneflow.compatible.single_client.framework.c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName', 'c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName', (['loss.unique_name'], {}), '(loss.unique_name)\n', (1574, 1592), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((1721, 1795), 'oneflow.compatible.single_client.framework.c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName', 'c_api_util.CurJobBuildAndInferCtx_AddLossLogicalBlobName', (['loss.unique_name'], {}), '(loss.unique_name)\n', (1777, 1795), True, 'from oneflow.compatible.single_client.framework import c_api_util as c_api_util\n'), ((1334, 1383), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[lazy_add_loss, eager_add_loss]'], {}), '([lazy_add_loss, eager_add_loss])\n', (1350, 1383), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1800, 1846), 'oneflow.compatible.single_client.eager.gradient_util.GetDefaultBackwardBlobRegister', 'gradient_util.GetDefaultBackwardBlobRegister', ([], {}), '()\n', (1844, 1846), True, 'from oneflow.compatible.single_client.eager import gradient_util as 
gradient_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_instancenorm1d(test_case, device):
    """Check InstanceNorm1d forward output against precomputed reference values.

    Runs the module in training mode and again after ``eval()``; the test
    expects both passes to produce the same normalized output.
    """
    # Input of shape (batch=2, channels=3, length=4).
    input_arr = np.array(
        [
            [
                [-0.1091, 2.0041, 0.885, -0.0412],
                [-1.2055, 0.7442, 2.33, 1.2411],
                [-1.2466, 0.3667, 1.2267, 0.3043],
            ],
            [
                [-0.2484, -1.1407, 0.3352, 0.6687],
                [-0.2975, -0.0227, -0.2302, -0.3762],
                [-0.7759, -0.6789, 1.1444, 1.8077],
            ],
        ],
        dtype=np.float32,
    )
    # Expected per-instance normalized output (reference values).
    output_arr = np.array(
        [
            [
                [-0.9262, 1.5395, 0.2337, -0.847],
                [-1.5486, -0.026, 1.2125, 0.3621],
                [-1.5807, 0.2287, 1.1933, 0.1587],
            ],
            [
                [-0.2215, -1.5212, 0.6285, 1.1143],
                [-0.5016, 1.5917, 0.011, -1.1011],
                [-1.0207, -0.9346, 0.6833, 1.2719],
            ],
        ],
        dtype=np.float32,
    )
    m = flow.nn.InstanceNorm1d(num_features=3, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, dtype=flow.float32, device=flow.device(device))
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output_arr, rtol=1e-3, atol=1e-3))
    # Eval mode must match the training-mode output for this input.
    m.eval()
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output_arr, rtol=1e-3, atol=1e-3))
def _test_instancenorm2d(test_case, device):
    """Check InstanceNorm2d forward output against precomputed reference values."""
    # Input of shape (batch=2, channels=2, height=3, width=4).
    input_arr = np.array(
        [
            [
                [
                    [-0.8791, 0.2553, 0.7403, -0.2859],
                    [0.8006, -1.7701, -0.9617, 0.1705],
                    [0.2842, 1.7825, 0.3365, -0.8525],
                ],
                [
                    [0.7332, -0.0737, 0.7245, -0.6551],
                    [1.4461, -0.1827, 0.9737, -2.1571],
                    [0.4657, 0.7244, 0.3378, 0.1775],
                ],
            ],
            [
                [
                    [1.8896, 1.8686, 0.1896, 0.9817],
                    [-0.0671, 1.5569, 1.1449, 0.0086],
                    [-0.9468, -0.0124, 1.3227, -0.6567],
                ],
                [
                    [-0.8472, 1.3012, -1.1065, 0.9348],
                    [1.0346, 1.5703, 0.2419, -0.7048],
                    [0.6957, -0.4523, -0.8819, 1.0164],
                ],
            ],
        ],
        dtype=np.float32,
    )
    # Expected per-instance normalized output (reference values).
    output = np.array(
        [
            [
                [
                    [-0.9155, 0.31, 0.8339, -0.2747],
                    [0.8991, -1.8781, -1.0048, 0.2183],
                    [0.3412, 1.9598, 0.3977, -0.8868],
                ],
                [
                    [0.586, -0.3169, 0.5763, -0.9675],
                    [1.3837, -0.4389, 0.8551, -2.6483],
                    [0.2867, 0.5761, 0.1435, -0.0358],
                ],
            ],
            [
                [
                    [1.374, 1.3515, -0.4466, 0.4017],
                    [-0.7215, 1.0177, 0.5765, -0.6405],
                    [-1.6636, -0.663, 0.7669, -1.353],
                ],
                [
                    [-1.1583, 1.1444, -1.4363, 0.7516],
                    [0.8586, 1.4328, 0.009, -1.0057],
                    [0.4954, -0.7351, -1.1955, 0.8391],
                ],
            ],
        ],
        dtype=np.float32,
    )
    m = flow.nn.InstanceNorm2d(num_features=2, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, dtype=flow.float32, device=flow.device(device))
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output, 0.0001, 0.0001))
    # Eval mode must match the training-mode output for this input.
    m.eval()
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output, 0.0001, 0.0001))
def _test_instancenorm3d(test_case, device):
    """Check InstanceNorm3d forward output against precomputed reference values."""
    # Input of shape (batch=2, channels=2, depth=2, height=3, width=4).
    input_arr = np.array(
        [
            [
                [
                    [
                        [1.04569761, 0.22863248, 1.42439335, 1.62249689],
                        [-0.80578825, -0.27276461, 1.04556507, 0.56864134],
                        [-1.24085419, -1.23960097, 0.33451416, -1.84820402],
                    ],
                    [
                        [-1.511261, 1.06157517, -0.26715858, -1.32888141],
                        [1.17976881, -0.07931171, 0.33910684, -1.93458573],
                        [-1.72659647, 0.79049652, 0.39102785, -1.16264882],
                    ],
                ],
                [
                    [
                        [0.30067973, -1.2912226, -0.61508225, 0.56454001],
                        [0.87074187, -1.69257376, 0.36119148, -0.31014289],
                        [0.20776964, 1.26195488, -1.37122193, -0.17945234],
                    ],
                    [
                        [-0.31112407, -0.80682631, 0.8233194, 0.6384975],
                        [0.57617527, 0.45505028, 1.68286151, -1.09590744],
                        [-1.18127546, -1.07529277, 0.52779943, 1.21755926],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.12832351, 1.05625455, -0.23253249, -0.64747611],
                        [-0.00738123, -1.41390089, -1.92664144, -0.21427625],
                        [-0.94631219, -0.86493989, 0.21026905, 0.24989732],
                    ],
                    [
                        [1.3859182, 1.72002107, 0.50091892, 1.04198896],
                        [0.71694594, 1.66417023, -1.63030052, 0.77182641],
                        [0.71545083, 1.96458366, -1.99031931, 1.3196714],
                    ],
                ],
                [
                    [
                        [1.80091702, 0.02834973, 0.82259214, -1.05597501],
                        [-0.58212207, 0.44205949, -0.14740003, -0.994508],
                        [1.14678114, -0.39196097, 1.2554798, -0.41829324],
                    ],
                    [
                        [-1.0153903, -0.25755713, -1.81756333, -1.06781159],
                        [1.79680841, -1.9107133, -0.64325796, -1.94640775],
                        [1.30671156, 1.20445339, -1.26262901, -0.79494188],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    # Expected per-instance normalized output (reference values).
    output_arr = np.array(
        [
            [
                [
                    [
                        [1.067, 0.3324, 1.4075, 1.5856],
                        [-0.5976, -0.1184, 1.0669, 0.6381],
                        [-0.9888, -0.9877, 0.4276, -1.5349],
                    ],
                    [
                        [-1.2319, 1.0813, -0.1134, -1.068],
                        [1.1876, 0.0555, 0.4317, -1.6126],
                        [-1.4256, 0.8376, 0.4784, -0.9185],
                    ],
                ],
                [
                    [
                        [0.3447, -1.3751, -0.6446, 0.6298],
                        [0.9606, -1.8087, 0.4101, -0.3152],
                        [0.2444, 1.3833, -1.4615, -0.174],
                    ],
                    [
                        [-0.3162, -0.8518, 0.9094, 0.7097],
                        [0.6424, 0.5115, 1.838, -1.1641],
                        [-1.2563, -1.1418, 0.5901, 1.3353],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.2327, 0.8016, -0.3236, -0.6859],
                        [-0.1271, -1.3551, -1.8028, -0.3077],
                        [-0.9469, -0.8758, 0.063, 0.0976],
                    ],
                    [
                        [1.0895, 1.3812, 0.3167, 0.7892],
                        [0.5054, 1.3324, -1.5441, 0.5533],
                        [0.5041, 1.5947, -1.8584, 1.0316],
                    ],
                ],
                [
                    [
                        [1.7507, 0.1901, 0.8894, -0.7645],
                        [-0.3473, 0.5544, 0.0354, -0.7104],
                        [1.1748, -0.1799, 1.2705, -0.2031],
                    ],
                    [
                        [-0.7288, -0.0616, -1.435, -0.7749],
                        [1.7471, -1.517, -0.4012, -1.5485],
                        [1.3156, 1.2256, -0.9465, -0.5347],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    m = flow.nn.InstanceNorm3d(num_features=2, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, dtype=flow.float32, device=flow.device(device))
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output_arr, 0.0001, 0.0001))
    # Eval mode must match the training-mode output for this input.
    m.eval()
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output_arr, 0.0001, 0.0001))
def _test_instancenorm1d_backward(test_case, device):
    """Check that d(sum(InstanceNorm1d(x)))/dx is identically zero.

    The sum of a per-instance normalized output is invariant to the input
    (normalization subtracts the per-instance mean), so the gradient of
    ``y.sum()`` with respect to ``x`` must vanish everywhere.
    """
    # Input of shape (batch=2, channels=3, length=4).
    input_arr = np.array(
        [
            [
                [-0.1091, 2.0041, 0.885, -0.0412],
                [-1.2055, 0.7442, 2.33, 1.2411],
                [-1.2466, 0.3667, 1.2267, 0.3043],
            ],
            [
                [-0.2484, -1.1407, 0.3352, 0.6687],
                [-0.2975, -0.0227, -0.2302, -0.3762],
                [-0.7759, -0.6789, 1.1444, 1.8077],
            ],
        ],
        dtype=np.float32,
    )
    # NOTE(review): num_features=2 although the input has 3 channels; with
    # affine left at its default this appears not to be consulted — confirm
    # whether num_features=3 was intended (the forward test uses 3).
    m = flow.nn.InstanceNorm1d(num_features=2, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, device=flow.device(device), requires_grad=True)
    y = m(x)
    z = y.sum()
    z.backward()
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), np.zeros(shape=input_arr.shape), 1e-05, 1e-05)
    )
def _test_instancenorm2d_backward(test_case, device):
    """Check that d(sum(InstanceNorm2d(x)))/dx is identically zero.

    The sum of a per-instance normalized output is invariant to the input,
    so the gradient of ``y.sum()`` with respect to ``x`` must vanish.
    """
    # Input of shape (batch=2, channels=2, height=3, width=4).
    input_arr = np.array(
        [
            [
                [
                    [-0.8791, 0.2553, 0.7403, -0.2859],
                    [0.8006, -1.7701, -0.9617, 0.1705],
                    [0.2842, 1.7825, 0.3365, -0.8525],
                ],
                [
                    [0.7332, -0.0737, 0.7245, -0.6551],
                    [1.4461, -0.1827, 0.9737, -2.1571],
                    [0.4657, 0.7244, 0.3378, 0.1775],
                ],
            ],
            [
                [
                    [1.8896, 1.8686, 0.1896, 0.9817],
                    [-0.0671, 1.5569, 1.1449, 0.0086],
                    [-0.9468, -0.0124, 1.3227, -0.6567],
                ],
                [
                    [-0.8472, 1.3012, -1.1065, 0.9348],
                    [1.0346, 1.5703, 0.2419, -0.7048],
                    [0.6957, -0.4523, -0.8819, 1.0164],
                ],
            ],
        ],
        dtype=np.float32,
    )
    m = flow.nn.InstanceNorm2d(num_features=2, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, device=flow.device(device), requires_grad=True)
    y = m(x)
    z = y.sum()
    z.backward()
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), np.zeros(shape=input_arr.shape), 1e-05, 1e-05)
    )
def _test_instancenorm3d_backward(test_case, device):
    """Check that d(sum(InstanceNorm3d(x)))/dx is identically zero.

    The sum of a per-instance normalized output is invariant to the input,
    so the gradient of ``y.sum()`` with respect to ``x`` must vanish.
    """
    # Input of shape (batch=2, channels=2, depth=2, height=3, width=4).
    input_arr = np.array(
        [
            [
                [
                    [
                        [1.04569761, 0.22863248, 1.42439335, 1.62249689],
                        [-0.80578825, -0.27276461, 1.04556507, 0.56864134],
                        [-1.24085419, -1.23960097, 0.33451416, -1.84820402],
                    ],
                    [
                        [-1.511261, 1.06157517, -0.26715858, -1.32888141],
                        [1.17976881, -0.07931171, 0.33910684, -1.93458573],
                        [-1.72659647, 0.79049652, 0.39102785, -1.16264882],
                    ],
                ],
                [
                    [
                        [0.30067973, -1.2912226, -0.61508225, 0.56454001],
                        [0.87074187, -1.69257376, 0.36119148, -0.31014289],
                        [0.20776964, 1.26195488, -1.37122193, -0.17945234],
                    ],
                    [
                        [-0.31112407, -0.80682631, 0.8233194, 0.6384975],
                        [0.57617527, 0.45505028, 1.68286151, -1.09590744],
                        [-1.18127546, -1.07529277, 0.52779943, 1.21755926],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.12832351, 1.05625455, -0.23253249, -0.64747611],
                        [-0.00738123, -1.41390089, -1.92664144, -0.21427625],
                        [-0.94631219, -0.86493989, 0.21026905, 0.24989732],
                    ],
                    [
                        [1.3859182, 1.72002107, 0.50091892, 1.04198896],
                        [0.71694594, 1.66417023, -1.63030052, 0.77182641],
                        [0.71545083, 1.96458366, -1.99031931, 1.3196714],
                    ],
                ],
                [
                    [
                        [1.80091702, 0.02834973, 0.82259214, -1.05597501],
                        [-0.58212207, 0.44205949, -0.14740003, -0.994508],
                        [1.14678114, -0.39196097, 1.2554798, -0.41829324],
                    ],
                    [
                        [-1.0153903, -0.25755713, -1.81756333, -1.06781159],
                        [1.79680841, -1.9107133, -0.64325796, -1.94640775],
                        [1.30671156, 1.20445339, -1.26262901, -0.79494188],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    m = flow.nn.InstanceNorm3d(num_features=2, eps=1e-05, momentum=0.1).to(
        device=flow.device(device)
    )
    x = flow.tensor(input_arr, device=flow.device(device), requires_grad=True)
    y = m(x)
    z = y.sum()
    z.backward()
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), np.zeros(shape=input_arr.shape), 1e-05, 1e-05)
    )
@flow.unittest.skip_unless_1n1d()
class TestInstanceNorm(flow.unittest.TestCase):
    """InstanceNorm{1,2,3}d tests: fixed reference-value checks plus
    randomized comparisons against PyTorch via the autotest framework."""

    def test_instancenorm(test_case):
        # Run every fixed-value forward/backward check on both devices.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_instancenorm1d,
            _test_instancenorm2d,
            _test_instancenorm3d,
            _test_instancenorm1d_backward,
            _test_instancenorm2d_backward,
            _test_instancenorm3d_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    # BUG FIX: the three autotest methods below were all named
    # `test_instancenorm_with_random_data`, so each later definition shadowed
    # the earlier one and only the InstanceNorm3d variant was ever collected
    # by unittest. They now carry unique names so all three run.
    @autotest(n=5, auto_backward=True, rtol=1e-3, atol=1e-3, check_graph=True)
    def test_instancenorm1d_with_random_data(test_case):
        height = random(1, 6).to(int)
        width = random(1, 6).to(int)
        m = torch.nn.InstanceNorm1d(
            num_features=height,
            eps=random().to(float) | nothing(),
            momentum=random().to(float) | nothing(),
            affine=random().to(bool),
            track_running_stats=random().to(bool),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_tensor(ndim=3, dim1=height, dim2=width).to(device)
        y = m(x)
        return y

    @autotest(n=5, auto_backward=True, rtol=1e-3, atol=1e-3, check_graph=False)
    def test_instancenorm2d_with_random_data(test_case):
        channel = random(1, 6).to(int)
        height = random(1, 6).to(int)
        width = random(1, 6).to(int)
        m = torch.nn.InstanceNorm2d(
            num_features=channel,
            eps=random().to(float) | nothing(),
            momentum=random().to(float) | nothing(),
            affine=random().to(bool),
            track_running_stats=random().to(bool),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_tensor(ndim=4, dim1=channel, dim2=height, dim3=width).to(device)
        y = m(x)
        return y

    @autotest(n=5, auto_backward=False, rtol=1e-3, atol=1e-3, check_graph=True)
    def test_instancenorm3d_with_random_data(test_case):
        channel = random(1, 6).to(int)
        depth = random(1, 6).to(int)
        height = random(1, 6).to(int)
        width = random(1, 6).to(int)
        # Set auto_backward=True will raise AssertionError: False is not true
        # Set track_running_stats=True will raise error: Unexpected key(s) in state_dict: "num_batches_tracked".
        m = torch.nn.InstanceNorm3d(
            num_features=channel,
            eps=random().to(float) | nothing(),
            momentum=random().to(float) | nothing(),
            affine=random().to(bool),
            track_running_stats=False,
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_tensor(ndim=5, dim1=channel, dim2=depth, dim3=height, dim4=width).to(
            device
        )
        y = m(x)
        return y
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device",
"oneflow.nn.InstanceNorm2d",
"oneflow.nn.InstanceNorm1d",
"oneflow.nn.InstanceNorm3d"
] | [((14530, 14562), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (14560, 14562), True, 'import oneflow as flow\n'), ((861, 1120), 'numpy.array', 'np.array', (['[[[-0.1091, 2.0041, 0.885, -0.0412], [-1.2055, 0.7442, 2.33, 1.2411], [-\n 1.2466, 0.3667, 1.2267, 0.3043]], [[-0.2484, -1.1407, 0.3352, 0.6687],\n [-0.2975, -0.0227, -0.2302, -0.3762], [-0.7759, -0.6789, 1.1444, 1.8077]]]'], {'dtype': 'np.float32'}), '([[[-0.1091, 2.0041, 0.885, -0.0412], [-1.2055, 0.7442, 2.33, \n 1.2411], [-1.2466, 0.3667, 1.2267, 0.3043]], [[-0.2484, -1.1407, 0.3352,\n 0.6687], [-0.2975, -0.0227, -0.2302, -0.3762], [-0.7759, -0.6789, \n 1.1444, 1.8077]]], dtype=np.float32)\n', (869, 1120), True, 'import numpy as np\n'), ((1308, 1566), 'numpy.array', 'np.array', (['[[[-0.9262, 1.5395, 0.2337, -0.847], [-1.5486, -0.026, 1.2125, 0.3621], [-\n 1.5807, 0.2287, 1.1933, 0.1587]], [[-0.2215, -1.5212, 0.6285, 1.1143],\n [-0.5016, 1.5917, 0.011, -1.1011], [-1.0207, -0.9346, 0.6833, 1.2719]]]'], {'dtype': 'np.float32'}), '([[[-0.9262, 1.5395, 0.2337, -0.847], [-1.5486, -0.026, 1.2125, \n 0.3621], [-1.5807, 0.2287, 1.1933, 0.1587]], [[-0.2215, -1.5212, 0.6285,\n 1.1143], [-0.5016, 1.5917, 0.011, -1.1011], [-1.0207, -0.9346, 0.6833, \n 1.2719]]], dtype=np.float32)\n', (1316, 1566), True, 'import numpy as np\n'), ((2201, 2695), 'numpy.array', 'np.array', (['[[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, 0.1705],\n [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245, -0.6551],\n [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, 0.1775]]],\n [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449, 0.0086],\n [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -1.1065, \n 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -0.8819, \n 1.0164]]]]'], {'dtype': 'np.float32'}), '([[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, \n 0.1705], [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, 
-0.0737, 0.7245,\n -0.6551], [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, \n 0.1775]]], [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449,\n 0.0086], [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -\n 1.1065, 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -\n 0.8819, 1.0164]]]], dtype=np.float32)\n', (2209, 2695), True, 'import numpy as np\n'), ((3149, 3640), 'numpy.array', 'np.array', (['[[[[-0.9155, 0.31, 0.8339, -0.2747], [0.8991, -1.8781, -1.0048, 0.2183], [\n 0.3412, 1.9598, 0.3977, -0.8868]], [[0.586, -0.3169, 0.5763, -0.9675],\n [1.3837, -0.4389, 0.8551, -2.6483], [0.2867, 0.5761, 0.1435, -0.0358]]],\n [[[1.374, 1.3515, -0.4466, 0.4017], [-0.7215, 1.0177, 0.5765, -0.6405],\n [-1.6636, -0.663, 0.7669, -1.353]], [[-1.1583, 1.1444, -1.4363, 0.7516],\n [0.8586, 1.4328, 0.009, -1.0057], [0.4954, -0.7351, -1.1955, 0.8391]]]]'], {'dtype': 'np.float32'}), '([[[[-0.9155, 0.31, 0.8339, -0.2747], [0.8991, -1.8781, -1.0048, \n 0.2183], [0.3412, 1.9598, 0.3977, -0.8868]], [[0.586, -0.3169, 0.5763, \n -0.9675], [1.3837, -0.4389, 0.8551, -2.6483], [0.2867, 0.5761, 0.1435, \n -0.0358]]], [[[1.374, 1.3515, -0.4466, 0.4017], [-0.7215, 1.0177, \n 0.5765, -0.6405], [-1.6636, -0.663, 0.7669, -1.353]], [[-1.1583, 1.1444,\n -1.4363, 0.7516], [0.8586, 1.4328, 0.009, -1.0057], [0.4954, -0.7351, -\n 1.1955, 0.8391]]]], dtype=np.float32)\n', (3157, 3640), True, 'import numpy as np\n'), ((4524, 5909), 'numpy.array', 'np.array', (['[[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825, -\n 0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, 
-0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]]'], {'dtype': 'np.float32'}), '([[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825,\n -0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], 
[[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]],\n dtype=np.float32)\n', (4532, 5909), True, 'import numpy as np\n'), ((6983, 7951), 'numpy.array', 'np.array', (['[[[[[1.067, 0.3324, 1.4075, 1.5856], [-0.5976, -0.1184, 1.0669, 0.6381], [-\n 0.9888, -0.9877, 0.4276, -1.5349]], [[-1.2319, 1.0813, -0.1134, -1.068],\n [1.1876, 0.0555, 0.4317, -1.6126], [-1.4256, 0.8376, 0.4784, -0.9185]]],\n [[[0.3447, -1.3751, -0.6446, 0.6298], [0.9606, -1.8087, 0.4101, -0.3152\n ], [0.2444, 1.3833, -1.4615, -0.174]], [[-0.3162, -0.8518, 0.9094, \n 0.7097], [0.6424, 0.5115, 1.838, -1.1641], [-1.2563, -1.1418, 0.5901, \n 1.3353]]]], [[[[-0.2327, 0.8016, -0.3236, -0.6859], [-0.1271, -1.3551, \n -1.8028, -0.3077], [-0.9469, -0.8758, 0.063, 0.0976]], [[1.0895, 1.3812,\n 0.3167, 0.7892], [0.5054, 1.3324, -1.5441, 0.5533], [0.5041, 1.5947, -\n 1.8584, 1.0316]]], [[[1.7507, 0.1901, 0.8894, -0.7645], [-0.3473, \n 0.5544, 0.0354, -0.7104], [1.1748, -0.1799, 1.2705, -0.2031]], [[-\n 0.7288, -0.0616, -1.435, -0.7749], [1.7471, -1.517, -0.4012, -1.5485],\n [1.3156, 1.2256, -0.9465, -0.5347]]]]]'], {'dtype': 'np.float32'}), '([[[[[1.067, 0.3324, 1.4075, 1.5856], [-0.5976, -0.1184, 1.0669, \n 0.6381], [-0.9888, -0.9877, 0.4276, -1.5349]], [[-1.2319, 1.0813, -\n 0.1134, -1.068], [1.1876, 0.0555, 0.4317, -1.6126], [-1.4256, 0.8376, \n 0.4784, -0.9185]]], [[[0.3447, -1.3751, -0.6446, 0.6298], [0.9606, -\n 1.8087, 0.4101, -0.3152], [0.2444, 1.3833, -1.4615, -0.174]], [[-0.3162,\n -0.8518, 0.9094, 0.7097], [0.6424, 0.5115, 1.838, -1.1641], [-1.2563, -\n 1.1418, 0.5901, 1.3353]]]], [[[[-0.2327, 0.8016, -0.3236, -0.6859], [-\n 0.1271, -1.3551, -1.8028, -0.3077], [-0.9469, -0.8758, 0.063, 0.0976]],\n [[1.0895, 1.3812, 0.3167, 0.7892], [0.5054, 1.3324, -1.5441, 0.5533], [\n 0.5041, 1.5947, -1.8584, 1.0316]]], [[[1.7507, 0.1901, 0.8894, -0.7645],\n [-0.3473, 0.5544, 0.0354, -0.7104], 
[1.1748, -0.1799, 1.2705, -0.2031]],\n [[-0.7288, -0.0616, -1.435, -0.7749], [1.7471, -1.517, -0.4012, -1.5485\n ], [1.3156, 1.2256, -0.9465, -0.5347]]]]], dtype=np.float32)\n', (6991, 7951), True, 'import numpy as np\n'), ((9505, 9764), 'numpy.array', 'np.array', (['[[[-0.1091, 2.0041, 0.885, -0.0412], [-1.2055, 0.7442, 2.33, 1.2411], [-\n 1.2466, 0.3667, 1.2267, 0.3043]], [[-0.2484, -1.1407, 0.3352, 0.6687],\n [-0.2975, -0.0227, -0.2302, -0.3762], [-0.7759, -0.6789, 1.1444, 1.8077]]]'], {'dtype': 'np.float32'}), '([[[-0.1091, 2.0041, 0.885, -0.0412], [-1.2055, 0.7442, 2.33, \n 1.2411], [-1.2466, 0.3667, 1.2267, 0.3043]], [[-0.2484, -1.1407, 0.3352,\n 0.6687], [-0.2975, -0.0227, -0.2302, -0.3762], [-0.7759, -0.6789, \n 1.1444, 1.8077]]], dtype=np.float32)\n', (9513, 9764), True, 'import numpy as np\n'), ((10364, 10858), 'numpy.array', 'np.array', (['[[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, 0.1705],\n [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245, -0.6551],\n [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, 0.1775]]],\n [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449, 0.0086],\n [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -1.1065, \n 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -0.8819, \n 1.0164]]]]'], {'dtype': 'np.float32'}), '([[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, \n 0.1705], [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245,\n -0.6551], [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, \n 0.1775]]], [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449,\n 0.0086], [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -\n 1.1065, 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -\n 0.8819, 1.0164]]]], dtype=np.float32)\n', (10372, 10858), True, 'import numpy as np\n'), ((11728, 13113), 'numpy.array', 'np.array', (['[[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825, -\n 
0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]]'], {'dtype': 'np.float32'}), '([[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825,\n -0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], 
[-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]],\n dtype=np.float32)\n', (11736, 13113), True, 'import numpy as np\n'), ((17467, 17482), 'unittest.main', 'unittest.main', ([], {}), '()\n', (17480, 17482), False, 'import unittest\n'), ((14668, 14681), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14679, 14681), False, 'from collections import OrderedDict\n'), ((15020, 15040), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (15030, 15040), False, 'from test_util import GenArgList\n'), ((1745, 1808), 'oneflow.nn.InstanceNorm1d', 'flow.nn.InstanceNorm1d', ([], {'num_features': '(3)', 'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=3, eps=1e-05, momentum=0.1)\n', (1767, 1808), True, 'import oneflow as flow\n'), ((1828, 1847), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1839, 1847), True, 'import oneflow as flow\n'), ((1912, 1931), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1923, 1931), True, 'import oneflow as flow\n'), ((4088, 4151), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', ([], {'num_features': '(2)', 'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=2, eps=1e-05, momentum=0.1)\n', (4110, 4151), True, 'import oneflow as flow\n'), ((4171, 4190), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4182, 4190), True, 'import oneflow as flow\n'), ((4255, 4274), 'oneflow.device', 'flow.device', 
(['device'], {}), '(device)\n', (4266, 4274), True, 'import oneflow as flow\n'), ((9052, 9115), 'oneflow.nn.InstanceNorm3d', 'flow.nn.InstanceNorm3d', ([], {'num_features': '(2)', 'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=2, eps=1e-05, momentum=0.1)\n', (9074, 9115), True, 'import oneflow as flow\n'), ((9135, 9154), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9146, 9154), True, 'import oneflow as flow\n'), ((9219, 9238), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9230, 9238), True, 'import oneflow as flow\n'), ((9943, 10006), 'oneflow.nn.InstanceNorm1d', 'flow.nn.InstanceNorm1d', ([], {'num_features': '(2)', 'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=2, eps=1e-05, momentum=0.1)\n', (9965, 10006), True, 'import oneflow as flow\n'), ((10026, 10045), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (10037, 10045), True, 'import oneflow as flow\n'), ((10090, 10109), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (10101, 10109), True, 'import oneflow as flow\n'), ((10239, 10270), 'numpy.zeros', 'np.zeros', ([], {'shape': 'input_arr.shape'}), '(shape=input_arr.shape)\n', (10247, 10270), True, 'import numpy as np\n'), ((11307, 11370), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', ([], {'num_features': '(2)', 'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=2, eps=1e-05, momentum=0.1)\n', (11329, 11370), True, 'import oneflow as flow\n'), ((11390, 11409), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (11401, 11409), True, 'import oneflow as flow\n'), ((11454, 11473), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (11465, 11473), True, 'import oneflow as flow\n'), ((11603, 11634), 'numpy.zeros', 'np.zeros', ([], {'shape': 'input_arr.shape'}), '(shape=input_arr.shape)\n', (11611, 11634), True, 'import numpy as np\n'), ((14178, 14241), 'oneflow.nn.InstanceNorm3d', 'flow.nn.InstanceNorm3d', ([], {'num_features': '(2)', 
'eps': '(1e-05)', 'momentum': '(0.1)'}), '(num_features=2, eps=1e-05, momentum=0.1)\n', (14200, 14241), True, 'import oneflow as flow\n'), ((14261, 14280), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (14272, 14280), True, 'import oneflow as flow\n'), ((14325, 14344), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (14336, 14344), True, 'import oneflow as flow\n'), ((14474, 14505), 'numpy.zeros', 'np.zeros', ([], {'shape': 'input_arr.shape'}), '(shape=input_arr.shape)\n', (14482, 14505), True, 'import numpy as np\n')] |
import math
from functools import partial
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.init as init
from flowvision.layers import lecun_normal_, DropPath, PatchEmbed
from .helpers import named_apply
from .utils import load_state_dict_from_url
from .registry import ModelCreator
# Mapping from architecture name to the URL of its pretrained checkpoint.
# Entries with None have no publicly released pretrained weights; requesting
# them with pretrained=True will fail in load_state_dict_from_url.
model_urls = {
    "mlp_mixer_s16_224": None,
    "mlp_mixer_s32_224": None,
    "mlp_mixer_b16_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_b16_224.zip",
    "mlp_mixer_b32_224": None,
    "mlp_mixer_b16_224_in21k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_b16_224_in21k.zip",
    "mlp_mixer_l16_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_l16_224.zip",
    "mlp_mixer_l32_224": None,
    "mlp_mixer_l16_224_in21k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_l16_224_in21k.zip",
    "mlp_mixer_b16_224_miil": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_b16_224_miil.zip",
    "mlp_mixer_b16_224_miil_in21k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/mlp_mixer_b16_224_miil_in21k.zip",
    "gmlp_ti16_224": None,
    "gmlp_s16_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/Mlp-Mixer/gmlp_s16_224.zip",
    "gmlp_b16_224": None,
}
# helpers
def pair(x):
    """Return *x* unchanged if it is already a tuple, else duplicate it into a 2-tuple."""
    return x if isinstance(x, tuple) else (x, x)
class Mlp(nn.Module):
    """Two-layer feed-forward block: Linear -> activation -> dropout -> Linear -> dropout.

    You can also import the Mlp block from flowvision.layers.blocks:
        from flowvision.layers.blocks import Mlp
    """

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        drop=0.0,
    ):
        super().__init__()
        # Fall back to the input width when hidden/output widths are not given.
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        hidden = self.drop(self.act(self.fc1(x)))
        return self.drop(self.fc2(hidden))
class GatedMlp(nn.Module):
    """Feed-forward block with an optional gating layer, as used in gMLP."""

    def __init__(
        self,
        in_features,
        hidden_features=None,
        out_features=None,
        act_layer=nn.GELU,
        gate_layer=None,
        drop=0.0,
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.drop1 = nn.Dropout(drop)
        if gate_layer is None:
            self.gate = nn.Identity()
        else:
            # The gate halves its input along the channel axis,
            # so fc2 only sees half of the hidden width.
            assert hidden_features % 2 == 0
            self.gate = gate_layer(hidden_features)
            hidden_features = hidden_features // 2
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop2 = nn.Dropout(drop)

    def forward(self, x):
        h = self.drop1(self.act(self.fc1(x)))
        h = self.gate(h)
        return self.drop2(self.fc2(h))
class SpatialGatingUnit(nn.Module):
    """Spatial Gating Unit from `Pay Attention to MLPs`.

    Splits the channel dimension in half into (u, v), layer-normalizes v and
    projects it across the patch dimension, then returns the elementwise
    product u * v.
    """

    def __init__(self, dim, num_patches, norm_layer=nn.LayerNorm):
        super().__init__()
        gate_dim = dim // 2
        self.norm = norm_layer(gate_dim)
        self.proj = nn.Linear(num_patches, num_patches)

    def init_weights(self):
        # special init for the projection gate, called as override by base model init
        nn.init.normal_(self.proj.weight, std=1e-6)
        nn.init.ones_(self.proj.bias)

    def forward(self, x):
        # TODO: use fixed chunk op
        # u, v = x.chunk(2, dim=-1)
        B, N, C = x.size()
        split_dim = C // 2
        # Split once: the original called flow.split twice with identical
        # arguments, performing the same split work two times.
        u, v = flow.split(x, split_dim, dim=-1)[:2]
        v = self.norm(v)
        # Project across patches: transpose to (B, C/2, N), apply Linear(N, N).
        v = self.proj(v.transpose(-1, -2))
        return u * v.transpose(-1, -2)
class SpatialGatingBlock(nn.Module):
    """Residual block wrapping a spatially-gated MLP over the channel dimension.

    Based on: `Pay Attention to MLPs` - https://arxiv.org/abs/2105.08050
    """

    def __init__(
        self,
        dim,
        num_patches,
        mlp_ratio=4,
        mlp_layer=GatedMlp,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        act_layer=nn.GELU,
        drop=0.0,
        drop_path=0.0,
    ):
        super().__init__()
        channel_dim = int(dim * mlp_ratio)
        self.norm = norm_layer(dim)
        # Bind the patch count so the gated MLP can build its spatial gate.
        sgu = partial(SpatialGatingUnit, num_patches=num_patches)
        self.mlp_channels = mlp_layer(
            dim, channel_dim, act_layer=act_layer, gate_layer=sgu, drop=drop
        )
        if drop_path > 0.0:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()

    def forward(self, x):
        return x + self.drop_path(self.mlp_channels(self.norm(x)))
class MixerBlock(nn.Module):
    """Residual block with token-mixing and channel-mixing MLPs.

    Based on: 'MLP-Mixer: An all-MLP Architecture for Vision' - https://arxiv.org/abs/2105.01601
    """

    def __init__(
        self,
        dim,
        num_patches,
        mlp_ratio=(0.5, 4.0),
        mlp_layer=Mlp,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        act_layer=nn.GELU,
        drop=0.0,
        drop_path=0.0,
    ):
        super().__init__()
        # mlp_ratio gives (token ratio, channel ratio); a scalar applies to both.
        tokens_dim, channels_dim = (int(r * dim) for r in pair(mlp_ratio))
        self.norm1 = norm_layer(dim)
        self.mlp_tokens = mlp_layer(
            num_patches, tokens_dim, act_layer=act_layer, drop=drop
        )
        if drop_path > 0.0:
            self.drop_path = DropPath(drop_path)
        else:
            self.drop_path = nn.Identity()
        self.norm2 = norm_layer(dim)
        self.mlp_channels = mlp_layer(dim, channels_dim, act_layer=act_layer, drop=drop)

    def forward(self, x):
        # Token mixing runs across patches, so transpose to (B, C, N) and back.
        mixed = self.mlp_tokens(self.norm1(x).transpose(1, 2)).transpose(1, 2)
        x = x + self.drop_path(mixed)
        x = x + self.drop_path(self.mlp_channels(self.norm2(x)))
        return x
class MlpMixer(nn.Module):
    """MLP-Mixer backbone: a patch-embedding stem, a stack of mixer blocks,
    a final norm, global average pooling over patches, and a linear head.
    """

    def __init__(
        self,
        num_classes=1000,
        img_size=224,
        in_chans=3,
        patch_size=16,
        num_blocks=8,
        embed_dim=512,
        mlp_ratio=(0.5, 4.0),
        block_layer=MixerBlock,
        mlp_layer=Mlp,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        act_layer=nn.GELU,
        drop_rate=0.0,
        drop_path_rate=0.0,
        nlhb=False,
        stem_norm=False,
    ):
        super().__init__()
        self.num_classes = num_classes
        # num_features kept for consistency with other models
        self.num_features = self.embed_dim = embed_dim
        self.stem = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if stem_norm else None,
        )
        # TODO consistent the drop-path-rate rule with the original repo
        stages = [
            block_layer(
                embed_dim,
                self.stem.num_patches,
                mlp_ratio,
                mlp_layer=mlp_layer,
                norm_layer=norm_layer,
                act_layer=act_layer,
                drop=drop_rate,
                drop_path=drop_path_rate,
            )
            for _ in range(num_blocks)
        ]
        self.blocks = nn.Sequential(*stages)
        self.norm = norm_layer(embed_dim)
        if num_classes > 0:
            self.head = nn.Linear(embed_dim, self.num_classes)
        else:
            self.head = nn.Identity()
        self.init_weights(nlhb=nlhb)

    def init_weights(self, nlhb=False):
        # Negative-log head bias lowers the initial class logits when enabled.
        head_bias = -math.log(self.num_classes) if nlhb else 0.0
        named_apply(
            partial(_init_weights, head_bias=head_bias), module=self
        )  # depth-first

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=""):
        self.num_classes = num_classes
        if num_classes > 0:
            self.head = nn.Linear(self.embed_dim, num_classes)
        else:
            self.head = nn.Identity()

    def forward_features(self, x):
        x = self.stem(x)
        x = self.blocks(x)
        x = self.norm(x)
        # Global average pooling over the patch dimension.
        return x.mean(dim=1)

    def forward(self, x):
        return self.head(self.forward_features(x))
def _init_weights(module: nn.Module, name: str, head_bias: float = 0.0, flax=False):
""" Mixer weight initialization (trying to match Flax defaults)
"""
if isinstance(module, nn.Linear):
if name.startswith("head"):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
if flax:
# Flax defaults
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
else:
# like MLP init in vit
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if "mlp" in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, (nn.LayerNorm, nn.BatchNorm2d, nn.GroupNorm)):
nn.init.ones_(module.weight)
nn.init.zeros_(module.bias)
elif hasattr(module, "init_weights"):
# NOTE if a parent module contains init_weights method, it can override the init of the
# child modules as this will be called in depth-first order.
module.init_weights()
def _create_mlp_mixer(arch, pretrained=False, progress=True, **model_kwargs):
    """Build an MlpMixer and optionally load the pretrained weights for *arch*."""
    model = MlpMixer(**model_kwargs)
    if not pretrained:
        return model
    state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
    model.load_state_dict(state_dict)
    return model
@ModelCreator.register_model
def mlp_mixer_s16_224(pretrained=False, progress=True, **kwargs):
    """Mixer-S/16 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_s16_224 = flowvision.models.mlp_mixer_s16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=16, num_blocks=8, embed_dim=512, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_s16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_s32_224(pretrained=False, progress=True, **kwargs):
    """Mixer-S/32 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_s32_224 = flowvision.models.mlp_mixer_s32_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=32, num_blocks=8, embed_dim=512, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_s32_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_b16_224(pretrained=False, progress=True, **kwargs):
    """Mixer-B/16 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_b16_224 = flowvision.models.mlp_mixer_b16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_b16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_b32_224(pretrained=False, progress=True, **kwargs):
    """Mixer-B/32 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_b32_224 = flowvision.models.mlp_mixer_b32_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=32, num_blocks=12, embed_dim=768, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_b32_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_b16_224_in21k(pretrained=False, progress=True, **kwargs):
    """Mixer-B/16 224x224 ImageNet-21k pretrained model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Note that this model is the pretrained model for fine-tuning on different datasets.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_b16_224_in21k = flowvision.models.mlp_mixer_b16_224_in21k(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        num_classes=21843, patch_size=16, num_blocks=12, embed_dim=768, **kwargs
    )
    return _create_mlp_mixer(
        "mlp_mixer_b16_224_in21k",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_l16_224(pretrained=False, progress=True, **kwargs):
    """Mixer-L/16 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_l16_224 = flowvision.models.mlp_mixer_l16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=16, num_blocks=24, embed_dim=1024, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_l16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_l32_224(pretrained=False, progress=True, **kwargs):
    """Mixer-L/32 224x224 model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_l32_224 = flowvision.models.mlp_mixer_l32_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=32, num_blocks=24, embed_dim=1024, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_l32_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_l16_224_in21k(pretrained=False, progress=True, **kwargs):
    """Mixer-L/16 224x224 ImageNet-21k pretrained model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Note that this model is the pretrained model for fine-tuning on different datasets.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_l16_224_in21k = flowvision.models.mlp_mixer_l16_224_in21k(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        num_classes=21843, patch_size=16, num_blocks=24, embed_dim=1024, **kwargs
    )
    return _create_mlp_mixer(
        "mlp_mixer_l16_224_in21k",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_b16_224_miil(pretrained=False, progress=True, **kwargs):
    """Mixer-B/16 224x224 model (alternative weights) from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_b16_224_miil = flowvision.models.mlp_mixer_b16_224_miil(pretrained=False, progress=True)
    """
    arch_kwargs = dict(patch_size=16, num_blocks=12, embed_dim=768, **kwargs)
    return _create_mlp_mixer(
        "mlp_mixer_b16_224_miil",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def mlp_mixer_b16_224_miil_in21k(pretrained=False, progress=True, **kwargs):
    """Mixer-B/16 224x224 ImageNet-21k pretrained model from
    `"MLP-Mixer: An all-MLP Architecture for Vision" <https://arxiv.org/pdf/2105.01601.pdf>`_.

    Weights taken from: https://github.com/Alibaba-MIIL/ImageNet21K

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> mlp_mixer_b16_224_miil_in21k = flowvision.models.mlp_mixer_b16_224_miil_in21k(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        num_classes=11221, patch_size=16, num_blocks=12, embed_dim=768, **kwargs
    )
    return _create_mlp_mixer(
        "mlp_mixer_b16_224_miil_in21k",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def gmlp_ti16_224(pretrained=False, progress=True, **kwargs):
    """gMLP-tiny-16 224x224 model from
    `"Pay Attention to MLPs" <https://arxiv.org/pdf/2105.08050.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> gmlp_ti16_224 = flowvision.models.gmlp_ti16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        patch_size=16,
        num_blocks=30,
        embed_dim=128,
        mlp_ratio=6,
        block_layer=SpatialGatingBlock,
        mlp_layer=GatedMlp,
        **kwargs
    )
    return _create_mlp_mixer(
        "gmlp_ti16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def gmlp_s16_224(pretrained=False, progress=True, **kwargs):
    """gMLP-small-16 224x224 model from
    `"Pay Attention to MLPs" <https://arxiv.org/pdf/2105.08050.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> gmlp_s16_224 = flowvision.models.gmlp_s16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        patch_size=16,
        num_blocks=30,
        embed_dim=256,
        mlp_ratio=6,
        block_layer=SpatialGatingBlock,
        mlp_layer=GatedMlp,
        **kwargs
    )
    return _create_mlp_mixer(
        "gmlp_s16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
@ModelCreator.register_model
def gmlp_b16_224(pretrained=False, progress=True, **kwargs):
    """gMLP-base-16 224x224 model from
    `"Pay Attention to MLPs" <https://arxiv.org/pdf/2105.08050.pdf>`_.

    Args:
        pretrained (bool): If True, download the model pretrained on ImageNet. Default: ``False``
        progress (bool): If True, display a download progress bar on stderr. Default: ``True``

    For example:
    .. code-block:: python
        >>> import flowvision
        >>> gmlp_b16_224 = flowvision.models.gmlp_b16_224(pretrained=False, progress=True)
    """
    arch_kwargs = dict(
        patch_size=16,
        num_blocks=30,
        embed_dim=512,
        mlp_ratio=6,
        block_layer=SpatialGatingBlock,
        mlp_layer=GatedMlp,
        **kwargs
    )
    return _create_mlp_mixer(
        "gmlp_b16_224",
        pretrained=pretrained,
        progress=progress,
        **arch_kwargs
    )
| [
"oneflow.nn.init.ones_",
"oneflow.nn.Linear",
"oneflow.split",
"oneflow.nn.init.constant_",
"oneflow.nn.init.zeros_",
"oneflow.nn.init.xavier_uniform_",
"oneflow.nn.Dropout",
"oneflow.nn.init.normal_",
"oneflow.nn.Identity"
] | [((2152, 2191), 'oneflow.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features'], {}), '(in_features, hidden_features)\n', (2161, 2191), True, 'import oneflow.nn as nn\n'), ((2242, 2282), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_features', 'out_features'], {}), '(hidden_features, out_features)\n', (2251, 2282), True, 'import oneflow.nn as nn\n'), ((2303, 2319), 'oneflow.nn.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (2313, 2319), True, 'import oneflow.nn as nn\n'), ((2894, 2933), 'oneflow.nn.Linear', 'nn.Linear', (['in_features', 'hidden_features'], {}), '(in_features, hidden_features)\n', (2903, 2933), True, 'import oneflow.nn as nn\n'), ((2986, 3002), 'oneflow.nn.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (2996, 3002), True, 'import oneflow.nn as nn\n'), ((3256, 3296), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_features', 'out_features'], {}), '(hidden_features, out_features)\n', (3265, 3296), True, 'import oneflow.nn as nn\n'), ((3318, 3334), 'oneflow.nn.Dropout', 'nn.Dropout', (['drop'], {}), '(drop)\n', (3328, 3334), True, 'import oneflow.nn as nn\n'), ((3786, 3821), 'oneflow.nn.Linear', 'nn.Linear', (['num_patches', 'num_patches'], {}), '(num_patches, num_patches)\n', (3795, 3821), True, 'import oneflow.nn as nn\n'), ((3945, 3989), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['self.proj.weight'], {'std': '(1e-06)'}), '(self.proj.weight, std=1e-06)\n', (3960, 3989), True, 'import oneflow.nn as nn\n'), ((3997, 4026), 'oneflow.nn.init.ones_', 'nn.init.ones_', (['self.proj.bias'], {}), '(self.proj.bias)\n', (4010, 4026), True, 'import oneflow.nn as nn\n'), ((4670, 4702), 'functools.partial', 'partial', (['nn.LayerNorm'], {'eps': '(1e-06)'}), '(nn.LayerNorm, eps=1e-06)\n', (4677, 4702), False, 'from functools import partial\n'), ((4898, 4949), 'functools.partial', 'partial', (['SpatialGatingUnit'], {'num_patches': 'num_patches'}), '(SpatialGatingUnit, num_patches=num_patches)\n', (4905, 4949), False, 'from functools import 
partial\n'), ((5598, 5630), 'functools.partial', 'partial', (['nn.LayerNorm'], {'eps': '(1e-06)'}), '(nn.LayerNorm, eps=1e-06)\n', (5605, 5630), False, 'from functools import partial\n'), ((6696, 6728), 'functools.partial', 'partial', (['nn.LayerNorm'], {'eps': '(1e-06)'}), '(nn.LayerNorm, eps=1e-06)\n', (6703, 6728), False, 'from functools import partial\n'), ((7075, 7217), 'flowvision.layers.PatchEmbed', 'PatchEmbed', ([], {'img_size': 'img_size', 'patch_size': 'patch_size', 'in_chans': 'in_chans', 'embed_dim': 'embed_dim', 'norm_layer': '(norm_layer if stem_norm else None)'}), '(img_size=img_size, patch_size=patch_size, in_chans=in_chans,\n embed_dim=embed_dim, norm_layer=norm_layer if stem_norm else None)\n', (7085, 7217), False, 'from flowvision.layers import lecun_normal_, DropPath, PatchEmbed\n'), ((3223, 3236), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (3234, 3236), True, 'import oneflow.nn as nn\n'), ((5101, 5120), 'flowvision.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (5109, 5120), False, 'from flowvision.layers import lecun_normal_, DropPath, PatchEmbed\n'), ((5145, 5158), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (5156, 5158), True, 'import oneflow.nn as nn\n'), ((5985, 6004), 'flowvision.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (5993, 6004), False, 'from flowvision.layers import lecun_normal_, DropPath, PatchEmbed\n'), ((6029, 6042), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (6040, 6042), True, 'import oneflow.nn as nn\n'), ((7912, 7950), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'self.num_classes'], {}), '(embed_dim, self.num_classes)\n', (7921, 7950), True, 'import oneflow.nn as nn\n'), ((7975, 7988), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (7986, 7988), True, 'import oneflow.nn as nn\n'), ((8176, 8219), 'functools.partial', 'partial', (['_init_weights'], {'head_bias': 'head_bias'}), '(_init_weights, head_bias=head_bias)\n', (8183, 
8219), False, 'from functools import partial\n'), ((8449, 8487), 'oneflow.nn.Linear', 'nn.Linear', (['self.embed_dim', 'num_classes'], {}), '(self.embed_dim, num_classes)\n', (8458, 8487), True, 'import oneflow.nn as nn\n'), ((8512, 8525), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', (8523, 8525), True, 'import oneflow.nn as nn\n'), ((9047, 9076), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['module.weight'], {}), '(module.weight)\n', (9061, 9076), True, 'import oneflow.nn as nn\n'), ((9089, 9130), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['module.bias', 'head_bias'], {}), '(module.bias, head_bias)\n', (9106, 9130), True, 'import oneflow.nn as nn\n'), ((9718, 9746), 'flowvision.layers.lecun_normal_', 'lecun_normal_', (['module.weight'], {}), '(module.weight)\n', (9731, 9746), False, 'from flowvision.layers import lecun_normal_, DropPath, PatchEmbed\n'), ((4194, 4226), 'oneflow.split', 'flow.split', (['x', 'split_dim'], {'dim': '(-1)'}), '(x, split_dim, dim=-1)\n', (4204, 4226), True, 'import oneflow as flow\n'), ((4231, 4263), 'oneflow.split', 'flow.split', (['x', 'split_dim'], {'dim': '(-1)'}), '(x, split_dim, dim=-1)\n', (4241, 4263), True, 'import oneflow as flow\n'), ((8099, 8125), 'math.log', 'math.log', (['self.num_classes'], {}), '(self.num_classes)\n', (8107, 8125), False, 'import math\n'), ((9214, 9242), 'flowvision.layers.lecun_normal_', 'lecun_normal_', (['module.weight'], {}), '(module.weight)\n', (9227, 9242), False, 'from flowvision.layers import lecun_normal_, DropPath, PatchEmbed\n'), ((9408, 9446), 'oneflow.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['module.weight'], {}), '(module.weight)\n', (9431, 9446), True, 'import oneflow.nn as nn\n'), ((9795, 9822), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (9809, 9822), True, 'import oneflow.nn as nn\n'), ((9906, 9934), 'oneflow.nn.init.ones_', 'nn.init.ones_', (['module.weight'], {}), '(module.weight)\n', (9919, 9934), True, 
'import oneflow.nn as nn\n'), ((9943, 9970), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (9957, 9970), True, 'import oneflow.nn as nn\n'), ((9307, 9334), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (9321, 9334), True, 'import oneflow.nn as nn\n'), ((9553, 9592), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['module.bias'], {'std': '(1e-06)'}), '(module.bias, std=1e-06)\n', (9568, 9592), True, 'import oneflow.nn as nn\n'), ((9642, 9669), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['module.bias'], {}), '(module.bias)\n', (9656, 9669), True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _check(test_case, x, y, out, case):
if case == "add":
np_out = np.add(x, y)
elif case == "sub":
np_out = np.subtract(x, y)
elif case == "mul":
np_out = np.multiply(x, y)
elif case == "div":
if type(y[0]) == np.float32 or type(y[0]) == np.double:
np_out = np.divide(x, y)
else:
np_out = np.floor_divide(x, y)
test_case.assertTrue(np.allclose(np_out, out, rtol=1e-5, atol=1e-5))
def _run_test(test_case, x, y, case, dtype=None, device="gpu"):
    """Build a single-client job applying `case` to (x, y) and verify its output."""
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=cfg)
    def ScalarByTensorJob(
        x: oft.Numpy.Placeholder(x.shape, dtype=dtype),
        y: oft.Numpy.Placeholder(y.shape, dtype=dtype),
    ):
        # Dispatch table instead of an if/elif chain; an unknown case yields
        # None, matching the original implicit fall-through.
        op = {
            "add": flow.math.add,
            "sub": flow.math.subtract,
            "mul": flow.math.multiply,
            "div": flow.math.divide,
        }.get(case)
        return op(x, y) if op is not None else None

    result = ScalarByTensorJob(x, y).get()
    _check(test_case, x, y, result.numpy(), case)
@flow.unittest.skip_unless_1n1d()
class TestScalarByTensorInt(flow.unittest.TestCase):
def test_scalar_add_by_tensor_gpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "add", flow.float, "gpu")
def test_scalar_add_by_tensor_cpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "add", flow.float, "cpu")
def test_scalar_add_by_tensor_gpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "add", flow.double, "gpu")
def test_scalar_add_by_tensor_cpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "add", flow.double, "cpu")
def test_scalar_add_by_tensor_gpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "add", flow.int8, "gpu")
def test_scalar_add_by_tensor_cpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "add", flow.int8, "cpu")
def test_scalar_add_by_tensor_gpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "add", flow.int32, "gpu")
def test_scalar_add_by_tensor_cpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "add", flow.int32, "cpu")
def test_scalar_add_by_tensor_gpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "add", flow.int64, "gpu")
def test_scalar_add_by_tensor_cpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "add", flow.int64, "cpu")
def test_scalar_sub_by_tensor_gpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "sub", flow.float, "gpu")
def test_scalar_sub_by_tensor_cpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "sub", flow.float, "cpu")
def test_scalar_sub_by_tensor_gpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "sub", flow.double, "gpu")
def test_scalar_sub_by_tensor_cpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "sub", flow.double, "cpu")
def test_scalar_sub_by_tensor_gpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "sub", flow.int8, "gpu")
def test_scalar_sub_by_tensor_cpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "sub", flow.int8, "cpu")
def test_scalar_sub_by_tensor_gpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "sub", flow.int32, "gpu")
def test_scalar_sub_by_tensor_cpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "sub", flow.int32, "cpu")
def test_scalar_sub_by_tensor_gpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "sub", flow.int64, "gpu")
def test_scalar_sub_by_tensor_cpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "sub", flow.int64, "cpu")
def test_scalar_mul_by_tensor_gpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "mul", flow.float, "gpu")
def test_scalar_mul_by_tensor_cpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "mul", flow.float, "cpu")
def test_scalar_mul_by_tensor_gpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "mul", flow.double, "gpu")
def test_scalar_mul_by_tensor_cpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "mul", flow.double, "cpu")
def test_scalar_mul_by_tensor_gpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "mul", flow.int8, "gpu")
def test_scalar_mul_by_tensor_cpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "mul", flow.int8, "cpu")
def test_scalar_mul_by_tensor_gpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "mul", flow.int32, "gpu")
def test_scalar_mul_by_tensor_cpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "mul", flow.int32, "cpu")
def test_scalar_mul_by_tensor_gpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "mul", flow.int64, "gpu")
def test_scalar_mul_by_tensor_cpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "mul", flow.int64, "cpu")
def test_scalar_div_by_tensor_gpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "div", flow.float, "gpu")
def test_scalar_div_by_tensor_cpu_float(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.float32)
y = np.random.rand(1).astype(np.float32)
_run_test(test_case, x, y, "div", flow.float, "cpu")
def test_scalar_div_by_tensor_gpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "div", flow.double, "gpu")
def test_scalar_div_by_tensor_cpu_double(test_case):
x = np.random.rand(10, 3, 32, 1024).astype(np.double)
y = np.random.rand(1).astype(np.double)
_run_test(test_case, x, y, "div", flow.double, "cpu")
def test_scalar_div_by_tensor_gpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "div", flow.int8, "gpu")
def test_scalar_div_by_tensor_cpu_int8(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int8)
_run_test(test_case, x, y, "div", flow.int8, "cpu")
def test_scalar_div_by_tensor_gpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "div", flow.int32, "gpu")
def test_scalar_div_by_tensor_cpu_int32(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int32)
_run_test(test_case, x, y, "div", flow.int32, "cpu")
def test_scalar_div_by_tensor_gpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "div", flow.int64, "gpu")
def test_scalar_div_by_tensor_cpu_int64(test_case):
x = np.random.randint(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)
y = np.random.randint(low=1, high=10, size=(1,), dtype=np.int64)
_run_test(test_case, x, y, "div", flow.int64, "cpu")
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.math.multiply",
"oneflow.compatible.single_client.math.divide",
"oneflow.compatible.single_client.math.add",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_client.math.subtract",
"oneflow.compatible.single_client.typing.Numpy.Plac... | [((1996, 2028), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2026, 2028), True, 'from oneflow.compatible import single_client as flow\n'), ((1289, 1310), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1308, 1310), True, 'from oneflow.compatible import single_client as flow\n'), ((1430, 1479), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1450, 1479), True, 'from oneflow.compatible import single_client as flow\n'), ((12410, 12425), 'unittest.main', 'unittest.main', ([], {}), '()\n', (12423, 12425), False, 'import unittest\n'), ((818, 830), 'numpy.add', 'np.add', (['x', 'y'], {}), '(x, y)\n', (824, 830), True, 'import numpy as np\n'), ((1157, 1205), 'numpy.allclose', 'np.allclose', (['np_out', 'out'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(np_out, out, rtol=1e-05, atol=1e-05)\n', (1168, 1205), True, 'import numpy as np\n'), ((1394, 1422), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1420, 1422), True, 'from oneflow.compatible import single_client as flow\n'), ((3069, 3141), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (3086, 3141), True, 'import numpy as np\n'), ((3154, 3213), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (3171, 3213), True, 'import numpy as np\n'), ((3342, 3414), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), 
dtype=np.int8)\n', (3359, 3414), True, 'import numpy as np\n'), ((3427, 3486), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (3444, 3486), True, 'import numpy as np\n'), ((3616, 3689), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (3633, 3689), True, 'import numpy as np\n'), ((3702, 3762), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (3719, 3762), True, 'import numpy as np\n'), ((3893, 3966), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (3910, 3966), True, 'import numpy as np\n'), ((3979, 4039), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (3996, 4039), True, 'import numpy as np\n'), ((4170, 4243), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (4187, 4243), True, 'import numpy as np\n'), ((4256, 4316), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (4273, 4316), True, 'import numpy as np\n'), ((4447, 4520), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (4464, 4520), True, 'import numpy as np\n'), 
((4533, 4593), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (4550, 4593), True, 'import numpy as np\n'), ((5643, 5715), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (5660, 5715), True, 'import numpy as np\n'), ((5728, 5787), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (5745, 5787), True, 'import numpy as np\n'), ((5916, 5988), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (5933, 5988), True, 'import numpy as np\n'), ((6001, 6060), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (6018, 6060), True, 'import numpy as np\n'), ((6190, 6263), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (6207, 6263), True, 'import numpy as np\n'), ((6276, 6336), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (6293, 6336), True, 'import numpy as np\n'), ((6467, 6540), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (6484, 6540), True, 'import numpy as np\n'), ((6553, 6613), 'numpy.random.randint', 'np.random.randint', ([], 
{'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (6570, 6613), True, 'import numpy as np\n'), ((6744, 6817), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (6761, 6817), True, 'import numpy as np\n'), ((6830, 6890), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (6847, 6890), True, 'import numpy as np\n'), ((7021, 7094), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (7038, 7094), True, 'import numpy as np\n'), ((7107, 7167), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (7124, 7167), True, 'import numpy as np\n'), ((8217, 8289), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (8234, 8289), True, 'import numpy as np\n'), ((8302, 8361), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (8319, 8361), True, 'import numpy as np\n'), ((8490, 8562), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (8507, 8562), True, 'import numpy as np\n'), ((8575, 8634), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), 
'(low=1, high=10, size=(1,), dtype=np.int8)\n', (8592, 8634), True, 'import numpy as np\n'), ((8764, 8837), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (8781, 8837), True, 'import numpy as np\n'), ((8850, 8910), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (8867, 8910), True, 'import numpy as np\n'), ((9041, 9114), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (9058, 9114), True, 'import numpy as np\n'), ((9127, 9187), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (9144, 9187), True, 'import numpy as np\n'), ((9318, 9391), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (9335, 9391), True, 'import numpy as np\n'), ((9404, 9464), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (9421, 9464), True, 'import numpy as np\n'), ((9595, 9668), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (9612, 9668), True, 'import numpy as np\n'), ((9681, 9741), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (9698, 9741), 
True, 'import numpy as np\n'), ((10791, 10863), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (10808, 10863), True, 'import numpy as np\n'), ((10876, 10935), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (10893, 10935), True, 'import numpy as np\n'), ((11064, 11136), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int8)\n', (11081, 11136), True, 'import numpy as np\n'), ((11149, 11208), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int8'}), '(low=1, high=10, size=(1,), dtype=np.int8)\n', (11166, 11208), True, 'import numpy as np\n'), ((11338, 11411), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (11355, 11411), True, 'import numpy as np\n'), ((11424, 11484), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (11441, 11484), True, 'import numpy as np\n'), ((11615, 11688), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int32)\n', (11632, 11688), True, 'import numpy as np\n'), ((11701, 11761), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int32'}), '(low=1, high=10, size=(1,), dtype=np.int32)\n', (11718, 11761), True, 'import numpy as np\n'), ((11892, 
11965), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (11909, 11965), True, 'import numpy as np\n'), ((11978, 12038), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (11995, 12038), True, 'import numpy as np\n'), ((12169, 12242), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(10, 3, 32, 1024)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(10, 3, 32, 1024), dtype=np.int64)\n', (12186, 12242), True, 'import numpy as np\n'), ((12255, 12315), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(1)', 'high': '(10)', 'size': '(1,)', 'dtype': 'np.int64'}), '(low=1, high=10, size=(1,), dtype=np.int64)\n', (12272, 12315), True, 'import numpy as np\n'), ((872, 889), 'numpy.subtract', 'np.subtract', (['x', 'y'], {}), '(x, y)\n', (883, 889), True, 'import numpy as np\n'), ((1518, 1561), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (1539, 1561), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1574, 1617), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['y.shape'], {'dtype': 'dtype'}), '(y.shape, dtype=dtype)\n', (1595, 1617), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1671, 1690), 'oneflow.compatible.single_client.math.add', 'flow.math.add', (['x', 'y'], {}), '(x, y)\n', (1684, 1690), True, 'from oneflow.compatible import single_client as flow\n'), ((931, 948), 'numpy.multiply', 'np.multiply', (['x', 'y'], {}), '(x, y)\n', (942, 948), True, 'import numpy as np\n'), ((1738, 1762), 'oneflow.compatible.single_client.math.subtract', 'flow.math.subtract', (['x', 'y'], 
{}), '(x, y)\n', (1756, 1762), True, 'from oneflow.compatible import single_client as flow\n'), ((2150, 2181), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2164, 2181), True, 'import numpy as np\n'), ((2213, 2230), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2227, 2230), True, 'import numpy as np\n'), ((2380, 2411), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2394, 2411), True, 'import numpy as np\n'), ((2443, 2460), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2457, 2460), True, 'import numpy as np\n'), ((2611, 2642), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2625, 2642), True, 'import numpy as np\n'), ((2673, 2690), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2687, 2690), True, 'import numpy as np\n'), ((2841, 2872), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (2855, 2872), True, 'import numpy as np\n'), ((2903, 2920), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (2917, 2920), True, 'import numpy as np\n'), ((4724, 4755), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (4738, 4755), True, 'import numpy as np\n'), ((4787, 4804), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (4801, 4804), True, 'import numpy as np\n'), ((4954, 4985), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (4968, 4985), True, 'import numpy as np\n'), ((5017, 5034), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (5031, 5034), True, 'import numpy as np\n'), ((5185, 5216), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (5199, 5216), True, 'import numpy as np\n'), ((5247, 5264), 
'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (5261, 5264), True, 'import numpy as np\n'), ((5415, 5446), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (5429, 5446), True, 'import numpy as np\n'), ((5477, 5494), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (5491, 5494), True, 'import numpy as np\n'), ((7298, 7329), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (7312, 7329), True, 'import numpy as np\n'), ((7361, 7378), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7375, 7378), True, 'import numpy as np\n'), ((7528, 7559), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (7542, 7559), True, 'import numpy as np\n'), ((7591, 7608), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7605, 7608), True, 'import numpy as np\n'), ((7759, 7790), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (7773, 7790), True, 'import numpy as np\n'), ((7821, 7838), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (7835, 7838), True, 'import numpy as np\n'), ((7989, 8020), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (8003, 8020), True, 'import numpy as np\n'), ((8051, 8068), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (8065, 8068), True, 'import numpy as np\n'), ((9872, 9903), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (9886, 9903), True, 'import numpy as np\n'), ((9935, 9952), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (9949, 9952), True, 'import numpy as np\n'), ((10102, 10133), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (10116, 10133), True, 'import numpy as np\n'), ((10165, 
10182), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (10179, 10182), True, 'import numpy as np\n'), ((10333, 10364), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (10347, 10364), True, 'import numpy as np\n'), ((10395, 10412), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (10409, 10412), True, 'import numpy as np\n'), ((10563, 10594), 'numpy.random.rand', 'np.random.rand', (['(10)', '(3)', '(32)', '(1024)'], {}), '(10, 3, 32, 1024)\n', (10577, 10594), True, 'import numpy as np\n'), ((10625, 10642), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (10639, 10642), True, 'import numpy as np\n'), ((1810, 1834), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['x', 'y'], {}), '(x, y)\n', (1828, 1834), True, 'from oneflow.compatible import single_client as flow\n'), ((1058, 1073), 'numpy.divide', 'np.divide', (['x', 'y'], {}), '(x, y)\n', (1067, 1073), True, 'import numpy as np\n'), ((1109, 1130), 'numpy.floor_divide', 'np.floor_divide', (['x', 'y'], {}), '(x, y)\n', (1124, 1130), True, 'import numpy as np\n'), ((1882, 1904), 'oneflow.compatible.single_client.math.divide', 'flow.math.divide', (['x', 'y'], {}), '(x, y)\n', (1898, 1904), True, 'from oneflow.compatible import single_client as flow\n')] |
import math
import oneflow as flow
from oneflow import nn
def gelu_new(x):
return (
0.5
* x
* (
1.0
+ flow.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * flow.pow(x, 3.0)))
)
)
def gelu_fast(x):
return 0.5 * x * (1.0 + flow.tanh(x * 0.7978845608 * (1.0 + 0.044715 * x * x)))
def quick_gelu(x):
return x * flow.sigmoid(1.702 * x)
def linear_act(x):
return x
ACT2FN = {
"relu": nn.functional.relu,
"silu": nn.functional.silu,
"swish": nn.functional.silu,
"gelu": nn.functional.gelu,
"tanh": flow.tanh,
"gelu_new": gelu_new,
"gelu_fast": gelu_fast,
"quick_gelu": quick_gelu,
"mish": flow.mish,
"linear": linear_act,
"sigmoid": flow.sigmoid,
}
| [
"oneflow.sigmoid",
"oneflow.pow",
"oneflow.tanh"
] | [((385, 408), 'oneflow.sigmoid', 'flow.sigmoid', (['(1.702 * x)'], {}), '(1.702 * x)\n', (397, 408), True, 'import oneflow as flow\n'), ((293, 347), 'oneflow.tanh', 'flow.tanh', (['(x * 0.7978845608 * (1.0 + 0.044715 * x * x))'], {}), '(x * 0.7978845608 * (1.0 + 0.044715 * x * x))\n', (302, 347), True, 'import oneflow as flow\n'), ((167, 191), 'math.sqrt', 'math.sqrt', (['(2.0 / math.pi)'], {}), '(2.0 / math.pi)\n', (176, 191), False, 'import math\n'), ((210, 226), 'oneflow.pow', 'flow.pow', (['x', '(3.0)'], {}), '(x, 3.0)\n', (218, 226), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from oneflow.python.nn.module import Module
class Acosh(Module):
def __init__(self):
super().__init__()
def forward(self, x):
return flow.F.acosh(x)
@oneflow_export("acosh")
@experimental_api
def acosh_op(x):
r"""Returns a new tensor with the inverse hyperbolic cosine of the elements of :attr:`input`.
.. math::
\text{out}_{i} = \cosh^{-1}(\text{input}_{i})
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow.experimental as flow
>>> import numpy as np
>>> flow.enable_eager_execution()
>>> x1 = flow.Tensor(np.array([2, 3, 4]).astype(np.float32))
>>> out1 = flow.acosh(x1)
>>> out1
tensor([1.317 , 1.7627, 2.0634], dtype=oneflow.float32)
>>> x2 = flow.Tensor(np.array([1.5, 2.6, 3.7]).astype(np.float32),device=flow.device('cuda'))
>>> out2 = flow.acosh(x2)
>>> out2
tensor([0.9624, 1.6094, 1.9827], device='cuda:0', dtype=oneflow.float32)
"""
return Acosh()(x)
@register_tensor_op("acosh")
@experimental_api
def acosh_op_tensor(x):
r"""
acosh() -> Tensor
See :func:`oneflow.experimental.acosh`
"""
return Acosh()(x)
@oneflow_export("arccosh")
@experimental_api
def arccosh_op(x):
r"""
See :func:`oneflow.experimental.acosh`
"""
return Acosh()(x)
@register_tensor_op("arccosh")
@experimental_api
def arccosh_op_tensor(x):
r"""
arccosh() -> Tensor
See :func:`oneflow.experimental.acosh`
"""
return Acosh()(x)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.F.acosh",
"oneflow.python.framework.tensor.register_tensor_op"
] | [((931, 954), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""acosh"""'], {}), "('acosh')\n", (945, 954), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1832, 1859), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""acosh"""'], {}), "('acosh')\n", (1850, 1859), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((2013, 2038), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""arccosh"""'], {}), "('arccosh')\n", (2027, 2038), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((2164, 2193), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""arccosh"""'], {}), "('arccosh')\n", (2182, 2193), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((2401, 2437), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2416, 2437), False, 'import doctest\n'), ((912, 927), 'oneflow.F.acosh', 'flow.F.acosh', (['x'], {}), '(x)\n', (924, 927), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestAvgPoolingModule(flow.unittest.TestCase):
@autotest(n=100)
def test_avgpool1d_with_random_data(test_case):
m = torch.nn.AvgPool1d(
kernel_size=random(4, 6),
stride=random(1, 3) | nothing(),
padding=random(1, 3) | nothing(),
ceil_mode=random(),
count_include_pad=random(),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=3, dim2=random(20, 22)).to(device)
y = m(x)
return y
@autotest(n=100)
def test_avgpool2d_with_random_data(test_case):
m = torch.nn.AvgPool2d(
kernel_size=random(4, 6),
stride=random(1, 3) | nothing(),
padding=random(1, 3) | nothing(),
ceil_mode=random(),
count_include_pad=random(),
divisor_override=random().to(int),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(ndim=4, dim2=random(20, 22), dim3=random(20, 22)).to(
device
)
y = m(x)
return y
@autotest(n=100)
def test_avgpool3d_with_random_data(test_case):
m = torch.nn.AvgPool3d(
kernel_size=random(4, 6),
stride=random(1, 3) | nothing(),
padding=random(1, 3) | nothing(),
ceil_mode=random(),
count_include_pad=random(),
divisor_override=random().to(int),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(
ndim=5, dim2=random(20, 22), dim3=random(20, 22), dim4=random(20, 22)
).to(device)
y = m(x)
return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((691, 723), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (721, 723), True, 'import oneflow as flow\n'), ((2524, 2539), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2537, 2539), False, 'import unittest\n')] |
"""
Modified from https://github.com/pytorch/vision/blob/main/torchvision/models/squeezenet.py
"""
from typing import Any
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.init as init
from .utils import load_state_dict_from_url
from .registry import ModelCreator
__all__ = ["SqueezeNet", "squeezenet1_0", "squeezenet1_1"]
model_urls = {
"squeezenet1_0": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/SqueezeNet/squeezenet1_0.zip",
"squeezenet1_1": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/SqueezeNet/squeezenet1_1.zip",
}
class Fire(nn.Module):
    """SqueezeNet Fire module: a 1x1 "squeeze" conv feeding two parallel
    "expand" branches (1x1 and 3x3) whose outputs are concatenated on the
    channel axis.
    """

    def __init__(
        self,
        inplanes: int,
        squeeze_planes: int,
        expand1x1_planes: int,
        expand3x3_planes: int,
    ) -> None:
        super(Fire, self).__init__()
        self.inplanes = inplanes
        # Squeeze stage: reduce channel count with a pointwise convolution.
        self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1)
        self.squeeze_activation = nn.ReLU(inplace=True)
        # Expand stage: two parallel branches (module creation order is kept
        # identical to preserve parameter-initialization RNG consumption).
        self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, kernel_size=1)
        self.expand1x1_activation = nn.ReLU(inplace=True)
        self.expand3x3 = nn.Conv2d(
            squeeze_planes, expand3x3_planes, kernel_size=3, padding=1
        )
        self.expand3x3_activation = nn.ReLU(inplace=True)

    def forward(self, x: flow.Tensor) -> flow.Tensor:
        squeezed = self.squeeze_activation(self.squeeze(x))
        branch1 = self.expand1x1_activation(self.expand1x1(squeezed))
        branch3 = self.expand3x3_activation(self.expand3x3(squeezed))
        # Concatenate both expand branches along the channel dimension.
        return flow.cat([branch1, branch3], 1)
class SqueezeNet(nn.Module):
    """SqueezeNet backbone (versions "1_0" and "1_1").

    Args:
        version: "1_0" or "1_1"; selects the feature-extractor layout.
        num_classes: number of output classes produced by the classifier head.

    Raises:
        ValueError: if ``version`` is not one of the two supported strings.
    """

    def __init__(self, version: str = "1_0", num_classes: int = 1000) -> None:
        super(SqueezeNet, self).__init__()
        self.num_classes = num_classes
        if version == "1_0":
            self.features = nn.Sequential(
                nn.Conv2d(3, 96, kernel_size=7, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(96, 16, 64, 64),
                Fire(128, 16, 64, 64),
                Fire(128, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 32, 128, 128),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(512, 64, 256, 256),
            )
        elif version == "1_1":
            self.features = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=2),
                nn.ReLU(inplace=True),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(64, 16, 64, 64),
                Fire(128, 16, 64, 64),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(128, 32, 128, 128),
                Fire(256, 32, 128, 128),
                nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True),
                Fire(256, 48, 192, 192),
                Fire(384, 48, 192, 192),
                Fire(384, 64, 256, 256),
                Fire(512, 64, 256, 256),
            )
        else:
            # FIXME: Is this needed? SqueezeNet should only be called from the
            # FIXME: squeezenet1_x() functions
            # FIXME: This checking is not done for the other models
            raise ValueError(
                "Unsupported SqueezeNet version {version}:"
                "1_0 or 1_1 expected".format(version=version)
            )
        # Final convolution is initialized differently from the rest
        final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1)
        self.classifier = nn.Sequential(
            nn.Dropout(p=0.5),
            final_conv,
            nn.ReLU(inplace=True),
            nn.AdaptiveAvgPool2d((1, 1)),
        )
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                if m is final_conv:
                    init.normal_(m.weight, mean=0.0, std=0.01)
                else:
                    init.kaiming_uniform_(m.weight)
                # BUG FIX: the bias-zeroing was previously nested under the
                # else-branch, so final_conv's bias was never initialized.
                # Upstream torchvision applies it to every Conv2d.
                if m.bias is not None:
                    init.constant_(m.bias, 0)

    def forward(self, x: flow.Tensor) -> flow.Tensor:
        x = self.features(x)
        x = self.classifier(x)
        # Classifier already pools to 1x1; flatten to (batch, num_classes).
        return flow.flatten(x, 1)
def _squeezenet(
    version: str, pretrained: bool, progress: bool, **kwargs: Any
) -> SqueezeNet:
    """Build a SqueezeNet of ``version`` and optionally load pretrained weights."""
    net = SqueezeNet(version, **kwargs)
    if not pretrained:
        return net
    # Weight archives are keyed by "squeezenet" + version in model_urls.
    weights = load_state_dict_from_url(
        model_urls["squeezenet" + version], progress=progress
    )
    net.load_state_dict(weights)
    return net
@ModelCreator.register_model
def squeezenet1_0(
    pretrained: bool = False, progress: bool = True, **kwargs: Any
) -> SqueezeNet:
    """
    Constructs the SqueezeNet 1.0 model.

    .. note::
        SqueezeNet model from the `SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size <https://arxiv.org/abs/1602.07360>`_ paper.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> squeezenet1_0 = flowvision.models.squeezenet1_0(pretrained=False, progress=True)
    """
    return _squeezenet("1_0", pretrained=pretrained, progress=progress, **kwargs)
@ModelCreator.register_model
def squeezenet1_1(
    pretrained: bool = False, progress: bool = True, **kwargs: Any
) -> SqueezeNet:
    """
    Constructs the SqueezeNet 1.1 model.

    .. note::
        SqueezeNet 1.1 model from the `SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and <0.5MB model size <https://arxiv.org/abs/1602.07360>`_ paper.
        SqueezeNet 1.1 needs 2.4x less computation and has slightly fewer parameters than SqueezeNet 1.0, without sacrificing accuracy.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> squeezenet1_1 = flowvision.models.squeezenet1_1(pretrained=False, progress=True)
    """
    return _squeezenet("1_1", pretrained=pretrained, progress=progress, **kwargs)
| [
"oneflow.nn.MaxPool2d",
"oneflow.nn.init.kaiming_uniform_",
"oneflow.nn.init.constant_",
"oneflow.flatten",
"oneflow.nn.Conv2d",
"oneflow.nn.Dropout",
"oneflow.nn.init.normal_",
"oneflow.nn.AdaptiveAvgPool2d",
"oneflow.nn.ReLU"
] | [((918, 968), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['inplanes', 'squeeze_planes'], {'kernel_size': '(1)'}), '(inplanes, squeeze_planes, kernel_size=1)\n', (927, 968), True, 'import oneflow.nn as nn\n'), ((1003, 1024), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1010, 1024), True, 'import oneflow.nn as nn\n'), ((1050, 1108), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['squeeze_planes', 'expand1x1_planes'], {'kernel_size': '(1)'}), '(squeeze_planes, expand1x1_planes, kernel_size=1)\n', (1059, 1108), True, 'import oneflow.nn as nn\n'), ((1145, 1166), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1152, 1166), True, 'import oneflow.nn as nn\n'), ((1192, 1261), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['squeeze_planes', 'expand3x3_planes'], {'kernel_size': '(3)', 'padding': '(1)'}), '(squeeze_planes, expand3x3_planes, kernel_size=3, padding=1)\n', (1201, 1261), True, 'import oneflow.nn as nn\n'), ((1320, 1341), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1327, 1341), True, 'import oneflow.nn as nn\n'), ((3752, 3799), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(512)', 'self.num_classes'], {'kernel_size': '(1)'}), '(512, self.num_classes, kernel_size=1)\n', (3761, 3799), True, 'import oneflow.nn as nn\n'), ((4446, 4464), 'oneflow.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (4458, 4464), True, 'import oneflow as flow\n'), ((3853, 3870), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': '(0.5)'}), '(p=0.5)\n', (3863, 3870), True, 'import oneflow.nn as nn\n'), ((3908, 3929), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3915, 3929), True, 'import oneflow.nn as nn\n'), ((3943, 3971), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1, 1)'], {}), '((1, 1))\n', (3963, 3971), True, 'import oneflow.nn as nn\n'), ((1933, 1974), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(3)', '(96)'], {'kernel_size': '(7)', 'stride': '(2)'}), '(3, 96, 
kernel_size=7, stride=2)\n', (1942, 1974), True, 'import oneflow.nn as nn\n'), ((1992, 2013), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1999, 2013), True, 'import oneflow.nn as nn\n'), ((2031, 2084), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (2043, 2084), True, 'import oneflow.nn as nn\n'), ((2220, 2273), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (2232, 2273), True, 'import oneflow.nn as nn\n'), ((2455, 2508), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (2467, 2508), True, 'import oneflow.nn as nn\n'), ((2655, 2696), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(3)', 'stride': '(2)'}), '(3, 64, kernel_size=3, stride=2)\n', (2664, 2696), True, 'import oneflow.nn as nn\n'), ((2714, 2735), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2721, 2735), True, 'import oneflow.nn as nn\n'), ((2753, 2806), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (2765, 2806), True, 'import oneflow.nn as nn\n'), ((2901, 2954), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (2913, 2954), True, 'import oneflow.nn as nn\n'), ((3054, 3107), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)', 'ceil_mode': '(True)'}), '(kernel_size=3, stride=2, ceil_mode=True)\n', (3066, 3107), True, 'import oneflow.nn as nn\n'), ((4114, 4156), 'oneflow.nn.init.normal_', 'init.normal_', (['m.weight'], {'mean': '(0.0)', 'std': 
'(0.01)'}), '(m.weight, mean=0.0, std=0.01)\n', (4126, 4156), True, 'import oneflow.nn.init as init\n'), ((4199, 4230), 'oneflow.nn.init.kaiming_uniform_', 'init.kaiming_uniform_', (['m.weight'], {}), '(m.weight)\n', (4220, 4230), True, 'import oneflow.nn.init as init\n'), ((4290, 4315), 'oneflow.nn.init.constant_', 'init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4304, 4315), True, 'import oneflow.nn.init as init\n')] |
import oneflow as flow
import oneflow.nn as nn
from oneflow import Tensor
from typing import Type, Any, Callable, Union, List, Optional
class ccmp(nn.Module):
    """Cross-channel mean pooling.

    Swaps the channel axis (dim 1) with the width axis (dim 3) so that a
    regular ``nn.AvgPool2d`` pools across channels, then swaps back.

    Args mirror ``nn.AvgPool2d``: kernel_size, stride (defaults to
    kernel_size), padding, ceil_mode, count_include_pad.
    """

    def __init__(self, kernel_size, stride=None, padding=0, ceil_mode=False,
                 count_include_pad=True):
        super(ccmp, self).__init__()
        self.kernel_size = kernel_size
        self.stride = stride or kernel_size
        self.padding = padding
        self.ceil_mode = ceil_mode
        self.count_include_pad = count_include_pad
        # BUG FIX: count_include_pad was accepted but never forwarded to
        # AvgPool2d, so passing count_include_pad=False was silently ignored.
        # (stride=None is forwarded as-is; AvgPool2d then uses kernel_size.)
        self.avgpool2d = nn.AvgPool2d(
            kernel_size, stride, padding, ceil_mode, count_include_pad
        )

    def forward(self, x):
        # (N, C, H, W) -> (N, W, H, C): pool over the (H, C) plane.
        x = x.transpose(3, 1)
        x = self.avgpool2d(x)
        # Restore the original axis order.
        x = x.transpose(3, 1)
        return x
class scloss(nn.Module):
    """Combined loss head: prediction cross-entropy + feature cross-entropy
    + a channel-group diversity term computed by ``loss_div``.

    Args:
        label: target class indices shared by all cross-entropy terms.
        cnum: number of channels per group pooled by ``ccmp`` in loss_div.
        num_classes: output size of the internal fc layer.
    """

    def __init__(self, label, cnum=3, num_classes=8):
        super(scloss, self).__init__()
        self.cnum = cnum
        self.label = label
        self.avgpool = nn.AvgPool2d((7, 7))
        self.fc = nn.Linear(2048, num_classes)
        self.criterion = nn.CrossEntropyLoss()

    def loss_div(self, x):
        """Diversity loss over per-channel spatial softmax maps."""
        b, c = x.size(0), x.size(1)
        branch = flow.reshape(x, shape=[b, c, x.size(2) * x.size(3)])
        branch = flow.softmax(branch, 2)
        # BUG FIX: the reshape back to 4D previously used x.size(2) for BOTH
        # spatial dims, which fails for non-square feature maps; the width
        # must be x.size(3). Behavior is unchanged for square inputs.
        branch = flow.reshape(branch, shape=[b, c, x.size(2), x.size(3)])
        branch = ccmp(kernel_size=(1, self.cnum), stride=(1, self.cnum))(branch)
        branch = flow.reshape(branch, shape=[branch.size(0), branch.size(1), branch.size(2) * branch.size(3)])
        loss_dis = 1.0 - 1.0 * flow.mean(flow.sum(branch, 2)) / self.cnum  # set margin = 3.0
        return loss_dis

    def loss_con(self, feature):
        """Cross-entropy of the pooled-feature logits against the stored label."""
        return self.criterion(feature, self.label)

    def loss_pre(self, fc):
        """Cross-entropy of ``fc`` (presumably the backbone's logits — verify at caller) against the label."""
        return self.criterion(fc, self.label)

    def _forward_impl(self, x: Tensor, y: Tensor) -> Tensor:
        loss_div = self.loss_div(x)
        x = self.avgpool(x)
        x = flow.flatten(x, 1)
        x = self.fc(x)
        loss_con = self.loss_con(x)
        loss_pre = self.loss_pre(y)
        return loss_con + loss_pre + loss_div

    def forward(self, x: Tensor, y: Tensor) -> Tensor:
        return self._forward_impl(x, y)
| [
"oneflow.nn.Linear",
"oneflow.sum",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.AvgPool2d",
"oneflow.flatten",
"oneflow.softmax"
] | [((542, 595), 'oneflow.nn.AvgPool2d', 'nn.AvgPool2d', (['kernel_size', 'stride', 'padding', 'ceil_mode'], {}), '(kernel_size, stride, padding, ceil_mode)\n', (554, 595), True, 'import oneflow.nn as nn\n'), ((925, 945), 'oneflow.nn.AvgPool2d', 'nn.AvgPool2d', (['(7, 7)'], {}), '((7, 7))\n', (937, 945), True, 'import oneflow.nn as nn\n'), ((964, 992), 'oneflow.nn.Linear', 'nn.Linear', (['(2048)', 'num_classes'], {}), '(2048, num_classes)\n', (973, 992), True, 'import oneflow.nn as nn\n'), ((1018, 1039), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1037, 1039), True, 'import oneflow.nn as nn\n'), ((1215, 1238), 'oneflow.softmax', 'flow.softmax', (['branch', '(2)'], {}), '(branch, 2)\n', (1227, 1238), True, 'import oneflow as flow\n'), ((1944, 1962), 'oneflow.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1956, 1962), True, 'import oneflow as flow\n'), ((1572, 1591), 'oneflow.sum', 'flow.sum', (['branch', '(2)'], {}), '(branch, 2)\n', (1580, 1591), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow as flow
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.module import Module
@oneflow_export("nn.CrossEntropyLoss")
@experimental_api
class CrossEntropyLoss(Module):
    r"""This criterion combines :class:`~flow.nn.LogSoftmax` and :class:`~flow.nn.NLLLoss` in one single class.

    It is useful when training a classification problem with `C` classes.
    The `input` is expected to contain raw, unnormalized scores for each class.
    `input` has to be a Tensor of size either :math:`(minibatch, C)` or
    :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \geq 1` for the `K`-dimensional case (described later).
    This criterion expects a class index in the range :math:`[0, C-1]` as the
    `target` for each value of a 1D tensor of size `minibatch`;
    The loss can be described as:

    .. math::
        \text{loss}(x, class) = -\log\left(\frac{\exp(x[class])}{\sum_j \exp(x[j])}\right)
                       = -x[class] + \log\left(\sum_j \exp(x[j])\right)

    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below).

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'mean'``

    For example:

    .. code-block:: python

        import oneflow.experimental as flow
        input = flow.Tensor(
            [[-0.1664078, -1.7256707, -0.14690138],
            [-0.21474946, 0.53737473, 0.99684894],
            [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        target = flow.Tensor(np.array([0, 1, 2]), dtype=flow.int32)
        out = flow.nn.CrossEntropyLoss(reduction="none")(input, target)
        # out: [0.80199665 1.1166505  0.35826027]
        out_sum = flow.nn.CrossEntropyLoss(reduction="sum")(input, target)
        # out_sum: [2.2769074]
        out_mean = flow.nn.CrossEntropyLoss(reduction="mean")(input, target)
        # out_mean: [0.7589692]

    """

    def __init__(
        self,
        weight=None,
        ignore_index: Optional[int] = None,
        reduction: Optional[str] = "mean",
    ) -> None:
        super().__init__()
        if weight is not None:
            raise ValueError("Argument weight is not supported yet")
        if ignore_index is not None:
            raise ValueError("Argument ignore_index is not supported yet")
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "only 'sum', 'mean' and None supported by now"
        self.reduction = reduction
        self._op = (
            flow.builtin_op("sparse_softmax_cross_entropy")
            .Input("prediction")
            .Input("label")
            .Output("prob")
            .Output("out")
            .Build()
        )
        self._transpose_op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )

    def forward(self, input, target):
        assert len(input.shape) <= 4
        assert len(target.shape) == len(input.shape) - 1
        input_shape_len = len(input.shape)
        # 3D/4D inputs are moved to channels-last and flattened to (N', C)
        # so the sparse-softmax op always sees a 2D prediction.
        if input_shape_len == 3:
            b, c, h = input.shape[0], input.shape[1], input.shape[2]
            input = self._transpose_op(input, perm=(0, 2, 1))[0]
            input = input.reshape(shape=[-1, input.shape[2]])
            target = target.flatten()
        elif input_shape_len == 4:
            b, c, h, w = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
            input = self._transpose_op(input, perm=(0, 2, 3, 1))[0]
            input = input.reshape(shape=[-1, input.shape[3]])
            target = target.flatten()
        elif input_shape_len >= 5:
            # Unreachable given the assert above; kept as a defensive guard.
            # BUG FIX: was `raise NotImplemented`, which raises TypeError
            # because NotImplemented is not an exception class.
            raise NotImplementedError
        prob, out = self._op(input, target, depth=input.shape[len(input.shape) - 1])
        if self.reduction == "mean":
            return flow.experimental.mean(out)
        elif self.reduction == "sum":
            return flow.experimental.sum(out)
        else:
            # CONSISTENCY FIX: restore the per-element loss to its spatial
            # shape for 3D inputs too; previously only the 4D path did this,
            # so 3D inputs returned a flattened (b*h,) loss.
            if input_shape_len == 3:
                out = out.reshape((b, h))
            elif input_shape_len == 4:
                out = out.reshape((b, h, w))
            return out
@oneflow_export("nn.NLLLoss")
@experimental_api
class NLLLoss(Module):
    r""" The negative log likelihood loss. It is useful to train a classification
    problem with `C` classes.

    The `input` given through a forward call is expected to contain
    log-probabilities of each class. `input` has to be a Tensor of size either
    :math:`(minibatch, C)` or :math:`(minibatch, C, d_1, d_2, ..., d_K)`
    with :math:`K \geq 1` for the `K`-dimensional case (described later).

    Obtaining log-probabilities in a neural network is easily achieved by
    adding a `LogSoftmax` layer in the last layer of your network.
    You may use `CrossEntropyLoss` instead, if you prefer not to add an extra
    layer.

    The `target` that this loss expects should be a class index in the range :math:`[0, C-1]`
    where `C = number of classes`;

    The unreduced (i.e. with :attr:`reduction` set to ``'none'``) loss can be described as:

    .. math::
        \ell(x, y) = L = \{l_1,\dots,l_N\}^\top, \quad
        l_n = - w_{y_n} x_{n,y_n}, \quad
        w_{c} = \mathbb{1},

    where :math:`x` is the input, :math:`y` is the target, :math:`w` is the weight, and
    :math:`N` is the batch size. If :attr:`reduction` is not ``'none'``
    (default ``'mean'``), then

    .. math::
        \ell(x, y) = \begin{cases}
            \sum_{n=1}^N \frac{1}{N} l_n, &
            \text{if reduction} = \text{`mean';}\\
            \sum_{n=1}^N l_n,  &
            \text{if reduction} = \text{`sum'.}
        \end{cases}

    Can also be used for higher dimension inputs, such as 2D images, by providing
    an input of size :math:`(minibatch, C, d_1, d_2, ..., d_K)` with :math:`K \geq 1`,
    where :math:`K` is the number of dimensions, and a target of appropriate shape
    (see below). In the case of images, it computes NLL loss per-pixel.

    Args:
        reduction (string, optional): Specifies the reduction to apply to the output:
            ``'none'`` | ``'mean'`` | ``'sum'``. ``'none'``: no reduction will
            be applied, ``'mean'``: the weighted mean of the output is taken,
            ``'sum'``: the output will be summed. Default: ``'none'``

    For example:

    .. code-block:: python

        import oneflow.experimental as flow
        import numpy as np

        input = flow.Tensor(
            [[-0.1664078, -1.7256707, -0.14690138],
            [-0.21474946, 0.53737473, 0.99684894],
            [-1.135804, -0.50371903, 0.7645404]], dtype=flow.float32)
        target = flow.Tensor(np.array([0, 1, 2]), dtype=flow.int32)
        out = flow.nn.NLLLoss(reduction="none")(input, target)
        # out: [0.80199665 1.1166505  0.35826027]
        out_sum = flow.nn.NLLLoss(reduction="sum")(input, target)
        # out_sum: [2.2769074]
        out_mean = flow.nn.NLLLoss(reduction="mean")(input, target)
        # out_mean: [0.7589692]

    """

    def __init__(
        self, weight=None, ignore_index: int = None, reduction: str = "none",
    ) -> None:
        super().__init__()
        # BUG FIX: use identity checks for None. `weight != None` would invoke
        # the tensor's elementwise __ne__ when a Tensor is passed, instead of
        # testing whether the argument was supplied.
        if weight is not None:
            raise ValueError("Argument weight is not supported yet")
        if ignore_index is not None:
            raise ValueError("Argument ignore_index is not supported yet")
        assert reduction in [
            "sum",
            "none",
            "mean",
            None,
        ], "only 'sum', 'mean' and None supported by now"
        self.reduction = reduction
        self._dim_gather_op = (
            flow.builtin_op("dim_gather")
            .Input("input")
            .Input("index")
            .Output("output")
            .Attr("dim", 1)
            .Build()
        )
        self._transpose_op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )

    def nllloss_1d(self, input, target):
        """Gather input[i, target[i]] for a (N, C) input; returns shape (N,)."""
        target = flow.experimental.reshape(target, (target.shape[0], 1))
        res = self._dim_gather_op(input, target)[0]
        res = flow.experimental.squeeze(res, dim=[1])
        return res

    def forward(self, input, target):
        assert len(input.shape) <= 4
        assert len(target.shape) == len(input.shape) - 1
        input = input.negative()
        if len(input.shape) == 2:
            res = self.nllloss_1d(input, target)
        elif len(input.shape) == 3:
            b, c, h = input.shape[0], input.shape[1], input.shape[2]
            input = self._transpose_op(input, perm=(0, 2, 1))[0]
            input = input.reshape(shape=[-1, input.shape[2]])
            target = target.flatten()
            res = self.nllloss_1d(input, target)
            res = res.reshape((b, h))
        elif len(input.shape) == 4:
            b, c, h, w = input.shape[0], input.shape[1], input.shape[2], input.shape[3]
            input = self._transpose_op(input, perm=(0, 2, 3, 1))[0]
            input = input.reshape(shape=[-1, input.shape[3]])
            target = target.flatten()
            res = self.nllloss_1d(input, target)
            res = res.reshape((b, h, w))
        else:
            # BUG FIX: was `raise NotImplemented`, which raises TypeError
            # because NotImplemented is not an exception class.
            raise NotImplementedError
        if self.reduction == "none":
            return res
        elif self.reduction == "sum":
            return res.sum()
        else:
            return res.mean()
| [
"oneflow.experimental.squeeze",
"oneflow.experimental.sum",
"oneflow.experimental.mean",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.builtin_op",
"oneflow.experimental.reshape"
] | [((764, 801), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.CrossEntropyLoss"""'], {}), "('nn.CrossEntropyLoss')\n", (778, 801), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((5399, 5427), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.NLLLoss"""'], {}), "('nn.NLLLoss')\n", (5413, 5427), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((9527, 9582), 'oneflow.experimental.reshape', 'flow.experimental.reshape', (['target', '(target.shape[0], 1)'], {}), '(target, (target.shape[0], 1))\n', (9552, 9582), True, 'import oneflow as flow\n'), ((9649, 9688), 'oneflow.experimental.squeeze', 'flow.experimental.squeeze', (['res'], {'dim': '[1]'}), '(res, dim=[1])\n', (9674, 9688), True, 'import oneflow as flow\n'), ((5165, 5192), 'oneflow.experimental.mean', 'flow.experimental.mean', (['out'], {}), '(out)\n', (5187, 5192), True, 'import oneflow as flow\n'), ((5250, 5276), 'oneflow.experimental.sum', 'flow.experimental.sum', (['out'], {}), '(out)\n', (5271, 5276), True, 'import oneflow as flow\n'), ((4073, 4101), 'oneflow.builtin_op', 'flow.builtin_op', (['"""transpose"""'], {}), "('transpose')\n", (4088, 4101), True, 'import oneflow as flow\n'), ((9320, 9348), 'oneflow.builtin_op', 'flow.builtin_op', (['"""transpose"""'], {}), "('transpose')\n", (9335, 9348), True, 'import oneflow as flow\n'), ((3835, 3882), 'oneflow.builtin_op', 'flow.builtin_op', (['"""sparse_softmax_cross_entropy"""'], {}), "('sparse_softmax_cross_entropy')\n", (3850, 3882), True, 'import oneflow as flow\n'), ((9102, 9131), 'oneflow.builtin_op', 'flow.builtin_op', (['"""dim_gather"""'], {}), "('dim_gather')\n", (9117, 9131), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow._oneflow_internal.exception import IndexException
import oneflow.framework.check_point_v2 as check_point_v2
import oneflow.framework.tensor_str as tensor_str_util
import oneflow.ops.initializer_util as initializer_util
import oneflow._oneflow_internal.lazy_mode as lazy_mode
import numpy as np
from typing import Union
Tensor = flow._oneflow_internal.Tensor
TensorTuple = flow._oneflow_internal.TensorTuple
def _tensor_numpy(eager_local_tensor):
    """Materialize an eager local tensor as a numpy ndarray.

    tensor_buffer tensors are unpacked into a list of ndarrays, one per
    contained tensor; ordinary tensors are copied into a freshly allocated
    ndarray of matching shape/dtype.
    """
    if eager_local_tensor.dtype == flow.tensor_buffer:
        shapes, dtypes = eager_local_tensor._tensor_buffer_shapes_and_dtypes
        return [
            t.numpy()
            for t in flow.tensor_buffer_to_list_of_tensors(
                eager_local_tensor, shapes, dtypes
            )
        ]
    copier = getattr(
        eager_local_tensor,
        eager_local_tensor._get_copy_mirrored_tensor_to_numpy_func_name(),
    )
    out = np.empty(
        shape=tuple(eager_local_tensor.shape),
        dtype=flow.convert_oneflow_dtype_to_numpy_dtype(eager_local_tensor.dtype),
    )
    # Nothing to transfer for zero-sized arrays.
    if out.size != 0:
        copier(out)
    return out
def _size(self, idx=None):
    """Return the full shape, or the extent along dimension ``idx``."""
    return self.shape if idx is None else self.shape[idx]
def _ndim(self):
    """Number of dimensions of the tensor."""
    return len(self.shape)
def _nelement(self):
    """Total element count: product of all dimension extents."""
    count = 1
    for extent in self.shape:
        count *= extent
    return count
def _numel(self):
    """Alias of nelement()."""
    return self.nelement()
def _element_size(self):
    """Size in bytes of a single element, taken from the dtype."""
    return self.dtype.bytes
def _backward(self, gradient=None, retain_graph=False, create_graph=False):
    """Run autograd backward from this tensor (eager), or register it as a
    graph loss when lazy (nn.Graph) mode is active."""
    if lazy_mode.is_enabled():
        assert (
            self.is_lazy
        ), "nn.Graph only accept lazy tensor to call backward() in lazy mode."
        flow._oneflow_internal.nn.graph.AddTensorAsGraphLoss(self)
    else:
        flow.autograd.backward(self, gradient, retain_graph, create_graph)
def _getitem(self, key):
    """Return ``self[key]``; translates C++ IndexException into IndexError."""
    try:
        return flow.F.tensor_getitem(self, key)
    except IndexException as e:
        # The stop condition of for in python is IndexError,
        # so we have to catch IndexException from C++ and throw IndexError
        raise IndexError(e)
def _setitem(self, key, value):
    """Assign ``value`` to ``self[key]`` in place and return self."""
    if isinstance(value, (int, float)):
        # Python scalars are wrapped into a one-element constant tensor of
        # this tensor's dtype before invoking the setitem kernel.
        value = flow.F.constant([1], value, self.dtype)
    flow.F.tensor_setitem(self, key, value)
    return self
def _str(self):
    """str(tensor) simply delegates to repr."""
    return self.__repr__()
def _repr(self):
    """Full representation rendered by the tensor_str helper."""
    return tensor_str_util._gen_tensor_str(self)
def _meta_repr(self):
    """Metadata representation, delegated to tensor_str_util._gen_tensor_meta_str."""
    return tensor_str_util._gen_tensor_meta_str(self)
# Thin wrappers bound to Tensor's comparison/arithmetic dunders in
# RegisterMethods(); most delegate to the corresponding tensor method, while
# the reflected variants (_rsub, _rtruediv) swap operand order through the
# module-level flow functions.
def _gt(self, other):
    return self.gt(other)
def _lt(self, other):
    return self.lt(other)
def _ge(self, other):
    return self.ge(other)
def _le(self, other):
    return self.le(other)
def _mul(self, other):
    return self.mul(other)
def _rmul(self, other):
    return self.mul(other)
def _add(self, other):
    return self.add(other)
def _iadd(self, other):
    return self.add_(other)
def _radd(self, other):
    return self.add(other)
def _sub(self, other):
    return self.sub(other)
def _rsub(self, other):
    # Reflected subtraction: other - self.
    return flow.sub(other, self)
def _truediv(self, other):
    return self.div(other)
def _rtruediv(self, other):
    # Reflected division: other / self.
    return flow.div(other, self)
def _neg(self):
    return flow.neg(self)
def _pow(self, b):
    return flow.pow(self, b)
def _uniform_(self, a=0, b=1):
    """In-place init: fill with uniform samples from [a, b] of this dtype."""
    initializer_conf = flow.random_uniform_initializer(
        minval=a, maxval=b, dtype=self.dtype
    )
    return _init_by_initializer_conf(self, initializer_conf)
def _kaiming_uniform(
    self, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
    """In-place Kaiming (He) initialization, uniform distribution."""
    initializer_conf = flow.kaiming_initializer(
        shape=self.shape,
        distribution="random_uniform",
        mode=mode,
        nonlinearity=nonlinearity,
        negative_slope=a,
        data_format=data_format,
    )
    return _init_by_initializer_conf(self, initializer_conf)
def _kaiming_normal(
    self, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
    """In-place Kaiming (He) initialization, normal distribution."""
    initializer_conf = flow.kaiming_initializer(
        shape=self.shape,
        distribution="random_normal",
        mode=mode,
        nonlinearity=nonlinearity,
        negative_slope=a,
        data_format=data_format,
    )
    return _init_by_initializer_conf(self, initializer_conf)
def _xavier_normal(self, gain=1.0, *, data_format="NCHW"):
    """In-place Xavier/Glorot normal initialization (only gain=1.0 supported)."""
    assert gain == 1.0, "Only gain == 1.0 is supported now"
    initializer_conf = flow.xavier_normal_initializer(data_format=data_format)
    return _init_by_initializer_conf(self, initializer_conf)
def _xavier_uniform(self, gain=1.0, *, data_format="NCHW"):
    """In-place Xavier/Glorot uniform initialization (only gain=1.0 supported)."""
    assert gain == 1.0, "Only gain == 1.0 is supported now"
    initializer_conf = flow.xavier_uniform_initializer(data_format=data_format)
    return _init_by_initializer_conf(self, initializer_conf)
def _normal(self, mean=0, std=1):
    """In-place init: fill with normal samples of the given mean/stddev."""
    initializer_conf = flow.random_normal_initializer(mean=mean, stddev=std)
    return _init_by_initializer_conf(self, initializer_conf)
def _fill(self, value):
    """In-place init: fill every element with ``value``."""
    initializer_conf = flow.constant_initializer(value=value, dtype=self.dtype)
    return _init_by_initializer_conf(self, initializer_conf)
def _copy_from_numpy_to_eager_local_tensor(eager_local_tensor, np_arr):
    """Copy ``np_arr`` into ``eager_local_tensor`` in place.

    The ndarray must already match the tensor's dtype and shape exactly.
    """
    method_name = eager_local_tensor._get_copy_mirrored_tensor_from_numpy_func_name()
    copy_from_numpy = getattr(eager_local_tensor, method_name)
    assert np_arr.dtype == flow.convert_oneflow_dtype_to_numpy_dtype(
        eager_local_tensor.dtype
    )
    assert np_arr.shape == tuple(eager_local_tensor.shape)
    copy_from_numpy(np_arr)
def _init_eager_local_tensor_by_initializer_conf(
    eager_local_tensor, initializer_conf, random_seed=None
):
    """Fill an eager local tensor from an initializer configuration.

    A fresh seed is drawn from the default generator when none is supplied.
    """
    if random_seed is None:
        random_seed = flow.default_generator().seed()
    shape = tuple(eager_local_tensor.shape)
    initializer = initializer_util.GetInitializer(initializer_conf, random_seed, shape)
    # initializer is None if and only if the initializer_conf is empty_initializer
    if initializer is None:
        return
    _copy_from_numpy_to_eager_local_tensor(
        eager_local_tensor,
        check_point_v2.generate_values_by_initializer(
            initializer, shape, eager_local_tensor.dtype
        ),
    )
def _init_by_initializer_conf(tensor, initializer_conf):
    """Initialize ``tensor`` (consistent or local) from ``initializer_conf``
    and return it."""
    if tensor.is_consistent:
        # Consistent tensors are initialized under their placement scope.
        with tensor._placement_scope():
            check_point_v2.init_by_initializer_conf(
                tensor, initializer_conf, True, None
            )
    else:
        _init_eager_local_tensor_by_initializer_conf(tensor, initializer_conf)
    return tensor
def _convert_to_placement_scope(placement_or_device):
    """Build a flow.scope.placement from either a placement or a device."""
    if isinstance(placement_or_device, flow.placement):
        placement = placement_or_device
        return flow.scope.placement(
            placement.device_tag,
            list(placement.parallel_conf.device_name()),
            placement.hierarchy,
        )
    else:
        device = placement_or_device
        # TODO(jianhao): replace 0 with real machine id
        machine_id = 0
        # TODO(jianhao): support cuda in of
        if device.type == "cuda":
            # The scope API uses the legacy "gpu" tag for cuda devices.
            device_tag = "gpu"
        else:
            device_tag = device.type
        return flow.scope.placement(
            device_tag, "{}:{}".format(machine_id, device.index), None
        )
def _placement_scope(self):
    """Open a placement scope matching this tensor's placement (consistent)
    or its device (local)."""
    target = self.placement if self.is_consistent else self.device
    return _convert_to_placement_scope(target)
def _copy(self, other: Union[Tensor, np.ndarray]):
    """Copy data from ``other`` (tensor, checkpoint blob, or ndarray) into self."""
    if isinstance(other, (Tensor, check_point_v2.FileBackendVariableBlob)):
        src_np = other.numpy()
    else:
        assert isinstance(other, np.ndarray)
        src_np = other
    _copy_from_numpy_to_eager_local_tensor(self, src_np)
def _get_device(self):
    """Return the CUDA device index; raise for non-GPU tensors."""
    if self.device.type != "cuda":
        raise NotImplementedError("get_device is only available for GPU tensor.")
    return self.device.index
def RegisterMethods():
    """Bind the module-level helper functions as methods/operators on ``Tensor``.

    NOTE(review): the original first bound ``__mul__``/``__rmul__``/``__add__``/
    ``__iadd__`` to ``self.mul``/``self.add`` lambdas and then unconditionally
    rebound them to ``_mul``/``_rmul``/``_add``/``_iadd`` further down, so the
    lambda bindings were dead code; they are removed here.
    """
    Tensor.tolist = lambda self: self.numpy().tolist()
    Tensor.ndim = property(_ndim)
    Tensor.numpy = _tensor_numpy
    Tensor.size = _size
    Tensor.dim = _ndim
    Tensor.ndimension = _ndim
    Tensor.nelement = _nelement
    Tensor.numel = _numel
    Tensor.element_size = _element_size
    Tensor.backward = _backward
    Tensor.__getitem__ = _getitem
    Tensor.__setitem__ = _setitem
    Tensor.__str__ = _str
    Tensor.__repr__ = _repr
    Tensor.__gt__ = _gt
    Tensor.__lt__ = _lt
    Tensor.__ge__ = _ge
    Tensor.__le__ = _le
    Tensor.__mul__ = _mul
    Tensor.__rmul__ = _rmul
    Tensor.__add__ = _add
    Tensor.__iadd__ = _iadd
    Tensor.__radd__ = _radd
    Tensor.__sub__ = _sub
    Tensor.__rsub__ = _rsub
    Tensor.__truediv__ = _truediv
    Tensor.__rtruediv__ = _rtruediv
    Tensor.__neg__ = _neg
    Tensor.__pow__ = _pow
    Tensor.uniform_ = _uniform_
    Tensor.kaiming_uniform_ = _kaiming_uniform
    Tensor.kaiming_normal_ = _kaiming_normal
    Tensor.xavier_normal_ = _xavier_normal
    Tensor.xavier_uniform_ = _xavier_uniform
    Tensor.normal_ = _normal
    Tensor.fill_ = _fill
    Tensor._placement_scope = _placement_scope
    Tensor.copy_ = _copy
    Tensor.get_device = _get_device
    Tensor._meta_repr = _meta_repr
def register_tensor_op(op_name):
    """Decorator factory: register the decorated callable as ``Tensor.<op_name>``
    and return it unchanged."""

    def decorator(method):
        setattr(Tensor, op_name, method)
        return method

    return decorator
def tensor(*args, **kwargs):
    """Construct a tensor by delegating directly to the C++ implementation."""
    return flow._oneflow_internal.tensor(*args, **kwargs)
| [
"oneflow._oneflow_internal.nn.graph.AddTensorAsGraphLoss",
"oneflow.F.constant",
"oneflow.tensor_buffer_to_list_of_tensors",
"oneflow.constant_initializer",
"oneflow.F.tensor_getitem",
"oneflow.framework.tensor_str._gen_tensor_meta_str",
"oneflow.framework.tensor_str._gen_tensor_str",
"oneflow.xavier_... | [((2912, 2951), 'oneflow.F.tensor_setitem', 'flow.F.tensor_setitem', (['self', 'key', 'value'], {}), '(self, key, value)\n', (2933, 2951), True, 'import oneflow as flow\n'), ((3043, 3080), 'oneflow.framework.tensor_str._gen_tensor_str', 'tensor_str_util._gen_tensor_str', (['self'], {}), '(self)\n', (3074, 3080), True, 'import oneflow.framework.tensor_str as tensor_str_util\n'), ((3116, 3158), 'oneflow.framework.tensor_str._gen_tensor_meta_str', 'tensor_str_util._gen_tensor_meta_str', (['self'], {}), '(self)\n', (3152, 3158), True, 'import oneflow.framework.tensor_str as tensor_str_util\n'), ((3712, 3733), 'oneflow.sub', 'flow.sub', (['other', 'self'], {}), '(other, self)\n', (3720, 3733), True, 'import oneflow as flow\n'), ((3831, 3852), 'oneflow.div', 'flow.div', (['other', 'self'], {}), '(other, self)\n', (3839, 3852), True, 'import oneflow as flow\n'), ((3882, 3896), 'oneflow.neg', 'flow.neg', (['self'], {}), '(self)\n', (3890, 3896), True, 'import oneflow as flow\n'), ((3929, 3946), 'oneflow.pow', 'flow.pow', (['self', 'b'], {}), '(self, b)\n', (3937, 3946), True, 'import oneflow as flow\n'), ((4003, 4072), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': 'a', 'maxval': 'b', 'dtype': 'self.dtype'}), '(minval=a, maxval=b, dtype=self.dtype)\n', (4034, 4072), True, 'import oneflow as flow\n'), ((4277, 4440), 'oneflow.kaiming_initializer', 'flow.kaiming_initializer', ([], {'shape': 'self.shape', 'distribution': '"""random_uniform"""', 'mode': 'mode', 'nonlinearity': 'nonlinearity', 'negative_slope': 'a', 'data_format': 'data_format'}), "(shape=self.shape, distribution='random_uniform',\n mode=mode, nonlinearity=nonlinearity, negative_slope=a, data_format=\n data_format)\n", (4301, 4440), True, 'import oneflow as flow\n'), ((4676, 4838), 'oneflow.kaiming_initializer', 'flow.kaiming_initializer', ([], {'shape': 'self.shape', 'distribution': '"""random_normal"""', 'mode': 'mode', 'nonlinearity': 
'nonlinearity', 'negative_slope': 'a', 'data_format': 'data_format'}), "(shape=self.shape, distribution='random_normal',\n mode=mode, nonlinearity=nonlinearity, negative_slope=a, data_format=\n data_format)\n", (4700, 4838), True, 'import oneflow as flow\n'), ((5090, 5145), 'oneflow.xavier_normal_initializer', 'flow.xavier_normal_initializer', ([], {'data_format': 'data_format'}), '(data_format=data_format)\n', (5120, 5145), True, 'import oneflow as flow\n'), ((5352, 5408), 'oneflow.xavier_uniform_initializer', 'flow.xavier_uniform_initializer', ([], {'data_format': 'data_format'}), '(data_format=data_format)\n', (5383, 5408), True, 'import oneflow as flow\n'), ((5529, 5582), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'mean': 'mean', 'stddev': 'std'}), '(mean=mean, stddev=std)\n', (5559, 5582), True, 'import oneflow as flow\n'), ((5693, 5749), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {'value': 'value', 'dtype': 'self.dtype'}), '(value=value, dtype=self.dtype)\n', (5718, 5749), True, 'import oneflow as flow\n'), ((6488, 6557), 'oneflow.ops.initializer_util.GetInitializer', 'initializer_util.GetInitializer', (['initializer_conf', 'random_seed', 'shape'], {}), '(initializer_conf, random_seed, shape)\n', (6519, 6557), True, 'import oneflow.ops.initializer_util as initializer_util\n'), ((10338, 10384), 'oneflow._oneflow_internal.tensor', 'flow._oneflow_internal.tensor', (['*args'], {}), '(*args, **kwargs)\n', (10367, 10384), True, 'import oneflow as flow\n'), ((1227, 1300), 'oneflow.tensor_buffer_to_list_of_tensors', 'flow.tensor_buffer_to_list_of_tensors', (['eager_local_tensor', 'shapes', 'dtypes'], {}), '(eager_local_tensor, shapes, dtypes)\n', (1264, 1300), True, 'import oneflow as flow\n'), ((2201, 2223), 'oneflow._oneflow_internal.lazy_mode.is_enabled', 'lazy_mode.is_enabled', ([], {}), '()\n', (2221, 2223), True, 'import oneflow._oneflow_internal.lazy_mode as lazy_mode\n'), ((2233, 2299), 
'oneflow.autograd.backward', 'flow.autograd.backward', (['self', 'gradient', 'retain_graph', 'create_graph'], {}), '(self, gradient, retain_graph, create_graph)\n', (2255, 2299), True, 'import oneflow as flow\n'), ((2439, 2497), 'oneflow._oneflow_internal.nn.graph.AddTensorAsGraphLoss', 'flow._oneflow_internal.nn.graph.AddTensorAsGraphLoss', (['self'], {}), '(self)\n', (2491, 2497), True, 'import oneflow as flow\n'), ((2549, 2581), 'oneflow.F.tensor_getitem', 'flow.F.tensor_getitem', (['self', 'key'], {}), '(self, key)\n', (2570, 2581), True, 'import oneflow as flow\n'), ((2868, 2907), 'oneflow.F.constant', 'flow.F.constant', (['[1]', 'value', 'self.dtype'], {}), '([1], value, self.dtype)\n', (2883, 2907), True, 'import oneflow as flow\n'), ((6061, 6128), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['eager_local_tensor.dtype'], {}), '(eager_local_tensor.dtype)\n', (6102, 6128), True, 'import oneflow as flow\n'), ((6765, 6860), 'oneflow.framework.check_point_v2.generate_values_by_initializer', 'check_point_v2.generate_values_by_initializer', (['initializer', 'shape', 'eager_local_tensor.dtype'], {}), '(initializer, shape,\n eager_local_tensor.dtype)\n', (6810, 6860), True, 'import oneflow.framework.check_point_v2 as check_point_v2\n'), ((1598, 1665), 'oneflow.convert_oneflow_dtype_to_numpy_dtype', 'flow.convert_oneflow_dtype_to_numpy_dtype', (['eager_local_tensor.dtype'], {}), '(eager_local_tensor.dtype)\n', (1639, 1665), True, 'import oneflow as flow\n'), ((7026, 7103), 'oneflow.framework.check_point_v2.init_by_initializer_conf', 'check_point_v2.init_by_initializer_conf', (['tensor', 'initializer_conf', '(True)', 'None'], {}), '(tensor, initializer_conf, True, None)\n', (7065, 7103), True, 'import oneflow.framework.check_point_v2 as check_point_v2\n'), ((6394, 6418), 'oneflow.default_generator', 'flow.default_generator', ([], {}), '()\n', (6416, 6418), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import numpy as np
from oneflow.compatible.single_client.core.common import data_type_pb2 as data_type_pb2
from oneflow.compatible.single_client.python.oneflow_export import oneflow_export
from oneflow.compatible import single_client as flow
import oneflow._oneflow_internal
# All dtype symbols exposed by the compatible single-client API (returned by
# dtypes() below).  Note flow.float/flow.float32 and flow.double/flow.float64
# are aliases listed separately.
_dtypes = [
    flow.char,
    flow.float,
    flow.float32,
    flow.double,
    flow.float64,
    flow.float16,
    flow.int8,
    flow.int32,
    flow.int64,
    flow.uint8,
    flow.record,
    flow.tensor_buffer,
]
@oneflow_export("dtypes")
def dtypes():
return _dtypes
def convert_proto_dtype_to_oneflow_dtype(proto_dtype):
    """Map a protobuf ``DataType`` value to the corresponding oneflow dtype."""
    return oneflow._oneflow_internal.deprecated.GetDTypeByDataType(proto_dtype)
# Mapping from oneflow dtypes to the numpy dtypes used on the host side.
# flow.record and flow.tensor_buffer have no entry here, so converting them
# raises NotImplementedError (see convert_oneflow_dtype_to_numpy_dtype).
_ONEFLOW_DTYPE_TO_NUMPY_DTYPE = {
    # could be np.ubyte on some platform
    flow.char: np.byte,
    flow.float: np.float32,
    flow.float16: np.float16,
    flow.float32: np.float32,
    flow.float64: np.double,
    flow.double: np.double,
    flow.int8: np.int8,
    flow.int32: np.int32,
    flow.int64: np.int64,
    flow.uint8: np.uint8,
}
@oneflow_export("convert_oneflow_dtype_to_numpy_dtype")
def convert_oneflow_dtype_to_numpy_dtype(oneflow_dtype: flow.dtype):
if oneflow_dtype not in _ONEFLOW_DTYPE_TO_NUMPY_DTYPE:
raise NotImplementedError
return _ONEFLOW_DTYPE_TO_NUMPY_DTYPE[oneflow_dtype]
def convert_numpy_dtype_to_oneflow_dtype(numpy_dtype: np.dtype):
    """Reverse lookup of ``_ONEFLOW_DTYPE_TO_NUMPY_DTYPE``.

    Returns the first oneflow dtype whose numpy mapping equals ``numpy_dtype``;
    raises NotImplementedError when there is none.
    """
    for oneflow_dtype, np_dtype in _ONEFLOW_DTYPE_TO_NUMPY_DTYPE.items():
        if np_dtype == numpy_dtype:
            return oneflow_dtype
    raise NotImplementedError
# Drop module-level aliases that are not part of the public surface.  Both
# names were only needed while the definitions above were being evaluated.
del data_type_pb2
del np
| [
"oneflow.compatible.single_client.python.oneflow_export.oneflow_export"
] | [((1130, 1154), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""dtypes"""'], {}), "('dtypes')\n", (1144, 1154), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n'), ((1678, 1732), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""convert_oneflow_dtype_to_numpy_dtype"""'], {}), "('convert_oneflow_dtype_to_numpy_dtype')\n", (1692, 1732), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_unsqueeze(test_case, device):
    """unsqueeze at dim=1 must agree with numpy's expand_dims."""
    arr = np.random.rand(2, 6, 9, 3)
    tensor_in = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
    result = flow.unsqueeze(tensor_in, dim=1)
    expected = np.expand_dims(arr, axis=1)
    test_case.assertTrue(np.allclose(expected, result.numpy(), 1e-05, 1e-05))
def _test_unsqueeze_tensor_function(test_case, device):
    """Tensor.unsqueeze(dim=2) must agree with numpy's expand_dims."""
    arr = np.random.rand(2, 3, 4)
    tensor_in = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
    result = tensor_in.unsqueeze(dim=2)
    expected = np.expand_dims(arr, axis=2)
    test_case.assertTrue(np.allclose(expected, result.numpy(), 1e-05, 1e-05))
def _test_unsqueeze_different_dim(test_case, device):
    """unsqueeze agrees with numpy for every legal (also negative) axis."""
    arr = np.random.rand(4, 5, 6, 7)
    tensor_in = flow.tensor(arr, dtype=flow.float32, device=flow.device(device))
    for dim in range(-5, 5):
        result = flow.unsqueeze(tensor_in, dim=dim)
        expected = np.expand_dims(arr, axis=dim)
        test_case.assertTrue(np.allclose(expected, result.numpy(), 1e-05, 1e-05))
def _test_unsqueeze_backward(test_case, device):
    """Gradient of unsqueeze(...).sum() w.r.t. the input is all ones."""
    arr = np.random.rand(2, 3, 4, 5)
    tensor_in = flow.tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    flow.unsqueeze(tensor_in, dim=1).sum().backward()
    grad = tensor_in.grad.numpy()
    test_case.assertTrue(np.allclose(grad, np.ones((2, 3, 4, 5)), 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestUnsqueeze(flow.unittest.TestCase):
    """Tests for flow.unsqueeze / Tensor.unsqueeze on one node, one device.

    oneflow's test convention names the instance parameter ``test_case``
    instead of ``self``.
    """

    def test_unsqueeze(test_case):
        # Run every manual helper over both device kinds.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_unsqueeze,
            _test_unsqueeze_tensor_function,
            _test_unsqueeze_different_dim,
            _test_unsqueeze_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    # autotest traces the body and compares oneflow with pytorch results.
    @autotest(check_graph=True)
    def test_flow_unsqueeze_with_random_data(test_case):
        device = random_device()
        x = random_tensor().to(device)
        y = torch.unsqueeze(x, random(1, 3).to(int))
        return y
    # Zero-sized dimension: forward only (auto_backward=False).
    @autotest(auto_backward=False, check_graph=True)
    def test_unsqueeze_with_0_size_data(test_case):
        device = random_device()
        x = random_tensor(3, 2, 1, 0).to(device)
        y = torch.unsqueeze(x, random(0, 2).to(int))
        return y
    # Bool tensors carry no gradient, hence auto_backward=False.
    @autotest(auto_backward=False, check_graph=True)
    def test_flow_unsqueeze_bool_with_random_data(test_case):
        device = random_device()
        x = random_tensor().to(device=device, dtype=torch.bool)
        y = torch.unsqueeze(x, random(1, 3).to(int))
        return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.unsqueeze",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.device"
] | [((2169, 2201), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2199, 2201), True, 'import oneflow as flow\n'), ((872, 898), 'numpy.random.rand', 'np.random.rand', (['(2)', '(6)', '(9)', '(3)'], {}), '(2, 6, 9, 3)\n', (886, 898), True, 'import numpy as np\n'), ((983, 1007), 'oneflow.unsqueeze', 'flow.unsqueeze', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (997, 1007), True, 'import oneflow as flow\n'), ((1021, 1051), 'numpy.expand_dims', 'np.expand_dims', (['np_arr'], {'axis': '(1)'}), '(np_arr, axis=1)\n', (1035, 1051), True, 'import numpy as np\n'), ((1194, 1217), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (1208, 1217), True, 'import numpy as np\n'), ((1334, 1364), 'numpy.expand_dims', 'np.expand_dims', (['np_arr'], {'axis': '(2)'}), '(np_arr, axis=2)\n', (1348, 1364), True, 'import numpy as np\n'), ((1505, 1531), 'numpy.random.rand', 'np.random.rand', (['(4)', '(5)', '(6)', '(7)'], {}), '(4, 5, 6, 7)\n', (1519, 1531), True, 'import numpy as np\n'), ((1868, 1894), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (1882, 1894), True, 'import numpy as np\n'), ((3445, 3460), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3458, 3460), False, 'import unittest\n'), ((1650, 1677), 'oneflow.unsqueeze', 'flow.unsqueeze', (['x'], {'dim': 'axis'}), '(x, dim=axis)\n', (1664, 1677), True, 'import oneflow as flow\n'), ((1695, 1728), 'numpy.expand_dims', 'np.expand_dims', (['np_arr'], {'axis': 'axis'}), '(np_arr, axis=axis)\n', (1709, 1728), True, 'import numpy as np\n'), ((2301, 2314), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2312, 2314), False, 'from collections import OrderedDict\n'), ((2577, 2597), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2587, 2597), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((954, 973), 'oneflow.device', 'flow.device', (['device'], 
{}), '(device)\n', (965, 973), True, 'import oneflow as flow\n'), ((1273, 1292), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1284, 1292), True, 'import oneflow as flow\n'), ((1587, 1606), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1598, 1606), True, 'import oneflow as flow\n'), ((1959, 1978), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1970, 1978), True, 'import oneflow as flow\n'), ((2013, 2037), 'oneflow.unsqueeze', 'flow.unsqueeze', (['x'], {'dim': '(1)'}), '(x, dim=1)\n', (2027, 2037), True, 'import oneflow as flow\n'), ((2123, 2144), 'numpy.ones', 'np.ones', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (2130, 2144), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re
from contextlib import contextmanager
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb
import oneflow.core.job.placement_pb2 as placement_pb
import oneflow.core.job.job_conf_pb2 as job_conf_pb
import oneflow.core.job.scope_pb2 as scope_pb
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb
import oneflow.core.register.blob_desc_pb2 as blob_desc_pb
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow.python.eager.object as object_util
import oneflow.python.eager.object_storage as object_storage
import oneflow.python.eager.symbol as symbol_util
import oneflow.python.eager.symbol_storage as symbol_storage
import oneflow.python.framework.balanced_splitter as balanced_splitter
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.scope_symbol as scope_symbol
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.op_arg_util as op_arg_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.python_callback as python_callback
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.eager.opkernel_object import OpKernelObject
import oneflow.python.vm.id_util as vm_id_util
import oneflow
import oneflow_api.oneflow.core.vm.instruction as instr_cfg
import oneflow_api.oneflow.core.job.placement as placement_cfg
from google.protobuf import text_format
# Module-level alias to the bundled C extension.
oneflow_api = oneflow.oneflow_api
def PhysicalRun(build):
    """Run ``build`` with an InstructionsBuilder using physical ids, then
    submit the collected instructions via the physical C API entry point."""
    return _Run(
        build,
        vm_id_util.PhysicalIdGenerator(),
        c_api_util.RunPhysicalInstruction,
        _ReleasePhysicalObject,
    )
def LogicalRun(build):
    """Run ``build`` with an InstructionsBuilder using logical ids, then
    submit the collected instructions via the logical C API entry point."""
    return _Run(
        build,
        vm_id_util.LogicalIdGenerator(),
        c_api_util.RunLogicalInstruction,
        _ReleaseLogicalObject,
    )
def _Run(build, id_generator, run_api, release_object):
    """Collect instructions via ``build`` and submit them through ``run_api``.

    The default session's instruction list and eager-symbol list serve as
    scratch buffers; both are cleared after submission.
    """
    instruction_list = session_ctx.GetDefaultSession().instruction_list
    eager_symbol_list = session_ctx.GetDefaultSession().eager_symbol_list
    build(
        InstructionsBuilder(
            id_generator, release_object, instruction_list, eager_symbol_list
        )
    )
    run_api(instruction_list, eager_symbol_list)
    instruction_list.clear_instruction()
    eager_symbol_list.ClearField("eager_symbol")
def _DefaultBlobObject4Ibn(ibn):
    """Placeholder ibn->blob-object resolver; always raises."""
    raise NotImplementedError
class InstructionsBuilder(object):
    """Accumulates VM instructions and eager symbols for one _Run submission."""

    def __init__(
        self, id_generator, release_object, instruction_list, eager_symbol_list
    ):
        # id_generator: logical/physical id allocator (see vm_id_util).
        # release_object: callback attached to produced objects for release.
        self.id_generator_ = id_generator
        self.release_object_ = release_object
        assert isinstance(instruction_list, instr_cfg.InstructionListProto)
        assert isinstance(eager_symbol_list, eager_symbol_pb.EagerSymbolList)
        self.instruction_list_ = instruction_list
        self.eager_symbol_list_ = eager_symbol_list
def StatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def FetchDelegateBlobObject(x_blob_object, op_arg_parallel_attr):
return boxing_util.BoxingTo(self, x_blob_object, op_arg_parallel_attr)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
)
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
def NoBoxingStatelessCall(
self, op_attribute, parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def FetchDelegateBlobObject(blob_object, op_arg_parallel_attr):
from_pd = blob_object.parallel_desc_symbol
to_pd = op_arg_parallel_attr.parallel_desc_symbol
if from_pd == to_pd:
return blob_object
assert from_pd.device_tag == "cpu"
assert to_pd.device_tag == "cpu"
assert from_pd.parallel_num == to_pd.parallel_num
from_machine_ids = from_pd.machine_id2device_id_list.keys()
to_machine_ids = to_pd.machine_id2device_id_list.keys()
if (
len(from_pd.machine_id2device_id_list) == from_pd.parallel_num
and from_machine_ids == to_machine_ids
):
return self.BroadcastBlobReference(blob_object, to_pd)
return self.Build121To(blob_object, to_pd)
def GetDirectOr121BlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
)
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectOr121BlobObject,
)
def NoBoxingCudaD2HStatelessCall(
self, op_attribute, in_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(in_parallel_conf)
blob_parallel_desc_sym = boxing_util.TryReplaceDeviceTag(
self, op_parallel_desc_sym, "cpu"
)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_d2h",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=blob_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def NoBoxingCudaH2DStatelessCall(
self, op_attribute, out_parallel_conf, bn_in_op2blob_object={}
):
op_parallel_desc_sym = self.GetParallelDescSymbol(out_parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"copy_h2d",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def RawStatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object={}):
op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
return blob_object
self._StatelessCall(
"compute",
op_attribute,
op_parallel_desc_sym=op_parallel_desc_sym,
blob_parallel_desc_sym=op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDirectBlobObject,
)
def StatefulCall(self, op_attribute, opkernel_object, bn_in_op2blob_object={}):
op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
parallel_sig = op_attribute.parallel_signature
assert parallel_sig.HasField("op_parallel_desc_symbol_id")
assert op_parallel_desc_sym.symbol_id == parallel_sig.op_parallel_desc_symbol_id
self._CheckRefInBlobObjectParallelDesc(
op_attribute,
op_parallel_desc_sym,
bn_in_op2blob_object=bn_in_op2blob_object,
)
def FetchDelegateBlobObject(x_blob_object, op_arg_parallel_attr):
return boxing_util.BoxingTo(self, x_blob_object, op_arg_parallel_attr)
def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
return _FindOrCreateDelegateBlobObject(
self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
)
self._StatefulCall(
op_attribute,
opkernel_object=opkernel_object,
bn_in_op2blob_object=bn_in_op2blob_object,
get_delegate_blob_object=GetDelegateBlobObject,
)
    def DeleteObject(self, obj):
        """Try to clear the object's contents, then emit its delete instruction."""
        self._TryClearObject(obj)
        self._DeleteObject(obj)
    def InsertRemoveForeignCallbackInstruction(self, object_id, callback):
        """Queue a RemoveForeignCallback instruction for ``object_id``.

        ``callback`` is registered with the python-callback registry and
        referenced in the instruction operands by its unique id.
        """
        unique_callback_id = python_callback.GetIdForRegisteredCallback(callback)
        instruction = instr_cfg.InstructionProto()
        instruction.set_instr_type_name("RemoveForeignCallback")
        instruction.mutable_operand().Add().CopyFrom(_DelObjectOperand(object_id))
        instruction.mutable_operand().Add().CopyFrom(_Int64Operand(unique_callback_id))
        self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    def FetchBlobHeader(self, blob_object, callback):
        """Emit a FetchBlobHeader instruction; ``callback`` receives the result."""
        return self._FetchBlob("FetchBlobHeader", blob_object, callback)
    def FetchBlobBody(self, blob_object, callback):
        """Emit a FetchBlobBody instruction; ``callback`` receives the result."""
        return self._FetchBlob("FetchBlobBody", blob_object, callback)
    def PackPhysicalBlobsToLogicalBlob(
        self, physical_blob_objects, op_arg_parallel_attr, op_arg_blob_attr
    ):
        """Merge per-device blob objects into one logical blob object.

        Each physical blob must live on exactly one device with the expected
        device tag, and together they must cover every (machine, device) pair
        of the target placement.
        """
        parallel_desc_symbol = op_arg_parallel_attr.parallel_desc_symbol
        machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
        device_tag = parallel_desc_symbol.parallel_conf.device_tag()
        # Collect the (machine, device) pair of every physical blob.
        machine_device_ids = set()
        for physical_blob_object in physical_blob_objects:
            phy_paralle_desc_sym = physical_blob_object.parallel_desc_symbol
            assert (
                phy_paralle_desc_sym.parallel_num == 1
            ), phy_paralle_desc_sym.parallel_num
            assert phy_paralle_desc_sym.device_tag == device_tag, "%s v.s. %s" % (
                phy_paralle_desc_sym.device_tag,
                device_tag,
            )
            phy_machine_id2device_ids = phy_paralle_desc_sym.machine_id2device_id_list
            machine_id = list(phy_machine_id2device_ids.keys())[0]
            pair = (machine_id, phy_machine_id2device_ids[machine_id][0])
            machine_device_ids.add(pair)
        # Every device of the target placement must be covered.
        for machine_id, device_ids in machine_id2device_ids.items():
            for device_id in device_ids:
                assert (machine_id, device_id) in machine_device_ids, "%s not in %s" % (
                    (machine_id, device_id),
                    machine_device_ids,
                )
        logical_blob_object = self._NewBlobObject(
            op_arg_parallel_attr, op_arg_blob_attr
        )
        self._ReplaceMirrored(
            op_arg_parallel_attr.parallel_desc_symbol,
            [logical_blob_object],
            physical_blob_objects,
        )
        return logical_blob_object
def GetPhysicalParallelDescSymbols(self, parallel_desc_symbol):
machine_id2device_ids = parallel_desc_symbol.machine_id2device_id_list
device_tag = parallel_desc_symbol.parallel_conf.device_tag()
phy_parallel_desc_symbols = []
def AppendPhyParallelDescSymbol(machine_id, device_id):
parallel_conf = placement_cfg.ParallelConf()
parallel_conf.set_device_tag(device_tag)
parallel_conf.add_device_name("%d:%d" % (machine_id, device_id))
phy_parallel_desc_symbols.append(self.GetParallelDescSymbol(parallel_conf))
for machine_id, device_ids in machine_id2device_ids.items():
for device_id in device_ids:
AppendPhyParallelDescSymbol(machine_id, device_id)
return phy_parallel_desc_symbols
    def _GetPhysicalOpArgBlobAttrs(self, logical_blob_object):
        """Return a per-rank list of blob attrs for ``logical_blob_object``.

        With split sbp each rank gets its balanced share of the split axis;
        otherwise the logical attr is reused for every rank.
        """
        parallel_num = logical_blob_object.parallel_desc_symbol.parallel_num
        logical_blob_attr = logical_blob_object.op_arg_blob_attr
        sbp_parallel = logical_blob_object.op_arg_parallel_attr.sbp_parallel
        def GetSplittedBlobAttr(
            logical_blob_attr, split_axis, parallel_num, parallel_id
        ):
            # Copy the logical blob desc and shrink the split axis to this
            # rank's balanced chunk length.
            blob_desc = blob_desc_pb.BlobDescProto()
            blob_desc.CopyFrom(logical_blob_attr.blob_desc)
            physical_len = balanced_splitter.BalancedPartNums(
                logical_blob_attr.shape[split_axis], parallel_num
            )[parallel_id]
            blob_desc.body.shape.dim[split_axis] = physical_len
            physical_blob_attr = op_arg_util.OpArgBlobAttribute(
                logical_blob_attr.batch_axis,
                blob_desc,
                logical_blob_attr.logical_blob_name,
            )
            return physical_blob_attr
        if sbp_parallel.HasField("split_parallel"):
            split_axis = sbp_parallel.split_parallel.axis
            return [
                GetSplittedBlobAttr(logical_blob_attr, split_axis, parallel_num, i)
                for i in range(parallel_num)
            ]
        else:
            return [logical_blob_attr] * parallel_num
def UnpackLogicalBlobToPhysicalBlobs(self, blob_object):
phy_parallel_desc_symbols = self.GetPhysicalParallelDescSymbols(
blob_object.parallel_desc_symbol
)
phy_op_arg_blob_attrs = self._GetPhysicalOpArgBlobAttrs(blob_object)
def GetPhysicalBlob(parallel_desc_sym, blob_attr):
op_arg_parallel_attr = op_arg_util.MakeMirroredOpArgParallelAttribute(
parallel_desc_sym
)
pyhsical_blob_object = self._NewBlobObject(op_arg_parallel_attr, blob_attr)
return pyhsical_blob_object
physical_blob_objects = [
GetPhysicalBlob(phy_parallel_desc_symbols[i], phy_op_arg_blob_attrs[i])
for i in range(len(phy_parallel_desc_symbols))
]
self._ReplaceMirrored(
blob_object.parallel_desc_symbol, physical_blob_objects, [blob_object]
)
return physical_blob_objects
def MakeReferenceBlobObject(self, blob_object, op_arg_parallel_attr):
parallel_desc_symbol = blob_object.parallel_desc_symbol
assert parallel_desc_symbol == op_arg_parallel_attr.parallel_desc_symbol
ref_blob_object = self._NewBlobObject(
op_arg_parallel_attr, blob_object.op_arg_blob_attr
)
self._ReplaceMirrored(parallel_desc_symbol, [ref_blob_object], [blob_object])
return ref_blob_object
def MakeLazyRefBlobObject(self, interface_op_name):
sess = session_ctx.GetDefaultSession()
op_attribute = sess.OpAttribute4InterfaceOpName(interface_op_name)
assert len(op_attribute.output_bns) == 1
obn = op_attribute.output_bns[0]
parallel_conf = sess.ParallelConf4LazyInterfaceOpName(interface_op_name)
blob_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
blob_parallel_desc_sym, op_attribute, obn
)
op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
blob_object = self._NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
self._LazyReference(blob_object, interface_op_name)
return blob_object
def GetSymbol4String(self, string):
if symbol_storage.HasSymbol4String(string):
return symbol_storage.GetSymbol4String(string)
symbol_id = self._NewSymbolId4String(string)
symbol = symbol_util.Symbol(symbol_id, string)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4String(string, symbol)
return symbol
def GetJobConfSymbol(self, job_conf):
if symbol_storage.HasSymbol4JobConf(job_conf):
return symbol_storage.GetSymbol4JobConf(job_conf)
symbol_id = self._NewSymbolId4JobConf(job_conf)
symbol = symbol_util.Symbol(symbol_id, job_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4JobConf(job_conf, symbol)
return symbol
def GetParallelDescSymbol(self, parallel_conf):
# parallel_conf is cfg
serialized_parallel_conf = str(parallel_conf)
if symbol_storage.HasSymbol4SerializedParallelConf(serialized_parallel_conf):
return symbol_storage.GetSymbol4SerializedParallelConf(
serialized_parallel_conf
)
symbol_id = self._NewSymbolId4ParallelConf(parallel_conf)
symbol = symbol_util.ParallelDescSymbol(symbol_id, parallel_conf)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedParallelConf(
serialized_parallel_conf, symbol
)
return symbol
def GetScopeSymbol(self, scope_proto, parent_scope_symbol=None):
symbol_id = self._NewSymbolId4Scope(scope_proto)
serialized_scope_proto = str(scope_proto)
if symbol_storage.HasSymbol4SerializedScopeProto(serialized_scope_proto):
return symbol_storage.GetSymbol4SerializedScopeProto(serialized_scope_proto)
symbol = scope_symbol.ScopeSymbol(symbol_id, scope_proto, parent_scope_symbol)
symbol_storage.SetSymbol4Id(symbol_id, symbol)
symbol_storage.SetSymbol4SerializedScopeProto(serialized_scope_proto, symbol)
return symbol
def GetSharedOpKernelObject4ParallelConfSymbol(self, parallel_desc_sym):
    """Return the shared op-kernel Object for ``parallel_desc_sym``, creating
    and caching one on miss."""
    if not object_storage.HasSharedOpKernelObject4ParallelConfSymbol(
        parallel_desc_sym
    ):
        new_object_id = self._NewSharedOpKernelObjectId4ParallelConfSymbolId(
            parallel_desc_sym
        )
        shared_obj = object_util.Object(new_object_id, parallel_desc_sym)
        object_storage.SetSharedOpKernelObject4ParallelConfSymbol(
            parallel_desc_sym, shared_obj
        )
    return object_storage.GetSharedOpKernelObject4ParallelConfSymbol(
        parallel_desc_sym
    )
@contextmanager
def CudaHostPinBlob(self, blob_object):
    """Context manager: cuda-host-register ``blob_object`` on entry and
    unregister it on exit, even when the body raises."""
    self._CudaHostRegisterBlob(blob_object)
    try:
        yield
    finally:
        self._CudaHostUnregisterBlob(blob_object)
def BroadcastBlobReference(self, sole_mirrored_blob_object, parallel_desc_sym):
    """Create a broadcast BlobObject over ``parallel_desc_sym`` that references
    ``sole_mirrored_blob_object``'s data; the source must occupy exactly one
    device per machine."""
    device_ids = (
        sole_mirrored_blob_object.parallel_desc_symbol.machine_id2device_id_list
    )
    # "sole mirrored" precondition: one device per machine
    for _, dev_ids in device_ids.items():
        assert len(dev_ids) == 1, "dev_ids: %s" % dev_ids
    object_id = self._BroadcastObjectReference(
        sole_mirrored_blob_object, parallel_desc_sym
    )
    op_arg_parallel_attr = op_arg_util.MakeBroadcastOpArgParallelAttribute(
        parallel_desc_sym
    )
    # the broadcast object shares the source's blob attribute
    return object_util.BlobObject(
        object_id=object_id,
        op_arg_parallel_attr=op_arg_parallel_attr,
        op_arg_blob_attr=sole_mirrored_blob_object.op_arg_blob_attr,
        release=self.release_object_,
    )
def NewOpKernelObject(self, op_conf):
    """Build an OpKernelObject for ``op_conf`` (which must carry a
    scope_symbol_id), emitting the InitOpKernelObject instruction."""
    assert op_conf.HasField("scope_symbol_id")
    scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
    op_conf_sym = self._GetOpConfSymbol(op_conf)
    # the op's parallel desc is inferred on the C++ side from op_conf
    parallel_desc_sym_id = c_api_util.GetOpParallelSymbolId(op_conf)
    parallel_desc_symbol = symbol_storage.GetSymbol4Id(parallel_desc_sym_id)
    object_id = self._NewOpKernelObject(
        parallel_desc_symbol, scope_symbol.job_desc_symbol, op_conf_sym
    )
    return OpKernelObject(object_id, op_conf, self.release_object_)
def Build121To(self, blob_object, parallel_desc_symbol):
    """Copy ``blob_object`` one-to-one onto ``parallel_desc_symbol``: make a
    like-shaped blob object there and emit the 1:1 assign instructions."""
    dst_blob_object = _MakeNewBlobObjectLike(
        self, blob_object, parallel_desc_symbol
    )
    self.Build121AssignInstruction(dst_blob_object, blob_object)
    return dst_blob_object
def Build121AssignInstruction(self, ref_blob_object, value_blob_object):
    """Emit paired Send/Receive instructions that assign ``value_blob_object``
    into ``ref_blob_object`` one-to-one (both must have the same parallel_num)."""
    parallel_num = ref_blob_object.parallel_desc_symbol.parallel_num
    assert parallel_num == value_blob_object.parallel_desc_symbol.parallel_num
    # two fresh token-id lists, one per direction; the same pair is handed to
    # both the send and the recv side so they can rendezvous
    token_ids = (
        [oneflow_api.NewTokenId() for _ in range(parallel_num)],
        [oneflow_api.NewTokenId() for _ in range(parallel_num)],
    )
    self._BuildSendInstruction(
        ref_blob_object.parallel_desc_symbol, value_blob_object, token_ids
    )
    self._BuildRecvInstruction(
        value_blob_object.parallel_desc_symbol, ref_blob_object, token_ids
    )
def _BuildSendInstruction(
    self, dst_parallel_desc_symbol, src_blob_object, token_ids
):
    """Append a SendBlob instruction (runs on the source's parallel desc).

    Operand layout: dst parallel-desc symbol, const src blob, separator,
    token_ids[0]..., separator, token_ids[1]... — this order is part of the
    instruction's wire contract and must match _BuildRecvInstruction.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("SendBlob")
    instruction.set_parallel_desc_symbol_id(
        src_blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(dst_parallel_desc_symbol.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _ConstOperand(src_blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for token_id in token_ids[0]:
        instruction.mutable_operand().Add().CopyFrom(_Uint64Operand(token_id))
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for token_id in token_ids[1]:
        instruction.mutable_operand().Add().CopyFrom(_Uint64Operand(token_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _BuildRecvInstruction(
    self, src_parallel_desc_symbol, dst_blob_object, token_ids
):
    """Append a ReceiveBlob instruction (runs on the destination's parallel desc).

    Operand layout mirrors _BuildSendInstruction: src parallel-desc symbol,
    mut2 dst blob, separator, token_ids[0]..., separator, token_ids[1]... .
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("ReceiveBlob")
    instruction.set_parallel_desc_symbol_id(
        dst_blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(src_parallel_desc_symbol.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _Mut2Operand(dst_blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for token_id in token_ids[0]:
        instruction.mutable_operand().Add().CopyFrom(_Uint64Operand(token_id))
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for token_id in token_ids[1]:
        instruction.mutable_operand().Add().CopyFrom(_Uint64Operand(token_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _NewOpKernelObject(self, parallel_desc_symbol, job_desc_sym, op_conf_sym):
    """Allocate an object id on ``parallel_desc_symbol`` and emit an
    InitOpKernelObject instruction for it; returns the new object id."""
    object_id = self._NewObjectId(parallel_desc_symbol)
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitOpKernelObject")
    instruction.set_parallel_desc_symbol_id(parallel_desc_symbol.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(job_desc_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(op_conf_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(_MutOperand(object_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    return object_id
def _StatelessCall(
    self,
    stream_tag,
    op_attribute,
    op_parallel_desc_sym=None,
    blob_parallel_desc_sym=None,
    bn_in_op2blob_object=None,
    get_delegate_blob_object=None,
):
    """Emit a ``<stream_tag>.[User|System]StatelessCallOpKernel`` instruction
    for ``op_attribute``.

    ``bn_in_op2blob_object`` maps blob-name-in-op -> BlobObject; inputs are
    read from it (through ``get_delegate_blob_object``) and newly created
    output blob objects are written back into it by the _GetMut* helpers.

    Fix: the mutable default ``bn_in_op2blob_object={}`` was a single dict
    shared across every default-argument call, so outputs leaked between
    calls; it is now a ``None`` sentinel expanded to a fresh dict per call.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    assert callable(get_delegate_blob_object)
    # prefer the parallel desc recorded in the op's parallel signature
    if op_attribute.parallel_signature.HasField("op_parallel_desc_symbol_id"):
        symbol_id = op_attribute.parallel_signature.op_parallel_desc_symbol_id
        op_parallel_desc_sym = symbol_storage.GetSymbol4Id(symbol_id)
    assert op_parallel_desc_sym is not None

    def DelegateBlobObject4Ibn(ibn):
        # route each input through the delegate so its placement matches the
        # parallel attribute the op expects for that input
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            op_parallel_desc_sym, op_attribute, ibn
        )
        return get_delegate_blob_object(
            bn_in_op2blob_object[ibn], op_arg_parallel_attr
        )

    op_conf = op_attribute.op_conf
    assert op_conf.HasField("scope_symbol_id"), op_conf
    scope_symbol = symbol_storage.GetSymbol4Id(op_conf.scope_symbol_id)
    job_desc_sym = scope_symbol.job_desc_symbol
    op_conf_sym = self._GetOpConfSymbol(op_conf)
    op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
    opkernel_obj = self.GetSharedOpKernelObject4ParallelConfSymbol(
        op_parallel_desc_sym
    )
    const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
        op_attribute,
        blob_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
    mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
        op_attribute,
        blob_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
    is_user_op = op_attribute.op_conf.HasField("user_conf")
    instruction_prefix = "User" if is_user_op else "System"
    self._StatelessCallOpKernel(
        "%s.%sStatelessCallOpKernel" % (stream_tag, instruction_prefix),
        op_parallel_desc_sym,
        job_desc_sym,
        op_conf_sym,
        op_node_signature_sym,
        opkernel_obj,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    )
def _StatefulCall(
    self,
    op_attribute,
    opkernel_object,
    bn_in_op2blob_object,
    get_delegate_blob_object,
):
    """Emit a CallOpKernel instruction that runs ``opkernel_object``'s kernel
    for ``op_attribute``; only user ops are supported here.

    ``bn_in_op2blob_object`` maps blob-name-in-op -> BlobObject; inputs are
    read from it (through ``get_delegate_blob_object``) and output blob
    objects are written back into it by the _GetMut* helpers.
    """
    op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
    def DelegateBlobObject4Ibn(ibn):
        # route each input through the delegate so its placement matches the
        # parallel attribute the op expects for that input
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            op_parallel_desc_sym, op_attribute, ibn
        )
        return get_delegate_blob_object(
            bn_in_op2blob_object[ibn], op_arg_parallel_attr
        )
    op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
    const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
        op_attribute,
        op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
    mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
        op_attribute,
        op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
    )
    is_user_op = op_attribute.op_conf.HasField("user_conf")
    assert is_user_op
    # NOTE: given the assert above, the prefix is always "" here; the "System"
    # branch mirrors _StatelessCall's naming scheme
    instruction_prefix = "" if is_user_op else "System"
    self._StatefulCallOpKernel(
        "%sCallOpKernel" % instruction_prefix,
        op_parallel_desc_sym,
        opkernel_object,
        op_node_signature_sym,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    )
def _CudaHostRegisterBlob(self, blob_object):
    """Append a CudaHostRegisterBlob instruction pinning ``blob_object``'s
    host memory for CUDA."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("CudaHostRegisterBlob")
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(_MutOperand(blob_object.object_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _CudaHostUnregisterBlob(self, blob_object):
    """Append a CudaHostUnregisterBlob instruction undoing
    _CudaHostRegisterBlob for ``blob_object``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("CudaHostUnregisterBlob")
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(_MutOperand(blob_object.object_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _GetOpConfSymbol(self, op_conf):
    """Return the Symbol interned for ``op_conf``, keyed by its serialized
    bytes; creates and caches one on miss."""
    serialized = op_conf.SerializeToString()
    if not symbol_storage.HasSymbol4SerializedOpConf(serialized):
        new_symbol_id = self._NewSymbolId4OpConf(op_conf)
        new_symbol = symbol_util.Symbol(new_symbol_id, op_conf)
        symbol_storage.SetSymbol4Id(new_symbol_id, new_symbol)
        symbol_storage.SetSymbol4SerializedOpConf(serialized, new_symbol)
    return symbol_storage.GetSymbol4SerializedOpConf(serialized)
def _GetOpNodeSignatureSymbol(self, op_attribute):
    """Build an OpNodeSignature from the relevant sub-signatures of
    ``op_attribute`` and return the Symbol interned for it (keyed by its
    serialized bytes); creates and caches one on miss."""
    new_op_node_signature = op_attribute_pb.OpNodeSignature()
    new_op_node_signature.sbp_signature.CopyFrom(op_attribute.sbp_signature)
    new_op_node_signature.mirrored_signature.CopyFrom(
        op_attribute.mirrored_signature
    )
    new_op_node_signature.logical_blob_desc_signature.CopyFrom(
        op_attribute.logical_blob_desc_signature
    )
    new_op_node_signature.batch_axis_signature.CopyFrom(
        op_attribute.batch_axis_signature
    )
    new_op_node_signature.parallel_signature.CopyFrom(
        op_attribute.parallel_signature
    )
    serialized_op_node_signature = new_op_node_signature.SerializeToString()
    if symbol_storage.HasSymbol4SerializedOpNodeSignature(
        serialized_op_node_signature
    ):
        return symbol_storage.GetSymbol4SerializedOpNodeSignature(
            serialized_op_node_signature
        )
    symbol_id = self._NewSymbolId4OpNodeSignature(new_op_node_signature)
    symbol = symbol_util.Symbol(symbol_id, new_op_node_signature)
    symbol_storage.SetSymbol4Id(symbol_id, symbol)
    symbol_storage.SetSymbol4SerializedOpNodeSignature(
        serialized_op_node_signature, symbol
    )
    return symbol
def _GetConstInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
    """Collect (ibn symbol, blob object) pairs for every *non-mutable* input
    blob name of ``op_attribute``."""
    assert callable(blob_object4ibn)
    modifiers = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
    return [
        (self.GetSymbol4String(ibn), blob_object4ibn(ibn))
        for ibn in op_attribute.input_bns
        if not modifiers[ibn].is_mutable
    ]
def _GetMutableInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
    """Collect (ibn symbol, blob object) pairs for every *mutable* input blob
    name of ``op_attribute``."""
    modifiers = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
    return [
        (self.GetSymbol4String(ibn), blob_object4ibn(ibn))
        for ibn in op_attribute.input_bns
        if modifiers[ibn].is_mutable
    ]
def _GetMut1OperandBlobObjects(
    self, op_attribute, parallel_desc_sym, bn_in_op2blob_object=None
):
    """Create output BlobObjects for every output blob whose header is
    inferred before compute (plus all tmp blobs), record them in
    ``bn_in_op2blob_object``, and return (obn symbol, blob object) pairs.

    Fixes: mutable default ``bn_in_op2blob_object={}`` (shared across calls
    and mutated here) replaced by a None sentinel; removed the unused local
    ``lbi``.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    mut1_operand_blob_objects = []

    def GetOutBlobParallelDescSymbol(obn):
        # a per-blob parallel desc in the signature overrides the op-level one
        parallel_signature = op_attribute.parallel_signature
        bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
        if obn in bn2symbol_id:
            return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
        else:
            return parallel_desc_sym

    def OutputBns():
        # outputs whose header is known before compute, then all tmp blobs
        obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
        for obn in op_attribute.output_bns:
            if obn2modifier[obn].header_infered_before_compute:
                yield obn
        for tmp_bn in op_attribute.tmp_bns:
            yield tmp_bn

    for obn in OutputBns():
        obn_sym = self.GetSymbol4String(obn)
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            GetOutBlobParallelDescSymbol(obn), op_attribute, obn
        )
        op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
        out_blob_object = self._NewBlobObject(
            op_arg_parallel_attr, op_arg_blob_attr
        )
        bn_in_op2blob_object[obn] = out_blob_object
        mut1_operand_blob_objects.append((obn_sym, out_blob_object))
    return mut1_operand_blob_objects
def _CheckRefInBlobObjectParallelDesc(
    self, op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=None
):
    """Assert that every mutable input blob object lives on the same parallel
    desc as the op itself.

    Fix: mutable default ``bn_in_op2blob_object={}`` replaced by a None
    sentinel; the loop-invariant modifier lookup is hoisted out of the loop.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_conf = op_attribute.op_conf
    ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
    for ibn in op_attribute.input_bns:
        if not ibn2modifier[ibn].is_mutable:
            continue
        ref_blob_object = bn_in_op2blob_object[ibn]
        assert op_parallel_desc_sym == ref_blob_object.parallel_desc_symbol, (
            "op_conf: %s\n%s\nv.s.\n%s"
            % (op_conf, op_parallel_desc_sym, ref_blob_object.parallel_desc_symbol)
        )
def _GetMut2OperandBlobObjects(
    self, op_attribute, parallel_desc_sym, bn_in_op2blob_object=None
):
    """Create output BlobObjects for every output blob whose header is *not*
    inferred before compute, record them in ``bn_in_op2blob_object``, and
    return (obn symbol, blob object) pairs.

    Fix: mutable default ``bn_in_op2blob_object={}`` (shared across calls and
    mutated here) replaced by a None sentinel.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    mut2_operand_blob_objects = []

    def GetOutBlobParallelDescSymbol(obn):
        # a per-blob parallel desc in the signature overrides the op-level one
        parallel_signature = op_attribute.parallel_signature
        bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
        if obn in bn2symbol_id:
            return symbol_storage.GetSymbol4Id(bn2symbol_id[obn])
        else:
            return parallel_desc_sym

    for obn in op_attribute.output_bns:
        obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
        if obn2modifier[obn].header_infered_before_compute:
            continue
        obn_sym = self.GetSymbol4String(obn)
        op_arg_parallel_attr = op_arg_util.GetOpArgParallelAttribute(
            GetOutBlobParallelDescSymbol(obn), op_attribute, obn
        )
        op_arg_blob_attr = op_arg_util.GetOpArgBlobAttribute(op_attribute, obn)
        out_blob_object = self._NewBlobObject(
            op_arg_parallel_attr, op_arg_blob_attr
        )
        bn_in_op2blob_object[obn] = out_blob_object
        mut2_operand_blob_objects.append((obn_sym, out_blob_object))
    return mut2_operand_blob_objects
def _NewBlobObject(self, op_arg_parallel_attr, op_arg_blob_attr):
    """Allocate a fresh object id on the attribute's parallel desc and wrap
    it in a BlobObject."""
    new_object_id = self._NewObjectId(op_arg_parallel_attr.parallel_desc_symbol)
    return object_util.BlobObject(
        object_id=new_object_id,
        op_arg_parallel_attr=op_arg_parallel_attr,
        op_arg_blob_attr=op_arg_blob_attr,
        release=self.release_object_,
    )
def _NewSymbolId4String(self, string):
    """Allocate a symbol id and initialize it as a string symbol."""
    new_symbol_id = self._NewSymbolId()
    self._InitStringSymbol(new_symbol_id, string)
    return new_symbol_id
def _NewSymbolId4ParallelConf(self, parallel_conf):
    """Allocate a symbol id for a parallel conf.

    Note: draws the id straight from ``id_generator_`` (no NewSymbol
    instruction) because _NewParallelConfSymbol emits its own
    NewParallelDescSymbol instruction.
    """
    new_symbol_id = self.id_generator_.NewSymbolId()
    self._NewParallelConfSymbol(new_symbol_id, parallel_conf)
    return new_symbol_id
def _NewSymbolId4Scope(self, scope_proto):
    """Allocate a symbol id and initialize it as a scope symbol."""
    new_symbol_id = self._NewSymbolId()
    self._NewScopeSymbol(new_symbol_id, scope_proto)
    return new_symbol_id
def _NewSymbolId4JobConf(self, job_conf):
    """Allocate a symbol id and initialize it as a job-conf symbol."""
    new_symbol_id = self._NewSymbolId()
    self._InitJobConfSymbol(new_symbol_id, job_conf)
    return new_symbol_id
def _NewSymbolId4OpConf(self, op_conf):
    """Allocate a symbol id and initialize it as an operator-conf symbol."""
    new_symbol_id = self._NewSymbolId()
    self._InitOpConfSymbol(new_symbol_id, op_conf)
    return new_symbol_id
def _NewSymbolId4OpNodeSignature(self, op_node_signature):
    """Allocate a symbol id and initialize it as an op-node-signature symbol."""
    new_symbol_id = self._NewSymbolId()
    self._InitOpNodeSignatureDescSymbol(new_symbol_id, op_node_signature)
    return new_symbol_id
def _NewSharedOpKernelObjectId4ParallelConfSymbolId(self, parallel_desc_sym):
    """Allocate an object id on ``parallel_desc_sym`` for a shared op-kernel object."""
    return self._NewObjectId(parallel_desc_sym)
def _StatelessCallOpKernel(
    self,
    instr_name,
    parallel_desc_sym,
    job_desc_sym,
    op_conf_sym,
    op_node_signature_sym,
    shared_opkernel_obj,
    const_input_operand_blob_objects,
    mutable_input_operand_blob_objects,
    mut1_operand_blob_objects,
    mut2_operand_blob_objects,
):
    """Append a ``<device_tag>.<instr_name>`` instruction.

    Operand layout (order is the instruction's contract): job-desc, op-conf
    and op-node-signature symbols, the shared opkernel object, then four
    separator-delimited groups — const inputs, mutable inputs, mut1 outputs,
    mut2 outputs — each group listing all name symbols followed by all blob
    object ids.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name(
        "%s.%s" % (parallel_desc_sym.device_tag, instr_name)
    )
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(job_desc_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(op_conf_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(op_node_signature_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _MutOperand(shared_opkernel_obj.object_id)
    )
    # group 1: const inputs (names, then objects)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for ibn_sym, _ in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _ConstOperand(blob_object.object_id)
        )
    # group 2: mutable inputs
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for ibn_sym, _ in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _MutOperand(blob_object.object_id)
        )
    # group 3: mut1 outputs (header inferred before compute)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for obn_sym, _ in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _MutOperand(blob_object.object_id)
        )
    # group 4: mut2 outputs (header inferred at compute time)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for obn_sym, _ in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _Mut2Operand(blob_object.object_id)
        )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _StatefulCallOpKernel(
    self,
    instr_name,
    parallel_desc_sym,
    opkernel_object,
    op_node_signature_sym,
    const_input_operand_blob_objects,
    mutable_input_operand_blob_objects,
    mut1_operand_blob_objects,
    mut2_operand_blob_objects,
):
    """Append a ``<device_tag>.<instr_name>`` instruction for a stateful
    opkernel object.

    Operand layout (order is the instruction's contract): the opkernel
    object, the op-node-signature symbol, then four separator-delimited
    groups — const inputs, mutable inputs, mut1 outputs, mut2 outputs —
    each group listing all name symbols followed by all blob object ids.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name(
        "%s.%s" % (parallel_desc_sym.device_tag, instr_name,)
    )
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        _MutOperand(opkernel_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(op_node_signature_sym.symbol_id)
    )
    # group 1: const inputs (names, then objects)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for ibn_sym, _ in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _ConstOperand(blob_object.object_id)
        )
    # group 2: mutable inputs
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for ibn_sym, _ in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _MutOperand(blob_object.object_id)
        )
    # group 3: mut1 outputs (header inferred before compute)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for obn_sym, _ in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _MutOperand(blob_object.object_id)
        )
    # group 4: mut2 outputs (header inferred at compute time)
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for obn_sym, _ in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _Mut2Operand(blob_object.object_id)
        )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _NewSymbolId(self):
    """Draw a fresh symbol id from the id generator and emit a NewSymbol
    instruction registering it; returns the id."""
    symbol_id = self.id_generator_.NewSymbolId()
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("NewSymbol")
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    return symbol_id
def _NewObjectId(self, parallel_desc_sym):
    """Draw a fresh object id from the id generator and emit a NewObject
    instruction placing it on ``parallel_desc_sym``; returns the id."""
    object_id = self.id_generator_.NewObjectId()
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("NewObject")
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(object_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    return object_id
def _LazyReference(self, blob_object, interface_op_name):
    """Emit a ``<device_tag>.LazyReference`` instruction binding
    ``blob_object`` to a lazy-mode interface blob.

    NOTE(review): the symbol is looked up by the blob attribute's
    logical_blob_name, not by the ``interface_op_name`` parameter — the
    parameter is currently unused here; confirm against callers.
    """
    instruction = instr_cfg.InstructionProto()
    device_tag = blob_object.parallel_desc_symbol.device_tag
    instruction.set_instr_type_name("{}.LazyReference".format(device_tag))
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(_MutOperand(blob_object.object_id))
    interface_op_name_sym = self.GetSymbol4String(
        blob_object.op_arg_blob_attr.logical_blob_name
    )
    instruction.mutable_operand().Add().CopyFrom(
        _SymbolOperand(interface_op_name_sym.symbol_id)
    )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _BroadcastObjectReference(self, sole_mirrored_object, parallel_desc_sym):
    """Allocate a new object id on ``parallel_desc_sym`` and emit a
    BroadcastObjectReference instruction aliasing it to
    ``sole_mirrored_object``; returns the new id."""
    object_id = self.id_generator_.NewObjectId()
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("BroadcastObjectReference")
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(object_id))
    instruction.mutable_operand().Add().CopyFrom(
        _Int64Operand(sole_mirrored_object.object_id)
    )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    return object_id
def _InitStringSymbol(self, symbol_id, string):
    """Emit an InitStringSymbol instruction for ``symbol_id`` and queue the
    matching EagerSymbol payload carrying ``string``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitStringSymbol")
    instruction.mutable_operand().Add().CopyFrom(_InitSymbolOperand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    eager_symbol.string_symbol = string
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _NewParallelConfSymbol(self, symbol_id, parallel_conf):
    """Emit a NewParallelDescSymbol instruction for ``symbol_id`` and queue
    the matching EagerSymbol payload carrying ``parallel_conf``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("NewParallelDescSymbol")
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    # TODO(oyy) change temporary transformation after python code migrated into cpp code
    # (cfg object -> text -> protobuf round-trip)
    eager_symbol.parallel_conf_symbol.CopyFrom(
        text_format.Parse(str(parallel_conf), placement_pb.ParallelConf())
    )
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _NewScopeSymbol(self, symbol_id, scope_proto):
    """Emit an InitScopeSymbol instruction for ``symbol_id`` and queue the
    matching EagerSymbol payload carrying ``scope_proto``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitScopeSymbol")
    instruction.mutable_operand().Add().CopyFrom(_InitSymbolOperand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    # TODO(oyy): text_format.Parse will be removed after eager_symbol proto obj is replaced with cfg obj in python side
    eager_symbol.scope_symbol.CopyFrom(
        text_format.Parse(str(scope_proto), scope_pb.ScopeProto())
    )
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitJobConfSymbol(self, symbol_id, job_conf):
    """Emit an InitJobDescSymbol instruction for ``symbol_id`` and queue the
    matching EagerSymbol payload carrying ``job_conf``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitJobDescSymbol")
    instruction.mutable_operand().Add().CopyFrom(_InitSymbolOperand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    # TODO(oyy) change temporary transformation after python code migrated into cpp code
    # (cfg object -> text -> protobuf round-trip)
    eager_symbol.job_conf_symbol.CopyFrom(
        text_format.Parse(str(job_conf), job_conf_pb.JobConfigProto())
    )
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitOpConfSymbol(self, symbol_id, op_conf):
    """Emit an InitOperatorConfSymbol instruction for ``symbol_id`` and queue
    the matching EagerSymbol payload carrying ``op_conf``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitOperatorConfSymbol")
    instruction.mutable_operand().Add().CopyFrom(_InitSymbolOperand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    eager_symbol.op_conf_symbol.CopyFrom(op_conf)
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _InitOpNodeSignatureDescSymbol(self, symbol_id, op_node_signature):
    """Emit an InitOpNodeSignatureDescSymbol instruction for ``symbol_id``
    and queue the matching EagerSymbol payload carrying ``op_node_signature``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitOpNodeSignatureDescSymbol")
    instruction.mutable_operand().Add().CopyFrom(_InitSymbolOperand(symbol_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    eager_symbol.op_node_signature_symbol.CopyFrom(op_node_signature)
    self.eager_symbol_list_.eager_symbol.append(eager_symbol)
def _FetchBlob(self, instruction_name, blob_object, fetcher):
    """Emit a ``<device_tag>.<instruction_name>`` instruction that reads
    ``blob_object`` and invokes the registered python callback ``fetcher``
    (identified by its callback id) with the data."""
    unique_callback_id = python_callback.GetIdForRegisteredCallback(fetcher)
    instruction = instr_cfg.InstructionProto()
    device_tag = blob_object.parallel_desc_symbol.device_tag
    instruction.set_instr_type_name("%s.%s" % (device_tag, instruction_name))
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(
        _ConstOperand(blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(unique_callback_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def FeedBlob(self, blob_object, feeder):
    """Emit a ``<device_tag>.FeedBlob`` instruction that writes data produced
    by the registered python callback ``feeder`` into ``blob_object``."""
    unique_callback_id = python_callback.GetIdForRegisteredCallback(feeder)
    instruction = instr_cfg.InstructionProto()
    device_tag = blob_object.parallel_desc_symbol.device_tag
    instruction.set_instr_type_name("%s.%s" % (device_tag, "FeedBlob"))
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(
        _Mut2Operand(blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(_Int64Operand(unique_callback_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _TryClearObject(self, obj):
    """Emit a TryClearObject instruction releasing ``obj``'s payload if possible."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("TryClearObject")
    instruction.set_parallel_desc_symbol_id(obj.parallel_desc_symbol.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(_MutOperand(obj.object_id))
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _DeleteObject(self, blob_object):
    """Emit a DeleteObject instruction destroying ``blob_object``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("DeleteObject")
    instruction.set_parallel_desc_symbol_id(
        blob_object.parallel_desc_symbol.symbol_id
    )
    instruction.mutable_operand().Add().CopyFrom(
        _DelObjectOperand(blob_object.object_id)
    )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _ReplaceMirrored(self, parallel_desc_sym, lhs_objects, rhs_objects):
    """Emit a ReplaceMirrored instruction: lhs object ids, a separator, then
    rhs object ids, all on ``parallel_desc_sym``."""
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("ReplaceMirrored")
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    for lhs_object in lhs_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _Int64Operand(lhs_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(_OperandSeparator())
    for rhs_object in rhs_objects:
        instruction.mutable_operand().Add().CopyFrom(
            _Int64Operand(rhs_object.object_id)
        )
    self.instruction_list_.mutable_instruction().Add().CopyFrom(instruction)
def _MakeNewBlobObjectLike(builder, blob_object, new_parallel_desc_symbol):
    """Create, via a synthetic Input op, a new blob object with the same blob
    conf as ``blob_object`` but placed on ``new_parallel_desc_symbol``."""
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = id_util.UniqueStr("Input")
    op_conf.device_tag = new_parallel_desc_symbol.device_tag
    op_conf.input_conf.out = "out"
    # copy both the parallel and the blob attributes into the interface conf
    blob_object.op_arg_parallel_attr.DumpToToInterfaceBlobConf(
        op_conf.input_conf.blob_conf
    )
    blob_object.op_arg_blob_attr.DumpToToInterfaceBlobConf(op_conf.input_conf.blob_conf)
    op_conf.scope_symbol_id = oneflow.current_scope().symbol_id
    upstream_signature = op_attribute_pb.OpNodeSignature()
    op_attribute = c_api_util.InferOpConf(op_conf, upstream_signature)
    parallel_conf = new_parallel_desc_symbol.parallel_conf
    bn_in_op2blob_object = {}
    builder.RawStatelessCall(
        op_attribute, parallel_conf, bn_in_op2blob_object=bn_in_op2blob_object
    )
    # the Input op's sole output is the freshly placed blob object
    return bn_in_op2blob_object["out"]
def _SymbolOperand(val):
    """Operand referencing symbol id ``val`` (sole-mirrored)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetSoleMirroredOperand(operand.mutable_symbol_operand(), val)
    return operand
def _InitSymbolOperand(val):
    """Operand initializing symbol id ``val`` (sole-mirrored)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetSoleMirroredOperand(operand.mutable_init_symbol_operand(), val)
    return operand
def _ConstOperand(val):
    """Read-only operand for object id ``val`` (current-device mirrored)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetMirroredOperand(operand.mutable_const_operand(), val)
    return operand
def _MutOperand(val):
    """Mutable operand for object id ``val`` (current-device mirrored)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetMirroredOperand(operand.mutable_mut_operand(), val)
    return operand
def _Mut2Operand(val):
    """Mut2 (late-written) operand for object id ``val`` (current-device mirrored)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetMirroredOperand(operand.mutable_mut2_operand(), val)
    return operand
def _DelObjectOperand(val):
    """Mutable operand for object id ``val`` covering all mirrored parts (for deletion)."""
    operand = instr_cfg.InstructionOperandProto()
    _SetAllMirroredOperand(operand.mutable_mut_operand(), val)
    return operand
def _Int64Operand(val):
    """Operand carrying a raw int64 value ``val``."""
    op = instr_cfg.InstructionOperandProto()
    op.set_int64_operand(val)
    return op
def _Uint64Operand(val):
    """Operand carrying a raw uint64 value ``val``."""
    operand = instr_cfg.InstructionOperandProto()
    operand.set_uint64_operand(val)
    return operand
def _OperandSeparator():
    """Separator operand delimiting operand groups within an instruction."""
    operand = instr_cfg.InstructionOperandProto()
    operand.mutable_separator()
    return operand
def _SetMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val``, mirrored on the current global device."""
    operand.set_logical_object_id(val)
    operand.mutable_current_global_device_id()
def _SetSoleMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val`` as a sole mirrored object."""
    operand.set_logical_object_id(val)
    operand.mutable_sole_mirrored_object()
def _SetAllMirroredOperand(operand, val):
    """Point ``operand`` at logical object ``val`` covering all mirrored objects."""
    operand.set_logical_object_id(val)
    operand.mutable_all_mirrored_object()
def _FindOrCreateDelegateBlobObject(
    builder, Fetch, x_blob_object, op_arg_parallel_attr
):
    """Return ``x_blob_object`` itself when its parallel attribute already
    matches; otherwise return a cached (or freshly fetched) delegate blob
    object with the requested attribute."""
    if x_blob_object.op_arg_parallel_attr == op_arg_parallel_attr:
        return x_blob_object
    blob_cache = blob_cache_util.FindOrCreateBlobCache(x_blob_object)
    return blob_cache.GetCachedDelegateBlobObject(op_arg_parallel_attr, Fetch)
def _GetOpConfBlobNameAttr(pb_message, field):
if hasattr(pb_message, field):
return getattr(pb_message, field)
m = re.search("_(\d+)$", field)
assert m is not None
blob_name = field[0 : -len(m.group(0))]
index = int(m.group(0)[1:])
assert hasattr(pb_message, blob_name), (pb_message, blob_name)
repeated_field = getattr(pb_message, blob_name)
assert index >= 0
assert index < len(repeated_field)
return repeated_field[index]
def _ReleaseLogicalObject(obj):
    """Schedule deletion of a logical (global) object on the logical run queue."""
    LogicalRun(lambda builder: builder.DeleteObject(obj))
def _ReleasePhysicalObject(obj):
    """Schedule deletion of `obj` on the physical instruction stream."""
    def Delete(builder):
        return builder.DeleteObject(obj)
    PhysicalRun(Delete)
| [
"oneflow.python.eager.symbol_storage.GetSymbol4String",
"oneflow.python.eager.boxing_util.BoxingTo",
"oneflow.python.eager.symbol.Symbol",
"oneflow.python.eager.object.BlobObject",
"oneflow.python.vm.id_util.PhysicalIdGenerator",
"oneflow.core.register.blob_desc_pb2.BlobDescProto",
"oneflow.python.eager... | [((53169, 53194), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (53192, 53194), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((53214, 53240), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Input"""'], {}), "('Input')\n", (53231, 53240), True, 'import oneflow.python.framework.id_util as id_util\n'), ((53622, 53655), 'oneflow.core.operator.op_attribute_pb2.OpNodeSignature', 'op_attribute_pb.OpNodeSignature', ([], {}), '()\n', (53653, 53655), True, 'import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb\n'), ((53675, 53726), 'oneflow.python.framework.c_api_util.InferOpConf', 'c_api_util.InferOpConf', (['op_conf', 'upstream_signature'], {}), '(op_conf, upstream_signature)\n', (53697, 53726), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((54011, 54046), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54044, 54046), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54178, 54213), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54211, 54213), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54345, 54380), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54378, 54380), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54500, 54535), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54533, 54535), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54654, 54689), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54687, 54689), True, 'import 
oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54814, 54849), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (54847, 54849), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((54972, 55007), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (55005, 55007), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((55103, 55138), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (55136, 55138), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((55235, 55270), 'oneflow_api.oneflow.core.vm.instruction.InstructionOperandProto', 'instr_cfg.InstructionOperandProto', ([], {}), '()\n', (55268, 55270), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((55912, 55964), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['x_blob_object'], {}), '(x_blob_object)\n', (55949, 55964), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((56178, 56206), 're.search', 're.search', (['"""_(\\\\d+)$"""', 'field'], {}), "('_(\\\\d+)$', field)\n", (56187, 56206), False, 'import re\n'), ((2293, 2325), 'oneflow.python.vm.id_util.PhysicalIdGenerator', 'vm_id_util.PhysicalIdGenerator', ([], {}), '()\n', (2323, 2325), True, 'import oneflow.python.vm.id_util as vm_id_util\n'), ((2473, 2504), 'oneflow.python.vm.id_util.LogicalIdGenerator', 'vm_id_util.LogicalIdGenerator', ([], {}), '()\n', (2502, 2504), True, 'import oneflow.python.vm.id_util as vm_id_util\n'), ((2666, 2697), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2695, 2697), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((2739, 2770), 
'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2768, 2770), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((6596, 6662), 'oneflow.python.eager.boxing_util.TryReplaceDeviceTag', 'boxing_util.TryReplaceDeviceTag', (['self', 'op_parallel_desc_sym', '"""cpu"""'], {}), "(self, op_parallel_desc_sym, 'cpu')\n", (6627, 6662), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((10149, 10201), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['callback'], {}), '(callback)\n', (10191, 10201), True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((10224, 10252), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (10250, 10252), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((16118, 16149), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (16147, 16149), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((16504, 16589), 'oneflow.python.framework.op_arg_util.GetOpArgParallelAttribute', 'op_arg_util.GetOpArgParallelAttribute', (['blob_parallel_desc_sym', 'op_attribute', 'obn'], {}), '(blob_parallel_desc_sym, op_attribute, obn\n )\n', (16541, 16589), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((16634, 16686), 'oneflow.python.framework.op_arg_util.GetOpArgBlobAttribute', 'op_arg_util.GetOpArgBlobAttribute', (['op_attribute', 'obn'], {}), '(op_attribute, obn)\n', (16667, 16686), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((16909, 16948), 'oneflow.python.eager.symbol_storage.HasSymbol4String', 'symbol_storage.HasSymbol4String', (['string'], {}), '(string)\n', (16940, 16948), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17079, 
17116), 'oneflow.python.eager.symbol.Symbol', 'symbol_util.Symbol', (['symbol_id', 'string'], {}), '(symbol_id, string)\n', (17097, 17116), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((17125, 17171), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (17152, 17171), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17180, 17227), 'oneflow.python.eager.symbol_storage.SetSymbol4String', 'symbol_storage.SetSymbol4String', (['string', 'symbol'], {}), '(string, symbol)\n', (17211, 17227), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17304, 17346), 'oneflow.python.eager.symbol_storage.HasSymbol4JobConf', 'symbol_storage.HasSymbol4JobConf', (['job_conf'], {}), '(job_conf)\n', (17336, 17346), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17483, 17522), 'oneflow.python.eager.symbol.Symbol', 'symbol_util.Symbol', (['symbol_id', 'job_conf'], {}), '(symbol_id, job_conf)\n', (17501, 17522), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((17531, 17577), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (17558, 17577), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17586, 17636), 'oneflow.python.eager.symbol_storage.SetSymbol4JobConf', 'symbol_storage.SetSymbol4JobConf', (['job_conf', 'symbol'], {}), '(job_conf, symbol)\n', (17618, 17636), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17808, 17881), 'oneflow.python.eager.symbol_storage.HasSymbol4SerializedParallelConf', 'symbol_storage.HasSymbol4SerializedParallelConf', (['serialized_parallel_conf'], {}), '(serialized_parallel_conf)\n', (17855, 17881), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18089, 18145), 
'oneflow.python.eager.symbol.ParallelDescSymbol', 'symbol_util.ParallelDescSymbol', (['symbol_id', 'parallel_conf'], {}), '(symbol_id, parallel_conf)\n', (18119, 18145), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((18154, 18200), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (18181, 18200), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18209, 18294), 'oneflow.python.eager.symbol_storage.SetSymbol4SerializedParallelConf', 'symbol_storage.SetSymbol4SerializedParallelConf', (['serialized_parallel_conf', 'symbol'], {}), '(serialized_parallel_conf,\n symbol)\n', (18256, 18294), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18523, 18592), 'oneflow.python.eager.symbol_storage.HasSymbol4SerializedScopeProto', 'symbol_storage.HasSymbol4SerializedScopeProto', (['serialized_scope_proto'], {}), '(serialized_scope_proto)\n', (18568, 18592), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18700, 18769), 'oneflow.python.framework.scope_symbol.ScopeSymbol', 'scope_symbol.ScopeSymbol', (['symbol_id', 'scope_proto', 'parent_scope_symbol'], {}), '(symbol_id, scope_proto, parent_scope_symbol)\n', (18724, 18769), True, 'import oneflow.python.framework.scope_symbol as scope_symbol\n'), ((18778, 18824), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (18805, 18824), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18833, 18910), 'oneflow.python.eager.symbol_storage.SetSymbol4SerializedScopeProto', 'symbol_storage.SetSymbol4SerializedScopeProto', (['serialized_scope_proto', 'symbol'], {}), '(serialized_scope_proto, symbol)\n', (18878, 18910), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((19022, 19098), 
'oneflow.python.eager.object_storage.HasSharedOpKernelObject4ParallelConfSymbol', 'object_storage.HasSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (19079, 19098), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((19354, 19402), 'oneflow.python.eager.object.Object', 'object_util.Object', (['object_id', 'parallel_desc_sym'], {}), '(object_id, parallel_desc_sym)\n', (19372, 19402), True, 'import oneflow.python.eager.object as object_util\n'), ((19411, 19496), 'oneflow.python.eager.object_storage.SetSharedOpKernelObject4ParallelConfSymbol', 'object_storage.SetSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym', 'obj'], {}), '(parallel_desc_sym,\n obj)\n', (19468, 19496), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((20210, 20276), 'oneflow.python.framework.op_arg_util.MakeBroadcastOpArgParallelAttribute', 'op_arg_util.MakeBroadcastOpArgParallelAttribute', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (20257, 20276), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((20314, 20501), 'oneflow.python.eager.object.BlobObject', 'object_util.BlobObject', ([], {'object_id': 'object_id', 'op_arg_parallel_attr': 'op_arg_parallel_attr', 'op_arg_blob_attr': 'sole_mirrored_blob_object.op_arg_blob_attr', 'release': 'self.release_object_'}), '(object_id=object_id, op_arg_parallel_attr=\n op_arg_parallel_attr, op_arg_blob_attr=sole_mirrored_blob_object.\n op_arg_blob_attr, release=self.release_object_)\n', (20336, 20501), True, 'import oneflow.python.eager.object as object_util\n'), ((20668, 20720), 'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['op_conf.scope_symbol_id'], {}), '(op_conf.scope_symbol_id)\n', (20695, 20720), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((20805, 20846), 'oneflow.python.framework.c_api_util.GetOpParallelSymbolId', 
'c_api_util.GetOpParallelSymbolId', (['op_conf'], {}), '(op_conf)\n', (20837, 20846), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((20878, 20927), 'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['parallel_desc_sym_id'], {}), '(parallel_desc_sym_id)\n', (20905, 20927), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((21074, 21130), 'oneflow.python.eager.opkernel_object.OpKernelObject', 'OpKernelObject', (['object_id', 'op_conf', 'self.release_object_'], {}), '(object_id, op_conf, self.release_object_)\n', (21088, 21130), False, 'from oneflow.python.eager.opkernel_object import OpKernelObject\n'), ((22187, 22215), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (22213, 22215), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((23229, 23257), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (23255, 23257), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((24311, 24339), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (24337, 24339), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((25896, 25948), 'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['op_conf.scope_symbol_id'], {}), '(op_conf.scope_symbol_id)\n', (25923, 25948), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((29433, 29461), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (29459, 29461), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((29885, 29913), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (29911, 29913), True, 'import oneflow_api.oneflow.core.vm.instruction as 
instr_cfg\n'), ((30374, 30435), 'oneflow.python.eager.symbol_storage.HasSymbol4SerializedOpConf', 'symbol_storage.HasSymbol4SerializedOpConf', (['serialized_op_conf'], {}), '(serialized_op_conf)\n', (30415, 30435), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((30589, 30627), 'oneflow.python.eager.symbol.Symbol', 'symbol_util.Symbol', (['symbol_id', 'op_conf'], {}), '(symbol_id, op_conf)\n', (30607, 30627), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((30636, 30682), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (30663, 30682), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((30691, 30760), 'oneflow.python.eager.symbol_storage.SetSymbol4SerializedOpConf', 'symbol_storage.SetSymbol4SerializedOpConf', (['serialized_op_conf', 'symbol'], {}), '(serialized_op_conf, symbol)\n', (30732, 30760), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((30871, 30904), 'oneflow.core.operator.op_attribute_pb2.OpNodeSignature', 'op_attribute_pb.OpNodeSignature', ([], {}), '()\n', (30902, 30904), True, 'import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb\n'), ((31552, 31637), 'oneflow.python.eager.symbol_storage.HasSymbol4SerializedOpNodeSignature', 'symbol_storage.HasSymbol4SerializedOpNodeSignature', (['serialized_op_node_signature'], {}), '(serialized_op_node_signature\n )\n', (31602, 31637), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((31880, 31932), 'oneflow.python.eager.symbol.Symbol', 'symbol_util.Symbol', (['symbol_id', 'new_op_node_signature'], {}), '(symbol_id, new_op_node_signature)\n', (31898, 31932), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((31941, 31987), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (31968, 31987), True, 'import 
oneflow.python.eager.symbol_storage as symbol_storage\n'), ((31996, 32089), 'oneflow.python.eager.symbol_storage.SetSymbol4SerializedOpNodeSignature', 'symbol_storage.SetSymbol4SerializedOpNodeSignature', (['serialized_op_node_signature', 'symbol'], {}), '(serialized_op_node_signature\n , symbol)\n', (32046, 32089), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((36934, 37095), 'oneflow.python.eager.object.BlobObject', 'object_util.BlobObject', ([], {'object_id': 'object_id', 'op_arg_parallel_attr': 'op_arg_parallel_attr', 'op_arg_blob_attr': 'op_arg_blob_attr', 'release': 'self.release_object_'}), '(object_id=object_id, op_arg_parallel_attr=\n op_arg_parallel_attr, op_arg_blob_attr=op_arg_blob_attr, release=self.\n release_object_)\n', (36956, 37095), True, 'import oneflow.python.eager.object as object_util\n'), ((38713, 38741), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (38739, 38741), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((41573, 41601), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (41599, 41601), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((43971, 43999), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (43997, 43999), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((44361, 44389), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (44387, 44389), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((44790, 44818), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (44816, 44818), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((45653, 45681), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 
'instr_cfg.InstructionProto', ([], {}), '()\n', (45679, 45681), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((46209, 46237), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (46235, 46237), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((46486, 46515), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (46513, 46515), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((46756, 46784), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (46782, 46784), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((47033, 47062), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (47060, 47062), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((47484, 47512), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (47510, 47512), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((47760, 47789), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (47787, 47789), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((48226, 48254), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (48252, 48254), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((48504, 48533), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (48531, 48533), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((48944, 48972), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (48970, 48972), True, 'import 
oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((49227, 49256), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (49254, 49256), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((49519, 49547), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (49545, 49547), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((49809, 49838), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (49836, 49838), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((50118, 50169), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['fetcher'], {}), '(fetcher)\n', (50160, 50169), True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((50192, 50220), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (50218, 50220), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((50839, 50889), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['feeder'], {}), '(feeder)\n', (50881, 50889), True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((50912, 50940), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (50938, 50940), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((51536, 51564), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (51562, 51564), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((51934, 51962), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (51960, 51962), True, 
'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((52431, 52459), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (52457, 52459), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((53563, 53586), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (53584, 53586), False, 'import oneflow\n'), ((4037, 4100), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['self', 'x_blob_object', 'op_arg_parallel_attr'], {}), '(self, x_blob_object, op_arg_parallel_attr)\n', (4057, 4100), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((9437, 9500), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['self', 'x_blob_object', 'op_arg_parallel_attr'], {}), '(self, x_blob_object, op_arg_parallel_attr)\n', (9457, 9500), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((12871, 12899), 'oneflow_api.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (12897, 12899), True, 'import oneflow_api.oneflow.core.job.placement as placement_cfg\n'), ((13758, 13786), 'oneflow.core.register.blob_desc_pb2.BlobDescProto', 'blob_desc_pb.BlobDescProto', ([], {}), '()\n', (13784, 13786), True, 'import oneflow.core.register.blob_desc_pb2 as blob_desc_pb\n'), ((14100, 14212), 'oneflow.python.framework.op_arg_util.OpArgBlobAttribute', 'op_arg_util.OpArgBlobAttribute', (['logical_blob_attr.batch_axis', 'blob_desc', 'logical_blob_attr.logical_blob_name'], {}), '(logical_blob_attr.batch_axis, blob_desc,\n logical_blob_attr.logical_blob_name)\n', (14130, 14212), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((15016, 15081), 'oneflow.python.framework.op_arg_util.MakeMirroredOpArgParallelAttribute', 'op_arg_util.MakeMirroredOpArgParallelAttribute', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (15062, 15081), True, 'import 
oneflow.python.framework.op_arg_util as op_arg_util\n'), ((16969, 17008), 'oneflow.python.eager.symbol_storage.GetSymbol4String', 'symbol_storage.GetSymbol4String', (['string'], {}), '(string)\n', (17000, 17008), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17367, 17409), 'oneflow.python.eager.symbol_storage.GetSymbol4JobConf', 'symbol_storage.GetSymbol4JobConf', (['job_conf'], {}), '(job_conf)\n', (17399, 17409), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17902, 17975), 'oneflow.python.eager.symbol_storage.GetSymbol4SerializedParallelConf', 'symbol_storage.GetSymbol4SerializedParallelConf', (['serialized_parallel_conf'], {}), '(serialized_parallel_conf)\n', (17949, 17975), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18613, 18682), 'oneflow.python.eager.symbol_storage.GetSymbol4SerializedScopeProto', 'symbol_storage.GetSymbol4SerializedScopeProto', (['serialized_scope_proto'], {}), '(serialized_scope_proto)\n', (18658, 18682), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((19119, 19195), 'oneflow.python.eager.object_storage.GetSharedOpKernelObject4ParallelConfSymbol', 'object_storage.GetSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (19176, 19195), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((25377, 25415), 'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['symbol_id'], {}), '(symbol_id)\n', (25404, 25415), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((25541, 25619), 'oneflow.python.framework.op_arg_util.GetOpArgParallelAttribute', 'op_arg_util.GetOpArgParallelAttribute', (['op_parallel_desc_sym', 'op_attribute', 'ibn'], {}), '(op_parallel_desc_sym, op_attribute, ibn)\n', (25578, 25619), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((27827, 27905), 
'oneflow.python.framework.op_arg_util.GetOpArgParallelAttribute', 'op_arg_util.GetOpArgParallelAttribute', (['op_parallel_desc_sym', 'op_attribute', 'ibn'], {}), '(op_parallel_desc_sym, op_attribute, ibn)\n', (27864, 27905), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((30456, 30517), 'oneflow.python.eager.symbol_storage.GetSymbol4SerializedOpConf', 'symbol_storage.GetSymbol4SerializedOpConf', (['serialized_op_conf'], {}), '(serialized_op_conf)\n', (30497, 30517), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((31675, 31760), 'oneflow.python.eager.symbol_storage.GetSymbol4SerializedOpNodeSignature', 'symbol_storage.GetSymbol4SerializedOpNodeSignature', (['serialized_op_node_signature'], {}), '(serialized_op_node_signature\n )\n', (31725, 31760), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((34395, 34447), 'oneflow.python.framework.op_arg_util.GetOpArgBlobAttribute', 'op_arg_util.GetOpArgBlobAttribute', (['op_attribute', 'obn'], {}), '(op_attribute, obn)\n', (34428, 34447), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((36424, 36476), 'oneflow.python.framework.op_arg_util.GetOpArgBlobAttribute', 'op_arg_util.GetOpArgBlobAttribute', (['op_attribute', 'obn'], {}), '(op_attribute, obn)\n', (36457, 36476), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((13874, 13963), 'oneflow.python.framework.balanced_splitter.BalancedPartNums', 'balanced_splitter.BalancedPartNums', (['logical_blob_attr.shape[split_axis]', 'parallel_num'], {}), '(logical_blob_attr.shape[split_axis],\n parallel_num)\n', (13908, 13963), True, 'import oneflow.python.framework.balanced_splitter as balanced_splitter\n'), ((33681, 33727), 'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['bn2symbol_id[obn]'], {}), '(bn2symbol_id[obn])\n', (33708, 33727), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((35859, 35905), 
'oneflow.python.eager.symbol_storage.GetSymbol4Id', 'symbol_storage.GetSymbol4Id', (['bn2symbol_id[obn]'], {}), '(bn2symbol_id[obn])\n', (35886, 35905), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((47301, 47328), 'oneflow.core.job.placement_pb2.ParallelConf', 'placement_pb.ParallelConf', ([], {}), '()\n', (47326, 47328), True, 'import oneflow.core.job.placement_pb2 as placement_pb\n'), ((48049, 48070), 'oneflow.core.job.scope_pb2.ScopeProto', 'scope_pb.ScopeProto', ([], {}), '()\n', (48068, 48070), True, 'import oneflow.core.job.scope_pb2 as scope_pb\n'), ((48762, 48790), 'oneflow.core.job.job_conf_pb2.JobConfigProto', 'job_conf_pb.JobConfigProto', ([], {}), '()\n', (48788, 48790), True, 'import oneflow.core.job.job_conf_pb2 as job_conf_pb\n')] |
# Experimental oneflow API; eager (define-by-run) execution is enabled so the
# script below runs operations immediately instead of building a lazy graph.
import oneflow.experimental as flow
import numpy as np
flow.enable_eager_execution()
class MyLayer(flow.nn.Module):
    """Module with one learnable scalar parameter.

    ``forward`` runs a long eager-tensor loop whose result is discarded and
    returns the parameter concatenated with itself.
    """

    def __init__(self):
        super().__init__()
        # Single learnable value, normally initialized in place.
        self.p = flow.nn.Parameter(flow.Tensor(1, dtype=flow.float32))
        flow.nn.init.normal_(self.p)

    def forward(self, input):
        # Two (1000, 10) tensors of log-uniform samples.
        state = flow.Tensor(np.log(np.random.rand(*[1000, 10])), dtype=flow.float32)
        other = flow.Tensor(np.log(np.random.rand(*[1000, 10])), dtype=flow.float32)
        for _ in range(1000):
            threshold = flow.Tensor(np.log(np.random.rand(1000, 10)), dtype=flow.float32)
            # Elementwise select: take `other` where it exceeds the
            # threshold, else keep the previous `state`.
            mask = flow.cast(other > threshold, dtype=flow.float32)
            state = flow.Tensor(mask * other + (1.0 - mask) * state)
        # NOTE(review): the loop result is unused — the return value depends
        # only on the parameter; presumably an autograd/memory stress test.
        return flow.cat([self.p, self.p], dim=0)
# Driver: one forward/backward pass through MyLayer, then an Adam step.
inputs = flow.Tensor(np.random.randn(10, 1))
model = MyLayer()
model.train()
optimizer = flow.optim.Adam(parameters=model.parameters(), lr=0.001)
outputs = model(inputs)
print(outputs)
# Seed the backward pass with an all-ones upstream gradient.
outputs.backward(gradient=flow.ones_like(outputs))
print('p.grad: ', model.p.grad)
optimizer.step()
| [
"oneflow.experimental.nn.init.normal_",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.Tensor",
"oneflow.experimental.cat",
"oneflow.experimental.cast",
"oneflow.experimental.ones_like"
] | [((56, 85), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (83, 85), True, 'import oneflow.experimental as flow\n'), ((776, 798), 'numpy.random.randn', 'np.random.randn', (['(10)', '(1)'], {}), '(10, 1)\n', (791, 798), True, 'import numpy as np\n'), ((261, 289), 'oneflow.experimental.nn.init.normal_', 'flow.nn.init.normal_', (['self.p'], {}), '(self.p)\n', (281, 289), True, 'import oneflow.experimental as flow\n'), ((725, 758), 'oneflow.experimental.cat', 'flow.cat', (['[self.p, self.p]'], {'dim': '(0)'}), '([self.p, self.p], dim=0)\n', (733, 758), True, 'import oneflow.experimental as flow\n'), ((955, 974), 'oneflow.experimental.ones_like', 'flow.ones_like', (['out'], {}), '(out)\n', (969, 974), True, 'import oneflow.experimental as flow\n'), ((217, 251), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)'], {'dtype': 'flow.float32'}), '(1, dtype=flow.float32)\n', (228, 251), True, 'import oneflow.experimental as flow\n'), ((614, 655), 'oneflow.experimental.cast', 'flow.cast', (['(t2 > event)'], {'dtype': 'flow.float32'}), '(t2 > event, dtype=flow.float32)\n', (623, 655), True, 'import oneflow.experimental as flow\n'), ((673, 709), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(a * t2 + (1.0 - a) * t1)'], {}), '(a * t2 + (1.0 - a) * t1)\n', (684, 709), True, 'import oneflow.experimental as flow\n'), ((353, 380), 'numpy.random.rand', 'np.random.rand', (['*[1000, 10]'], {}), '(*[1000, 10])\n', (367, 380), True, 'import numpy as np\n'), ((434, 461), 'numpy.random.rand', 'np.random.rand', (['*[1000, 10]'], {}), '(*[1000, 10])\n', (448, 461), True, 'import numpy as np\n'), ((552, 576), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(10)'], {}), '(1000, 10)\n', (566, 576), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
from resnet50_model import FakeBN, resnet50
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestResNet50(flow.unittest.TestCase):
def test_resnet50_without_batchnorm(test_case):
batch_size = 32
color_space = "RGB"
height = 224
width = 224
output_layout = "NCHW"
rgb_mean = [123.68, 116.779, 103.939]
rgb_std = [58.393, 57.12, 57.375]
record_reader = flow.nn.OfrecordReader(
"/dataset/imagenette/ofrecord",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=5,
shuffle_after_epoch=False,
)
record_image_decoder = flow.nn.OFRecordImageDecoder(
"encoded", color_space=color_space
)
record_label_decoder = flow.nn.OfrecordRawDecoder(
"class/label", shape=(), dtype=flow.int32
)
resize = flow.nn.image.Resize(
resize_side="shorter", keep_aspect_ratio=True, target_size=256
)
crop_mirror_normal = flow.nn.CropMirrorNormalize(
color_space=color_space,
output_layout=output_layout,
crop_h=height,
crop_w=width,
crop_pos_y=0.5,
crop_pos_x=0.5,
mean=rgb_mean,
std=rgb_std,
output_dtype=flow.float,
)
res50_module = resnet50(
replace_stride_with_dilation=[False, False, False], norm_layer=FakeBN
)
res50_module.train()
res50_module.load_state_dict(
flow.load("/dataset/imagenette/resnet50_pretrained")
)
of_corss_entropy = flow.nn.CrossEntropyLoss()
res50_module.to("cuda")
of_corss_entropy.to("cuda")
learning_rate = 0.001
mom = 0.9
of_sgd = flow.optim.SGD(
res50_module.parameters(), lr=learning_rate, momentum=mom
)
gt_of_losses = [
6.823004722595215,
6.818080902099609,
6.817478179931641,
6.820215702056885,
6.820272445678711,
6.805415630340576,
6.812217712402344,
6.822971343994141,
6.81321907043457,
6.812097549438477,
6.808729648590088,
6.809578895568848,
6.810042381286621,
6.81298303604126,
6.806015968322754,
6.809454917907715,
6.808111190795898,
6.80530309677124,
6.808160781860352,
6.809715747833252,
6.804327487945557,
6.801260948181152,
6.801140785217285,
6.802030086517334,
6.802935600280762,
6.793076992034912,
6.800511360168457,
6.7988386154174805,
6.798485279083252,
6.802251815795898,
6.798983573913574,
6.798493385314941,
6.796577453613281,
6.787880897521973,
6.796964645385742,
6.783697128295898,
6.7896833419799805,
6.786165714263916,
6.790346145629883,
6.785680770874023,
6.782796859741211,
6.784112930297852,
6.792185306549072,
6.780761241912842,
6.778015613555908,
6.778000354766846,
6.789952278137207,
6.773430824279785,
6.780228614807129,
6.774554252624512,
6.77685546875,
6.7801337242126465,
6.767944812774658,
6.7757134437561035,
6.772693157196045,
6.770571231842041,
6.766884803771973,
6.762784004211426,
6.765412330627441,
6.768856048583984,
6.769237518310547,
6.77099609375,
6.765361785888672,
6.7630228996276855,
6.757351875305176,
6.761430740356445,
6.757913112640381,
6.756040096282959,
6.75714111328125,
6.752540588378906,
6.7559967041015625,
6.759932041168213,
6.756745338439941,
6.750467300415039,
6.750478744506836,
6.750133514404297,
6.75436544418335,
6.744396209716797,
6.753242492675781,
6.747480392456055,
6.744192123413086,
6.744802474975586,
6.742746829986572,
6.7499589920043945,
6.739953517913818,
6.739869117736816,
6.744085311889648,
6.744339942932129,
6.741791248321533,
6.737485885620117,
6.735355377197266,
6.7377848625183105,
6.73032283782959,
6.734944820404053,
6.7288079261779785,
6.737483978271484,
6.730724334716797,
6.728422164916992,
6.723917007446289,
6.734870910644531,
]
for b in range(100):
val_record = record_reader()
label = record_label_decoder(val_record)
image_raw_buffer = record_image_decoder(val_record)
image = resize(image_raw_buffer)[0]
image = crop_mirror_normal(image)
image = image.to("cuda")
label = label.to("cuda")
logits = res50_module(image)
loss = of_corss_entropy(logits, label)
loss.backward()
of_sgd.step()
of_sgd.zero_grad()
l = loss.numpy()[0]
test_case.assertTrue(np.allclose(l.item(), gt_of_losses[b], atol=1e-05))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.CropMirrorNormalize",
"oneflow.nn.OFRecordImageDecoder",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.image.Resize",
"oneflow.load",
"oneflow.nn.OfrecordRawDecoder",
"oneflow.nn.OfrecordReader"
] | [((732, 764), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (762, 764), True, 'import oneflow as flow\n'), ((782, 816), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (791, 816), False, 'import os\n'), ((6464, 6479), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6477, 6479), False, 'import unittest\n'), ((1173, 1330), 'oneflow.nn.OfrecordReader', 'flow.nn.OfrecordReader', (['"""/dataset/imagenette/ofrecord"""'], {'batch_size': 'batch_size', 'data_part_num': '(1)', 'part_name_suffix_length': '(5)', 'shuffle_after_epoch': '(False)'}), "('/dataset/imagenette/ofrecord', batch_size=\n batch_size, data_part_num=1, part_name_suffix_length=5,\n shuffle_after_epoch=False)\n", (1195, 1330), True, 'import oneflow as flow\n'), ((1424, 1488), 'oneflow.nn.OFRecordImageDecoder', 'flow.nn.OFRecordImageDecoder', (['"""encoded"""'], {'color_space': 'color_space'}), "('encoded', color_space=color_space)\n", (1452, 1488), True, 'import oneflow as flow\n'), ((1542, 1611), 'oneflow.nn.OfrecordRawDecoder', 'flow.nn.OfrecordRawDecoder', (['"""class/label"""'], {'shape': '()', 'dtype': 'flow.int32'}), "('class/label', shape=(), dtype=flow.int32)\n", (1568, 1611), True, 'import oneflow as flow\n'), ((1651, 1739), 'oneflow.nn.image.Resize', 'flow.nn.image.Resize', ([], {'resize_side': '"""shorter"""', 'keep_aspect_ratio': '(True)', 'target_size': '(256)'}), "(resize_side='shorter', keep_aspect_ratio=True,\n target_size=256)\n", (1671, 1739), True, 'import oneflow as flow\n'), ((1787, 1992), 'oneflow.nn.CropMirrorNormalize', 'flow.nn.CropMirrorNormalize', ([], {'color_space': 'color_space', 'output_layout': 'output_layout', 'crop_h': 'height', 'crop_w': 'width', 'crop_pos_y': '(0.5)', 'crop_pos_x': '(0.5)', 'mean': 'rgb_mean', 'std': 'rgb_std', 'output_dtype': 'flow.float'}), '(color_space=color_space, output_layout=\n output_layout, crop_h=height, crop_w=width, crop_pos_y=0.5, crop_pos_x=\n 
0.5, mean=rgb_mean, std=rgb_std, output_dtype=flow.float)\n', (1814, 1992), True, 'import oneflow as flow\n'), ((2125, 2204), 'resnet50_model.resnet50', 'resnet50', ([], {'replace_stride_with_dilation': '[False, False, False]', 'norm_layer': 'FakeBN'}), '(replace_stride_with_dilation=[False, False, False], norm_layer=FakeBN)\n', (2133, 2204), False, 'from resnet50_model import FakeBN, resnet50\n'), ((2396, 2422), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (2420, 2422), True, 'import oneflow as flow\n'), ((2306, 2358), 'oneflow.load', 'flow.load', (['"""/dataset/imagenette/resnet50_pretrained"""'], {}), "('/dataset/imagenette/resnet50_pretrained')\n", (2315, 2358), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.job.regularizer_conf_pb2 as regularizer_conf_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("regularizers.l1_l2")
def l1_l2_regularizer(
l1: float = 0.01, l2: float = 0.01
) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L1 and L2 weight regularizer.
Args:
l1 (float, optional): The L1 regularization coefficient. Defaults to 0.01.
l2 (float, optional): The L2 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l1_l2_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l1_l2(l1=0.001, l2=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l1_l2_Job(x)
"""
regularizer = regularizer_conf_util.RegularizerConf()
setattr(regularizer.l1_l2_conf, "l1", l1)
setattr(regularizer.l1_l2_conf, "l2", l2)
return regularizer
@oneflow_export("regularizers.l1")
def l1_regularizer(l: float = 0.01) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L1 weight regularizer.
Args:
l (float, optional): The L1 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l1_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l1(l=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l1_Job(x)
"""
return l1_l2_regularizer(l1=l, l2=0.0)
@oneflow_export("regularizers.l2")
def l2_regularizer(l: float = 0.01) -> regularizer_conf_util.RegularizerConf:
"""This operator creates a L2 weight regularizer.
Args:
l (float, optional): The L2 regularization coefficient. Defaults to 0.01.
Returns:
regularizer_conf_util.RegularizerConf: A regularizer that can be used in other layers or operators.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def conv2d_l2_Job(x: tp.Numpy.Placeholder((1, 256, 32, 32))
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
regularizer = flow.regularizers.l2(l=0.001)
conv2d = flow.layers.conv2d(
x,
filters=128,
kernel_size=3,
strides=1,
padding='SAME',
kernel_initializer=initializer,
kernel_regularizer=regularizer,
name="Conv2d"
)
return conv2d
x = np.random.randn(1, 256, 32, 32).astype(np.float32)
out = conv2d_l2_Job(x)
"""
return l1_l2_regularizer(l1=0.0, l2=l)
| [
"oneflow.core.job.regularizer_conf_pb2.RegularizerConf",
"oneflow.python.oneflow_export.oneflow_export"
] | [((817, 853), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""regularizers.l1_l2"""'], {}), "('regularizers.l1_l2')\n", (831, 853), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2353, 2386), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""regularizers.l1"""'], {}), "('regularizers.l1')\n", (2367, 2386), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3625, 3658), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""regularizers.l2"""'], {}), "('regularizers.l2')\n", (3639, 3658), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2195, 2234), 'oneflow.core.job.regularizer_conf_pb2.RegularizerConf', 'regularizer_conf_util.RegularizerConf', ([], {}), '()\n', (2232, 2234), True, 'import oneflow.core.job.regularizer_conf_pb2 as regularizer_conf_util\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""dataset for bert."""
import oneflow as flow
from libai.data.structures import DistTensorData, Instance
class DemoNlpDataset(flow.utils.data.Dataset):
def __init__(self, data_root="", datasetname="Demodataset"):
self.data_root = data_root
self.datasetname = datasetname
self.dataset = list(range(50000))
def __len__(self):
return len(self.dataset)
def __getitem__(self, idx):
sample = Instance(
input=DistTensorData(flow.ones((512), dtype=flow.long), placement_idx=0),
label=DistTensorData(flow.ones((1,), dtype=flow.long), placement_idx=-1),
)
return sample
@property
def supports_prefetch(self):
return False
def prefetch(self, indices):
self.dataset.prefetch(indices)
| [
"oneflow.ones"
] | [((1108, 1139), 'oneflow.ones', 'flow.ones', (['(512)'], {'dtype': 'flow.long'}), '(512, dtype=flow.long)\n', (1117, 1139), True, 'import oneflow as flow\n'), ((1194, 1226), 'oneflow.ones', 'flow.ones', (['(1,)'], {'dtype': 'flow.long'}), '((1,), dtype=flow.long)\n', (1203, 1226), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Union, List
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def eye_op(
n,
m=None,
dtype: flow.dtype = flow.float,
device: Union[str, flow.device] = None,
placement: flow.placement = None,
sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
requires_grad: bool = False,
):
"""This operator creates a 2-D Tensor with ones on the diagonal and zeros elsewhere.
Args:
n (int): the number of rows.
m (Optional[int], optional): the number of colums with default being n. Defaults to None.
Keyword args:
device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
Returns:
oneflow.Tensor: The result Blob with ones on the diagonal and zeros elsewhere.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> out = flow.eye(3, 3)
>>> out
tensor([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]], dtype=oneflow.float32)
"""
if placement is None:
if isinstance(device, str):
device = flow.device(device)
res = flow._C.eye(n, m, dtype=dtype, device=device)
else:
assert isinstance(
placement, flow._oneflow_internal.placement
), "placement should be oneflow._oneflow_internal.placement type."
assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
if isinstance(sbp, flow.sbp.sbp):
assert sbp == flow.sbp.broadcast
sbp = (sbp,)
else:
for elem in sbp:
assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
assert elem == flow.sbp.broadcast
assert len(sbp) == len(placement.hierarchy)
res = flow._C.consistent_eye(n, m, dtype=dtype, placement=placement, sbp=sbp)
res.requires_grad = requires_grad
return res
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.device",
"oneflow._C.consistent_eye",
"oneflow._C.eye"
] | [((2761, 2797), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2776, 2797), False, 'import doctest\n'), ((1949, 1994), 'oneflow._C.eye', 'flow._C.eye', (['n', 'm'], {'dtype': 'dtype', 'device': 'device'}), '(n, m, dtype=dtype, device=device)\n', (1960, 1994), True, 'import oneflow as flow\n'), ((2582, 2653), 'oneflow._C.consistent_eye', 'flow._C.consistent_eye', (['n', 'm'], {'dtype': 'dtype', 'placement': 'placement', 'sbp': 'sbp'}), '(n, m, dtype=dtype, placement=placement, sbp=sbp)\n', (2604, 2653), True, 'import oneflow as flow\n'), ((1915, 1934), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1926, 1934), True, 'import oneflow as flow\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------
# MoCo v3 Model
# References:
# moco-v3: https://github.com/facebookresearch/moco-v3/blob/main/moco/builder.py
# --------------------------------------------------------
import math
import oneflow as flow
import oneflow.nn as nn
from libai.layers import Linear
from libai.utils.distributed import get_world_size
class MoCo(nn.Module):
"""
Build a MoCo model with a base encoder, a momentum encoder, and two MLPs
https://arxiv.org/abs/1911.05722
"""
def __init__(
self, base_encoder, momentum_encoder, dim=256, mlp_dim=4096, T=1.0, m=0.99, max_iter=300
):
"""
dim: feature dimension (default: 256)
mlp_dim: hidden dimension in MLPs (default: 4096)
T: softmax temperature (default: 1.0)
"""
super(MoCo, self).__init__()
self.T = T
self.m = m
# build encoders
self.base_encoder = base_encoder
self.momentum_encoder = momentum_encoder
self.base_encoder.num_classes = dim
self.momentum_encoder.num_classes = dim
self.max_iter = max_iter
self._build_projector_and_predictor_mlps(dim, mlp_dim)
for param_b, param_m in zip(
self.base_encoder.parameters(), self.momentum_encoder.parameters()
):
param_m.data.copy_(param_b.data) # initialize
param_m.requires_grad = False # not update by gradient
def _build_mlp(self, num_layers, input_dim, mlp_dim, output_dim, last_bn=True):
mlp = []
for l in range(num_layers):
dim1 = input_dim if l == 0 else mlp_dim
dim2 = output_dim if l == num_layers - 1 else mlp_dim
mlp.append(Linear(dim1, dim2, bias=False)) # libai
if l < num_layers - 1:
mlp.append(nn.BatchNorm1d(dim2))
mlp.append(nn.ReLU(inplace=True))
elif last_bn:
# follow SimCLR's design:
# https://github.com/google-research/simclr/blob/master/model_util.py#L157
# for simplicity, we further removed gamma in BN
# TODO: affine should be False (bug here)
mlp.append(nn.BatchNorm1d(dim2, affine=True))
return nn.Sequential(*mlp)
def _build_projector_and_predictor_mlps(self, dim, mlp_dim):
pass
@flow.no_grad()
def _update_momentum_encoder(self, m):
"""Momentum update of the momentum encoder"""
for param_b, param_m in zip(
self.base_encoder.parameters(), self.momentum_encoder.parameters()
):
param_m.data = param_m.data * m + param_b.data * (1.0 - m)
def contrastive_loss(self, q, k):
# normalize
q = nn.functional.normalize(q, dim=1)
k = nn.functional.normalize(k, dim=1)
# gather all targets
# k = concat_all_gather(k).to_global(sbp=q.sbp, placement=q.placement)
k = k.to_global(sbp=flow.sbp.broadcast)
# Einstein sum is more intuitive
logits = flow.einsum("nc,mc->nm", q, k) / self.T
N = logits.shape[0] // get_world_size()
labels = (flow.arange(N, dtype=flow.long) + N * flow.env.get_rank()).to_global(
sbp=flow.sbp.split(0), placement=logits.placement
)
return nn.CrossEntropyLoss()(logits, labels) * (2 * self.T)
def adjust_moco_momentum(self, cu_iter, m):
"""Adjust moco momentum based on current epoch"""
m = 1.0 - 0.5 * (1.0 + math.cos(math.pi * cu_iter / self.max_iter)) * (1.0 - m)
return m
def forward(self, images, labels=None, cu_iter=0, m=0.99):
if self.training:
[x1, x2] = flow.chunk(images, 2, dim=1)
# compute features
q1 = self.predictor(self.base_encoder(x1)["prediction_scores"])
q2 = self.predictor(self.base_encoder(x2)["prediction_scores"])
m = self.adjust_moco_momentum(cu_iter, m) # update the moco_momentum
with flow.no_grad(): # no gradient
self._update_momentum_encoder(m) # update the momentum encoder
# compute momentum features as targets
k1 = self.momentum_encoder(x1)["prediction_scores"]
k2 = self.momentum_encoder(x2)["prediction_scores"]
return {"losses": self.contrastive_loss(q1, k2) + self.contrastive_loss(q2, k1)}, {
"m": m
}
else:
return self.base_encoder(images)
class MoCo_ViT(MoCo):
def _build_projector_and_predictor_mlps(self, dim, mlp_dim):
hidden_dim = self.base_encoder.head.weight.shape[1]
# projectors
self.base_encoder.head = self._build_mlp(3, hidden_dim, mlp_dim, dim)
self.momentum_encoder.head = self._build_mlp(3, hidden_dim, mlp_dim, dim)
# predictor
self.predictor = self._build_mlp(2, dim, mlp_dim, dim)
| [
"oneflow.einsum",
"oneflow.chunk",
"oneflow.sbp.split",
"oneflow.no_grad",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.BatchNorm1d",
"oneflow.nn.functional.normalize",
"oneflow.nn.Sequential",
"oneflow.env.get_rank",
"oneflow.arange",
"oneflow.nn.ReLU"
] | [((3005, 3019), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3017, 3019), True, 'import oneflow as flow\n'), ((2900, 2919), 'oneflow.nn.Sequential', 'nn.Sequential', (['*mlp'], {}), '(*mlp)\n', (2913, 2919), True, 'import oneflow.nn as nn\n'), ((3387, 3420), 'oneflow.nn.functional.normalize', 'nn.functional.normalize', (['q'], {'dim': '(1)'}), '(q, dim=1)\n', (3410, 3420), True, 'import oneflow.nn as nn\n'), ((3433, 3466), 'oneflow.nn.functional.normalize', 'nn.functional.normalize', (['k'], {'dim': '(1)'}), '(k, dim=1)\n', (3456, 3466), True, 'import oneflow.nn as nn\n'), ((3683, 3713), 'oneflow.einsum', 'flow.einsum', (['"""nc,mc->nm"""', 'q', 'k'], {}), "('nc,mc->nm', q, k)\n", (3694, 3713), True, 'import oneflow as flow\n'), ((3754, 3770), 'libai.utils.distributed.get_world_size', 'get_world_size', ([], {}), '()\n', (3768, 3770), False, 'from libai.utils.distributed import get_world_size\n'), ((4326, 4354), 'oneflow.chunk', 'flow.chunk', (['images', '(2)'], {'dim': '(1)'}), '(images, 2, dim=1)\n', (4336, 4354), True, 'import oneflow as flow\n'), ((2364, 2394), 'libai.layers.Linear', 'Linear', (['dim1', 'dim2'], {'bias': '(False)'}), '(dim1, dim2, bias=False)\n', (2370, 2394), False, 'from libai.layers import Linear\n'), ((3875, 3892), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (3889, 3892), True, 'import oneflow as flow\n'), ((3947, 3968), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3966, 3968), True, 'import oneflow.nn as nn\n'), ((4639, 4653), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (4651, 4653), True, 'import oneflow as flow\n'), ((2467, 2487), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['dim2'], {}), '(dim2)\n', (2481, 2487), True, 'import oneflow.nn as nn\n'), ((2516, 2537), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2523, 2537), True, 'import oneflow.nn as nn\n'), ((3789, 3820), 'oneflow.arange', 'flow.arange', (['N'], {'dtype': 'flow.long'}), 
'(N, dtype=flow.long)\n', (3800, 3820), True, 'import oneflow as flow\n'), ((2849, 2882), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['dim2'], {'affine': '(True)'}), '(dim2, affine=True)\n', (2863, 2882), True, 'import oneflow.nn as nn\n'), ((3827, 3846), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (3844, 3846), True, 'import oneflow as flow\n'), ((4138, 4181), 'math.cos', 'math.cos', (['(math.pi * cu_iter / self.max_iter)'], {}), '(math.pi * cu_iter / self.max_iter)\n', (4146, 4181), False, 'import math\n')] |
import random
import argparse
from tqdm import tqdm
from projects.optimization import get_schedule
from flowtext.models.bert import bert
from flowtext.models.bert.model_bert import BertForSequenceClassification
from projects.utils import (
accuracy,
convert_examples_to_features,
ColaProcessor,
MnliProcessor,
MrpcProcessor
)
import math
import numpy as np
import oneflow as flow
from oneflow.utils.data import (
TensorDataset,
RandomSampler,
SequentialSampler,
DataLoader
)
processors = {
"cola": ColaProcessor(),
"mnli": MnliProcessor(),
"mrpc": MrpcProcessor(),
}
def set_seed(args):
if args.seed != None:
random.seed(args.seed)
np.random.seed(args.seed)
flow.manual_seed(args.seed)
def get_parser():
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir",
default=None,
type=str,
required=True,
help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
parser.add_argument("--model_type",
default=None,
type=str,
required=True,
help="Bert pre-trained model type.")
parser.add_argument("--bert_model",
default=None,
type=str,
help="Bert pre-trained model dir.")
parser.add_argument("--task_name",
default=None,
type=str,
required=True,
help="The name of the task to train.")
parser.add_argument("--max_seq_length",
default=128,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.")
parser.add_argument("--do_train",
default=False,
action='store_true',
help="Whether to run training.")
parser.add_argument("--do_eval",
default=False,
action='store_true',
help="Whether to run eval.")
parser.add_argument("--do_lower_case",
default=False,
action='store_true',
help="Set this flag if you are using an uncased model.")
parser.add_argument("--train_batch_size",
default=8,
type=int,
help="Total batch size for training.")
parser.add_argument("--eval_batch_size",
default=8,
type=int,
help="Total batch size for eval.")
parser.add_argument("--learning_rate",
default=5e-5,
type=float,
help="The initial learning rate for Adam.")
parser.add_argument("--weight_decay",
type=float,
default=0.0,
help="Weight decay to use."
)
parser.add_argument("--num_train_epochs",
default=3.0,
type=float,
help="Total number of training epochs to perform.")
parser.add_argument("--max_train_steps",
type=int,
default=None,
help="Total number of training steps to perform. If provided, overrides num_train_epochs.")
parser.add_argument("--lr_scheduler_type",
type=str,
default="linear",
help="The scheduler type to use.",
choices=["linear", "cosine", "cosine_with_restarts", "polynomial", "constant", "constant_with_warmup"])
parser.add_argument("--num_warmup_steps", type=int, default=0, help="Number of steps for the warmup in the lr scheduler.")
parser.add_argument('--gradient_accumulation_steps',
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.")
parser.add_argument("--no_cuda",
default=False,
action='store_true',
help="Whether not to use CUDA when available")
parser.add_argument('--seed',
type=int,
default=13,
help="random seed for initialization")
args = parser.parse_args()
return args
def main():
args = get_parser()
set_seed(args)
if args.no_cuda:
device = flow.device("cuda" if flow.cuda.is_available() and not args.no_cuda else "cpu")
else:
device = flow.device("cuda")
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = int(args.train_batch_size / args.gradient_accumulation_steps)
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True.")
task_name = args.task_name.lower()
if task_name not in processors:
raise ValueError("Task not found: %s" % (task_name))
processor = processors[task_name]
label_list = processor.get_labels()
model, tokenizer, _ = bert(
pretrained=True,
model_type=args.model_type,
checkpoint_path=args.bert_model,
bert_type=BertForSequenceClassification
)
tokenizer.do_lower_case = args.do_lower_case
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
model.to(device)
defaults = {
"lr": args.learning_rate,
"clip_grad_max_norm": 1.0,
"clip_grad_norm_type": 2.0,
}
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
"weight_decay": args.weight_decay,
**defaults
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
"weight_decay": 0.0,
**defaults
}
]
if args.do_train:
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, tokenizer
)
all_input_ids = flow.tensor([f.input_ids for f in train_features], dtype=flow.long)
all_input_mask = flow.tensor([f.input_mask for f in train_features], dtype=flow.long)
all_segment_ids = flow.tensor([f.segment_ids for f in train_features], dtype=flow.long)
all_label_ids = flow.tensor([f.label_id for f in train_features], dtype=flow.long)
train_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size = args.train_batch_size)
num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
if args.max_train_steps is None:
args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
else:
args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
optimizer = flow.optim.AdamW(optimizer_grouped_parameters)
lr_schedule = get_schedule(
name = args.lr_scheduler_type,
optimizer = optimizer,
num_warmup_steps=args.num_warmup_steps,
num_training_steps=args.max_train_steps,
)
global_step = 0
for epoch in range(int(args.num_train_epochs)):
tr_loss, nb_tr_steps = 0, 0
model.train()
for step, batch in tqdm(enumerate(train_dataloader)):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids = batch
loss, _ = model(input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, labels=label_ids)
loss = loss / args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
nb_tr_steps += 1
if step % args.gradient_accumulation_steps == 0 or step == len(train_dataloader) - 1:
optimizer.step()
lr_schedule.step()
optimizer.zero_grad()
global_step += 1
if global_step >= args.max_train_steps:
break
if args.do_eval:
eval_examples = processor.get_dev_examples(args.data_dir)
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, tokenizer
)
all_input_ids = flow.tensor([f.input_ids for f in eval_features], dtype=flow.long)
all_input_mask = flow.tensor([f.input_mask for f in eval_features], dtype=flow.long)
all_segment_ids = flow.tensor([f.segment_ids for f in eval_features], dtype=flow.long)
all_label_ids = flow.tensor([f.label_id for f in eval_features], dtype=flow.long)
eval_data = TensorDataset(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)
eval_sampler = SequentialSampler(eval_data)
eval_dataloader = DataLoader(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)
eval_loss, eval_accuracy = 0, 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
with flow.no_grad():
tmp_eval_loss, logits = model(
input_ids=input_ids,
attention_mask=input_mask,
token_type_ids=segment_ids,
labels=label_ids
)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to('cpu').numpy()
tmp_eval_accuracy = accuracy(logits, label_ids)
eval_loss += tmp_eval_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
result = {'eval_loss': eval_loss,
'eval_accuracy': eval_accuracy,
'global_step': global_step,
'loss': tr_loss/nb_tr_steps}
# Script entry point: run BERT fine-tuning/evaluation defined in main().
if __name__ == '__main__':
    main()
| [
"oneflow.tensor",
"oneflow.no_grad",
"oneflow.device",
"oneflow.cuda.is_available",
"oneflow.utils.data.DataLoader",
"oneflow.optim.AdamW",
"oneflow.manual_seed",
"oneflow.utils.data.TensorDataset",
"oneflow.utils.data.SequentialSampler",
"oneflow.utils.data.RandomSampler"
] | [((549, 564), 'projects.utils.ColaProcessor', 'ColaProcessor', ([], {}), '()\n', (562, 564), False, 'from projects.utils import accuracy, convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((582, 597), 'projects.utils.MnliProcessor', 'MnliProcessor', ([], {}), '()\n', (595, 597), False, 'from projects.utils import accuracy, convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((615, 630), 'projects.utils.MrpcProcessor', 'MrpcProcessor', ([], {}), '()\n', (628, 630), False, 'from projects.utils import accuracy, convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((816, 841), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (839, 841), False, 'import argparse\n'), ((5791, 5919), 'flowtext.models.bert.bert', 'bert', ([], {'pretrained': '(True)', 'model_type': 'args.model_type', 'checkpoint_path': 'args.bert_model', 'bert_type': 'BertForSequenceClassification'}), '(pretrained=True, model_type=args.model_type, checkpoint_path=args.\n bert_model, bert_type=BertForSequenceClassification)\n', (5795, 5919), False, 'from flowtext.models.bert import bert\n'), ((690, 712), 'random.seed', 'random.seed', (['args.seed'], {}), '(args.seed)\n', (701, 712), False, 'import random\n'), ((721, 746), 'numpy.random.seed', 'np.random.seed', (['args.seed'], {}), '(args.seed)\n', (735, 746), True, 'import numpy as np\n'), ((755, 782), 'oneflow.manual_seed', 'flow.manual_seed', (['args.seed'], {}), '(args.seed)\n', (771, 782), True, 'import oneflow as flow\n'), ((5093, 5112), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (5104, 5112), True, 'import oneflow as flow\n'), ((6771, 6864), 'projects.utils.convert_examples_to_features', 'convert_examples_to_features', (['train_examples', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(train_examples, label_list, args.\n max_seq_length, tokenizer)\n', (6799, 6864), False, 'from projects.utils import accuracy, 
convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((6910, 6977), 'oneflow.tensor', 'flow.tensor', (['[f.input_ids for f in train_features]'], {'dtype': 'flow.long'}), '([f.input_ids for f in train_features], dtype=flow.long)\n', (6921, 6977), True, 'import oneflow as flow\n'), ((7003, 7071), 'oneflow.tensor', 'flow.tensor', (['[f.input_mask for f in train_features]'], {'dtype': 'flow.long'}), '([f.input_mask for f in train_features], dtype=flow.long)\n', (7014, 7071), True, 'import oneflow as flow\n'), ((7098, 7167), 'oneflow.tensor', 'flow.tensor', (['[f.segment_ids for f in train_features]'], {'dtype': 'flow.long'}), '([f.segment_ids for f in train_features], dtype=flow.long)\n', (7109, 7167), True, 'import oneflow as flow\n'), ((7192, 7258), 'oneflow.tensor', 'flow.tensor', (['[f.label_id for f in train_features]'], {'dtype': 'flow.long'}), '([f.label_id for f in train_features], dtype=flow.long)\n', (7203, 7258), True, 'import oneflow as flow\n'), ((7280, 7356), 'oneflow.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, all_segment_ids, all_label_ids)\n', (7293, 7356), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((7381, 7406), 'oneflow.utils.data.RandomSampler', 'RandomSampler', (['train_data'], {}), '(train_data)\n', (7394, 7406), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((7434, 7513), 'oneflow.utils.data.DataLoader', 'DataLoader', (['train_data'], {'sampler': 'train_sampler', 'batch_size': 'args.train_batch_size'}), '(train_data, sampler=train_sampler, batch_size=args.train_batch_size)\n', (7444, 7513), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((7889, 7935), 'oneflow.optim.AdamW', 'flow.optim.AdamW', (['optimizer_grouped_parameters'], {}), 
'(optimizer_grouped_parameters)\n', (7905, 7935), True, 'import oneflow as flow\n'), ((7958, 8110), 'projects.optimization.get_schedule', 'get_schedule', ([], {'name': 'args.lr_scheduler_type', 'optimizer': 'optimizer', 'num_warmup_steps': 'args.num_warmup_steps', 'num_training_steps': 'args.max_train_steps'}), '(name=args.lr_scheduler_type, optimizer=optimizer,\n num_warmup_steps=args.num_warmup_steps, num_training_steps=args.\n max_train_steps)\n', (7970, 8110), False, 'from projects.optimization import get_schedule\n'), ((9250, 9341), 'projects.utils.convert_examples_to_features', 'convert_examples_to_features', (['eval_examples', 'label_list', 'args.max_seq_length', 'tokenizer'], {}), '(eval_examples, label_list, args.max_seq_length,\n tokenizer)\n', (9278, 9341), False, 'from projects.utils import accuracy, convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((9388, 9454), 'oneflow.tensor', 'flow.tensor', (['[f.input_ids for f in eval_features]'], {'dtype': 'flow.long'}), '([f.input_ids for f in eval_features], dtype=flow.long)\n', (9399, 9454), True, 'import oneflow as flow\n'), ((9480, 9547), 'oneflow.tensor', 'flow.tensor', (['[f.input_mask for f in eval_features]'], {'dtype': 'flow.long'}), '([f.input_mask for f in eval_features], dtype=flow.long)\n', (9491, 9547), True, 'import oneflow as flow\n'), ((9574, 9642), 'oneflow.tensor', 'flow.tensor', (['[f.segment_ids for f in eval_features]'], {'dtype': 'flow.long'}), '([f.segment_ids for f in eval_features], dtype=flow.long)\n', (9585, 9642), True, 'import oneflow as flow\n'), ((9667, 9732), 'oneflow.tensor', 'flow.tensor', (['[f.label_id for f in eval_features]'], {'dtype': 'flow.long'}), '([f.label_id for f in eval_features], dtype=flow.long)\n', (9678, 9732), True, 'import oneflow as flow\n'), ((9753, 9829), 'oneflow.utils.data.TensorDataset', 'TensorDataset', (['all_input_ids', 'all_input_mask', 'all_segment_ids', 'all_label_ids'], {}), '(all_input_ids, all_input_mask, 
all_segment_ids, all_label_ids)\n', (9766, 9829), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((9853, 9881), 'oneflow.utils.data.SequentialSampler', 'SequentialSampler', (['eval_data'], {}), '(eval_data)\n', (9870, 9881), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((9908, 9984), 'oneflow.utils.data.DataLoader', 'DataLoader', (['eval_data'], {'sampler': 'eval_sampler', 'batch_size': 'args.eval_batch_size'}), '(eval_data, sampler=eval_sampler, batch_size=args.eval_batch_size)\n', (9918, 9984), False, 'from oneflow.utils.data import TensorDataset, RandomSampler, SequentialSampler, DataLoader\n'), ((7799, 7859), 'math.ceil', 'math.ceil', (['(args.max_train_steps / num_update_steps_per_epoch)'], {}), '(args.max_train_steps / num_update_steps_per_epoch)\n', (7808, 7859), False, 'import math\n'), ((10763, 10790), 'projects.utils.accuracy', 'accuracy', (['logits', 'label_ids'], {}), '(logits, label_ids)\n', (10771, 10790), False, 'from projects.utils import accuracy, convert_examples_to_features, ColaProcessor, MnliProcessor, MrpcProcessor\n'), ((10354, 10368), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (10366, 10368), True, 'import oneflow as flow\n'), ((5008, 5032), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (5030, 5032), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
from util import convert_to_onnx_and_check
def set_moving_max_min_value():
    """Seed the observer's moving max/min state with fixed values.

    Scans all variables of the current default job for names ending in
    "max" / "min" (the moving-average observer's state buffers) and loads
    deterministic values into them so the ONNX comparison is reproducible.
    """
    max_key, min_key = "", ""
    for key in flow.get_all_variables().keys():
        # endswith() is the idiomatic (and slice-free) suffix test.
        if key.endswith("max"):
            max_key = key
        elif key.endswith("min"):
            min_key = key
        if max_key and min_key:
            break
    flow.load_variables(
        {
            max_key: np.array([0.5]).astype(np.float32),
            min_key: np.array([-0.2]).astype(np.float32),
        }
    )
def generate_moving_average_min_max_observer_test(
    out_pos: int, formula: str, scheme: str = "symmetric", device_type: str = "cpu",
):
    """Build a one-op job around moving_average_min_max_observer and check its ONNX export.

    Args:
        out_pos: which of the observer's outputs to return from the job
            (the *_zero_point tests pass 1; presumably scale vs zero point —
            confirm against the op definition).
        formula: quantization formula, "google" or "cambricon".
        scheme: quantization scheme, "symmetric" or "affine".
        device_type: "cpu" or "gpu" placement for the op.
    """
    flow.clear_default_session()
    @flow.global_function()
    def moving_average_min_max_observer():
        # NOTE: the inner function's name becomes the exported job's name,
        # which convert_to_onnx_and_check relies on — do not rename.
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                name="x1",
                shape=(2, 3, 4),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(-10, 10),
            )
            return flow.quantization.moving_average_min_max_observer(
                x, quantization_formula=formula, quantization_scheme=scheme
            )[out_pos]
    # Give the observer's state buffers deterministic values before export.
    set_moving_max_min_value()
    convert_to_onnx_and_check(
        moving_average_min_max_observer, opset=10, explicit_init=False
    )
# One test per (output index, formula, scheme, device) combination.
# out_pos=1 in the *_zero_point variants selects the observer's second
# output; the *_gpu variants re-run the same cases with GPU placement.
def test_moving_average_min_max_observer_symmetric(test_case):
    generate_moving_average_min_max_observer_test(0, "google", "symmetric")
def test_moving_average_min_max_observer_symmetric_zero_point(test_case):
    generate_moving_average_min_max_observer_test(1, "google", "symmetric")
def test_moving_average_min_max_observer_affine(test_case):
    generate_moving_average_min_max_observer_test(0, "google", "affine")
def test_moving_average_min_max_observer_affine_zero_point(test_case):
    generate_moving_average_min_max_observer_test(1, "google", "affine")
def test_moving_average_min_max_observer_cambricon(test_case):
    generate_moving_average_min_max_observer_test(0, "cambricon")
def test_moving_average_min_max_observer_cambricon_zero_point(test_case):
    generate_moving_average_min_max_observer_test(1, "cambricon")
def test_moving_average_min_max_observer_symmetric_gpu(test_case):
    generate_moving_average_min_max_observer_test(
        0, "google", "symmetric", device_type="gpu"
    )
def test_moving_average_min_max_observer_symmetric_zero_point_gpu(test_case):
    generate_moving_average_min_max_observer_test(
        1, "google", "symmetric", device_type="gpu"
    )
def test_moving_average_min_max_observer_affine_gpu(test_case):
    generate_moving_average_min_max_observer_test(
        0, "google", "affine", device_type="gpu"
    )
def test_moving_average_min_max_observer_affine_zero_point_gpu(test_case):
    generate_moving_average_min_max_observer_test(
        1, "google", "affine", device_type="gpu"
    )
def test_moving_average_min_max_observer_cambricon_gpu(test_case):
    generate_moving_average_min_max_observer_test(0, "cambricon", device_type="gpu")
def test_moving_average_min_max_observer_cambricon_zero_point_gpu(test_case):
    generate_moving_average_min_max_observer_test(1, "cambricon", device_type="gpu")
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.quantization.moving_average_min_max_observer",
"oneflow.scope.placement",
"oneflow.get_all_variables",
"oneflow.random_uniform_initializer"
] | [((1288, 1316), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1314, 1316), True, 'import oneflow as flow\n'), ((1323, 1345), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1343, 1345), True, 'import oneflow as flow\n'), ((1863, 1956), 'util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['moving_average_min_max_observer'], {'opset': '(10)', 'explicit_init': '(False)'}), '(moving_average_min_max_observer, opset=10,\n explicit_init=False)\n', (1888, 1956), False, 'from util import convert_to_onnx_and_check\n'), ((750, 774), 'oneflow.get_all_variables', 'flow.get_all_variables', ([], {}), '()\n', (772, 774), True, 'import oneflow as flow\n'), ((1402, 1442), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1422, 1442), True, 'import oneflow as flow\n'), ((1676, 1791), 'oneflow.quantization.moving_average_min_max_observer', 'flow.quantization.moving_average_min_max_observer', (['x'], {'quantization_formula': 'formula', 'quantization_scheme': 'scheme'}), '(x, quantization_formula=\n formula, quantization_scheme=scheme)\n', (1725, 1791), True, 'import oneflow as flow\n'), ((1033, 1048), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1041, 1048), True, 'import numpy as np\n'), ((1090, 1106), 'numpy.array', 'np.array', (['[-0.2]'], {}), '([-0.2])\n', (1098, 1106), True, 'import numpy as np\n'), ((1601, 1641), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-10)', '(10)'], {}), '(-10, 10)\n', (1632, 1641), True, 'import oneflow as flow\n')] |
import os
from os import mkdir
from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check
import oneflow as flow
import logging
from backbones import get_model
from utils.utils_config import get_config
import argparse
import tempfile
class ModelGraph(flow.nn.Graph):
    """Static-graph wrapper around a backbone module, used for ONNX export."""

    def __init__(self, model):
        super().__init__()
        self.backbone = model

    def build(self, x):
        # Move the input onto the GPU, then run the backbone.
        return self.backbone(x.to("cuda"))
def convert_func(cfg, model_path, out_path, image_size):
    """Build the backbone, load its checkpoint, and export the graph to ONNX.

    Args:
        cfg: parsed config object (provides network name and embedding size).
        model_path: directory holding the trained oneflow checkpoint.
        out_path: output directory (created by the caller).
        image_size: square input resolution used to trace the graph.
    """
    backbone = get_model(
        cfg.network, dropout=0.0, num_features=cfg.embedding_size
    ).to("cuda")
    backbone.eval()
    print(backbone)
    graph = ModelGraph(backbone)
    # Trace the graph once with a dummy batch so it can be exported.
    graph._compile(flow.randn(1, 3, image_size, image_size).to("cuda"))
    with tempfile.TemporaryDirectory() as tmpdirname:
        raw_state = flow.load(model_path)
        # Strip the "backbone." prefix and drop batch-norm bookkeeping
        # buffers plus the training-only fc head before loading.
        filtered_state = {
            key.replace("backbone.", ""): value
            for key, value in raw_state.items()
            if "num_batches_tracked" not in key and key != "fc.weight"
        }
        backbone.load_state_dict(filtered_state)
        flow.save(backbone.state_dict(), tmpdirname)
        convert_to_onnx_and_check(
            graph,
            flow_weight_dir=tmpdirname,
            onnx_model_path="./",
            print_outlier=True,
        )
def main(args):
    """Entry point: configure logging, load the config, and run the conversion."""
    logging.basicConfig(level=logging.NOTSET)
    logging.info(args.model_path)
    config = get_config(args.config)
    # Make sure the output directory exists before converting.
    if not os.path.exists(args.out_path):
        mkdir(args.out_path)
    convert_func(config, args.model_path, args.out_path, args.image_size)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='OneFlow ArcFace val')
parser.add_argument('config', type=str, help='py config file')
parser.add_argument('--model_path', type=str, help='model path')
parser.add_argument('--image_size', type=int,
default=112, help='input image size')
parser.add_argument('--out_path', type=str,
default="onnx_model", help='out path')
| [
"oneflow.randn",
"oneflow.load"
] | [((1508, 1549), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.NOTSET'}), '(level=logging.NOTSET)\n', (1527, 1549), False, 'import logging\n'), ((1554, 1583), 'logging.info', 'logging.info', (['args.model_path'], {}), '(args.model_path)\n', (1566, 1583), False, 'import logging\n'), ((1594, 1617), 'utils.utils_config.get_config', 'get_config', (['args.config'], {}), '(args.config)\n', (1604, 1617), False, 'from utils.utils_config import get_config\n'), ((1802, 1860), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OneFlow ArcFace val"""'}), "(description='OneFlow ArcFace val')\n", (1825, 1860), False, 'import argparse\n'), ((834, 863), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (861, 863), False, 'import tempfile\n'), ((932, 953), 'oneflow.load', 'flow.load', (['model_path'], {}), '(model_path)\n', (941, 953), True, 'import oneflow as flow\n'), ((1364, 1476), 'oneflow_onnx.oneflow2onnx.util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['model_graph'], {'flow_weight_dir': 'tmpdirname', 'onnx_model_path': '"""./"""', 'print_outlier': '(True)'}), "(model_graph, flow_weight_dir=tmpdirname,\n onnx_model_path='./', print_outlier=True)\n", (1389, 1476), False, 'from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check\n'), ((1629, 1658), 'os.path.exists', 'os.path.exists', (['args.out_path'], {}), '(args.out_path)\n', (1643, 1658), False, 'import os\n'), ((1668, 1688), 'os.mkdir', 'mkdir', (['args.out_path'], {}), '(args.out_path)\n', (1673, 1688), False, 'from os import mkdir\n'), ((546, 614), 'backbones.get_model', 'get_model', (['cfg.network'], {'dropout': '(0.0)', 'num_features': 'cfg.embedding_size'}), '(cfg.network, dropout=0.0, num_features=cfg.embedding_size)\n', (555, 614), False, 'from backbones import get_model\n'), ((771, 811), 'oneflow.randn', 'flow.randn', (['(1)', '(3)', 'image_size', 'image_size'], {}), '(1, 3, image_size, image_size)\n', 
(781, 811), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import numpy as np
import oneflow.typing as tp
import os
import unittest
@flow.unittest.skip_unless_1n1d()
class Test1dSspVariableProxy(flow.unittest.TestCase):
    """Tests for flow.experimental.ssp_variable_proxy with a 1-D ring buffer.

    The proxy splits a variable `w` into a mutable handle `ref` and a
    staleness-delayed read `value`; with buffer_size=4 a write becomes
    visible to `value` only after four job invocations.
    """
    def test_1d_ring_buffer_Wm_assign_Wc_plus_1(test_case):
        """W_mutable = W_constant + 1: value plateaus for buffer_size steps."""
        if flow.eager_execution_enabled():
            return
        device_name = "0:0"
        flow.config.cpu_device_num(2)
        buffer_size = 4
        @flow.global_function()
        def Foo() -> tp.Numpy:
            with flow.scope.placement("cpu", device_name):
                w = flow.get_variable(
                    "w",
                    shape=(10,),
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                ones = flow.constant_like(w, value=1.0, dtype=flow.float)
                ref, value = flow.experimental.ssp_variable_proxy(
                    w, buffer_size=buffer_size
                )
                # do not use `w` again because it's delegated by `ref` and `value`
                # W_mutable = W_constant + 1
                flow.assign(ref, value + ones)
                return value
        checkpoint = flow.train.CheckPoint()
        checkpoint.init()
        zeros = np.zeros((10,)).astype(np.float32)
        ones = np.ones((10,)).astype(np.float32)
        # the first four results are always initialized with zeros
        for i in range(buffer_size):
            x = Foo()
            test_case.assertTrue(np.allclose(x, zeros))
        # the next four results are ones, because the formula is W_mutable = W_constant + 1
        for i in range(buffer_size):
            x = Foo()
            test_case.assertTrue(np.allclose(x, ones))
        # the next four results are twos, because the formula is W_mutable = W_constant + 1
        for i in range(buffer_size):
            x = Foo()
            test_case.assertTrue(np.allclose(x, ones + ones))
    def test_1d_ring_buffer_Wm_assign_Wm_plus_1(test_case):
        """W_mutable = W_mutable + 1: value increments every step after warm-up."""
        if flow.eager_execution_enabled():
            return
        device_name = "0:0"
        flow.config.cpu_device_num(2)
        buffer_size = 4
        @flow.global_function()
        def Foo() -> tp.Numpy:
            with flow.scope.placement("cpu", device_name):
                w = flow.get_variable(
                    "w",
                    shape=(10,),
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                ones = flow.constant_like(w, value=1.0, dtype=flow.float)
                ref, value = flow.experimental.ssp_variable_proxy(
                    w, buffer_size=buffer_size
                )
                # do not use `w` again because it's delegated by `ref` and `value`
                # W_mutable = W_mutable + 1
                flow.assign(ref, ref + ones)
                return value
        checkpoint = flow.train.CheckPoint()
        checkpoint.init()
        zeros = np.zeros((10,)).astype(np.float32)
        ones = np.ones((10,)).astype(np.float32)
        # the first four results are always initialized with zeros
        for i in range(buffer_size):
            x = Foo()
            test_case.assertTrue(np.allclose(x, zeros))
        # ones, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones))
        # twos, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones))
        # threes, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones + ones))
        # fours, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones + ones + ones))
    def test_add_ssp_variable_proxy(test_case):
        """SSP proxy created implicitly by enable_ssp + SGD(lr=-10) on w + 0."""
        if flow.eager_execution_enabled():
            return
        device_name = "0:0"
        flow.config.enable_debug_mode(True)
        flow.config.cpu_device_num(2)
        buffer_size = 4
        function_config = flow.FunctionConfig()
        function_config.enable_ssp(True)
        @flow.global_function(type="train", function_config=function_config)
        def Foo() -> tp.Numpy:
            with flow.scope.placement(
                "cpu", device_name
            ), flow.experimental.scope.config(
                ssp_num_stages=buffer_size, ssp_stage_id=0
            ):
                w = flow.get_variable(
                    "w",
                    shape=(10,),
                    dtype=flow.float,
                    initializer=flow.constant_initializer(0),
                )
                # loss == w; SGD with lr=-10 and grad 0.1-like scaling makes
                # each step add exactly 1 — NOTE(review): inferred from the
                # assertions below, confirm against the optimizer semantics.
                loss = w + flow.constant_like(w, value=0.0, dtype=flow.float)
                flow.optimizer.SGD(
                    flow.optimizer.PiecewiseConstantScheduler([], [-10.0]), momentum=0
                ).minimize(loss)
                return loss
        checkpoint = flow.train.CheckPoint()
        checkpoint.init()
        zeros = np.zeros((10,)).astype(np.float32)
        ones = np.ones((10,)).astype(np.float32)
        # the first four results are always initialized with zeros
        for i in range(buffer_size):
            x = Foo()
            test_case.assertTrue(np.allclose(x, zeros))
        # ones, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones))
        # twos, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones))
        # threes, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones + ones))
        # fours, because the formula is W_mutable = W_mutable + 1
        x = Foo()
        test_case.assertTrue(np.allclose(x, ones + ones + ones + ones))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.global_function",
"oneflow.train.CheckPoint",
"oneflow.constant_initializer",
"oneflow.config.enable_debug_mode",
"oneflow.scope.placement",
"oneflow.experimental.ssp_variable_proxy",
"oneflow.config.cpu_device_num",
"oneflow.eager_execution_enabled",
... | [((689, 721), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (719, 721), True, 'import oneflow as flow\n'), ((6499, 6514), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6512, 6514), False, 'import unittest\n'), ((847, 877), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (875, 877), True, 'import oneflow as flow\n'), ((935, 964), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(2)'], {}), '(2)\n', (961, 964), True, 'import oneflow as flow\n'), ((1000, 1022), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1020, 1022), True, 'import oneflow as flow\n'), ((1759, 1782), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (1780, 1782), True, 'import oneflow as flow\n'), ((2583, 2613), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (2611, 2613), True, 'import oneflow as flow\n'), ((2671, 2700), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(2)'], {}), '(2)\n', (2697, 2700), True, 'import oneflow as flow\n'), ((2736, 2758), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (2756, 2758), True, 'import oneflow as flow\n'), ((3492, 3515), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3513, 3515), True, 'import oneflow as flow\n'), ((4469, 4499), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (4497, 4499), True, 'import oneflow as flow\n'), ((4557, 4592), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (4586, 4592), True, 'import oneflow as flow\n'), ((4601, 4630), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(2)'], {}), '(2)\n', (4627, 4630), True, 'import oneflow as flow\n'), ((4683, 4704), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4702, 4704), True, 'import oneflow as flow\n'), ((4756, 
4823), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'function_config'}), "(type='train', function_config=function_config)\n", (4776, 4823), True, 'import oneflow as flow\n'), ((5549, 5572), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (5570, 5572), True, 'import oneflow as flow\n'), ((3937, 3957), 'numpy.allclose', 'np.allclose', (['x', 'ones'], {}), '(x, ones)\n', (3948, 3957), True, 'import numpy as np\n'), ((4072, 4099), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones)'], {}), '(x, ones + ones)\n', (4083, 4099), True, 'import numpy as np\n'), ((4216, 4250), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones + ones)'], {}), '(x, ones + ones + ones)\n', (4227, 4250), True, 'import numpy as np\n'), ((4366, 4407), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones + ones + ones)'], {}), '(x, ones + ones + ones + ones)\n', (4377, 4407), True, 'import numpy as np\n'), ((5994, 6014), 'numpy.allclose', 'np.allclose', (['x', 'ones'], {}), '(x, ones)\n', (6005, 6014), True, 'import numpy as np\n'), ((6129, 6156), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones)'], {}), '(x, ones + ones)\n', (6140, 6156), True, 'import numpy as np\n'), ((6273, 6307), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones + ones)'], {}), '(x, ones + ones + ones)\n', (6284, 6307), True, 'import numpy as np\n'), ((6423, 6464), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones + ones + ones)'], {}), '(x, ones + ones + ones + ones)\n', (6434, 6464), True, 'import numpy as np\n'), ((1071, 1111), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', 'device_name'], {}), "('cpu', device_name)\n", (1091, 1111), True, 'import oneflow as flow\n'), ((1351, 1401), 'oneflow.constant_like', 'flow.constant_like', (['w'], {'value': '(1.0)', 'dtype': 'flow.float'}), '(w, value=1.0, dtype=flow.float)\n', (1369, 1401), True, 'import oneflow as flow\n'), ((1431, 1495), 
'oneflow.experimental.ssp_variable_proxy', 'flow.experimental.ssp_variable_proxy', (['w'], {'buffer_size': 'buffer_size'}), '(w, buffer_size=buffer_size)\n', (1467, 1495), True, 'import oneflow as flow\n'), ((1677, 1707), 'oneflow.assign', 'flow.assign', (['ref', '(value + ones)'], {}), '(ref, value + ones)\n', (1688, 1707), True, 'import oneflow as flow\n'), ((1825, 1840), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (1833, 1840), True, 'import numpy as np\n'), ((1875, 1889), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (1882, 1889), True, 'import numpy as np\n'), ((2069, 2090), 'numpy.allclose', 'np.allclose', (['x', 'zeros'], {}), '(x, zeros)\n', (2080, 2090), True, 'import numpy as np\n'), ((2276, 2296), 'numpy.allclose', 'np.allclose', (['x', 'ones'], {}), '(x, ones)\n', (2287, 2296), True, 'import numpy as np\n'), ((2482, 2509), 'numpy.allclose', 'np.allclose', (['x', '(ones + ones)'], {}), '(x, ones + ones)\n', (2493, 2509), True, 'import numpy as np\n'), ((2807, 2847), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', 'device_name'], {}), "('cpu', device_name)\n", (2827, 2847), True, 'import oneflow as flow\n'), ((3087, 3137), 'oneflow.constant_like', 'flow.constant_like', (['w'], {'value': '(1.0)', 'dtype': 'flow.float'}), '(w, value=1.0, dtype=flow.float)\n', (3105, 3137), True, 'import oneflow as flow\n'), ((3167, 3231), 'oneflow.experimental.ssp_variable_proxy', 'flow.experimental.ssp_variable_proxy', (['w'], {'buffer_size': 'buffer_size'}), '(w, buffer_size=buffer_size)\n', (3203, 3231), True, 'import oneflow as flow\n'), ((3412, 3440), 'oneflow.assign', 'flow.assign', (['ref', '(ref + ones)'], {}), '(ref, ref + ones)\n', (3423, 3440), True, 'import oneflow as flow\n'), ((3558, 3573), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (3566, 3573), True, 'import numpy as np\n'), ((3608, 3622), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (3615, 3622), True, 'import numpy as np\n'), ((3802, 3823), 
'numpy.allclose', 'np.allclose', (['x', 'zeros'], {}), '(x, zeros)\n', (3813, 3823), True, 'import numpy as np\n'), ((4872, 4912), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', 'device_name'], {}), "('cpu', device_name)\n", (4892, 4912), True, 'import oneflow as flow\n'), ((4944, 5018), 'oneflow.experimental.scope.config', 'flow.experimental.scope.config', ([], {'ssp_num_stages': 'buffer_size', 'ssp_stage_id': '(0)'}), '(ssp_num_stages=buffer_size, ssp_stage_id=0)\n', (4974, 5018), True, 'import oneflow as flow\n'), ((5615, 5630), 'numpy.zeros', 'np.zeros', (['(10,)'], {}), '((10,))\n', (5623, 5630), True, 'import numpy as np\n'), ((5665, 5679), 'numpy.ones', 'np.ones', (['(10,)'], {}), '((10,))\n', (5672, 5679), True, 'import numpy as np\n'), ((5859, 5880), 'numpy.allclose', 'np.allclose', (['x', 'zeros'], {}), '(x, zeros)\n', (5870, 5880), True, 'import numpy as np\n'), ((5292, 5342), 'oneflow.constant_like', 'flow.constant_like', (['w'], {'value': '(0.0)', 'dtype': 'flow.float'}), '(w, value=0.0, dtype=flow.float)\n', (5310, 5342), True, 'import oneflow as flow\n'), ((1280, 1308), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (1305, 1308), True, 'import oneflow as flow\n'), ((3016, 3044), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (3041, 3044), True, 'import oneflow as flow\n'), ((5217, 5245), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (5242, 5245), True, 'import oneflow as flow\n'), ((5399, 5453), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[-10.0]'], {}), '([], [-10.0])\n', (5440, 5453), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from scipy import special
import oneflow.experimental as flow
from test_util import GenArgList
def _test_relu_impl(test_case, shape, device):
    """Compare flow.nn.ReLU forward and backward against numpy for one case."""
    arr = np.random.randn(*shape)
    x = flow.Tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    relu = flow.nn.ReLU()
    y = relu(x)
    expected = np.maximum(0, arr)
    test_case.assertTrue(np.allclose(y.numpy(), expected, 1e-5, 1e-5))
    y.sum().backward()
    # d/dx relu(x) is 1 where x > 0 and 0 elsewhere.
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected > 0, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestReLUModule(flow.unittest.TestCase):
    """Parametrized sweep of _test_relu_impl over shapes and devices."""
    def test_relu(test_case):
        # Shapes of rank 2-4, on both CPU and GPU.
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            _test_relu_impl(test_case, *arg)
def _test_relu6_impl(test_case, shape, device):
    """Check nn.ReLU6 (clamp to [0, 6]) forward and backward."""
    arr = np.random.randn(*shape)
    inp = flow.Tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    relu6 = flow.nn.ReLU6()
    out = relu6(inp)
    expected = np.minimum(np.maximum(0, arr), 6.0)
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-5, 1e-5))
    out.sum().backward()
    # Gradient is 1 only strictly inside (0, 6), zero in both clamped regions.
    expected_grad = np.where(arr > 6, 0, np.where(arr < 0, 0, 1))
    test_case.assertTrue(
        np.allclose(inp.grad.numpy(), expected_grad, 1e-5, 1e-5)
    )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestReLU6Module(flow.unittest.TestCase):
    """Drive _test_relu6_impl over a grid of shapes and devices."""

    def test_relu6(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_relu6_impl(test_case, *combo)
def _test_tanh_nn_impl(test_case, shape, device):
    """nn.Tanh module: forward matches np.tanh, gradient is 1 - tanh^2."""
    arr = np.random.randn(*shape)
    inp = flow.Tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    out = flow.nn.Tanh()(inp)
    expected = np.tanh(arr)
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-5, 1e-5))
    out.sum().backward()
    test_case.assertTrue(
        np.allclose(inp.grad.numpy(), 1.0 - expected * expected, 1e-5, 1e-5)
    )
def _test_tanh_function_impl(test_case, shape, device):
    """Functional flow.tanh: same contract as the nn.Tanh module test."""
    arr = np.random.randn(*shape)
    inp = flow.Tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    out = flow.tanh(inp)
    expected = np.tanh(arr)
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-5, 1e-5))
    out.sum().backward()
    test_case.assertTrue(
        np.allclose(inp.grad.numpy(), 1.0 - expected * expected, 1e-5, 1e-5)
    )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestTanh(flow.unittest.TestCase):
    """Run both the module and functional tanh checks over the grid."""

    def test_tanh(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_tanh_nn_impl(test_case, *combo)
            _test_tanh_function_impl(test_case, *combo)
def _test_elu_function_impl(test_case, shape, device):
    """nn.ELU with default and custom alpha; backward checked for the latter."""
    # Default alpha (1.0): forward only.
    elu = flow.nn.ELU()
    arr = np.random.randn(*shape)
    expected = np.where(arr > 0, arr, 1.0 * (np.exp(arr) - 1))
    x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
    out = elu(x)
    test_case.assertTrue(np.allclose(out.numpy(), expected, rtol=1e-5, atol=1e-5))
    # Custom alpha = 1.2: forward and backward.
    elu = flow.nn.ELU(alpha=1.2)
    arr = np.random.randn(*shape)
    expected = np.where(arr > 0, arr, 1.2 * (np.exp(arr) - 1))
    x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
    out = elu(x)
    test_case.assertTrue(np.allclose(out.numpy(), expected, rtol=1e-5, atol=1e-5))
    out.sum().backward()
    # ELU gradient: 1 for positive inputs, alpha * e^x otherwise.
    expected_grad = np.where(arr > 0, 1, 1.2 * np.exp(arr))
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestELUModule(flow.unittest.TestCase):
    """Drive _test_elu_function_impl over a grid of shapes and devices."""

    def test_elu(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_elu_function_impl(test_case, *combo)
def _np_gelu(x):
return 0.5 * x * (1 + special.erf(x / np.sqrt(2)))
def _test_gelu_impl(test_case, device):
    """nn.GELU on a fixed 3-element input; gradients pinned to known values."""
    arr = np.array([1.0, -1.0, 2.3]).astype(np.float32)
    inp = flow.Tensor(
        arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    out = flow.nn.GELU()(inp)
    test_case.assertTrue(np.allclose(out.numpy(), _np_gelu(arr), 1e-5, 1e-5))
    out.sum().backward()
    # Reference gradients precomputed for the fixed input above.
    expected_grad = [1.0833154916763306, -0.08331547677516937, 1.0544281005859375]
    test_case.assertTrue(np.allclose(inp.grad.numpy(), expected_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestGelu(flow.unittest.TestCase):
    """Run the GELU check on both cpu and cuda."""

    def test_gelu(test_case):
        grid = OrderedDict()
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_gelu_impl(test_case, *combo)
def numpy_sigmoid(x):
    """Reference logistic sigmoid: 1 / (1 + e^{-x})."""
    return 1.0 / (np.exp(-x) + 1)
def numpy_sigmoid_grad(inputs, grads):
    """Chain-rule gradient of sigmoid: sigmoid'(inputs) * upstream grads."""
    exp_neg = np.exp(-inputs)
    return grads * (exp_neg / (1 + exp_neg) ** 2)
def numpy_softmax(x, axis):
    """Numerically stable softmax along `axis` (max-shifted exponentials)."""
    exp_shifted = np.exp(x - x.max(axis=axis, keepdims=True))
    return exp_shifted / exp_shifted.sum(axis=axis, keepdims=True)
def numpy_logsoftmax(x, dim):
    """Log-softmax along `dim`, computed stably via max-shifting."""
    exp_shifted = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return np.log(exp_shifted / exp_shifted.sum(axis=dim, keepdims=True))
def numpy_softplus(x, beta, threshold):
    """Softplus log(1 + e^{beta*x}) / beta, reverting to identity past threshold."""
    smooth = 1.0 / beta * np.log(1.0 + np.exp(beta * x))
    return np.where(x * beta > threshold, x, smooth)
def numpy_mish_grad(x):
    """Closed-form derivative of mish(x) = x * tanh(softplus(x))."""
    f = np.exp(x) + 1
    f2 = f * f
    return (f2 - 1) / (f2 + 1) + x * (4 * f * (f - 1)) / ((f2 + 1) * (f2 + 1))
def _test_sigmoid(test_case, device):
    """Module, functional and tensor-method sigmoid all agree with numpy."""
    sigmoid = flow.nn.Sigmoid()
    arr = np.random.randn(2, 3, 4, 5)
    x = flow.Tensor(arr, device=flow.device(device))
    expected = numpy_sigmoid(arr)
    # Three spellings of the same op must produce identical results.
    for result in (sigmoid(x), flow.sigmoid(x), x.sigmoid()):
        test_case.assertTrue(np.allclose(result.numpy(), expected, 1e-5, 1e-5))
def _test_sigmoid_backward(test_case, device):
    """Backward of nn.Sigmoid matches the analytic derivative."""
    arr = np.random.randn(2, 3, 4, 5)
    x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
    expected_grad = numpy_sigmoid_grad(arr, np.ones(arr.shape))
    sigmoid = flow.nn.Sigmoid()
    sigmoid(x).sum().backward()
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSigmoid(flow.unittest.TestCase):
    """Forward and backward sigmoid checks on both cpu and cuda."""

    def test_sigmoid(test_case):
        grid = OrderedDict()
        grid["fun"] = [
            _test_sigmoid,
            _test_sigmoid_backward,
        ]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
def _test_softmax(test_case, device):
    """Softmax over dim 0 matches the numpy reference."""
    axis = 0
    softmax = flow.nn.Softmax(dim=axis)
    arr = np.random.randn(2, 3, 4, 5)
    x = flow.Tensor(arr, device=flow.device(device))
    result = softmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_softmax(arr, axis), 1e-5, 1e-5)
    )
def _test_softmax_dim_1(test_case, device):
    """Softmax over dim 1 matches the numpy reference."""
    axis = 1
    softmax = flow.nn.Softmax(dim=axis)
    arr = np.random.randn(9, 7, 8, 16)
    x = flow.Tensor(arr, device=flow.device(device))
    result = softmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_softmax(arr, axis), 1e-5, 1e-5)
    )
def _test_softmax_dim_2(test_case, device):
    """Softmax over dim 2 matches the numpy reference."""
    axis = 2
    softmax = flow.nn.Softmax(dim=axis)
    arr = np.random.randn(2, 5, 6, 3)
    x = flow.Tensor(arr, device=flow.device(device))
    result = softmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_softmax(arr, axis), 1e-5, 1e-5)
    )
def _test_softmax_dim_3(test_case, device):
    """Softmax over the last dim, including the negative-index alias dim=-1."""
    axis = 3
    m = flow.nn.Softmax(dim=axis)
    arr = np.random.randn(1, 3, 4, 7)
    x = flow.Tensor(arr, device=flow.device(device))
    y = m(x)
    output = numpy_softmax(arr, axis)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-5, 1e-5))
    # BUG FIX: the original built axis2/m2 but then reused m and axis, so the
    # negative-dim path was never exercised. dim=-1 must equal dim=3 on 4-D.
    axis2 = -1
    m2 = flow.nn.Softmax(dim=axis2)
    y2 = m2(x)
    output2 = numpy_softmax(arr, axis2)
    test_case.assertTrue(np.allclose(y2.numpy(), output2, 1e-5, 1e-5))
def _test_softmax_backward_normal(test_case, device):
# Grad of softmax should equal to zero.
# See:https://eli.thegreenplace.net/2016/the-softmax-function-and-its-derivative/
# Note that only when sum of softmax is the backward value zero.
x_grad = np.zeros((2, 3, 4, 5))
axis = 0
m = flow.nn.Softmax(dim=axis)
x = flow.Tensor(
np.random.randn(2, 3, 4, 5),
requires_grad=True,
device=flow.device(device),
dtype=flow.float64,
)
y = m(x).sum()
y.backward()
test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, 1e-5, 1e-5))
def _test_softmax_backward_1_dim(test_case, device):
a = flow.tensor(
[1, 2], dtype=flow.float64, device=flow.device(device), requires_grad=True
)
b = flow.tensor(
[3, 4], dtype=flow.float64, device=flow.device(device), requires_grad=True
)
c = a * b
m = flow.nn.Softmax(dim=None)
d = m(c)
d[0].backward()
a_grad = np.array([0.0199441700, -0.0265922267])
test_case.assertTrue(np.allclose(a.grad.numpy(), a_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSoftmax(flow.unittest.TestCase):
    """Run every softmax scenario on both cpu and cuda."""

    def test_softmax(test_case):
        grid = OrderedDict()
        grid["fun"] = [
            _test_softmax,
            _test_softmax_dim_1,
            _test_softmax_dim_2,
            _test_softmax_dim_3,
            _test_softmax_backward_normal,
            _test_softmax_backward_1_dim,
        ]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
def _np_hardsigmoid_grad(x):
return np.where(x > 0, np.where(x >= 1, 0, 1.0 / 6), 0)
def _test_hardsigmoid_impl(test_case, shape, device):
m = flow.nn.Hardsigmoid()
arr = np.random.randn(*shape)
np_out = np.maximum(0, np.minimum(1, (arr + 3) / 6))
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), _np_hardsigmoid_grad(np_out), 1e-5, 1e-5)
)
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestHardsigmoidModule(flow.unittest.TestCase):
    """Drive _test_hardsigmoid_impl over a grid of shapes and devices."""

    def test_hardsigmoid(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_hardsigmoid_impl(test_case, *combo)
def _test_logsoftmax(test_case, device):
    """LogSoftmax over dim 1 on a 2-D input matches the numpy reference."""
    dim = 1
    logsoftmax = flow.nn.LogSoftmax(dim)
    arr = np.random.randn(4, 7)
    x = flow.Tensor(arr, device=flow.device(device))
    result = logsoftmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_logsoftmax(arr, dim), 1e-5, 1e-5)
    )
def _test_logsoftmax_dim_2(test_case, device):
    """LogSoftmax over dim 2 on a 3-D input matches the numpy reference."""
    dim = 2
    logsoftmax = flow.nn.LogSoftmax(dim)
    arr = np.random.randn(3, 4, 5)
    x = flow.Tensor(arr, device=flow.device(device))
    result = logsoftmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_logsoftmax(arr, dim), 1e-5, 1e-5)
    )
def _test_logsoftmax_dim_3(test_case, device):
    """LogSoftmax over dim 3 on a 4-D input matches the numpy reference."""
    dim = 3
    logsoftmax = flow.nn.LogSoftmax(dim)
    arr = np.random.randn(8, 9, 7, 3)
    x = flow.Tensor(arr, device=flow.device(device))
    result = logsoftmax(x)
    test_case.assertTrue(
        np.allclose(result.numpy(), numpy_logsoftmax(arr, dim), 1e-5, 1e-5)
    )
def _test_logsoftmax_backward(test_case, device):
    """Backward of LogSoftmax over dim 0, checked against a precomputed grad.

    Both the input and the expected gradient are fixed literal 2x3x4x5 arrays;
    the gradient values were presumably precomputed offline for exactly this
    input (TODO confirm provenance if they ever need regenerating).
    """
    axis = 0
    m = flow.nn.LogSoftmax(axis)
    # Fixed test input (shape 2x3x4x5).
    input_arr = np.array(
        [
            [
                [
                    [2.0, 1.0, 9.0, 3.0, 4.0],
                    [1.0, 6.0, 7.0, 1.0, 4.0],
                    [4.0, 7.0, 5.0, 8.0, 1.0],
                    [9.0, 5.0, 7.0, 8.0, 5.0],
                ],
                [
                    [1.0, 1.0, 5.0, 3.0, 5.0],
                    [3.0, 6.0, 3.0, 7.0, 8.0],
                    [8.0, 8.0, 1.0, 2.0, 6.0],
                    [3.0, 5.0, 6.0, 1.0, 1.0],
                ],
                [
                    [8.0, 3.0, 6.0, 3.0, 7.0],
                    [8.0, 5.0, 1.0, 2.0, 7.0],
                    [3.0, 9.0, 4.0, 6.0, 5.0],
                    [5.0, 1.0, 2.0, 3.0, 6.0],
                ],
            ],
            [
                [
                    [3.0, 5.0, 3.0, 1.0, 7.0],
                    [5.0, 2.0, 6.0, 3.0, 5.0],
                    [5.0, 1.0, 8.0, 6.0, 9.0],
                    [9.0, 8.0, 4.0, 5.0, 1.0],
                ],
                [
                    [7.0, 5.0, 7.0, 1.0, 6.0],
                    [3.0, 3.0, 6.0, 6.0, 7.0],
                    [9.0, 4.0, 1.0, 5.0, 7.0],
                    [7.0, 6.0, 9.0, 8.0, 6.0],
                ],
                [
                    [6.0, 7.0, 5.0, 3.0, 9.0],
                    [4.0, 1.0, 2.0, 3.0, 2.0],
                    [4.0, 3.0, 8.0, 7.0, 8.0],
                    [1.0, 3.0, 8.0, 6.0, 2.0],
                ],
            ],
        ]
    )
    # float64 input with requires_grad so backward produces a full gradient.
    x = flow.Tensor(
        input_arr, requires_grad=True, device=flow.device(device), dtype=flow.float64,
    )
    # Expected gradient of sum(logsoftmax(x)) w.r.t. x for the input above.
    x_grad = np.array(
        [
            [
                [
                    [0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825],
                    [0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716],
                    [0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.99932930],
                    [0.00000000, 0.90514825, -0.90514825, -0.90514825, -0.96402758],
                ],
                [
                    [0.99505475, 0.96402758, 0.76159416, -0.76159416, 0.46211716],
                    [0.00000000, -0.90514825, 0.90514825, -0.46211716, -0.46211716],
                    [0.46211716, -0.96402758, 0.00000000, 0.90514825, 0.46211716],
                    [0.96402758, 0.46211716, 0.90514825, 0.99817790, 0.98661430],
                ],
                [
                    [-0.76159416, 0.96402758, -0.46211716, 0.00000000, 0.76159416],
                    [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.98661430],
                    [0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825],
                    [-0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758],
                ],
            ],
            [
                [
                    [-0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825],
                    [-0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716],
                    [-0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.99932930],
                    [0.00000000, -0.90514825, 0.90514825, 0.90514825, 0.96402758],
                ],
                [
                    [-0.99505475, -0.96402758, -0.76159416, 0.76159416, -0.46211716],
                    [0.00000000, 0.90514825, -0.90514825, 0.46211716, 0.46211716],
                    [-0.46211716, 0.96402758, 0.00000000, -0.90514825, -0.46211716],
                    [-0.96402758, -0.46211716, -0.90514825, -0.99817790, -0.98661430],
                ],
                [
                    [0.76159416, -0.96402758, 0.46211716, 0.00000000, -0.76159416],
                    [0.96402758, 0.96402758, -0.46211716, -0.46211716, 0.98661430],
                    [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -0.90514825],
                    [0.96402758, -0.76159416, -0.99505475, -0.90514825, 0.96402758],
                ],
            ],
        ]
    )
    y = m(x).sum()
    y.backward()
    test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLogSoftmax(flow.unittest.TestCase):
    """Run every log-softmax scenario on both cpu and cuda."""

    def test_log_softmax(test_case):
        grid = OrderedDict()
        grid["fun"] = [
            _test_logsoftmax,
            _test_logsoftmax_dim_2,
            _test_logsoftmax_dim_3,
            _test_logsoftmax_backward,
        ]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
def _test_logsigmoid(test_case, device):
m = flow.nn.LogSigmoid()
arr = np.array([1.0, 2.0, 3.0, 10.2, 7.6])
np_out = np.log(1.0 / (1.0 + np.exp(-arr)))
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
np_grad = [
0.2689414213699951,
0.11920292202211764,
0.04742587317756669,
3.716893710287265e-05,
0.0005002011070795276,
]
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLogSigmoidModule(flow.unittest.TestCase):
    """Run the LogSigmoid check on both cpu and cuda."""

    def test_logsigmoid(test_case):
        grid = OrderedDict()
        grid["fun"] = [
            _test_logsigmoid,
        ]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
def _test_softplus(test_case, device):
m = flow.nn.Softplus()
arr = np.random.randn(2, 3, 4, 5)
np_out = numpy_softplus(arr, 1.0, 20)
x = flow.Tensor(arr, device=flow.device(device))
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_softplus_beta(test_case, device):
m = flow.nn.Softplus(beta=1.11)
arr = np.random.randn(2, 3, 4, 5)
np_out = numpy_softplus(arr, 1.11, 20)
x = flow.Tensor(arr, device=flow.device(device))
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_softplus_threshold(test_case, device):
m = flow.nn.Softplus(beta=1.11, threshold=1.55)
arr = np.random.randn(2, 3, 4, 5)
np_out = np.where(
arr * 1.11 > 1.55, arr, 1.0 / 1.11 * np.log(1.0 + np.exp(1.11 * arr))
)
np_out = numpy_softplus(arr, 1.11, 1.55)
x = flow.Tensor(arr, device=flow.device(device))
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_softplus_backward(test_case, device):
m = flow.nn.Softplus()
arr = np.array([1.0, 2.0, 21.0, 20.0, 4.0])
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
of_out = of_out.sum()
of_out.backward()
np_grad = [0.7310585786300049, 0.8807970779778824, 1.0, 1.0, 0.9820137900379085]
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSoftplusModule(flow.unittest.TestCase):
    """Run every softplus scenario (cpu only, matching the original grid)."""

    def test_softplus(test_case):
        grid = OrderedDict()
        grid["test_fun"] = [
            _test_softplus,
            _test_softplus_beta,
            _test_softplus_threshold,
            _test_softplus_backward,
        ]
        grid["device"] = ["cpu"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
def _test_hardswish_impl(test_case, shape, device):
m = flow.nn.Hardswish()
arr = np.random.randn(*shape)
f = arr + 3
relu6 = np.where(np.where(f < 0, 0, f) > 6, 6, np.where(f < 0, 0, f))
relu6_grad = np.where(f > 6, 0, np.where(f < 0, 0, 1))
np_out = arr * relu6 / 6
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
np_grad = relu6 / 6 + arr * relu6_grad / 6
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestHardswishModule(flow.unittest.TestCase):
    """Drive _test_hardswish_impl over a grid of shapes and devices."""

    def test_hardswish(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_hardswish_impl(test_case, *combo)
def _np_hardtanh_grad(x):
return np.where(x <= -2.0, 0.0, np.where(x >= 2.3, 0.0, 1.0))
def _test_hardtanh_impl(test_case, shape, device):
m = flow.nn.Hardtanh()
arr = np.random.randn(*shape)
np_out = np.maximum(-1, np.minimum(1, arr))
x = flow.Tensor(arr, device=flow.device(device))
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
m = flow.nn.Hardtanh(min_val=-2.0, max_val=2.3)
arr = np.random.randn(*shape)
np_out = np.maximum(-2.0, np.minimum(2.3, arr))
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(
np.allclose(x.grad.numpy(), _np_hardtanh_grad(np_out), 1e-5, 1e-5)
)
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestHardtanhModule(flow.unittest.TestCase):
    """Drive _test_hardtanh_impl over a grid of shapes and devices."""

    def test_hardtanh(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_hardtanh_impl(test_case, *combo)
def _test_leakyrelu_impl(test_case, shape, device):
negative_slope = 0.2
m = flow.nn.LeakyReLU(negative_slope=negative_slope)
arr = np.random.randn(*shape)
np_out = np.maximum(0, arr) + negative_slope * np.minimum(0, arr)
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
np_grad = np.where(arr < 0, 1.0 * negative_slope, 1.0)
of_out = of_out.sum()
of_out.backward()
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLeakyReLUModule(flow.unittest.TestCase):
    """Drive _test_leakyrelu_impl over a grid of shapes and devices."""

    def test_leaky_relu(test_case):
        grid = OrderedDict()
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            _test_leakyrelu_impl(test_case, *combo)
def _test_mish(test_case, shape, device):
np_input = np.random.randn(*shape)
of_input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
m = flow.nn.Mish()
of_out = m(of_input)
np_out = np_input * np.tanh(numpy_softplus(np_input, 1.0, 20))
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_mish_backward(test_case, shape, device):
m = flow.nn.Mish()
arr = np.random.randn(*shape)
x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
of_out = m(x)
of_out = of_out.sum()
of_out.backward()
np_grad = numpy_mish_grad(arr)
test_case.assertTrue(np.allclose(x.grad.numpy(), np_grad, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestMishModule(flow.unittest.TestCase):
    """Run forward and backward Mish checks over shapes and devices."""

    def test_mish(test_case):
        grid = OrderedDict()
        grid["test_fun"] = [
            _test_mish,
            _test_mish_backward,
        ]
        grid["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        grid["device"] = ["cpu", "cuda"]
        for combo in GenArgList(grid):
            combo[0](test_case, *combo[1:])
if __name__ == "__main__":
    # Allow running this test file directly with `python <file>.py`.
    unittest.main()
| [
"oneflow.experimental.nn.LogSoftmax",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.nn.Hardtanh",
"oneflow.experimental.nn.ReLU6",
"oneflow.experimental.nn.ELU",
"oneflow.experimental.device",
"oneflow.experimental.nn.Sigmoid",
"oneflow.experimental.nn.GELU",
"on... | [((822, 845), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (837, 845), True, 'import numpy as np\n'), ((974, 988), 'oneflow.experimental.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (986, 988), True, 'import oneflow.experimental as flow\n'), ((1027, 1050), 'numpy.maximum', 'np.maximum', (['(0)', 'np_input'], {}), '(0, np_input)\n', (1037, 1050), True, 'import numpy as np\n'), ((1743, 1766), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1758, 1766), True, 'import numpy as np\n'), ((1895, 1910), 'oneflow.experimental.nn.ReLU6', 'flow.nn.ReLU6', ([], {}), '()\n', (1908, 1910), True, 'import oneflow.experimental as flow\n'), ((2805, 2828), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2820, 2828), True, 'import numpy as np\n'), ((2960, 2974), 'oneflow.experimental.nn.Tanh', 'flow.nn.Tanh', ([], {}), '()\n', (2972, 2974), True, 'import oneflow.experimental as flow\n'), ((3016, 3033), 'numpy.tanh', 'np.tanh', (['np_input'], {}), '(np_input)\n', (3023, 3033), True, 'import numpy as np\n'), ((3340, 3363), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3355, 3363), True, 'import numpy as np\n'), ((3497, 3516), 'oneflow.experimental.tanh', 'flow.tanh', (['of_input'], {}), '(of_input)\n', (3506, 3516), True, 'import oneflow.experimental as flow\n'), ((3530, 3547), 'numpy.tanh', 'np.tanh', (['np_input'], {}), '(np_input)\n', (3537, 3547), True, 'import numpy as np\n'), ((4316, 4329), 'oneflow.experimental.nn.ELU', 'flow.nn.ELU', ([], {}), '()\n', (4327, 4329), True, 'import oneflow.experimental as flow\n'), ((4340, 4363), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (4355, 4363), True, 'import numpy as np\n'), ((4609, 4631), 'oneflow.experimental.nn.ELU', 'flow.nn.ELU', ([], {'alpha': '(1.2)'}), '(alpha=1.2)\n', (4620, 4631), True, 'import oneflow.experimental as flow\n'), ((4642, 4665), 'numpy.random.randn', 'np.random.randn', 
(['*shape'], {}), '(*shape)\n', (4657, 4665), True, 'import numpy as np\n'), ((5813, 5827), 'oneflow.experimental.nn.GELU', 'flow.nn.GELU', ([], {}), '()\n', (5825, 5827), True, 'import oneflow.experimental as flow\n'), ((6628, 6643), 'numpy.exp', 'np.exp', (['(-inputs)'], {}), '(-inputs)\n', (6634, 6643), True, 'import numpy as np\n'), ((6780, 6789), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (6786, 6789), True, 'import numpy as np\n'), ((7351, 7368), 'oneflow.experimental.nn.Sigmoid', 'flow.nn.Sigmoid', ([], {}), '()\n', (7366, 7368), True, 'import oneflow.experimental as flow\n'), ((7385, 7412), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (7400, 7412), True, 'import numpy as np\n'), ((7495, 7510), 'oneflow.experimental.sigmoid', 'flow.sigmoid', (['x'], {}), '(x)\n', (7507, 7510), True, 'import oneflow.experimental as flow\n'), ((7845, 7872), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (7860, 7872), True, 'import numpy as np\n'), ((8029, 8046), 'oneflow.experimental.nn.Sigmoid', 'flow.nn.Sigmoid', ([], {}), '()\n', (8044, 8046), True, 'import oneflow.experimental as flow\n'), ((8671, 8696), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (8686, 8696), True, 'import oneflow.experimental as flow\n'), ((8707, 8734), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (8722, 8734), True, 'import numpy as np\n'), ((8975, 9000), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (8990, 9000), True, 'import oneflow.experimental as flow\n'), ((9011, 9039), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(8)', '(16)'], {}), '(9, 7, 8, 16)\n', (9026, 9039), True, 'import numpy as np\n'), ((9280, 9305), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (9295, 9305), True, 'import 
oneflow.experimental as flow\n'), ((9316, 9343), 'numpy.random.randn', 'np.random.randn', (['(2)', '(5)', '(6)', '(3)'], {}), '(2, 5, 6, 3)\n', (9331, 9343), True, 'import numpy as np\n'), ((9584, 9609), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (9599, 9609), True, 'import oneflow.experimental as flow\n'), ((9620, 9647), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(4)', '(7)'], {}), '(1, 3, 4, 7)\n', (9635, 9647), True, 'import numpy as np\n'), ((9846, 9871), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (9861, 9871), True, 'import oneflow.experimental as flow\n'), ((10264, 10286), 'numpy.zeros', 'np.zeros', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (10272, 10286), True, 'import numpy as np\n'), ((10308, 10333), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (10323, 10333), True, 'import oneflow.experimental as flow\n'), ((10897, 10922), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'None'}), '(dim=None)\n', (10912, 10922), True, 'import oneflow.experimental as flow\n'), ((10969, 11006), 'numpy.array', 'np.array', (['[0.01994417, -0.0265922267]'], {}), '([0.01994417, -0.0265922267])\n', (10977, 11006), True, 'import numpy as np\n'), ((11839, 11860), 'oneflow.experimental.nn.Hardsigmoid', 'flow.nn.Hardsigmoid', ([], {}), '()\n', (11858, 11860), True, 'import oneflow.experimental as flow\n'), ((11871, 11894), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (11886, 11894), True, 'import numpy as np\n'), ((12778, 12801), 'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (12796, 12801), True, 'import oneflow.experimental as flow\n'), ((12818, 12839), 'numpy.random.randn', 'np.random.randn', (['(4)', '(7)'], {}), '(4, 7)\n', (12833, 12839), True, 'import numpy as np\n'), ((13096, 13119), 'oneflow.experimental.nn.LogSoftmax', 
'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (13114, 13119), True, 'import oneflow.experimental as flow\n'), ((13136, 13160), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (13151, 13160), True, 'import numpy as np\n'), ((13417, 13440), 'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (13435, 13440), True, 'import oneflow.experimental as flow\n'), ((13457, 13484), 'numpy.random.randn', 'np.random.randn', (['(8)', '(9)', '(7)', '(3)'], {}), '(8, 9, 7, 3)\n', (13472, 13484), True, 'import numpy as np\n'), ((13745, 13769), 'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['axis'], {}), '(axis)\n', (13763, 13769), True, 'import oneflow.experimental as flow\n'), ((13786, 14499), 'numpy.array', 'np.array', (['[[[[2.0, 1.0, 9.0, 3.0, 4.0], [1.0, 6.0, 7.0, 1.0, 4.0], [4.0, 7.0, 5.0, \n 8.0, 1.0], [9.0, 5.0, 7.0, 8.0, 5.0]], [[1.0, 1.0, 5.0, 3.0, 5.0], [3.0,\n 6.0, 3.0, 7.0, 8.0], [8.0, 8.0, 1.0, 2.0, 6.0], [3.0, 5.0, 6.0, 1.0, \n 1.0]], [[8.0, 3.0, 6.0, 3.0, 7.0], [8.0, 5.0, 1.0, 2.0, 7.0], [3.0, 9.0,\n 4.0, 6.0, 5.0], [5.0, 1.0, 2.0, 3.0, 6.0]]], [[[3.0, 5.0, 3.0, 1.0, 7.0\n ], [5.0, 2.0, 6.0, 3.0, 5.0], [5.0, 1.0, 8.0, 6.0, 9.0], [9.0, 8.0, 4.0,\n 5.0, 1.0]], [[7.0, 5.0, 7.0, 1.0, 6.0], [3.0, 3.0, 6.0, 6.0, 7.0], [9.0,\n 4.0, 1.0, 5.0, 7.0], [7.0, 6.0, 9.0, 8.0, 6.0]], [[6.0, 7.0, 5.0, 3.0, \n 9.0], [4.0, 1.0, 2.0, 3.0, 2.0], [4.0, 3.0, 8.0, 7.0, 8.0], [1.0, 3.0, \n 8.0, 6.0, 2.0]]]]'], {}), '([[[[2.0, 1.0, 9.0, 3.0, 4.0], [1.0, 6.0, 7.0, 1.0, 4.0], [4.0, 7.0,\n 5.0, 8.0, 1.0], [9.0, 5.0, 7.0, 8.0, 5.0]], [[1.0, 1.0, 5.0, 3.0, 5.0],\n [3.0, 6.0, 3.0, 7.0, 8.0], [8.0, 8.0, 1.0, 2.0, 6.0], [3.0, 5.0, 6.0, \n 1.0, 1.0]], [[8.0, 3.0, 6.0, 3.0, 7.0], [8.0, 5.0, 1.0, 2.0, 7.0], [3.0,\n 9.0, 4.0, 6.0, 5.0], [5.0, 1.0, 2.0, 3.0, 6.0]]], [[[3.0, 5.0, 3.0, 1.0,\n 7.0], [5.0, 2.0, 6.0, 3.0, 5.0], [5.0, 1.0, 8.0, 6.0, 9.0], [9.0, 8.0, \n 4.0, 5.0, 1.0]], [[7.0, 5.0, 7.0, 1.0, 6.0], [3.0, 3.0, 
6.0, 6.0, 7.0],\n [9.0, 4.0, 1.0, 5.0, 7.0], [7.0, 6.0, 9.0, 8.0, 6.0]], [[6.0, 7.0, 5.0,\n 3.0, 9.0], [4.0, 1.0, 2.0, 3.0, 2.0], [4.0, 3.0, 8.0, 7.0, 8.0], [1.0, \n 3.0, 8.0, 6.0, 2.0]]]])\n', (13794, 14499), True, 'import numpy as np\n'), ((15357, 16970), 'numpy.array', 'np.array', (['[[[[0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825], [\n 0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716], [\n 0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.9993293], [0.0, \n 0.90514825, -0.90514825, -0.90514825, -0.96402758]], [[0.99505475, \n 0.96402758, 0.76159416, -0.76159416, 0.46211716], [0.0, -0.90514825, \n 0.90514825, -0.46211716, -0.46211716], [0.46211716, -0.96402758, 0.0, \n 0.90514825, 0.46211716], [0.96402758, 0.46211716, 0.90514825, 0.9981779,\n 0.9866143]], [[-0.76159416, 0.96402758, -0.46211716, 0.0, 0.76159416],\n [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.9866143], [\n 0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825], [-\n 0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758]]], [[[-\n 0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825], [-\n 0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716], [-\n 0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.9993293], [0.0, -\n 0.90514825, 0.90514825, 0.90514825, 0.96402758]], [[-0.99505475, -\n 0.96402758, -0.76159416, 0.76159416, -0.46211716], [0.0, 0.90514825, -\n 0.90514825, 0.46211716, 0.46211716], [-0.46211716, 0.96402758, 0.0, -\n 0.90514825, -0.46211716], [-0.96402758, -0.46211716, -0.90514825, -\n 0.9981779, -0.9866143]], [[0.76159416, -0.96402758, 0.46211716, 0.0, -\n 0.76159416], [0.96402758, 0.96402758, -0.46211716, -0.46211716, \n 0.9866143], [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -\n 0.90514825], [0.96402758, -0.76159416, -0.99505475, -0.90514825, \n 0.96402758]]]]'], {}), '([[[[0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825],\n [0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716], [\n 
0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.9993293], [0.0, \n 0.90514825, -0.90514825, -0.90514825, -0.96402758]], [[0.99505475, \n 0.96402758, 0.76159416, -0.76159416, 0.46211716], [0.0, -0.90514825, \n 0.90514825, -0.46211716, -0.46211716], [0.46211716, -0.96402758, 0.0, \n 0.90514825, 0.46211716], [0.96402758, 0.46211716, 0.90514825, 0.9981779,\n 0.9866143]], [[-0.76159416, 0.96402758, -0.46211716, 0.0, 0.76159416],\n [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.9866143], [\n 0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825], [-\n 0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758]]], [[[-\n 0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825], [-\n 0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716], [-\n 0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.9993293], [0.0, -\n 0.90514825, 0.90514825, 0.90514825, 0.96402758]], [[-0.99505475, -\n 0.96402758, -0.76159416, 0.76159416, -0.46211716], [0.0, 0.90514825, -\n 0.90514825, 0.46211716, 0.46211716], [-0.46211716, 0.96402758, 0.0, -\n 0.90514825, -0.46211716], [-0.96402758, -0.46211716, -0.90514825, -\n 0.9981779, -0.9866143]], [[0.76159416, -0.96402758, 0.46211716, 0.0, -\n 0.76159416], [0.96402758, 0.96402758, -0.46211716, -0.46211716, \n 0.9866143], [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -\n 0.90514825], [0.96402758, -0.76159416, -0.99505475, -0.90514825, \n 0.96402758]]]])\n', (15365, 16970), True, 'import numpy as np\n'), ((18396, 18416), 'oneflow.experimental.nn.LogSigmoid', 'flow.nn.LogSigmoid', ([], {}), '()\n', (18414, 18416), True, 'import oneflow.experimental as flow\n'), ((18427, 18463), 'numpy.array', 'np.array', (['[1.0, 2.0, 3.0, 10.2, 7.6]'], {}), '([1.0, 2.0, 3.0, 10.2, 7.6])\n', (18435, 18463), True, 'import numpy as np\n'), ((19451, 19469), 'oneflow.experimental.nn.Softplus', 'flow.nn.Softplus', ([], {}), '()\n', (19467, 19469), True, 'import oneflow.experimental as flow\n'), ((19480, 19507), 'numpy.random.randn', 
'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (19495, 19507), True, 'import numpy as np\n'), ((19749, 19776), 'oneflow.experimental.nn.Softplus', 'flow.nn.Softplus', ([], {'beta': '(1.11)'}), '(beta=1.11)\n', (19765, 19776), True, 'import oneflow.experimental as flow\n'), ((19787, 19814), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (19802, 19814), True, 'import numpy as np\n'), ((20062, 20105), 'oneflow.experimental.nn.Softplus', 'flow.nn.Softplus', ([], {'beta': '(1.11)', 'threshold': '(1.55)'}), '(beta=1.11, threshold=1.55)\n', (20078, 20105), True, 'import oneflow.experimental as flow\n'), ((20116, 20143), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (20131, 20143), True, 'import numpy as np\n'), ((20499, 20517), 'oneflow.experimental.nn.Softplus', 'flow.nn.Softplus', ([], {}), '()\n', (20515, 20517), True, 'import oneflow.experimental as flow\n'), ((20528, 20565), 'numpy.array', 'np.array', (['[1.0, 2.0, 21.0, 20.0, 4.0]'], {}), '([1.0, 2.0, 21.0, 20.0, 4.0])\n', (20536, 20565), True, 'import numpy as np\n'), ((21458, 21477), 'oneflow.experimental.nn.Hardswish', 'flow.nn.Hardswish', ([], {}), '()\n', (21475, 21477), True, 'import oneflow.experimental as flow\n'), ((21488, 21511), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (21503, 21511), True, 'import numpy as np\n'), ((22614, 22632), 'oneflow.experimental.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {}), '()\n', (22630, 22632), True, 'import oneflow.experimental as flow\n'), ((22643, 22666), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (22658, 22666), True, 'import numpy as np\n'), ((22869, 22912), 'oneflow.experimental.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {'min_val': '(-2.0)', 'max_val': '(2.3)'}), '(min_val=-2.0, max_val=2.3)\n', (22885, 22912), True, 'import oneflow.experimental as flow\n'), ((22923, 22946), 
'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (22938, 22946), True, 'import numpy as np\n'), ((23838, 23886), 'oneflow.experimental.nn.LeakyReLU', 'flow.nn.LeakyReLU', ([], {'negative_slope': 'negative_slope'}), '(negative_slope=negative_slope)\n', (23855, 23886), True, 'import oneflow.experimental as flow\n'), ((23897, 23920), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (23912, 23920), True, 'import numpy as np\n'), ((24172, 24216), 'numpy.where', 'np.where', (['(arr < 0)', '(1.0 * negative_slope)', '(1.0)'], {}), '(arr < 0, 1.0 * negative_slope, 1.0)\n', (24180, 24216), True, 'import numpy as np\n'), ((24834, 24857), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (24849, 24857), True, 'import numpy as np\n'), ((24952, 24966), 'oneflow.experimental.nn.Mish', 'flow.nn.Mish', ([], {}), '()\n', (24964, 24966), True, 'import oneflow.experimental as flow\n'), ((25194, 25208), 'oneflow.experimental.nn.Mish', 'flow.nn.Mish', ([], {}), '()\n', (25206, 25208), True, 'import oneflow.experimental as flow\n'), ((25219, 25242), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (25234, 25242), True, 'import numpy as np\n'), ((26039, 26054), 'unittest.main', 'unittest.main', ([], {}), '()\n', (26052, 26054), False, 'import unittest\n'), ((1471, 1484), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1482, 1484), False, 'from collections import OrderedDict\n'), ((1611, 1631), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (1621, 1631), False, 'from test_util import GenArgList\n'), ((1287, 1330), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (1328, 1330), True, 'import oneflow.experimental as flow\n'), ((1960, 1983), 'numpy.maximum', 'np.maximum', (['(0)', 'np_input'], {}), '(0, np_input)\n', (1970, 1983), True, 'import numpy as np\n'), ((2530, 2543), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2541, 2543), False, 'from collections import OrderedDict\n'), ((2670, 2690), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2680, 2690), False, 'from test_util import GenArgList\n'), ((2344, 2387), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2385, 2387), True, 'import oneflow.experimental as flow\n'), ((3987, 4000), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3998, 4000), False, 'from collections import OrderedDict\n'), ((4127, 4147), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4137, 4147), False, 'from test_util import GenArgList\n'), ((3809, 3852), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3850, 3852), True, 'import oneflow.experimental as flow\n'), ((5290, 5303), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5301, 5303), False, 'from collections import OrderedDict\n'), ((5430, 5450), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5440, 5450), False, 'from test_util import GenArgList\n'), ((5108, 5151), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (5149, 5151), True, 'import oneflow.experimental as flow\n'), ((6376, 6389), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6387, 6389), False, 'from collections import OrderedDict\n'), ((6454, 6474), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6464, 6474), False, 'from test_util import GenArgList\n'), ((6198, 6241), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (6239, 6241), True, 'import oneflow.experimental as flow\n'), ((7168, 7177), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', 
(7174, 7177), True, 'import numpy as np\n'), ((7995, 8019), 'numpy.ones', 'np.ones', (['input_arr.shape'], {}), '(input_arr.shape)\n', (8002, 8019), True, 'import numpy as np\n'), ((8369, 8382), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8380, 8382), False, 'from collections import OrderedDict\n'), ((8548, 8568), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8558, 8568), False, 'from test_util import GenArgList\n'), ((8185, 8228), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (8226, 8228), True, 'import oneflow.experimental as flow\n'), ((10363, 10390), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (10378, 10390), True, 'import numpy as np\n'), ((11295, 11308), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11306, 11308), False, 'from collections import OrderedDict\n'), ((11622, 11642), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (11632, 11642), False, 'from test_util import GenArgList\n'), ((11111, 11154), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (11152, 11154), True, 'import oneflow.experimental as flow\n'), ((11742, 11770), 'numpy.where', 'np.where', (['(x >= 1)', '(0)', '(1.0 / 6)'], {}), '(x >= 1, 0, 1.0 / 6)\n', (11750, 11770), True, 'import numpy as np\n'), ((11922, 11950), 'numpy.minimum', 'np.minimum', (['(1)', '((arr + 3) / 6)'], {}), '(1, (arr + 3) / 6)\n', (11932, 11950), True, 'import numpy as np\n'), ((12501, 12514), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12512, 12514), False, 'from collections import OrderedDict\n'), ((12641, 12661), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (12651, 12661), False, 'from test_util import GenArgList\n'), ((12303, 12346), 
'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (12344, 12346), True, 'import oneflow.experimental as flow\n'), ((18026, 18039), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18037, 18039), False, 'from collections import OrderedDict\n'), ((18283, 18303), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18293, 18303), False, 'from test_util import GenArgList\n'), ((17835, 17878), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (17876, 17878), True, 'import oneflow.experimental as flow\n'), ((19194, 19207), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19205, 19207), False, 'from collections import OrderedDict\n'), ((19340, 19360), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (19350, 19360), False, 'from test_util import GenArgList\n'), ((18998, 19041), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (19039, 19041), True, 'import oneflow.experimental as flow\n'), ((21085, 21098), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21096, 21098), False, 'from collections import OrderedDict\n'), ((21334, 21354), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21344, 21354), False, 'from test_util import GenArgList\n'), ((20893, 20936), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (20934, 20936), True, 'import oneflow.experimental as flow\n'), ((21579, 21600), 'numpy.where', 'np.where', (['(f < 0)', '(0)', 'f'], {}), '(f < 0, 0, f)\n', (21587, 21600), True, 'import numpy as np\n'), ((21638, 21659), 'numpy.where', 'np.where', (['(f < 0)', '(0)', '(1)'], {}), '(f < 0, 0, 1)\n', (21646, 21659), True, 'import numpy as np\n'), ((22247, 22260), 
'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (22258, 22260), False, 'from collections import OrderedDict\n'), ((22387, 22407), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (22397, 22407), False, 'from test_util import GenArgList\n'), ((22053, 22096), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (22094, 22096), True, 'import oneflow.experimental as flow\n'), ((22523, 22551), 'numpy.where', 'np.where', (['(x >= 2.3)', '(0.0)', '(1.0)'], {}), '(x >= 2.3, 0.0, 1.0)\n', (22531, 22551), True, 'import numpy as np\n'), ((22695, 22713), 'numpy.minimum', 'np.minimum', (['(1)', 'arr'], {}), '(1, arr)\n', (22705, 22713), True, 'import numpy as np\n'), ((22977, 22997), 'numpy.minimum', 'np.minimum', (['(2.3)', 'arr'], {}), '(2.3, arr)\n', (22987, 22997), True, 'import numpy as np\n'), ((23540, 23553), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23551, 23553), False, 'from collections import OrderedDict\n'), ((23680, 23700), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23690, 23700), False, 'from test_util import GenArgList\n'), ((23348, 23391), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (23389, 23391), True, 'import oneflow.experimental as flow\n'), ((23935, 23953), 'numpy.maximum', 'np.maximum', (['(0)', 'arr'], {}), '(0, arr)\n', (23945, 23953), True, 'import numpy as np\n'), ((24563, 24576), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24574, 24576), False, 'from collections import OrderedDict\n'), ((24703, 24723), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24713, 24723), False, 'from test_util import GenArgList\n'), ((24368, 24411), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (24409, 24411), True, 
'import oneflow.experimental as flow\n'), ((25704, 25717), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (25715, 25717), False, 'from collections import OrderedDict\n'), ((25944, 25964), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (25954, 25964), False, 'from test_util import GenArgList\n'), ((25520, 25563), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (25561, 25563), True, 'import oneflow.experimental as flow\n'), ((919, 938), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (930, 938), True, 'import oneflow.experimental as flow\n'), ((1840, 1859), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1851, 1859), True, 'import oneflow.experimental as flow\n'), ((2902, 2921), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2913, 2921), True, 'import oneflow.experimental as flow\n'), ((3437, 3456), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3448, 3456), True, 'import oneflow.experimental as flow\n'), ((4457, 4476), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4468, 4476), True, 'import oneflow.experimental as flow\n'), ((4759, 4778), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4770, 4778), True, 'import oneflow.experimental as flow\n'), ((4992, 5003), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (4998, 5003), True, 'import numpy as np\n'), ((5636, 5662), 'numpy.array', 'np.array', (['[1.0, -1.0, 2.3]'], {}), '([1.0, -1.0, 2.3])\n', (5644, 5662), True, 'import numpy as np\n'), ((5755, 5774), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5766, 5774), True, 'import oneflow.experimental as flow\n'), ((6567, 6577), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (6573, 6577), True, 'import numpy as np\n'), ((6890, 6924), 
'numpy.max', 'np.max', (['x'], {'axis': 'dim', 'keepdims': '(True)'}), '(x, axis=dim, keepdims=True)\n', (6896, 6924), True, 'import numpy as np\n'), ((7451, 7470), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (7462, 7470), True, 'import oneflow.experimental as flow\n'), ((7911, 7930), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (7922, 7930), True, 'import oneflow.experimental as flow\n'), ((8767, 8786), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (8778, 8786), True, 'import oneflow.experimental as flow\n'), ((9072, 9091), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9083, 9091), True, 'import oneflow.experimental as flow\n'), ((9376, 9395), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9387, 9395), True, 'import oneflow.experimental as flow\n'), ((9680, 9699), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (9691, 9699), True, 'import oneflow.experimental as flow\n'), ((10435, 10454), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (10446, 10454), True, 'import oneflow.experimental as flow\n'), ((10719, 10738), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (10730, 10738), True, 'import oneflow.experimental as flow\n'), ((10829, 10848), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (10840, 10848), True, 'import oneflow.experimental as flow\n'), ((11984, 12003), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (11995, 12003), True, 'import oneflow.experimental as flow\n'), ((12878, 12897), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (12889, 12897), True, 'import oneflow.experimental as flow\n'), ((13199, 13218), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (13210, 13218), True, 'import 
oneflow.experimental as flow\n'), ((13523, 13542), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (13534, 13542), True, 'import oneflow.experimental as flow\n'), ((15297, 15316), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (15308, 15316), True, 'import oneflow.experimental as flow\n'), ((18544, 18563), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (18555, 18563), True, 'import oneflow.experimental as flow\n'), ((19582, 19601), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19593, 19601), True, 'import oneflow.experimental as flow\n'), ((19890, 19909), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (19901, 19909), True, 'import oneflow.experimental as flow\n'), ((20328, 20347), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (20339, 20347), True, 'import oneflow.experimental as flow\n'), ((20598, 20617), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (20609, 20617), True, 'import oneflow.experimental as flow\n'), ((21549, 21570), 'numpy.where', 'np.where', (['(f < 0)', '(0)', 'f'], {}), '(f < 0, 0, f)\n', (21557, 21570), True, 'import numpy as np\n'), ((21722, 21741), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (21733, 21741), True, 'import oneflow.experimental as flow\n'), ((22747, 22766), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (22758, 22766), True, 'import oneflow.experimental as flow\n'), ((23031, 23050), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (23042, 23050), True, 'import oneflow.experimental as flow\n'), ((23973, 23991), 'numpy.minimum', 'np.minimum', (['(0)', 'arr'], {}), '(0, arr)\n', (23983, 23991), True, 'import numpy as np\n'), ((24024, 24043), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (24035, 
24043), True, 'import oneflow.experimental as flow\n'), ((24922, 24941), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (24933, 24941), True, 'import oneflow.experimental as flow\n'), ((25275, 25294), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (25286, 25294), True, 'import oneflow.experimental as flow\n'), ((2233, 2261), 'numpy.where', 'np.where', (['(np_input < 0)', '(0)', '(1)'], {}), '(np_input < 0, 0, 1)\n', (2241, 2261), True, 'import numpy as np\n'), ((4407, 4418), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (4413, 4418), True, 'import numpy as np\n'), ((4709, 4720), 'numpy.exp', 'np.exp', (['arr'], {}), '(arr)\n', (4715, 4720), True, 'import numpy as np\n'), ((18497, 18509), 'numpy.exp', 'np.exp', (['(-arr)'], {}), '(-arr)\n', (18503, 18509), True, 'import numpy as np\n'), ((5566, 5576), 'numpy.sqrt', 'np.sqrt', (['(2)'], {}), '(2)\n', (5573, 5576), True, 'import numpy as np\n'), ((7106, 7122), 'numpy.exp', 'np.exp', (['(beta * x)'], {}), '(beta * x)\n', (7112, 7122), True, 'import numpy as np\n'), ((20225, 20243), 'numpy.exp', 'np.exp', (['(1.11 * arr)'], {}), '(1.11 * arr)\n', (20231, 20243), True, 'import numpy as np\n')] |
""":
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def deconv2d(
    input,
    filters,
    size,
    name,
    strides=2,
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=False,
):
    """Transposed 2-D convolution in NCHW layout, upscaling H/W by `strides`.

    When `reuse` is True the op name gets a "_reuse" suffix so the same
    variable is shared while the op name stays unique.
    """
    op_name = name if reuse == False else name + "_reuse"
    # conv2d_transpose weight layout: [in_channels, out_channels, k_h, k_w]
    kernel_shape = (input.shape[1], filters, size, size)
    # NOTE(review): output channels come from input.shape[1] rather than
    # `filters` — confirm this matches what callers in this file expect.
    out_shape = (
        input.shape[0],
        input.shape[1],
        input.shape[2] * strides,
        input.shape[3] * strides,
    )
    if const_init:
        kernel_init = flow.constant_initializer(0.002)
    else:
        # initializer=flow.random_normal_initializer(stddev=0.02)
        kernel_init = flow.glorot_uniform_initializer(data_format="NCHW")
    weight = flow.get_variable(
        name + "-weight",
        shape=kernel_shape,
        dtype=input.dtype,
        initializer=kernel_init,
        trainable=trainable,
    )
    output = flow.nn.conv2d_transpose(
        input,
        weight,
        strides=[strides, strides],
        output_shape=out_shape,
        padding="SAME",
        data_format="NCHW",
        name=op_name,
    )
    if not use_bias:
        return output
    bias = flow.get_variable(
        name + "-bias",
        shape=(filters,),
        dtype=input.dtype,
        initializer=flow.constant_initializer(0.0),
        trainable=trainable,
    )
    return flow.nn.bias_add(output, bias, "NCHW")
def conv2d(
    input,
    filters,
    size,
    name,
    strides=2,
    padding="same",
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=True,
):
    """2-D convolution in NCHW layout with optional bias add.

    When `reuse` is True the op name gets a "_reuse" suffix so the same
    variable is shared while the op name stays unique.
    """
    op_name = name if reuse == False else name + "_reuse"
    if const_init:
        kernel_init = flow.constant_initializer(0.002)
    else:
        # initializer=flow.random_normal_initializer(stddev=0.02)
        kernel_init = flow.glorot_uniform_initializer(data_format="NCHW")
    # Weight layout for NCHW: [out_channels, in_channels, k_h, k_w]
    # ((output_dim, k_h, k_w, input.shape[3]) would be the NHWC layout.)
    weight = flow.get_variable(
        name + "-weight",
        shape=(filters, input.shape[1], size, size),
        dtype=input.dtype,
        initializer=kernel_init,
        trainable=trainable,
    )
    output = flow.nn.compat_conv2d(
        input,
        weight,
        strides=[strides, strides],
        padding=padding,
        data_format="NCHW",
        name=op_name,
    )
    if not use_bias:
        return output
    bias = flow.get_variable(
        name + "-bias",
        shape=(filters,),
        dtype=input.dtype,
        initializer=flow.constant_initializer(0.0),
        trainable=trainable,
    )
    return flow.nn.bias_add(output, bias, "NCHW")
def batchnorm(input, name, axis=1, trainable=True, reuse=False):
    """Channel-wise batch normalization (`reuse` is accepted for API symmetry
    with the other layer helpers but is not used here)."""
    bn_options = {"axis": axis, "trainable": trainable, "name": name}
    return flow.layers.batch_normalization(input, **bn_options)
# def batchnorm(input, name, axis=1, reuse=False):
# return flow.layers.batch_normalization(input, axis=axis)
def dense(
    input, units, name, use_bias=False, trainable=True, reuse=False, const_init=False
):
    """Fully connected layer: flattens trailing dims, multiplies by a
    (units, in_features) weight, optionally adds bias, then restores the
    leading shape.

    When `reuse` is True the op name gets a "_reuse" suffix so the same
    variables are shared while the op names stay unique.
    """
    op_name = name if reuse == False else name + "_reuse"
    shape_in = input.shape
    ndims = len(shape_in)
    assert ndims >= 2
    # Collapse everything but the last axis so matmul sees a 2-D input.
    x = flow.reshape(input, (-1, shape_in[-1])) if ndims > 2 else input
    if const_init:
        weight_init = flow.constant_initializer(0.002)
    else:
        # initializer=flow.random_normal_initializer(stddev=0.02)
        weight_init = flow.glorot_uniform_initializer(data_format="NCHW")
    weight = flow.get_variable(
        name="{}-weight".format(name),
        shape=(units, x.shape[1]),
        dtype=x.dtype,
        initializer=weight_init,
        trainable=trainable,
        model_name="weight",
    )
    out = flow.matmul(a=x, b=weight, transpose_b=True, name=op_name + "matmul")
    if use_bias:
        bias = flow.get_variable(
            name="{}-bias".format(name),
            shape=(units,),
            dtype=x.dtype,
            initializer=flow.random_normal_initializer(0.0),
            trainable=trainable,
            model_name="bias",
        )
        out = flow.nn.bias_add(out, bias, name=op_name + "_bias_add")
    if ndims > 2:
        out = flow.reshape(out, shape_in[:-1] + (units,))
    return out
| [
"oneflow.nn.conv2d_transpose",
"oneflow.nn.compat_conv2d",
"oneflow.constant_initializer",
"oneflow.glorot_uniform_initializer",
"oneflow.layers.batch_normalization",
"oneflow.nn.bias_add",
"oneflow.reshape",
"oneflow.random_normal_initializer",
"oneflow.matmul"
] | [((1501, 1647), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': '[strides, strides]', 'output_shape': 'output_shape', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides],\n output_shape=output_shape, padding='SAME', data_format='NCHW', name=name_)\n", (1525, 1647), True, 'import oneflow as flow\n'), ((2786, 2904), 'oneflow.nn.compat_conv2d', 'flow.nn.compat_conv2d', (['input', 'weight'], {'strides': '[strides, strides]', 'padding': 'padding', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides], padding=\n padding, data_format='NCHW', name=name_)\n", (2807, 2904), True, 'import oneflow as flow\n'), ((3371, 3457), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', (['input'], {'axis': 'axis', 'trainable': 'trainable', 'name': 'name'}), '(input, axis=axis, trainable=trainable, name\n =name)\n', (3402, 3457), True, 'import oneflow as flow\n'), ((4351, 4423), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'inputs', 'b': 'weight', 'transpose_b': '(True)', 'name': "(name_ + 'matmul')"}), "(a=inputs, b=weight, transpose_b=True, name=name_ + 'matmul')\n", (4362, 4423), True, 'import oneflow as flow\n'), ((1984, 2022), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (2000, 2022), True, 'import oneflow as flow\n'), ((3231, 3269), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (3247, 3269), True, 'import oneflow as flow\n'), ((3841, 3880), 'oneflow.reshape', 'flow.reshape', (['input', '(-1, in_shape[-1])'], {}), '(input, (-1, in_shape[-1]))\n', (3853, 3880), True, 'import oneflow as flow\n'), ((4825, 4878), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['out', 'bias'], {'name': "(name_ + '_bias_add')"}), "(out, bias, name=name_ + '_bias_add')\n", (4841, 4878), True, 'import oneflow as 
flow\n'), ((4892, 4935), 'oneflow.reshape', 'flow.reshape', (['out', '(in_shape[:-1] + (units,))'], {}), '(out, in_shape[:-1] + (units,))\n', (4904, 4935), True, 'import oneflow as flow\n'), ((1321, 1372), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {'data_format': '"""NCHW"""'}), "(data_format='NCHW')\n", (1352, 1372), True, 'import oneflow as flow\n'), ((1414, 1446), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (1439, 1446), True, 'import oneflow as flow\n'), ((1887, 1917), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (1912, 1917), True, 'import oneflow as flow\n'), ((2606, 2657), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {'data_format': '"""NCHW"""'}), "(data_format='NCHW')\n", (2637, 2657), True, 'import oneflow as flow\n'), ((2699, 2731), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (2724, 2731), True, 'import oneflow as flow\n'), ((3134, 3164), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3159, 3164), True, 'import oneflow as flow\n'), ((4144, 4195), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {'data_format': '"""NCHW"""'}), "(data_format='NCHW')\n", (4175, 4195), True, 'import oneflow as flow\n'), ((4237, 4269), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.002)'], {}), '(0.002)\n', (4262, 4269), True, 'import oneflow as flow\n'), ((4609, 4644), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', (['(0.0)'], {}), '(0.0)\n', (4639, 4644), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Callable, Dict, Iterator, List, Union
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.nn.parameter import Parameter
from .optimizer import Optimizer, ParamGroup
class SGD(Optimizer):
    """Implements SGD algorithm.

    This algorithm takes a random sample’s gradient as an approximate estimate
    of the overall gradient in small batch gradient descent.

    When the momentum = 0, the equation of parameters updating is:

    .. math::

        param_{new} = param_{old} - learning\\_rate * grad

    With momentum, the equation of parameters updating is:

    .. math::

        & V_t = \\beta * V_{t-1} + learning\\_rate * g_t

        & param_{new} = param_{old} - V_t

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        momentum (float, optional): Momentum factor (default: 0.0)
        scale (float, optional): the scale factor of loss (default: 1.0)
    """

    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        momentum: float = 0.0,
        scale: float = 1.0,
    ):
        super().__init__()
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert momentum >= 0.0, f"Invalid momentum: {momentum}"
        assert scale >= 0.0, f"Invalid scale factor: {scale}"
        self._default_options["lr"] = lr
        self._default_options["scale"] = scale
        self._default_options["momentum"] = momentum
        # A bare iterator is treated as a single parameter group; otherwise
        # `parameters` is expected to hold per-group dicts.
        if isinstance(parameters, collections.abc.Iterator):
            self.param_groups.append(ParamGroup(parameters, self._default_options))
        else:
            for param in parameters:
                self.param_groups.append(ParamGroup(param, self._default_options))
        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                self._state[param] = dict()
                # Momentum needs a persistent velocity buffer (V_t in the
                # class docstring) per parameter; plain SGD does not.
                if param_group["momentum"] != 0.0:
                    self._state[param]["momentum_buf"] = flow.experimental.zeros_like(
                        param
                    )
        # Build the raw update ops once up front; step() dispatches them
        # per-parameter with the current hyperparameters.
        self._momentum_sgd = (
            flow.stateful_op("momentum_update")
            .Input("model")
            .Input("model_diff")
            .Input("momentum")
            .Build()
        )
        self._sgd = (
            flow.stateful_op("sgd_update").Input("model").Input("model_diff").Build()
        )

    def step(self, closure: Callable = None):
        """Performs a single optimization step.

        Args:
            closure (Callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The value returned by ``closure``, or ``None`` when no closure is
            given.
        """
        with flow.no_grad():
            loss = None
            if closure is not None:
                loss = closure()
            for param_group in self.param_groups:
                lr = param_group["lr"]
                for param in param_group.parameters:
                    # Parameters that did not participate in the backward
                    # pass have no gradient and are skipped.
                    if param.grad is None:
                        continue
                    if param_group["momentum"] == 0.0:
                        # Plain SGD update (no momentum buffer involved).
                        scale = param_group["scale"]
                        flow._C.dispatch_sgd_update(
                            self._sgd,
                            (param, param.grad),
                            learning_rate=lr,
                            scale=scale,
                        )
                    else:
                        # Momentum update; mutates momentum_buf in place via
                        # the stateful op (see V_t formula in the docstring).
                        momentum_buf = self._state[param]["momentum_buf"]
                        scale = param_group["scale"]
                        beta = param_group["momentum"]
                        flow._C.dispatch_momentum_update(
                            self._momentum_sgd,
                            (param, param.grad, momentum_buf),
                            learning_rate=lr,
                            scale=scale,
                            beta=beta,
                        )
            self._state["step"] = self._state["step"] + 1
        return loss
| [
"oneflow.compatible.single_client.stateful_op",
"oneflow.compatible.single_client.no_grad",
"oneflow.compatible.single_client.experimental.zeros_like",
"oneflow.compatible.single_client._C.dispatch_sgd_update",
"oneflow.compatible.single_client._C.dispatch_momentum_update"
] | [((3292, 3306), 'oneflow.compatible.single_client.no_grad', 'flow.no_grad', ([], {}), '()\n', (3304, 3306), True, 'from oneflow.compatible import single_client as flow\n'), ((2830, 2865), 'oneflow.compatible.single_client.experimental.zeros_like', 'flow.experimental.zeros_like', (['param'], {}), '(param)\n', (2858, 2865), True, 'from oneflow.compatible import single_client as flow\n'), ((3751, 3846), 'oneflow.compatible.single_client._C.dispatch_sgd_update', 'flow._C.dispatch_sgd_update', (['self._sgd', '(param, param.grad)'], {'learning_rate': 'lr', 'scale': 'scale'}), '(self._sgd, (param, param.grad), learning_rate=\n lr, scale=scale)\n', (3778, 3846), True, 'from oneflow.compatible import single_client as flow\n'), ((4213, 4346), 'oneflow.compatible.single_client._C.dispatch_momentum_update', 'flow._C.dispatch_momentum_update', (['self._momentum_sgd', '(param, param.grad, momentum_buf)'], {'learning_rate': 'lr', 'scale': 'scale', 'beta': 'beta'}), '(self._momentum_sgd, (param, param.grad,\n momentum_buf), learning_rate=lr, scale=scale, beta=beta)\n', (4245, 4346), True, 'from oneflow.compatible import single_client as flow\n'), ((3148, 3178), 'oneflow.compatible.single_client.stateful_op', 'flow.stateful_op', (['"""sgd_update"""'], {}), "('sgd_update')\n", (3164, 3178), True, 'from oneflow.compatible import single_client as flow\n'), ((2955, 2990), 'oneflow.compatible.single_client.stateful_op', 'flow.stateful_op', (['"""momentum_update"""'], {}), "('momentum_update')\n", (2971, 2990), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
from util import convert_to_onnx_and_check
def set_moving_max_min_value():
    """Overwrite the moving-average observer state with fixed values.

    Scans all registered variable names for the ones ending in ``"max"``
    and ``"min"``; once both have been seen, loads 0.5 / -0.2 into them so
    the exported ONNX model is deterministic.  Does nothing if either key
    is missing.
    """
    found_max = None
    found_min = None
    for var_name in flow.get_all_variables().keys():
        if var_name.endswith("max"):
            found_max = var_name
        if var_name.endswith("min"):
            found_min = var_name
        if found_max is not None and found_min is not None:
            break
    if found_max is not None and found_min is not None:
        flow.load_variables(
            {
                found_max: np.array([0.5]).astype(np.float32),
                found_min: np.array([-0.2]).astype(np.float32),
            }
        )
def generate_fake_quantization_test(
    per_layer: bool = True, scheme: str = "symmetric", device_type: str = "cpu",
):
    # Build a global function that fake-quantizes a random variable using the
    # scale/zero-point produced by min_max_observer, then export it to ONNX
    # and compare the ONNX output against oneflow's.
    @flow.global_function()
    def fake_quantization():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                name="x1",
                shape=(2, 3, 4),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(-10, 10),
            )
            return flow.quantization.fake_quantization(
                x,
                # min_max_observer returns the quantization parameters,
                # splatted here as the remaining positional arguments.
                *flow.quantization.min_max_observer(
                    x, per_layer_quantization=per_layer, quantization_scheme=scheme,
                ),
                quantization_scheme=scheme,
            )
    # Per-layer export targets opset 10; per-channel export targets opset 13
    # (presumably because per-axis quantization needs the newer opset — TODO confirm).
    convert_to_onnx_and_check(fake_quantization, opset=10 if per_layer else 13)
def generate_fake_quantization_test_moving_average(
    scheme: str = "symmetric", device_type: str = "cpu",
):
    # Same as generate_fake_quantization_test, but the quantization parameters
    # come from the stateful moving_average_min_max_observer.
    @flow.global_function()
    def fake_quantization_moving_average():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                name="x1",
                shape=(2, 3, 4),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(-10, 10),
            )
            return flow.quantization.fake_quantization(
                x,
                # The observer's outputs are splatted as positional arguments.
                *flow.quantization.moving_average_min_max_observer(
                    x, quantization_scheme=scheme,
                ),
                quantization_scheme=scheme,
            )
    # Pin the observer's moving max/min state to known values so the export
    # comparison is deterministic; explicit_init=False because the variables
    # were just loaded above.
    set_moving_max_min_value()
    convert_to_onnx_and_check(
        fake_quantization_moving_average, opset=10, explicit_init=False
    )
# min_max_observer
# The cases below sweep the {per-layer, per-channel} x {symmetric, affine}
# x {cpu, gpu} matrix of fake-quantization exports.
def test_fake_quantization_symmetric(test_case):
    generate_fake_quantization_test(per_layer=True, scheme="symmetric")
def test_fake_quantization_symmetric_per_channel(test_case):
    generate_fake_quantization_test(per_layer=False, scheme="symmetric")
def test_fake_quantization_affine(test_case):
    generate_fake_quantization_test(per_layer=True, scheme="affine")
def test_fake_quantization_affine_per_channel(test_case):
    generate_fake_quantization_test(per_layer=False, scheme="affine")
def test_fake_quantization_symmetric_gpu(test_case):
    generate_fake_quantization_test(
        per_layer=True, scheme="symmetric", device_type="gpu"
    )
def test_fake_quantization_symmetric_per_channel_gpu(test_case):
    generate_fake_quantization_test(
        per_layer=False, scheme="symmetric", device_type="gpu"
    )
def test_fake_quantization_affine_gpu(test_case):
    generate_fake_quantization_test(per_layer=True, scheme="affine", device_type="gpu")
def test_fake_quantization_affine_per_channel_gpu(test_case):
    generate_fake_quantization_test(per_layer=False, scheme="affine", device_type="gpu")
# moving_average_min_max_observer
# Same export checks driven by the moving-average observer; it is always
# per-layer, so only {symmetric, affine} x {cpu, gpu} are swept.
def test_fake_quantization_symmetric_moving_average(test_case):
    generate_fake_quantization_test_moving_average(scheme="symmetric")
def test_fake_quantization_affine_moving_average(test_case):
    generate_fake_quantization_test_moving_average(scheme="affine")
def test_fake_quantization_symmetric_gpu_moving_average(test_case):
    generate_fake_quantization_test_moving_average(
        scheme="symmetric", device_type="gpu"
    )
def test_fake_quantization_affine_gpu_moving_average(test_case):
    generate_fake_quantization_test_moving_average(scheme="affine", device_type="gpu")
| [
"oneflow.global_function",
"oneflow.quantization.moving_average_min_max_observer",
"oneflow.scope.placement",
"oneflow.quantization.min_max_observer",
"oneflow.get_all_variables",
"oneflow.random_uniform_initializer"
] | [((1363, 1385), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1383, 1385), True, 'import oneflow as flow\n'), ((1978, 2053), 'util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['fake_quantization'], {'opset': '(10 if per_layer else 13)'}), '(fake_quantization, opset=10 if per_layer else 13)\n', (2003, 2053), False, 'from util import convert_to_onnx_and_check\n'), ((2173, 2195), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (2193, 2195), True, 'import oneflow as flow\n'), ((2816, 2910), 'util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['fake_quantization_moving_average'], {'opset': '(10)', 'explicit_init': '(False)'}), '(fake_quantization_moving_average, opset=10,\n explicit_init=False)\n', (2841, 2910), False, 'from util import convert_to_onnx_and_check\n'), ((754, 778), 'oneflow.get_all_variables', 'flow.get_all_variables', ([], {}), '()\n', (776, 778), True, 'import oneflow as flow\n'), ((1428, 1468), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1448, 1468), True, 'import oneflow as flow\n'), ((2253, 2293), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2273, 2293), True, 'import oneflow as flow\n'), ((1627, 1667), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-10)', '(10)'], {}), '(-10, 10)\n', (1658, 1667), True, 'import oneflow as flow\n'), ((1775, 1878), 'oneflow.quantization.min_max_observer', 'flow.quantization.min_max_observer', (['x'], {'per_layer_quantization': 'per_layer', 'quantization_scheme': 'scheme'}), '(x, per_layer_quantization=per_layer,\n quantization_scheme=scheme)\n', (1809, 1878), True, 'import oneflow as flow\n'), ((2452, 2492), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-10)', '(10)'], {}), '(-10, 10)\n', (2483, 2492), True, 'import oneflow as flow\n'), ((2600, 
2685), 'oneflow.quantization.moving_average_min_max_observer', 'flow.quantization.moving_average_min_max_observer', (['x'], {'quantization_scheme': 'scheme'}), '(x, quantization_scheme=scheme\n )\n', (2649, 2685), True, 'import oneflow as flow\n'), ((1113, 1128), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (1121, 1128), True, 'import numpy as np\n'), ((1174, 1190), 'numpy.array', 'np.array', (['[-0.2]'], {}), '([-0.2])\n', (1182, 1190), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.topk,
"""Finds the values and indices of the k largest entries at specified axis.
Args:
input (oneflow.Tensor): Input Tensor
k (int): the k in “top-k”
dim (int, optional): the dimension to sort along. Defaults to the last dim (-1)
largest (bool, optional): controls whether to return largest or smallest elements
sorted (bool, optional): controls whether to return the elements in sorted order (Only Support True Now!)
Returns:
Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int32)): A tuple of (values, indices), where
the indices are the indices of the elements in the original input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32)
>>> (values, indices) = flow.topk(flow.Tensor(x), k=3, dim=1)
>>> values
tensor([[8., 7., 3.],
[9., 4., 3.]], dtype=oneflow.float32)
>>> indices
tensor([[2, 3, 1],
[1, 2, 3]], dtype=oneflow.int64)
>>> values.shape
oneflow.Size([2, 3])
>>> indices.shape
oneflow.Size([2, 3])
>>> (values, indices) = flow.topk(flow.Tensor(x), k=2, dim=1, largest=False)
>>> values
tensor([[1., 2.],
[1., 2.]], dtype=oneflow.float32)
>>> indices
tensor([[0, 4],
[0, 4]], dtype=oneflow.int64)
>>> values.shape
oneflow.Size([2, 2])
>>> indices.shape
oneflow.Size([2, 2])
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2302), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.topk', '"""Finds the values and indices of the k largest entries at specified axis.\n\n Args:\n input (oneflow.Tensor): Input Tensor\n k (int): the k in “top-k”\n dim (int, optional): the dimension to sort along. Defaults to the last dim (-1)\n largest (bool, optional): controls whether to return largest or smallest elements\n sorted (bool, optional): controls whether to return the elements in sorted order (Only Support True Now!)\n\n Returns:\n Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int32)): A tuple of (values, indices), where\n the indices are the indices of the elements in the original input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32)\n >>> (values, indices) = flow.topk(flow.Tensor(x), k=3, dim=1)\n >>> values\n tensor([[8., 7., 3.],\n [9., 4., 3.]], dtype=oneflow.float32)\n >>> indices\n tensor([[2, 3, 1],\n [1, 2, 3]], dtype=oneflow.int64)\n >>> values.shape\n oneflow.Size([2, 3])\n >>> indices.shape\n oneflow.Size([2, 3])\n >>> (values, indices) = flow.topk(flow.Tensor(x), k=2, dim=1, largest=False)\n >>> values\n tensor([[1., 2.],\n [1., 2.]], dtype=oneflow.float32)\n >>> indices\n tensor([[0, 4],\n [0, 4]], dtype=oneflow.int64)\n >>> values.shape\n oneflow.Size([2, 2])\n >>> indices.shape\n oneflow.Size([2, 2])\n\n """'], {}), '(oneflow.topk,\n """Finds the values and indices of the k largest entries at specified axis.\n\n Args:\n input (oneflow.Tensor): Input Tensor\n k (int): the k in “top-k”\n dim (int, optional): the dimension to sort along. 
Defaults to the last dim (-1)\n largest (bool, optional): controls whether to return largest or smallest elements\n sorted (bool, optional): controls whether to return the elements in sorted order (Only Support True Now!)\n\n Returns:\n Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int32)): A tuple of (values, indices), where\n the indices are the indices of the elements in the original input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32)\n >>> (values, indices) = flow.topk(flow.Tensor(x), k=3, dim=1)\n >>> values\n tensor([[8., 7., 3.],\n [9., 4., 3.]], dtype=oneflow.float32)\n >>> indices\n tensor([[2, 3, 1],\n [1, 2, 3]], dtype=oneflow.int64)\n >>> values.shape\n oneflow.Size([2, 3])\n >>> indices.shape\n oneflow.Size([2, 3])\n >>> (values, indices) = flow.topk(flow.Tensor(x), k=2, dim=1, largest=False)\n >>> values\n tensor([[1., 2.],\n [1., 2.]], dtype=oneflow.float32)\n >>> indices\n tensor([[0, 4],\n [0, 4]], dtype=oneflow.int64)\n >>> values.shape\n oneflow.Size([2, 2])\n >>> indices.shape\n oneflow.Size([2, 2])\n\n """\n )\n', (670, 2302), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import oneflow
import oneflow as flow
import oneflow.framework.session_context as session_ctx
import oneflow.unittest
from oneflow.framework.multi_client_session import MultiClientSession
@flow.unittest.skip_unless_1n1d()
class TestMultiClientSession(unittest.TestCase):
    """Checks that the default multi-client session initializes correctly."""

    def _assert_default_session_inited(self):
        # Shared body of both test cases: the process must be running in
        # multi-client mode and the default session must reach INITED.
        self.assertTrue(flow.env.is_multi_client())
        sess = session_ctx.GetDefaultSession()
        self.assertTrue(isinstance(sess, MultiClientSession))
        sess.TryInit()
        self.assertEqual(sess.status, sess.Status.INITED)

    def test_case1(self):
        self._assert_default_session_inited()

    def test_case2(self):
        print("test_case2")
        self._assert_default_session_inited()


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.framework.session_context.GetDefaultSession",
"oneflow.env.is_multi_client"
] | [((809, 841), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (839, 841), True, 'import oneflow as flow\n'), ((1489, 1504), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1502, 1504), False, 'import unittest\n'), ((984, 1015), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1013, 1015), True, 'import oneflow.framework.session_context as session_ctx\n'), ((1281, 1312), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1310, 1312), True, 'import oneflow.framework.session_context as session_ctx\n'), ((941, 967), 'oneflow.env.is_multi_client', 'flow.env.is_multi_client', ([], {}), '()\n', (965, 967), True, 'import oneflow as flow\n'), ((1238, 1264), 'oneflow.env.is_multi_client', 'flow.env.is_multi_client', ([], {}), '()\n', (1262, 1264), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
import oneflow._oneflow_internal
from typing import Tuple
@flow.unittest.skip_unless_1n4d()
class TestFunctionInputOutput(flow.unittest.TestCase):
    # Exercises global-function input/output handling for fixed and
    # mirrored tensor placeholders.
    def test_FixedTensorDef(test_case):
        # Identity global function over a fixed-shape (2, 5) placeholder.
        @flow.global_function()
        def Foo(x: oft.Numpy.Placeholder((2, 5))):
            return x
        data = np.ones((2, 5), dtype=np.float32)
        # .get() blocks and pulls the result back as a numpy-backed value.
        of_ret = Foo(data).get()
        test_case.assertEqual(of_ret.numpy().max(), 1)
        test_case.assertEqual(of_ret.numpy().min(), 1)
        test_case.assertTrue(np.allclose(of_ret.numpy(), data))
    def test_FixedTensorDef_2_device(test_case):
        # Same identity round-trip, but with two GPU devices configured.
        flow.config.gpu_device_num(2)
        @flow.global_function()
        def Foo(x: oft.Numpy.Placeholder((2, 5))):
            return x
        data = np.ones((2, 5), dtype=np.float32)
        of_ret = Foo(data).get()
        test_case.assertEqual(of_ret.numpy().max(), 1)
        test_case.assertEqual(of_ret.numpy().min(), 1)
        test_case.assertTrue(np.allclose(of_ret.numpy(), data))
    def test_MirroredTensorDef(test_case):
        # Mirrored view: input is a list of per-rank arrays, output comes
        # back via numpy_list().
        func_config = flow.FunctionConfig()
        func_config.default_logical_view(flow.scope.mirrored_view())
        @flow.global_function(function_config=func_config)
        def Foo(x: oft.ListNumpy.Placeholder((2, 5))):
            return x
        data = np.ones((1, 5), dtype=np.float32)
        ndarray_list = Foo([data]).get().numpy_list()
        test_case.assertEqual(len(ndarray_list), 1)
        test_case.assertTrue(np.allclose(ndarray_list[0], data))
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.config.gpu_device_num",
"oneflow.compatible.single_client.unittest.skip_unless_1n4d",
"oneflow.compatible.single_client.scope.mirrored_view",
"oneflow.compatible.single_client.typing.ListNumpy.Placeholder",
"oneflow.compatible.single_client.typing.Numpy.Placeholder",
"one... | [((798, 830), 'oneflow.compatible.single_client.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (828, 830), True, 'from oneflow.compatible import single_client as flow\n'), ((2284, 2299), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2297, 2299), False, 'import unittest\n'), ((935, 957), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {}), '()\n', (955, 957), True, 'from oneflow.compatible import single_client as flow\n'), ((1046, 1079), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1053, 1079), True, 'import numpy as np\n'), ((1345, 1374), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1371, 1374), True, 'from oneflow.compatible import single_client as flow\n'), ((1385, 1407), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {}), '()\n', (1405, 1407), True, 'from oneflow.compatible import single_client as flow\n'), ((1496, 1529), 'numpy.ones', 'np.ones', (['(2, 5)'], {'dtype': 'np.float32'}), '((2, 5), dtype=np.float32)\n', (1503, 1529), True, 'import numpy as np\n'), ((1803, 1824), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1822, 1824), True, 'from oneflow.compatible import single_client as flow\n'), ((1904, 1953), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1924, 1953), True, 'from oneflow.compatible import single_client as flow\n'), ((2046, 2079), 'numpy.ones', 'np.ones', (['(1, 5)'], {'dtype': 'np.float32'}), '((1, 5), dtype=np.float32)\n', (2053, 2079), True, 'import numpy as np\n'), ((1866, 1892), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1890, 1892), True, 'from oneflow.compatible import single_client as flow\n'), 
((2215, 2249), 'numpy.allclose', 'np.allclose', (['ndarray_list[0]', 'data'], {}), '(ndarray_list[0], data)\n', (2226, 2249), True, 'import numpy as np\n'), ((977, 1006), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (998, 1006), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1427, 1456), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (1448, 1456), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1973, 2006), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(2, 5)'], {}), '((2, 5))\n', (1998, 2006), True, 'from oneflow.compatible.single_client import typing as oft\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import oneflow as flow
from oneflow.ops.initializer_util import CalcGain
def calculate_gain(nonlinearity, param=None):
    """Return the recommended gain value for the given nonlinearity.

    Thin wrapper over :func:`oneflow.ops.initializer_util.CalcGain`.

    Args:
        nonlinearity: name of the non-linear function (e.g. ``"relu"``).
        param: optional parameter of the nonlinearity (e.g. the negative
            slope for ``"leaky_relu"``).
    """
    return CalcGain(nonlinearity, param)
def uniform_(tensor, a=0.0, b=1.0):
    """Fill ``tensor`` in-place with samples from U(a, b).

    The write runs under ``flow.no_grad()`` so it is not tracked by
    autograd; the mutated tensor is returned.
    """
    with flow.no_grad():
        filled = tensor.uniform_(a, b)
    return filled
def normal_(tensor, mean=0.0, std=1.0):
    """Fill ``tensor`` in-place with samples from N(mean, std^2).

    The write runs under ``flow.no_grad()`` so it is not tracked by
    autograd; the mutated tensor is returned.
    """
    with flow.no_grad():
        sampled = tensor.normal_(mean, std)
    return sampled
def xavier_uniform_(tensor, gain=1.0, *, data_format="NCHW"):
    r"""
    The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/nn.init.html.
    Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a uniform
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-a, a)` where
    .. math::
        a = \text{gain} \times \sqrt{\frac{6}{\text{fan_in} + \text{fan_out}}}
    Also known as Glorot initialization.
    Args:
        tensor: an n-dimensional `flow.Tensor`
        gain: an optional scaling factor
    Examples:
        >>> w = flow.empty(3, 5)
        >>> nn.init.xavier_uniform_(w, gain=nn.init.calculate_gain('relu'))
    """
    # Delegate to the tensor's in-place initializer; no_grad keeps the
    # write out of the autograd tape.
    with flow.no_grad():
        return tensor.xavier_uniform_(gain, data_format=data_format)
def xavier_normal_(tensor, gain=1.0, *, data_format="NCHW"):
    r"""
    The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/nn.init.html.
    Fills the input `Tensor` with values according to the method
    described in `Understanding the difficulty of training deep feedforward
    neural networks` - Glorot, X. & Bengio, Y. (2010), using a normal
    distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where
    .. math::
        \text{std} = \text{gain} \times \sqrt{\frac{2}{\text{fan_in} + \text{fan_out}}}
    Also known as Glorot initialization.
    Args:
        tensor: an n-dimensional `flow.Tensor`
        gain: an optional scaling factor
    Examples:
        >>> w = flow.empty(3, 5)
        >>> nn.init.xavier_normal_(w)
    """
    # Delegate to the tensor's in-place initializer; no_grad keeps the
    # write out of the autograd tape.
    with flow.no_grad():
        return tensor.xavier_normal_(gain, data_format=data_format)
def orthogonal_(tensor, gain=1.0):
    r"""
    The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/nn.init.html.
    Fills the input `Tensor` with a (semi) orthogonal matrix, as
    described in `Exact solutions to the nonlinear dynamics of learning in deep
    linear neural networks` - Saxe, A. et al. (2013). The input tensor must have
    at least 2 dimensions, and for tensors with more than 2 dimensions the
    trailing dimensions are flattened.
    Args:
        tensor: an n-dimensional `flow.Tensor`, where :math:`n \geq 2`
        gain: optional scaling factor
    Examples:
        >>> w = flow.empty(3, 5)
        >>> nn.init.orthogonal_(w)
    """
    # Delegate to the tensor's in-place initializer; no_grad keeps the
    # write out of the autograd tape.
    with flow.no_grad():
        return tensor.orthogonal_(gain)
def kaiming_uniform_(
    tensor, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
    r"""
    The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/nn.init.html.
    Fills the input `Tensor` with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    uniform distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{U}(-\text{bound}, \text{bound})` where
    .. math::
        \text{bound} = \text{gain} \times \sqrt{\frac{3}{\text{fan_mode}}}
    Also known as He initialization.
    Args:
        tensor: an n-dimensional `flow.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    Examples:
        >>> w = flow.empty(3, 5)
        >>> nn.init.kaiming_uniform_(w, mode='fan_in', nonlinearity='relu')
    """
    # Honor the global NHWC layout switch, matching kaiming_normal_ below.
    # Previously only kaiming_normal_ consulted ONEFLOW_ENABLE_NHWC, so the
    # two He initializers disagreed about the channel axis under NHWC.
    if os.getenv("ONEFLOW_ENABLE_NHWC") == "1":
        data_format = "NHWC"
    with flow.no_grad():
        return tensor.kaiming_uniform_(a, mode, nonlinearity, data_format=data_format)
def kaiming_normal_(
    tensor, a=0, mode="fan_in", nonlinearity="leaky_relu", *, data_format="NCHW"
):
    r"""
    The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/nn.init.html.
    Fills the input `Tensor` with values according to the method
    described in `Delving deep into rectifiers: Surpassing human-level
    performance on ImageNet classification` - He, K. et al. (2015), using a
    normal distribution. The resulting tensor will have values sampled from
    :math:`\mathcal{N}(0, \text{std}^2)` where
    .. math::
        \text{std} = \frac{\text{gain}}{\sqrt{\text{fan_mode}}}
    Also known as He initialization.
    Args:
        tensor: an n-dimensional `flow.Tensor`
        a: the negative slope of the rectifier used after this layer (only
            used with ``'leaky_relu'``)
        mode: either ``'fan_in'`` (default) or ``'fan_out'``. Choosing ``'fan_in'``
            preserves the magnitude of the variance of the weights in the
            forward pass. Choosing ``'fan_out'`` preserves the magnitudes in the
            backwards pass.
        nonlinearity: the non-linear function (`nn.functional` name),
            recommended to use only with ``'relu'`` or ``'leaky_relu'`` (default).
    Examples:
        >>> w = flow.empty(3, 5)
        >>> nn.init.kaiming_normal_(w, mode='fan_out', nonlinearity='relu')
    """
    # Global NHWC switch overrides the caller-supplied data_format.
    if os.getenv("ONEFLOW_ENABLE_NHWC") == "1":
        data_format = "NHWC"
    with flow.no_grad():
        return tensor.kaiming_normal_(a, mode, nonlinearity, data_format=data_format)
def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):
    """Fill ``tensor`` in-place from N(mean, std^2) truncated to [a, b].

    The write runs under ``flow.no_grad()`` so it is not tracked by
    autograd; the mutated tensor is returned.
    """
    with flow.no_grad():
        sampled = tensor.trunc_normal_(mean, std, a, b)
    return sampled
def constant_(tensor, val):
    """Fill ``tensor`` in-place with the scalar ``val`` (no autograd tracking)."""
    with flow.no_grad():
        filled = tensor.fill_(val)
    return filled
def ones_(tensor):
    """Fill ``tensor`` in-place with the scalar value 1 (no autograd tracking)."""
    return constant_(tensor, 1)
def zeros_(tensor):
    """Fill ``tensor`` in-place with the scalar value 0 (no autograd tracking)."""
    return constant_(tensor, 0)
def _calculate_fan_in_and_fan_out(tensor):
dimensions = tensor.ndimension()
if dimensions < 2:
raise ValueError(
"Fan in and fan out can not be computed for tensor with fewer than 2 dimensions"
)
num_input_fmaps = tensor.size(1)
num_output_fmaps = tensor.size(0)
receptive_field_size = 1
if tensor.ndimension() > 2:
for s in tensor.size()[2:]:
receptive_field_size *= s
fan_in = num_input_fmaps * receptive_field_size
fan_out = num_output_fmaps * receptive_field_size
return (fan_in, fan_out)
| [
"oneflow.no_grad",
"oneflow.ops.initializer_util.CalcGain"
] | [((733, 762), 'oneflow.ops.initializer_util.CalcGain', 'CalcGain', (['nonlinearity', 'param'], {}), '(nonlinearity, param)\n', (741, 762), False, 'from oneflow.ops.initializer_util import CalcGain\n'), ((810, 824), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (822, 824), True, 'import oneflow as flow\n'), ((914, 928), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (926, 928), True, 'import oneflow as flow\n'), ((1865, 1879), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (1877, 1879), True, 'import oneflow as flow\n'), ((2823, 2837), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (2835, 2837), True, 'import oneflow as flow\n'), ((3649, 3663), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3661, 3663), True, 'import oneflow as flow\n'), ((5166, 5180), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (5178, 5180), True, 'import oneflow as flow\n'), ((6707, 6739), 'os.getenv', 'os.getenv', (['"""ONEFLOW_ENABLE_NHWC"""'], {}), "('ONEFLOW_ENABLE_NHWC')\n", (6716, 6739), False, 'import os\n'), ((6786, 6800), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (6798, 6800), True, 'import oneflow as flow\n'), ((6960, 6974), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (6972, 6974), True, 'import oneflow as flow\n'), ((7068, 7082), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7080, 7082), True, 'import oneflow as flow\n'), ((7147, 7161), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7159, 7161), True, 'import oneflow as flow\n'), ((7225, 7239), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7237, 7239), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Callable, Optional, Union, Tuple, Sequence
from oneflow.python.oneflow_export import oneflow_export
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.remote_blob as remote_blob_util
IntPair = Tuple[int, int]
@oneflow_export("layers.dense")
def dense(
    inputs: remote_blob_util.BlobDef,
    units: int,
    activation: Optional[
        Callable[[remote_blob_util.BlobDef, str], remote_blob_util.BlobDef]
    ] = None,
    use_bias: bool = True,
    kernel_initializer: Optional[op_conf_util.InitializerConf] = None,
    bias_initializer: Optional[op_conf_util.InitializerConf] = None,
    kernel_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    bias_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    trainable: bool = True,
    name: str = "Dense",
    model_distribute: distribute_util.Distribute = distribute_util.broadcast(),
) -> remote_blob_util.BlobDef:
    r"""Analogous to `tf.keras.layers.Dense <https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense>`_
    Args:
        inputs (remote_blob_util.BlobDef): A 2D input `Blob`.
        units (int): A positive integer for the dimensionality of the output space.
        activation (Optional[remote_blob_util.BlobDef], optional): Activation function. Defaults to None.
        use_bias (bool, optional): A boolean specifies whether to use a bias vector. Defaults to True.
        kernel_initializer (Optional[op_conf_util.InitializerConf], optional): Initializer for the kernel weights matrix. Defaults to None.
        bias_initializer (Optional[op_conf_util.InitializerConf], optional): [description]. Defaults to None.
        kernel_regularizer (Optional[op_conf_util.RegularizerConf], optional): [description]. Defaults to None.
        bias_regularizer (Optional[op_conf_util.RegularizerConf], optional): Regularizer for the bias vector. Defaults to None.
        trainable (bool, optional): A boolean specifies whether to train the variables. Defaults to True.
        name (Optional[str], optional): This layer's name. Defaults to None.
        model_distribute (distribute_util.Distribute, optional): Define the way to ditribute the model. Defaults to distribute_util.broadcast().
    Returns:
        remote_blob_util.BlobDef: A N-D `Blob` with the shape of (batch_size, units).
    Raises:
        ValueError: The dimension of input `Blob` must be less than 2.
        VauleError: Model distribute must be in auto, broadcast, split.
        ValueError: The input must be a 2D `Blob` when the model distribute is split.
    """
    in_shape = inputs.shape
    in_num_axes = len(in_shape)
    assert in_num_axes >= 2
    # Only auto / broadcast / split(0) distributions are supported.
    assert (
        model_distribute is distribute_util.auto()
        or model_distribute is distribute_util.broadcast()
        or model_distribute is distribute_util.split(0)
    )
    if model_distribute is distribute_util.split(0):
        assert in_num_axes == 2  # model distribute is hard for reshape split dim 1
    # Flatten leading axes so matmul sees a 2-D input; restored at the end.
    if in_num_axes > 2:
        inputs = flow.reshape(inputs, (-1, in_shape[-1]))
    with flow.scope.namespace(name):
        if kernel_initializer is None:
            kernel_initializer = flow.constant_initializer(0)
        # Weight is stored as (units, in_features); matmul below uses
        # transpose_b so the product is (batch, units).
        weight = flow.get_variable(
            name="weight",
            shape=(units, inputs.shape[1]),
            dtype=inputs.dtype,
            initializer=kernel_initializer,
            regularizer=kernel_regularizer,
            trainable=trainable,
            model_name="weight",
            distribute=model_distribute,
            reuse=False,
        )
        weight = weight.with_distribute(model_distribute)
        out = flow.matmul(a=inputs, b=weight, transpose_b=True, name="matmul")
        if use_bias:
            if bias_initializer is None:
                bias_initializer = flow.constant_initializer(0)
            bias = flow.get_variable(
                name="bias",
                shape=(units,),
                dtype=inputs.dtype,
                initializer=bias_initializer,
                regularizer=bias_regularizer,
                trainable=trainable,
                model_name="bias",
                distribute=model_distribute,
                reuse=False,
            )
            bias = bias.with_distribute(model_distribute)
            out = flow.nn.bias_add(out, bias, name="bias_add")
        if callable(activation):
            out = activation(out, name="activation")
    # Restore the original leading axes with the new feature dimension.
    if in_num_axes > 2:
        out = flow.reshape(out, in_shape[:-1] + (units,))
    return out
@oneflow_export("layers.conv2d")
def conv2d(
    inputs: remote_blob_util.BlobDef,
    filters: int,
    kernel_size: Union[int, IntPair] = 1,
    strides: Union[int, IntPair] = 1,
    padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]] = "VALID",
    data_format: str = "NCHW",
    dilation_rate: int = 1,
    groups: int = 1,
    activation: Optional[
        Callable[[remote_blob_util.BlobDef, str], remote_blob_util.BlobDef]
    ] = None,
    use_bias: bool = True,
    kernel_initializer: Optional[op_conf_util.InitializerConf] = None,
    bias_initializer: Optional[op_conf_util.InitializerConf] = None,
    kernel_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    bias_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    trainable: bool = True,
    name: str = "Conv2d",
    weight_name: Optional[str] = None,
    bias_name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    r"""Build a 2D convolution layer: a learned kernel, plus an optional bias
    vector and activation, applied to a 4D input `Blob`.

    Args:
        inputs (remote_blob_util.BlobDef): A 4D input `Blob`.
        filters (int): Dimensionality of the output space (number of kernels).
        kernel_size (Union[int, IntPair], optional): Height and width of the
            convolution window; a single int means a square window. Defaults to 1.
        strides (Union[int, IntPair], optional): Strides of the window along
            height and width; a single int applies to both. Defaults to 1.
        padding (Union[str, Tuple], optional): "VALID", "SAME", or explicit
            per-dimension padding pairs. Defaults to "VALID".
        data_format (str, optional): "NCHW" (channels first) or "NHWC"
            (channels last). Defaults to "NCHW".
        dilation_rate (int, optional): Dilation rate of the dilated
            convolution. Defaults to 1.
        groups (int, optional): Positive number of groups for grouped
            convolution. Defaults to 1.
        activation (Optional[Callable], optional): Activation applied to the
            output. Defaults to None.
        use_bias (bool, optional): Whether to add a bias vector. Defaults to True.
        kernel_initializer (Optional[op_conf_util.InitializerConf], optional):
            Initializer for the kernel. Defaults to None (zeros).
        bias_initializer (Optional[op_conf_util.InitializerConf], optional):
            Initializer for the bias. Defaults to None (zeros).
        kernel_regularizer (Optional[op_conf_util.RegularizerConf], optional):
            Regularizer for the kernel. Defaults to None.
        bias_regularizer (Optional[op_conf_util.RegularizerConf], optional):
            Regularizer for the bias. Defaults to None.
        trainable (bool, optional): Whether the variables are trained. Defaults to True.
        name (str, optional): This layer's name. Defaults to "Conv2d".
        weight_name (Optional[str], optional): Explicit weight variable name;
            when None the weight lives under this layer's namespace. Defaults to None.
        bias_name (Optional[str], optional): Explicit bias variable name;
            when None the bias lives under this layer's namespace. Defaults to None.

    Raises:
        ValueError: If `data_format` is neither "NCHW" nor "NHWC".

    Returns:
        remote_blob_util.BlobDef: A 4D `Blob` with shape
        (batch_size, filters, new_height, new_width) for "NCHW" input.
    """
    # Normalize kernel_size into a (height, width) tuple.
    if isinstance(kernel_size, int):
        kernel_size = (kernel_size,) * 2
    else:
        assert isinstance(kernel_size, (list, tuple)) and len(kernel_size) == 2
        kernel_size = tuple(kernel_size)
    assert isinstance(groups, int)
    assert 0 < groups <= filters
    assert filters % groups == 0
    fmt = data_format.upper()
    if fmt == "NCHW":
        channel_count = inputs.shape[1]
        assert groups <= channel_count
        assert channel_count % groups == 0
        weight_shape = (filters, channel_count // groups) + kernel_size
    elif fmt == "NHWC":
        # Grouped convolution is only supported for channels-first layout.
        assert groups == 1
        channel_count = inputs.shape[3]
        assert groups <= channel_count
        assert channel_count % groups == 0
        weight_shape = (
            filters,
            kernel_size[0],
            kernel_size[1],
            channel_count // groups,
        )
    else:
        raise ValueError("data_format must be in NCHW or NHWC")
    if kernel_initializer is None:
        kernel_initializer = flow.constant_initializer(0)

    def _make_weight(var_name):
        # Create the convolution kernel variable.
        return flow.get_variable(
            name=var_name,
            shape=weight_shape,
            dtype=inputs.dtype,
            initializer=kernel_initializer,
            regularizer=kernel_regularizer,
            trainable=trainable,
            model_name="weight",
            reuse=False,
        )

    if weight_name is None:
        with flow.scope.namespace(name):
            weight = _make_weight("weight")
    else:
        weight = _make_weight(weight_name)
    output = flow.nn.conv2d(
        inputs,
        weight,
        strides,
        padding,
        data_format,
        dilation_rate,
        groups=groups,
        name=name,
    )
    if use_bias:
        if bias_initializer is None:
            bias_initializer = flow.constant_initializer(0)

        def _make_bias(var_name):
            # Create the per-filter bias variable.
            return flow.get_variable(
                name=var_name,
                shape=(filters,),
                dtype=inputs.dtype,
                initializer=bias_initializer,
                regularizer=bias_regularizer,
                trainable=trainable,
                model_name="bias",
                reuse=False,
            )

        if bias_name is None:
            with flow.scope.namespace(name):
                bias = _make_bias("bias")
        else:
            bias = _make_bias(bias_name)
        with flow.scope.namespace(name):
            output = flow.nn.bias_add(output, bias, data_format, name="bias_add")
    if callable(activation):
        with flow.scope.namespace(name):
            output = activation(output, name="activation")
    return output
@oneflow_export("layers.layer_norm")
def layer_norm(
    inputs: remote_blob_util.BlobDef,
    center: bool = True,
    scale: bool = True,
    trainable: bool = True,
    begin_norm_axis: int = 1,
    begin_params_axis: int = -1,
    epsilon: float = 1e-5,
    name: str = "LayerNorm",
) -> remote_blob_util.BlobDef:
    r"""Layer normalization, analogous to
    `tf.keras.layers.LayerNormalization <https://www.tensorflow.org/api_docs/python/tf/keras/layers/LayerNormalization>`_

    Args:
        inputs (remote_blob_util.BlobDef): Input `Blob`.
        center (bool, optional): Whether to add a learned shift (beta). Defaults to True.
        scale (bool, optional): Whether to apply a learned scale (gamma). Defaults to True.
        trainable (bool, optional): Whether beta/gamma are trained. Defaults to True.
        begin_norm_axis (int, optional): First axis to normalize over. Defaults to 1.
        begin_params_axis (int, optional): First axis of the parameter shape. Defaults to -1.
        epsilon (float, optional): Small constant added to avoid division by zero. Defaults to 1e-5.
        name (str, optional): This layer's name. Defaults to "LayerNorm".

    Returns:
        remote_blob_util.BlobDef: A normalized `Blob` with the same shape as `inputs`.
    """
    builder = (
        flow.user_op_builder(name)
        .Op("layer_norm")
        .Input("x", [inputs])
        .Output("y")
        .Output("mean")
        .Output("inv_variance")
    )
    # With neither shift nor scale there are no parameters to train.
    if center is False and scale is False:
        trainable = False
    param_shape = inputs.shape[begin_params_axis:]
    if center:
        with flow.scope.namespace(name):
            beta = flow.get_variable(
                name="beta",
                dtype=inputs.dtype,
                shape=param_shape,
                initializer=flow.constant_initializer(0.0),
                model_name="beta",
                trainable=trainable,
                distribute=distribute_util.broadcast(),
                reuse=False,
            )
        builder = builder.Input("beta", [beta])
    if scale:
        with flow.scope.namespace(name):
            gamma = flow.get_variable(
                name="gamma",
                dtype=inputs.dtype,
                shape=param_shape,
                initializer=flow.constant_initializer(1.0),
                model_name="gamma",
                trainable=trainable,
                distribute=distribute_util.broadcast(),
                reuse=False,
            )
        builder = builder.Input("gamma", [gamma])
    builder = (
        builder.Output("normalized")
        .Attr("center", center)
        .Attr("scale", scale)
        .Attr("begin_norm_axis", begin_norm_axis)
        .Attr("begin_params_axis", begin_params_axis)
        .Attr("epsilon", epsilon)
    )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("layers.layer_norm_grad")
def layer_norm_grad(
    dy: remote_blob_util.BlobDef,
    x: remote_blob_util.BlobDef,
    mean: remote_blob_util.BlobDef,
    inv_variance: remote_blob_util.BlobDef,
    begin_norm_axis: int = 1,
    name: str = "LayerNormGrad",
) -> remote_blob_util.BlobDef:
    r"""Backward pass of layer normalization with respect to its input.

    Args:
        dy (remote_blob_util.BlobDef): Upstream derivatives.
        x (remote_blob_util.BlobDef): Input `Blob` of the forward pass.
        mean (remote_blob_util.BlobDef): Mean over the normalized axes.
        inv_variance (remote_blob_util.BlobDef): Inverse variance over the normalized axes.
        begin_norm_axis (int, optional): First axis that was normalized. Defaults to 1.
        name (str, optional): This layer's name. Defaults to "LayerNormGrad".

    Returns:
        remote_blob_util.BlobDef: Gradient with respect to the input `Blob`.
    """
    builder = flow.user_op_builder(name).Op("layer_norm_grad")
    builder = builder.Input("dy", [dy]).Input("x", [x])
    builder = builder.Input("mean", [mean]).Input("inv_variance", [inv_variance])
    builder = builder.Output("dx")
    # Epsilon must match the forward op's default for a consistent gradient.
    builder = builder.Attr("begin_norm_axis", begin_norm_axis).Attr("epsilon", 1e-5)
    return builder.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("layers.layer_norm_param_grad")
def layer_norm_param_grad(
    dy: remote_blob_util.BlobDef,
    norm: remote_blob_util.BlobDef,
    gamma: remote_blob_util.BlobDef,
    begin_params_axis: int = -1,
    name: str = "LayerNormParamGrad",
) -> Tuple[
    remote_blob_util.BlobDef, remote_blob_util.BlobDef, remote_blob_util.BlobDef
]:
    r"""Backward pass of layer normalization with respect to its parameters.

    Args:
        dy (remote_blob_util.BlobDef): Upstream derivatives.
        norm (remote_blob_util.BlobDef): Normalized output of the forward pass.
        gamma (remote_blob_util.BlobDef): Scale parameter.
        begin_params_axis (int, optional): First parameter axis. Defaults to -1.
        name (str, optional): This layer's name. Defaults to "LayerNormParamGrad".

    Returns:
        Tuple[remote_blob_util.BlobDef, remote_blob_util.BlobDef, remote_blob_util.BlobDef]:
            normalized_diff: Gradient with respect to the normalized input.
            beta_diff: Gradient with respect to the shift parameter beta.
            gamma_diff: Gradient with respect to the scale parameter gamma.
    """
    builder = flow.user_op_builder(name).Op("layer_norm_param_grad")
    builder = builder.Input("dy", [dy])
    builder = builder.Input("normalized", [norm]).Input("gamma", [gamma])
    for output_name in ("normalized_diff", "beta_diff", "gamma_diff", "reduce_buf"):
        builder = builder.Output(output_name)
    op = builder.Attr("begin_params_axis", begin_params_axis).Build()
    # The op also emits a scratch buffer (reduce_buf) that callers never need.
    normalized_diff, beta_diff, gamma_diff, _reduce_buf = (
        op.InferAndTryRun().RemoteBlobList()
    )
    return normalized_diff, beta_diff, gamma_diff
@oneflow_export("layers.batch_normalization")
def batch_normalization(
    inputs: remote_blob_util.BlobDef,
    axis: int = -1,
    momentum: float = 0.99,
    epsilon: float = 0.001,
    center: bool = True,
    scale: bool = True,
    beta_initializer: Optional[op_conf_util.InitializerConf] = None,
    gamma_initializer: Optional[op_conf_util.InitializerConf] = None,
    beta_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    gamma_regularizer: Optional[op_conf_util.RegularizerConf] = None,
    moving_mean_initializer: Optional[op_conf_util.InitializerConf] = None,
    moving_variance_initializer: Optional[op_conf_util.InitializerConf] = None,
    trainable: bool = True,
    training: bool = True,
    name: str = "BatchNorm",
) -> remote_blob_util.BlobDef:
    r"""Batch normalization, analogous to
    `tf.keras.layers.BatchNormalization <https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization>`_

    Args:
        inputs (remote_blob_util.BlobDef): Input `Blob`.
        axis (int, optional): Axis to normalize; -1 means the last axis. Defaults to -1.
        momentum (float, optional): Momentum for the moving averages. Defaults to 0.99.
        epsilon (float, optional): Small constant added to avoid division by zero. Defaults to 0.001.
        center (bool, optional): Whether to add a learned offset (beta). Defaults to True.
        scale (bool, optional): Whether to multiply by a learned scale (gamma). Defaults to True.
        beta_initializer (Optional[op_conf_util.InitializerConf], optional): Initializer for beta. Defaults to None (zeros).
        gamma_initializer (Optional[op_conf_util.InitializerConf], optional): Initializer for gamma. Defaults to None (ones).
        beta_regularizer (Optional[op_conf_util.RegularizerConf], optional): Regularizer for beta. Defaults to None.
        gamma_regularizer (Optional[op_conf_util.RegularizerConf], optional): Regularizer for gamma. Defaults to None.
        moving_mean_initializer (Optional[op_conf_util.InitializerConf], optional): Initializer for the moving mean. Defaults to None (zeros).
        moving_variance_initializer (Optional[op_conf_util.InitializerConf], optional): Initializer for the moving variance. Defaults to None (ones).
        trainable (bool, optional): Whether beta/gamma are trained. Defaults to True.
        training (bool, optional): Whether batch statistics are used and moving statistics updated. Defaults to True.
        name (str, optional): This layer's name. Defaults to "BatchNorm".

    Returns:
        remote_blob_util.BlobDef: A `Blob` with the same shape as `inputs`.

    Raises:
        AssertionError: If `axis` is out of range for the input's rank.
    """
    if axis < 0:
        axis += len(inputs.shape)
    assert 0 <= axis < len(inputs.shape)
    params_shape = [inputs.shape[axis]]
    # Keep parameters/statistics in float32 for fp16 inputs to avoid precision loss.
    if inputs.dtype == flow.float16:
        params_dtype = flow.float32
    else:
        params_dtype = inputs.dtype
    if not flow.current_global_function_desc().IsTrainable() or not trainable:
        training = False
    with flow.scope.namespace(name):
        if center:
            beta = flow.get_variable(
                name="beta",
                dtype=params_dtype,
                shape=params_shape,
                initializer=beta_initializer or flow.zeros_initializer(),
                regularizer=beta_regularizer,
                trainable=trainable,
                distribute=distribute_util.broadcast(),
                reuse=False,
            )
        else:
            beta = flow.constant(0, dtype=params_dtype, shape=params_shape, name="beta")
        if scale:
            gamma = flow.get_variable(
                name="gamma",
                dtype=params_dtype,
                shape=params_shape,
                initializer=gamma_initializer or flow.ones_initializer(),
                regularizer=gamma_regularizer,
                trainable=trainable,
                distribute=distribute_util.broadcast(),
                reuse=False,
            )
        else:
            gamma = flow.constant(
                1, dtype=params_dtype, shape=params_shape, name="gamma"
            )
        # Moving statistics are updated by the op itself, never by the optimizer.
        moving_mean = flow.get_variable(
            name="moving_mean",
            dtype=params_dtype,
            shape=params_shape,
            initializer=moving_mean_initializer or flow.zeros_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
            reuse=False,
        )
        moving_variance = flow.get_variable(
            name="moving_variance",
            dtype=params_dtype,
            shape=params_shape,
            initializer=moving_variance_initializer or flow.ones_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
            reuse=False,
        )
        bn_op = flow.user_op_builder(name).Op("normalization")
        bn_op = bn_op.Input("x", [inputs])
        bn_op = bn_op.Input("moving_mean", [moving_mean])
        bn_op = bn_op.Input("moving_variance", [moving_variance])
        bn_op = bn_op.Input("gamma", [gamma]).Input("beta", [beta])
        bn_op = bn_op.Output("y")
        bn_op = (
            bn_op.Attr("axis", axis)
            .Attr("epsilon", epsilon)
            .Attr("training", training)
            .Attr("momentum", momentum)
        )
        if trainable and training:
            # Extra outputs consumed by the backward pass.
            bn_op = bn_op.Output("mean").Output("inv_variance")
        return bn_op.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("layers.upsample_2d")
def upsample(
    x: remote_blob_util.BlobDef,
    size: Sequence[int] = (2, 2),
    data_format: str = "NCHW",
    interpolation: str = "nearest",
    name: str = "Upsample2D",
):
    r"""Upsample a 4D `Blob` along its spatial dimensions.

    Args:
        x (remote_blob_util.BlobDef): Input `Blob`.
        size (Sequence[int], optional): (height_scale, width_scale); an int
            applies the same scale to both. Defaults to (2, 2).
        data_format (str, optional): "NCHW" (channels first) or "NHWC"
            (channels last). Defaults to "NCHW".
        interpolation (str, optional): "nearest" or "bilinear". Defaults to "nearest".
        name (str, optional): This layer's name. Defaults to "Upsample2D".

    Raises:
        ValueError: If `interpolation` is neither "nearest" nor "bilinear".
        ValueError: If `data_format` is neither "NHWC" nor "NCHW".

    Returns:
        remote_blob_util.BlobDef: The upsampled `Blob`; e.g. with size (2, 2)
        an [N, C, H, W] input becomes [N, C, 2H, 2W].
    """
    if isinstance(size, int):
        height_scale = width_scale = size
    else:
        assert isinstance(size, (list, tuple)) and len(size) == 2
        height_scale, width_scale = size
    if interpolation not in ("nearest", "bilinear"):
        raise ValueError('interpolation must be "nearest" or "bilinear".')
    fmt = data_format.upper()
    if fmt not in ("NCHW", "NHWC"):
        raise ValueError('data_format must be "NHWC" or "NCHW".')
    # The upsample op only understands channels-first layout, so NHWC input is
    # transposed in and back out.
    need_transpose = fmt == "NHWC"
    if need_transpose:
        x = flow.transpose(x, perm=[0, 3, 1, 2])
    builder = flow.user_op_builder(name).Op("upsample").Input("x", [x]).Output("y")
    builder = builder.Attr("height_scale", float(height_scale))
    builder = builder.Attr("width_scale", float(width_scale))
    builder = builder.Attr("data_format", "channels_first")
    builder = builder.Attr("interpolation", interpolation)
    output = builder.Build().InferAndTryRun().SoleOutputBlob()
    if need_transpose:
        output = flow.transpose(output, perm=[0, 2, 3, 1])
    return output
| [
"oneflow.nn.conv2d",
"oneflow.scope.namespace",
"oneflow.current_global_function_desc",
"oneflow.python.framework.distribute.broadcast",
"oneflow.constant_initializer",
"oneflow.constant",
"oneflow.zeros_initializer",
"oneflow.python.framework.distribute.split",
"oneflow.user_op_builder",
"oneflow... | [((985, 1015), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.dense"""'], {}), "('layers.dense')\n", (999, 1015), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5286, 5317), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.conv2d"""'], {}), "('layers.conv2d')\n", (5300, 5317), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12639, 12674), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.layer_norm"""'], {}), "('layers.layer_norm')\n", (12653, 12674), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15562, 15602), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.layer_norm_grad"""'], {}), "('layers.layer_norm_grad')\n", (15576, 15602), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16817, 16863), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.layer_norm_param_grad"""'], {}), "('layers.layer_norm_param_grad')\n", (16831, 16863), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((18451, 18495), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.batch_normalization"""'], {}), "('layers.batch_normalization')\n", (18465, 18495), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((23990, 24026), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""layers.upsample_2d"""'], {}), "('layers.upsample_2d')\n", (24004, 24026), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1608, 1635), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (1633, 1635), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((11253, 11359), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['inputs', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {'groups': 
'groups', 'name': 'name'}), '(inputs, weight, strides, padding, data_format, dilation_rate,\n groups=groups, name=name)\n', (11267, 11359), True, 'import oneflow as flow\n'), ((3621, 3645), 'oneflow.python.framework.distribute.split', 'distribute_util.split', (['(0)'], {}), '(0)\n', (3642, 3645), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((3773, 3813), 'oneflow.reshape', 'flow.reshape', (['inputs', '(-1, in_shape[-1])'], {}), '(inputs, (-1, in_shape[-1]))\n', (3785, 3813), True, 'import oneflow as flow\n'), ((3824, 3850), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (3844, 3850), True, 'import oneflow as flow\n'), ((3971, 4217), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': '"""weight"""', 'shape': '(units, inputs.shape[1])', 'dtype': 'inputs.dtype', 'initializer': 'kernel_initializer', 'regularizer': 'kernel_regularizer', 'trainable': 'trainable', 'model_name': '"""weight"""', 'distribute': 'model_distribute', 'reuse': '(False)'}), "(name='weight', shape=(units, inputs.shape[1]), dtype=\n inputs.dtype, initializer=kernel_initializer, regularizer=\n kernel_regularizer, trainable=trainable, model_name='weight',\n distribute=model_distribute, reuse=False)\n", (3988, 4217), True, 'import oneflow as flow\n'), ((4396, 4460), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'inputs', 'b': 'weight', 'transpose_b': '(True)', 'name': '"""matmul"""'}), "(a=inputs, b=weight, transpose_b=True, name='matmul')\n", (4407, 4460), True, 'import oneflow as flow\n'), ((5223, 5266), 'oneflow.reshape', 'flow.reshape', (['out', '(in_shape[:-1] + (units,))'], {}), '(out, in_shape[:-1] + (units,))\n', (5235, 5266), True, 'import oneflow as flow\n'), ((10455, 10483), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (10480, 10483), True, 'import oneflow as flow\n'), ((10937, 11139), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': 'weight_name', 'shape': 'weight_shape', 
'dtype': 'inputs.dtype', 'initializer': 'kernel_initializer', 'regularizer': 'kernel_regularizer', 'trainable': 'trainable', 'model_name': '"""weight"""', 'reuse': '(False)'}), "(name=weight_name, shape=weight_shape, dtype=inputs.dtype,\n initializer=kernel_initializer, regularizer=kernel_regularizer,\n trainable=trainable, model_name='weight', reuse=False)\n", (10954, 11139), True, 'import oneflow as flow\n'), ((21623, 21649), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (21643, 21649), True, 'import oneflow as flow\n'), ((25883, 25919), 'oneflow.transpose', 'flow.transpose', (['x'], {'perm': '[0, 3, 1, 2]'}), '(x, perm=[0, 3, 1, 2])\n', (25897, 25919), True, 'import oneflow as flow\n'), ((26344, 26385), 'oneflow.transpose', 'flow.transpose', (['output'], {'perm': '[0, 2, 3, 1]'}), '(output, perm=[0, 2, 3, 1])\n', (26358, 26385), True, 'import oneflow as flow\n'), ((3449, 3471), 'oneflow.python.framework.distribute.auto', 'distribute_util.auto', ([], {}), '()\n', (3469, 3471), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((3503, 3530), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (3528, 3530), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((3562, 3586), 'oneflow.python.framework.distribute.split', 'distribute_util.split', (['(0)'], {}), '(0)\n', (3583, 3586), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((3924, 3952), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (3949, 3952), True, 'import oneflow as flow\n'), ((4608, 4825), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': '"""bias"""', 'shape': '(units,)', 'dtype': 'inputs.dtype', 'initializer': 'bias_initializer', 'regularizer': 'bias_regularizer', 'trainable': 'trainable', 'model_name': '"""bias"""', 'distribute': 'model_distribute', 'reuse': '(False)'}), "(name='bias', shape=(units,), 
dtype=inputs.dtype,\n initializer=bias_initializer, regularizer=bias_regularizer, trainable=\n trainable, model_name='bias', distribute=model_distribute, reuse=False)\n", (4625, 4825), True, 'import oneflow as flow\n'), ((5052, 5096), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['out', 'bias'], {'name': '"""bias_add"""'}), "(out, bias, name='bias_add')\n", (5068, 5096), True, 'import oneflow as flow\n'), ((10526, 10552), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (10546, 10552), True, 'import oneflow as flow\n'), ((10575, 10774), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': '"""weight"""', 'shape': 'weight_shape', 'dtype': 'inputs.dtype', 'initializer': 'kernel_initializer', 'regularizer': 'kernel_regularizer', 'trainable': 'trainable', 'model_name': '"""weight"""', 'reuse': '(False)'}), "(name='weight', shape=weight_shape, dtype=inputs.dtype,\n initializer=kernel_initializer, regularizer=kernel_regularizer,\n trainable=trainable, model_name='weight', reuse=False)\n", (10592, 10774), True, 'import oneflow as flow\n'), ((11513, 11541), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (11538, 11541), True, 'import oneflow as flow\n'), ((12035, 12228), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': 'bias_name', 'shape': '(filters,)', 'dtype': 'inputs.dtype', 'initializer': 'bias_initializer', 'regularizer': 'bias_regularizer', 'trainable': 'trainable', 'model_name': '"""bias"""', 'reuse': '(False)'}), "(name=bias_name, shape=(filters,), dtype=inputs.dtype,\n initializer=bias_initializer, regularizer=bias_regularizer, trainable=\n trainable, model_name='bias', reuse=False)\n", (12052, 12228), True, 'import oneflow as flow\n'), ((12377, 12403), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (12397, 12403), True, 'import oneflow as flow\n'), ((12426, 12486), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], 
{'name': '"""bias_add"""'}), "(output, bias, data_format, name='bias_add')\n", (12442, 12486), True, 'import oneflow as flow\n'), ((12530, 12556), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (12550, 12556), True, 'import oneflow as flow\n'), ((14309, 14335), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (14329, 14335), True, 'import oneflow as flow\n'), ((14776, 14802), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (14796, 14802), True, 'import oneflow as flow\n'), ((22098, 22167), 'oneflow.constant', 'flow.constant', (['(0)'], {'dtype': 'params_dtype', 'shape': 'params_shape', 'name': '"""beta"""'}), "(0, dtype=params_dtype, shape=params_shape, name='beta')\n", (22111, 22167), True, 'import oneflow as flow\n'), ((22619, 22689), 'oneflow.constant', 'flow.constant', (['(1)'], {'dtype': 'params_dtype', 'shape': 'params_shape', 'name': '"""gamma"""'}), "(1, dtype=params_dtype, shape=params_shape, name='gamma')\n", (22632, 22689), True, 'import oneflow as flow\n'), ((4559, 4587), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (4584, 4587), True, 'import oneflow as flow\n'), ((11590, 11616), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (11610, 11616), True, 'import oneflow as flow\n'), ((11641, 11831), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': '"""bias"""', 'shape': '(filters,)', 'dtype': 'inputs.dtype', 'initializer': 'bias_initializer', 'regularizer': 'bias_regularizer', 'trainable': 'trainable', 'model_name': '"""bias"""', 'reuse': '(False)'}), "(name='bias', shape=(filters,), dtype=inputs.dtype,\n initializer=bias_initializer, regularizer=bias_regularizer, trainable=\n trainable, model_name='bias', reuse=False)\n", (11658, 11831), True, 'import oneflow as flow\n'), ((22987, 23014), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', 
(23012, 23014), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((23329, 23356), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (23354, 23356), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((14503, 14533), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (14528, 14533), True, 'import oneflow as flow\n'), ((14634, 14661), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (14659, 14661), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((14972, 15002), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (14997, 15002), True, 'import oneflow as flow\n'), ((15104, 15131), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (15129, 15131), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((21520, 21555), 'oneflow.current_global_function_desc', 'flow.current_global_function_desc', ([], {}), '()\n', (21553, 21555), True, 'import oneflow as flow\n'), ((21993, 22020), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (22018, 22020), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((22513, 22540), 'oneflow.python.framework.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (22538, 22540), True, 'import oneflow.python.framework.distribute as distribute_util\n'), ((22909, 22933), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (22931, 22933), True, 'import oneflow as flow\n'), ((23252, 23275), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (23273, 23275), True, 'import oneflow as flow\n'), ((21857, 21881), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (21879, 21881), True, 'import oneflow as 
flow\n'), ((22377, 22400), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (22398, 22400), True, 'import oneflow as flow\n'), ((13993, 14019), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (14013, 14019), True, 'import oneflow as flow\n'), ((25940, 25966), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (25960, 25966), True, 'import oneflow as flow\n'), ((16452, 16478), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (16472, 16478), True, 'import oneflow as flow\n'), ((17899, 17925), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (17919, 17925), True, 'import oneflow as flow\n'), ((23418, 23444), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (23438, 23444), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing documentation for oneflow.max at import time.
add_docstr(
    oneflow.max,
    """
    oneflow.max(input, dim=None, keepdim=False)

    Computes the maximum value of all elements in the input tensor.

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    Returns:
        Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns
        the maximum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices),
        where the `values` are the maximum values along the given dimension `dim`,
        and the `indices` are the indices of those maximum elements in the original input tensor.

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])
        >>> flow.max(input)
        tensor(6., dtype=oneflow.float32)
        >>> (values, indices) = flow.max(input, dim=1)
        >>> values
        tensor([5., 6.], dtype=oneflow.float32)
        >>> indices
        tensor([2, 1], dtype=oneflow.int64)

    """,
)
# Attach the user-facing documentation for oneflow.min at import time.
add_docstr(
    oneflow.min,
    """
    oneflow.min(input, dim=None, keepdim=False)

    Computes the minimum value of all elements in the input tensor.

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    Returns:
        Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns
        the minimum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices),
        where the `values` are the minimum values along the given dimension `dim`,
        and the `indices` are the indices of those minimum elements in the original input tensor.

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])
        >>> flow.min(input)
        tensor(1., dtype=oneflow.float32)
        >>> (values, indices) = flow.min(input, dim=1)
        >>> values
        tensor([1., 2.], dtype=oneflow.float32)
        >>> indices
        tensor([1, 0], dtype=oneflow.int64)

    """,
)
# Attach the user-facing documentation for oneflow.sum at import time.
add_docstr(
    oneflow.sum,
    """
    oneflow.sum(input, dim=None, keepdim=False) -> Tensor

    Computes the sum of the elements of a tensor over the given dimension. If the dimension is None, the sum of all elements will be calculated.

    If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])
        >>> flow.sum(input)
        tensor(21., dtype=oneflow.float32)
        >>> flow.sum(input, dim=0)
        tensor([5., 7., 9.], dtype=oneflow.float32)
        >>> flow.sum(input, dim=1)
        tensor([ 6., 15.], dtype=oneflow.float32)

    """,
)
# Attach the user-facing documentation for oneflow.mean at import time.
add_docstr(
    oneflow.mean,
    """
    oneflow.mean(input, dim=None, keepdim=False) -> Tensor

    Computes the mean of the elements of a tensor over the given dimension. If the dimension is None, the mean of all elements will be calculated.

    If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])
        >>> flow.mean(input)
        tensor(3.5000, dtype=oneflow.float32)
        >>> flow.mean(input, dim=0)
        tensor([2.5000, 3.5000, 4.5000], dtype=oneflow.float32)
        >>> flow.mean(input, dim=1)
        tensor([2., 5.], dtype=oneflow.float32)

    """,
)
# Attach the user-facing documentation for oneflow.prod at import time.
add_docstr(
    oneflow.prod,
    """
    oneflow.prod(input, dim=None, keepdim=False) -> Tensor

    Computes the product of the elements of a tensor over the given dimension. If the dimension is None, the product of all elements will be calculated.

    If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])
        >>> flow.prod(input)
        tensor(720., dtype=oneflow.float32)
        >>> flow.prod(input, dim=0)
        tensor([ 4., 10., 18.], dtype=oneflow.float32)
        >>> flow.prod(input, dim=1)
        tensor([  6., 120.], dtype=oneflow.float32)

    """,
)
# Attach the user-facing documentation for oneflow.all at import time.
add_docstr(
    oneflow.all,
    """
    oneflow.all(input, dim=None, keepdim=False) -> Tensor

    For each row of `input` in the given dimension `dim`, returns True if all elements in the row evaluate to True, and False otherwise. If the dimension is None, computes whether all elements in the input tensor evaluate to True.

    If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4
        >>> input
        tensor([[ True,  True,  True],
                [False, False, False]], dtype=oneflow.bool)
        >>> flow.all(input)
        tensor(False, dtype=oneflow.bool)
        >>> flow.all(input, 1)
        tensor([ True, False], dtype=oneflow.bool)
        >>> flow.all(input, 1, True)
        tensor([[ True],
                [False]], dtype=oneflow.bool)
    """,
)
# Attach the user-facing documentation for oneflow.any at import time.
add_docstr(
    oneflow.any,
    """
    oneflow.any(input, dim=None, keepdim=False) -> Tensor

    For each row of `input` in the given dimension `dim`, returns True if any element in the row evaluates to True, and False otherwise. If the dimension is None, computes whether any element in the input tensor evaluates to True.

    If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).

    Args:
        input (oneflow.Tensor): the Input Tensor
        dim (int, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`

    For example:

    .. code-block:: python

        >>> import oneflow as flow

        >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4
        >>> input
        tensor([[ True,  True,  True],
                [False, False, False]], dtype=oneflow.bool)
        >>> flow.any(input)
        tensor(True, dtype=oneflow.bool)
        >>> flow.any(input, 0)
        tensor([True, True, True], dtype=oneflow.bool)
        >>> flow.any(input, 0, True)
        tensor([[True, True, True]], dtype=oneflow.bool)

    """,
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1875), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.max', '"""\n oneflow.max(input, dim=None, keepdim=False)\n\n Computes the maximum value of all elements in the input tensor.\n \n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n Returns:\n Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns \n the maximum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices), \n where the `values` are the maximum value of all elements in the `input` tensor,\n the `indices` are the indices of the elements in the original input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])\n >>> flow.max(input)\n tensor(6., dtype=oneflow.float32)\n >>> (values, indices) = flow.max(input, dim=1)\n >>> values\n tensor([5., 6.], dtype=oneflow.float32)\n >>> indices\n tensor([2, 1], dtype=oneflow.int64)\n\n """'], {}), '(oneflow.max,\n """\n oneflow.max(input, dim=None, keepdim=False)\n\n Computes the maximum value of all elements in the input tensor.\n \n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n Returns:\n Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns \n the maximum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices), \n where the `values` are the maximum value of all elements in the `input` tensor,\n the `indices` are the indices of the elements in the original input tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])\n >>> flow.max(input)\n tensor(6., dtype=oneflow.float32)\n >>> (values, indices) = flow.max(input, dim=1)\n >>> values\n tensor([5., 6.], dtype=oneflow.float32)\n >>> indices\n tensor([2, 1], dtype=oneflow.int64)\n\n """\n )\n', (670, 1875), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1879, 3094), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.min', '"""\n oneflow.min(input, dim=None, keepdim=False)\n \n Computes the minimum value of all elements in the input tensor.\n \n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n Returns:\n Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns \n the minimum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices), \n where the `values` are the minimum value of all elements in the `input` tensor,\n the `indices` are the indices of the elements in the original input tensor.\n \n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])\n >>> flow.min(input)\n tensor(1., dtype=oneflow.float32)\n >>> (values, indices) = flow.min(input, dim=1)\n >>> values\n tensor([1., 2.], dtype=oneflow.float32)\n >>> indices\n tensor([1, 0], dtype=oneflow.int64)\n\n """'], {}), '(oneflow.min,\n """\n oneflow.min(input, dim=None, keepdim=False)\n \n Computes the minimum value of all elements in the input tensor.\n \n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. 
Default: `False`\n\n Returns:\n Tensor or Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int64)): If :attr:`dim` is `None`, returns \n the minimum value of all elements in the `input` tensor. Otherwise, returns a tuple of Tensor (values, indices), \n where the `values` are the minimum value of all elements in the `input` tensor,\n the `indices` are the indices of the elements in the original input tensor.\n \n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[4, 1, 5], [2, 6, 3]])\n >>> flow.min(input)\n tensor(1., dtype=oneflow.float32)\n >>> (values, indices) = flow.min(input, dim=1)\n >>> values\n tensor([1., 2.], dtype=oneflow.float32)\n >>> indices\n tensor([1, 0], dtype=oneflow.int64)\n\n """\n )\n', (1889, 3094), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3098, 4246), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.sum', '"""\n oneflow.sum(input, dim=None, keepdim=False) -> Tensor\n\n Computes the sum of row of elements in a tensor in the given dimension. If the dimension is None, sum of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.sum(input)\n tensor(21., dtype=oneflow.float32)\n >>> flow.sum(input, dim=0)\n tensor([5., 7., 9.], dtype=oneflow.float32)\n >>> flow.sum(input, dim=1)\n tensor([ 6., 15.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.sum,\n """\n oneflow.sum(input, dim=None, keepdim=False) -> Tensor\n\n Computes the sum of row of elements in a tensor in the given dimension. If the dimension is None, sum of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.sum(input)\n tensor(21., dtype=oneflow.float32)\n >>> flow.sum(input, dim=0)\n tensor([5., 7., 9.], dtype=oneflow.float32)\n >>> flow.sum(input, dim=1)\n tensor([ 6., 15.], dtype=oneflow.float32)\n\n """\n )\n', (3108, 4246), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4250, 5422), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.mean', '"""\n oneflow.mean(input, dim=None, keepdim=False) -> Tensor\n \n Computes the mean of row of elements in a tensor in the given dimension. If the dimension is None, mean of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. 
Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.mean(input)\n tensor(3.5000, dtype=oneflow.float32)\n >>> flow.mean(input, dim=0)\n tensor([2.5000, 3.5000, 4.5000], dtype=oneflow.float32)\n >>> flow.mean(input, dim=1)\n tensor([2., 5.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.mean,\n """\n oneflow.mean(input, dim=None, keepdim=False) -> Tensor\n \n Computes the mean of row of elements in a tensor in the given dimension. If the dimension is None, mean of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.mean(input)\n tensor(3.5000, dtype=oneflow.float32)\n >>> flow.mean(input, dim=0)\n tensor([2.5000, 3.5000, 4.5000], dtype=oneflow.float32)\n >>> flow.mean(input, dim=1)\n tensor([2., 5.], dtype=oneflow.float32)\n\n """\n )\n', (4260, 5422), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5426, 6593), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.prod', '"""\n oneflow.prod(input, dim=None, keepdim=False) -> Tensor\n\n Computes the product of row of elements in a tensor in the given dimension. If the dimension is None, product of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.prod(input)\n tensor(720., dtype=oneflow.float32)\n >>> flow.prod(input, dim=0)\n tensor([ 4., 10., 18.], dtype=oneflow.float32)\n >>> flow.prod(input, dim=1)\n tensor([ 6., 120.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.prod,\n """\n oneflow.prod(input, dim=None, keepdim=False) -> Tensor\n\n Computes the product of row of elements in a tensor in the given dimension. If the dimension is None, product of all elements will be caculated.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. 
Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int or tuple of ints, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]])\n >>> flow.prod(input)\n tensor(720., dtype=oneflow.float32)\n >>> flow.prod(input, dim=0)\n tensor([ 4., 10., 18.], dtype=oneflow.float32)\n >>> flow.prod(input, dim=1)\n tensor([ 6., 120.], dtype=oneflow.float32)\n\n """\n )\n', (5436, 6593), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((6597, 7938), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.all', '"""\n oneflow.all(input, dim=None, keepdim=False) -> Tensor\n\n For each row of `input` in the given dimension `dim`, returns True if all element in the row evaluate to True and False otherwise. If the dimension is None, compute if all elements in the input tensor to true.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4\n >>> input\n tensor([[ True, True, True],\n [False, False, False]], dtype=oneflow.bool)\n >>> flow.all(input)\n tensor(False, dtype=oneflow.bool)\n >>> flow.all(input, 1)\n tensor([ True, False], dtype=oneflow.bool)\n >>> flow.all(input, 1, True)\n tensor([[ True],\n [False]], dtype=oneflow.bool)\n """'], {}), '(oneflow.all,\n """\n oneflow.all(input, dim=None, keepdim=False) -> Tensor\n\n For each row of `input` in the given dimension `dim`, returns True if all element in the row evaluate to True and False otherwise. If the dimension is None, compute if all elements in the input tensor to true.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n\n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4\n >>> input\n tensor([[ True, True, True],\n [False, False, False]], dtype=oneflow.bool)\n >>> flow.all(input)\n tensor(False, dtype=oneflow.bool)\n >>> flow.all(input, 1)\n tensor([ True, False], dtype=oneflow.bool)\n >>> flow.all(input, 1, True)\n tensor([[ True],\n [False]], dtype=oneflow.bool)\n """\n )\n', (6607, 7938), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7942, 9285), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.any', '"""\n oneflow.any(input, dim=None, keepdim=False) -> Tensor\n \n For each row of `input` in the given dimension `dim`, returns True if any element in the row evaluate to True and False otherwise. If the dimension is None, compute if any elements in the input tensor to true.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4\n >>> input\n tensor([[ True, True, True],\n [False, False, False]], dtype=oneflow.bool)\n >>> flow.any(input)\n tensor(True, dtype=oneflow.bool)\n >>> flow.any(input, 0)\n tensor([True, True, True], dtype=oneflow.bool)\n >>> flow.any(input, 0, True)\n tensor([[True, True, True]], dtype=oneflow.bool)\n\n """'], {}), '(oneflow.any,\n """\n oneflow.any(input, dim=None, keepdim=False) -> Tensor\n \n For each row of `input` in the given dimension `dim`, returns True if any element in the row evaluate to True and False otherwise. If the dimension is None, compute if any elements in the input tensor to true.\n \n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed :func:`oneflow.squeeze()`, resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s). \n\n Args:\n input (oneflow.Tensor): the Input Tensor\n dim (int, optional): the dimension to reduce. Default: `None`\n keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.Tensor([[1, 2, 3], [4, 5, 6]]) < 4\n >>> input\n tensor([[ True, True, True],\n [False, False, False]], dtype=oneflow.bool)\n >>> flow.any(input)\n tensor(True, dtype=oneflow.bool)\n >>> flow.any(input, 0)\n tensor([True, True, True], dtype=oneflow.bool)\n >>> flow.any(input, 0, True)\n tensor([[True, True, True]], dtype=oneflow.bool)\n\n """\n )\n', (7952, 9285), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.