code stringlengths 118 171k | apis list | extract_api stringlengths 145 164k |
|---|---|---|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import cv2
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _of_image_normalize(images, image_shape, std, mean):
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_normalize_job(
images_def: oft.ListListNumpy.Placeholder(shape=image_shape, dtype=flow.float)
):
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
norm_images = flow.image_normalize(images_buffer, std, mean)
return flow.tensor_buffer_to_tensor_list(
norm_images, shape=image_shape[1:], dtype=flow.float
)
image_tensor = image_normalize_job([images]).get()
return image_tensor.numpy_lists()[0]
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _compare_image_normalize(test_case, image_files, std, mean):
images = _read_images_by_cv(image_files)
assert all([len(image.shape) == 4 for image in images])
image_shape = _get_images_static_shape(images)
norm_images = _of_image_normalize(images, tuple(image_shape), std, mean)
std_array = np.array(std).reshape(1, 1, 1, -1)
mean_array = np.array(mean).reshape(1, 1, 1, -1)
for image, norm_image in zip(images, norm_images):
exp_norm_image = (image - mean_array) / std_array
test_case.assertTrue(np.allclose(exp_norm_image, norm_image))
# @flow.unittest.skip_unless_1n1d()
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestImageNormalize(flow.unittest.TestCase):
def test_image_normalize(test_case):
_compare_image_normalize(
test_case,
[
"/dataset/mscoco_2017/val2017/000000000139.jpg",
"/dataset/mscoco_2017/val2017/000000000632.jpg",
],
(102.9801, 115.9465, 122.7717),
(1.0, 1.0, 1.0),
)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.tensor_list_to_tensor_buffer",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.image_normalize",
"oneflow.compatible.single_c... | [((2924, 3010), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (2939, 3010), False, 'import unittest\n'), ((811, 839), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (837, 839), True, 'from oneflow.compatible import single_client as flow\n'), ((858, 879), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (877, 879), True, 'from oneflow.compatible import single_client as flow\n'), ((997, 1046), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1017, 1046), True, 'from oneflow.compatible import single_client as flow\n'), ((1838, 1867), 'numpy.amax', 'np.amax', (['image_shapes'], {'axis': '(0)'}), '(image_shapes, axis=0)\n', (1845, 1867), True, 'import numpy as np\n'), ((3430, 3445), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3443, 3445), False, 'import unittest\n'), ((963, 989), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (987, 989), True, 'from oneflow.compatible import single_client as flow\n'), ((1194, 1239), 'oneflow.compatible.single_client.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['images_def'], {}), '(images_def)\n', (1227, 1239), True, 'from oneflow.compatible import single_client as flow\n'), ((1262, 1308), 'oneflow.compatible.single_client.image_normalize', 'flow.image_normalize', (['images_buffer', 'std', 'mean'], {}), '(images_buffer, std, mean)\n', (1282, 1308), True, 'from oneflow.compatible import single_client as flow\n'), ((1324, 1416), 'oneflow.compatible.single_client.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['norm_images'], {'shape': 
'image_shape[1:]', 'dtype': 'flow.float'}), '(norm_images, shape=image_shape[1:], dtype\n =flow.float)\n', (1357, 1416), True, 'from oneflow.compatible import single_client as flow\n'), ((1669, 1698), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (1683, 1698), True, 'import numpy as np\n'), ((1096, 1162), 'oneflow.compatible.single_client.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', ([], {'shape': 'image_shape', 'dtype': 'flow.float'}), '(shape=image_shape, dtype=flow.float)\n', (1125, 1162), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2555, 2568), 'numpy.array', 'np.array', (['std'], {}), '(std)\n', (2563, 2568), True, 'import numpy as np\n'), ((2607, 2621), 'numpy.array', 'np.array', (['mean'], {}), '(mean)\n', (2615, 2621), True, 'import numpy as np\n'), ((2786, 2825), 'numpy.allclose', 'np.allclose', (['exp_norm_image', 'norm_image'], {}), '(exp_norm_image, norm_image)\n', (2797, 2825), True, 'import numpy as np\n'), ((1585, 1607), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (1595, 1607), False, 'import cv2\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow import autograd
class TestAutogradFunction(flow.unittest.TestCase):
@flow.unittest.skip_unless_1n1d()
def test_simple_input(test_case):
class MyReLU(autograd.Function):
@staticmethod
def forward(ctx, x):
y = x.clamp(min=0.0, max=None)
ctx.save_for_backward(x)
return y
@staticmethod
def backward(ctx, y_grad):
x_grad = y_grad.clone()
(x,) = ctx.saved_tensors
x_grad[x < 0] = 0
return x_grad
np_arr = np.random.randn(4, 5)
a = flow.tensor(np_arr).requires_grad_()
# forward
b = MyReLU.apply(a)
test_case.assertTrue(np.allclose(b.numpy(), np_arr.clip(min=0.0)))
# backward
b.sum().backward()
np_grad = np.ones((4, 5))
np_grad[np_arr < 0] = 0.0
test_case.assertTrue(np.allclose(a.grad.numpy(), np_grad))
@flow.unittest.skip_unless_1n1d()
def test_multi_input(test_case):
class MyMatMul(autograd.Function):
@staticmethod
def forward(ctx, x, y):
z = x * y
ctx.save_for_backward(x, y)
return z
@staticmethod
def backward(ctx, z_grad):
x, y = ctx.saved_tensors
x_grad = y * z_grad
y_grad = x * z_grad
return x_grad, y_grad
np_arr0 = np.random.randn(4, 5)
np_arr1 = np.random.randn(4, 5)
a = flow.tensor(np_arr0).requires_grad_()
b = flow.tensor(np_arr1).requires_grad_()
# forward
c = MyMatMul().apply(a, b)
test_case.assertTrue(np.allclose(c.numpy(), np_arr0 * np_arr1))
# backward
c.sum().backward()
test_case.assertTrue(np.allclose(a.grad.numpy(), np_arr1))
test_case.assertTrue(np.allclose(b.grad.numpy(), np_arr0))
@flow.unittest.skip_unless_1n1d()
def test_non_differentiable_interface(test_case):
class MyModule(autograd.Function):
@staticmethod
def forward(ctx, x, y):
mul_res = x * y
add_res = x + y
ctx.save_for_backward(x, y)
ctx.mark_non_differentiable(add_res)
return mul_res, add_res
@staticmethod
def backward(ctx, mul_grad, add_grad=None):
x, y = ctx.saved_tensors
x_grad = y * mul_grad
y_grad = x * mul_grad
return x_grad, y_grad
np_arr0 = np.random.randn(4, 5)
np_arr1 = np.random.randn(4, 5)
a = flow.tensor(np_arr0).requires_grad_()
b = flow.tensor(np_arr1).requires_grad_()
# forward
c, d = MyModule().apply(a, b)
test_case.assertTrue(np.allclose(c.numpy(), np_arr0 * np_arr1))
test_case.assertFalse(d.requires_grad)
test_case.assertTrue(d.grad_fn is None)
# backward
c.sum().backward()
test_case.assertTrue(np.allclose(a.grad.numpy(), np_arr1))
test_case.assertTrue(np.allclose(b.grad.numpy(), np_arr0))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor"
] | [((762, 794), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (792, 794), True, 'import oneflow as flow\n'), ((1654, 1686), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1684, 1686), True, 'import oneflow as flow\n'), ((2633, 2665), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2663, 2665), True, 'import oneflow as flow\n'), ((3881, 3896), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3894, 3896), False, 'import unittest\n'), ((1275, 1296), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (1290, 1296), True, 'import numpy as np\n'), ((1531, 1546), 'numpy.ones', 'np.ones', (['(4, 5)'], {}), '((4, 5))\n', (1538, 1546), True, 'import numpy as np\n'), ((2160, 2181), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (2175, 2181), True, 'import numpy as np\n'), ((2200, 2221), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (2215, 2221), True, 'import numpy as np\n'), ((3283, 3304), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (3298, 3304), True, 'import numpy as np\n'), ((3323, 3344), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)'], {}), '(4, 5)\n', (3338, 3344), True, 'import numpy as np\n'), ((1309, 1328), 'oneflow.tensor', 'flow.tensor', (['np_arr'], {}), '(np_arr)\n', (1320, 1328), True, 'import oneflow as flow\n'), ((2234, 2254), 'oneflow.tensor', 'flow.tensor', (['np_arr0'], {}), '(np_arr0)\n', (2245, 2254), True, 'import oneflow as flow\n'), ((2284, 2304), 'oneflow.tensor', 'flow.tensor', (['np_arr1'], {}), '(np_arr1)\n', (2295, 2304), True, 'import oneflow as flow\n'), ((3357, 3377), 'oneflow.tensor', 'flow.tensor', (['np_arr0'], {}), '(np_arr0)\n', (3368, 3377), True, 'import oneflow as flow\n'), ((3407, 3427), 'oneflow.tensor', 'flow.tensor', (['np_arr1'], {}), '(np_arr1)\n', (3418, 3427), True, 'import 
oneflow as flow\n')] |
"""
Modified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/eca.py
"""
import math
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
class EcaModule(nn.Module):
"""ECA module
"""
# TODO: add docstring
def __init__(
self, channels=None, kernel_size=3, gamma=2, beta=1, gate_layer=nn.Sigmoid,
):
super(EcaModule, self).__init__()
if channels is not None:
t = int(abs(math.log(channels, 2) + beta) / gamma)
kernel_size = max(t if t % 2 else t + 1, 3)
assert kernel_size % 2 == 1
padding = (kernel_size - 1) // 2
self.conv = nn.Conv1d(
1, 1, kernel_size=kernel_size, padding=padding, bias=False
)
self.gate = gate_layer()
def forward(self, x):
y = x.mean((2, 3)).view(x.shape[0], 1, -1)
y = self.conv(y)
y = self.gate(y).view(x.shape[0], -1, 1, 1)
return x * y.expand_as(x)
| [
"oneflow.nn.Conv1d"
] | [((688, 757), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['(1)', '(1)'], {'kernel_size': 'kernel_size', 'padding': 'padding', 'bias': '(False)'}), '(1, 1, kernel_size=kernel_size, padding=padding, bias=False)\n', (697, 757), True, 'import oneflow.nn as nn\n'), ((496, 517), 'math.log', 'math.log', (['channels', '(2)'], {}), '(channels, 2)\n', (504, 517), False, 'import math\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.framework.scope_util as scope_util
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestGraphBlock(flow.unittest.TestCase):
def test_module_has_custom_func(test_case):
class CustomModuleHasFunc(flow.nn.Module):
def __init__(self):
super().__init__()
self.data_mem = 10
def forward(self, x):
return self._custom_func(x)
def _custom_func(self, x):
test_case.assertEqual(self.data_mem, 10)
return x
class CustomGraphHasFunc(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModuleHasFunc()
def build(self, x):
return self.m(x)
g = CustomGraphHasFunc()
x = np.ones((10, 10))
x = flow.tensor(x, dtype=flow.float32)
out = g(x)
test_case.assertTrue(np.array_equal(x.numpy(), out.numpy()))
def test_block_with_parameter(test_case):
device = "cuda"
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_sgd = flow.optim.SGD(linear.parameters(), lr=0.001, momentum=0.9)
x = flow.tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=flow.float32,
device=device,
requires_grad=False,
)
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.linear = linear
def forward(self, x):
return self._forward_impl(x)
def _forward_impl(self, x):
test_case.assertTrue(isinstance(self.linear, flow.nn.graph.Block))
return self.linear(x)
class LinearTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModule()
self.add_optimizer(of_sgd)
def build(self, x):
out = self.m(x)
out = out.sum()
out.backward()
test_case.assertTrue(self.m.linear.weight.is_lazy)
return out
linear_t_g = LinearTrainGraph()
linear_t_g(x)
def test_block_with_seq_container(test_case):
class SubModule0(flow.nn.Module):
def __init__(self):
super().__init__()
self.linear = flow.nn.Linear(10, 10, False)
def forward(self, x):
if graph_build_util.lazy_mode.is_enabled():
scope = scope_util.current_scope()
scope_proto = graph_build_util.scope_to_proto(scope)
ck_bool = scope_proto.attr_name2attr_value["checkpointing"].at_bool
test_case.assertEqual(ck_bool, True)
out = self.linear(x)
return out
list_of_m = [SubModule0() for i in range(3)]
class SeqModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.linears = flow.nn.Sequential(*list_of_m)
def forward(self, x):
x = self.linears(x)
return x
class SeqGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.linears = flow.nn.Sequential(*list_of_m)
self.linears.config.activation_checkpointing = True
def build(self, x):
x = self.linears(x)
return x
seq_m = SeqModule()
seq_g = SeqGraph()
input = flow.tensor(np.random.randn(4, 10), dtype=flow.float32)
output_m = seq_m(input)
output_g = seq_g(input)
# print(seq_g)
test_case.assertTrue(np.array_equal(output_m.numpy(), output_g.numpy()))
def test_block_with_list_container(test_case):
class SubModule0(flow.nn.Module):
def __init__(self):
super().__init__()
self.linear = flow.nn.Linear(10, 10, False)
def forward(self, x):
if graph_build_util.lazy_mode.is_enabled():
scope = scope_util.current_scope()
scope_proto = graph_build_util.scope_to_proto(scope)
ck_bool = scope_proto.attr_name2attr_value["checkpointing"].at_bool
test_case.assertEqual(ck_bool, True)
out = self.linear(x)
return out
list_of_m = [SubModule0() for i in range(3)]
class ModuleListModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.linears = flow.nn.ModuleList(list_of_m)
def forward(self, x):
for i, _ in enumerate(self.linears):
x = self.linears[i](x)
return x
class ModuleListGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.linears = flow.nn.ModuleList(list_of_m)
# NOTE: ModuleList doesn't have config.
# self.linears.config.activation_checkpointing = True
for i, _ in enumerate(self.linears):
self.linears[i].config.activation_checkpointing = True
def build(self, x):
# ModuleList can act as an iterable, or be indexed using ints
for i, _ in enumerate(self.linears):
x = self.linears[i](x)
return x
module_list_m = ModuleListModule()
module_list_g = ModuleListGraph()
input = flow.tensor(np.random.randn(4, 10), dtype=flow.float32)
output_m = module_list_m(input)
output_g = module_list_g(input)
# print(module_list_g)
test_case.assertTrue(np.array_equal(output_m.numpy(), output_g.numpy()))
def test_block_with_dict_container(test_case):
class SubModule0(flow.nn.Module):
def __init__(self, out):
super().__init__()
self.linear = flow.nn.Linear(10, out, False)
def forward(self, x):
if graph_build_util.lazy_mode.is_enabled():
scope = scope_util.current_scope()
scope_proto = graph_build_util.scope_to_proto(scope)
ck_bool = scope_proto.attr_name2attr_value["checkpointing"].at_bool
test_case.assertEqual(ck_bool, True)
out = self.linear(x)
return out
dict_of_m = {"0": SubModule0(10), "1": SubModule0(6)}
class ModuleDictModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.linears = flow.nn.ModuleDict(dict_of_m)
def forward(self, x):
x = self.linears["0"](x)
x = self.linears["1"](x)
return x
class ModuleDictGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.linears = flow.nn.ModuleDict(dict_of_m)
# NOTE: ModuleDict doesn't have config.
# self.linears.config.activation_checkpointing = True
for k, _ in self.linears.items():
self.linears[k].config.activation_checkpointing = True
def build(self, x):
# ModuleDict can act as an iterable, or get using key
x = self.linears["0"](x)
x = self.linears["1"](x)
return x
module_dict_m = ModuleDictModule()
module_dict_g = ModuleDictGraph()
input = flow.tensor(np.random.randn(4, 10), dtype=flow.float32)
output_m = module_dict_m(input)
output_g = module_dict_g(input)
# print(module_dict_g)
test_case.assertTrue(np.array_equal(output_m.numpy(), output_g.numpy()))
def test_block_with_para_list_container(test_case):
list_of_p = [flow.nn.Parameter(flow.randn(10, 10)) for i in range(2)]
class ParaListModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.params = flow.nn.ParameterList(list_of_p)
def forward(self, x):
for i, _ in enumerate(self.params):
x = flow._C.matmul(x, self.params[i])
return x
class ParaListGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.params = flow.nn.ParameterList(list_of_p)
def build(self, x):
for i, _ in enumerate(self.params):
x = flow._C.matmul(x, self.params[i])
return x
para_list_m = ParaListModule()
para_list_g = ParaListGraph()
# print(para_list_g)
input = flow.tensor(np.random.randn(4, 10), dtype=flow.float32)
output_m = para_list_m(input)
# print(output_m)
output_g = para_list_g(input)
# print(para_list_g)
test_case.assertTrue(np.array_equal(output_m.numpy(), output_g.numpy()))
def test_block_with_para_dict_container(test_case):
dict_of_p = {
"0": flow.nn.Parameter(flow.randn(10, 3)),
"1": flow.nn.Parameter(flow.randn(10, 10)),
}
class ParaDictModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.params = flow.nn.ParameterDict(dict_of_p)
def forward(self, x):
x = flow._C.matmul(x, self.params["0"])
return x
class ParaDictGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.params = flow.nn.ParameterDict(dict_of_p)
def build(self, x):
x = flow._C.matmul(x, self.params["0"])
return x
para_dict_m = ParaDictModule()
para_dict_g = ParaDictGraph()
# print(para_dict_g)
input = flow.tensor(np.random.randn(4, 10), dtype=flow.float32)
output_m = para_dict_m(input)
# print(output_m)
output_g = para_dict_g(input)
# print(para_dict_g)
test_case.assertTrue(np.array_equal(output_m.numpy(), output_g.numpy()))
if __name__ == "__main__":
unittest.main()
| [
"oneflow.nn.Sequential",
"oneflow.nn.ModuleList",
"oneflow.nn.Linear",
"oneflow.nn.ModuleDict",
"oneflow.framework.graph_build_util.scope_to_proto",
"oneflow.nn.init.constant_",
"oneflow._C.matmul",
"oneflow.framework.graph_build_util.lazy_mode.is_enabled",
"oneflow.randn",
"oneflow.nn.ParameterDi... | [((875, 907), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (905, 907), True, 'import oneflow as flow\n'), ((815, 849), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (824, 849), False, 'import os\n'), ((11701, 11716), 'unittest.main', 'unittest.main', ([], {}), '()\n', (11714, 11716), False, 'import unittest\n'), ((1632, 1649), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1639, 1649), True, 'import numpy as np\n'), ((1662, 1696), 'oneflow.tensor', 'flow.tensor', (['x'], {'dtype': 'flow.float32'}), '(x, dtype=flow.float32)\n', (1673, 1696), True, 'import oneflow as flow\n'), ((1873, 1893), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(8)'], {}), '(3, 8)\n', (1887, 1893), True, 'import oneflow as flow\n'), ((1937, 1984), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.068758)'], {}), '(linear.weight, 2.068758)\n', (1959, 1984), True, 'import oneflow as flow\n'), ((1993, 2034), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.bias', '(0.23)'], {}), '(linear.bias, 0.23)\n', (2015, 2034), True, 'import oneflow as flow\n'), ((2125, 2536), 'oneflow.tensor', 'flow.tensor', (['[[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987, -\n 2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]]'], {'dtype': 'flow.float32', 'device': 'device', 'requires_grad': '(False)'}), '([[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -\n 0.28708987, -2.18369248], [0.35217619, -0.67095644, -1.58943879], [\n 0.08086036, -1.81075924, 1.20752494], [0.8901075, -0.49976737, -\n 1.07153746], [-0.44872912, -1.07275683, 0.06256855], [-0.22556897, \n 0.74798368, 0.90416439], [0.48339456, 
-2.32742195, -0.59321527]], dtype\n =flow.float32, device=device, requires_grad=False)\n', (2136, 2536), True, 'import oneflow as flow\n'), ((4978, 5000), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (4993, 5000), True, 'import numpy as np\n'), ((7008, 7030), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (7023, 7030), True, 'import numpy as np\n'), ((9038, 9060), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (9053, 9060), True, 'import numpy as np\n'), ((10236, 10258), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (10251, 10258), True, 'import numpy as np\n'), ((11411, 11433), 'numpy.random.randn', 'np.random.randn', (['(4)', '(10)'], {}), '(4, 10)\n', (11426, 11433), True, 'import numpy as np\n'), ((3779, 3808), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(10)', '(10)', '(False)'], {}), '(10, 10, False)\n', (3793, 3808), True, 'import oneflow as flow\n'), ((3863, 3902), 'oneflow.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (3900, 3902), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((4435, 4465), 'oneflow.nn.Sequential', 'flow.nn.Sequential', (['*list_of_m'], {}), '(*list_of_m)\n', (4453, 4465), True, 'import oneflow as flow\n'), ((4700, 4730), 'oneflow.nn.Sequential', 'flow.nn.Sequential', (['*list_of_m'], {}), '(*list_of_m)\n', (4718, 4730), True, 'import oneflow as flow\n'), ((5382, 5411), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(10)', '(10)', '(False)'], {}), '(10, 10, False)\n', (5396, 5411), True, 'import oneflow as flow\n'), ((5466, 5505), 'oneflow.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5503, 5505), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((6045, 6074), 'oneflow.nn.ModuleList', 'flow.nn.ModuleList', (['list_of_m'], {}), '(list_of_m)\n', 
(6063, 6074), True, 'import oneflow as flow\n'), ((6376, 6405), 'oneflow.nn.ModuleList', 'flow.nn.ModuleList', (['list_of_m'], {}), '(list_of_m)\n', (6394, 6405), True, 'import oneflow as flow\n'), ((7441, 7471), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(10)', 'out', '(False)'], {}), '(10, out, False)\n', (7455, 7471), True, 'import oneflow as flow\n'), ((7526, 7565), 'oneflow.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (7563, 7565), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((8114, 8143), 'oneflow.nn.ModuleDict', 'flow.nn.ModuleDict', (['dict_of_m'], {}), '(dict_of_m)\n', (8132, 8143), True, 'import oneflow as flow\n'), ((8431, 8460), 'oneflow.nn.ModuleDict', 'flow.nn.ModuleDict', (['dict_of_m'], {}), '(dict_of_m)\n', (8449, 8460), True, 'import oneflow as flow\n'), ((9371, 9389), 'oneflow.randn', 'flow.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (9381, 9389), True, 'import oneflow as flow\n'), ((9554, 9586), 'oneflow.nn.ParameterList', 'flow.nn.ParameterList', (['list_of_p'], {}), '(list_of_p)\n', (9575, 9586), True, 'import oneflow as flow\n'), ((9899, 9931), 'oneflow.nn.ParameterList', 'flow.nn.ParameterList', (['list_of_p'], {}), '(list_of_p)\n', (9920, 9931), True, 'import oneflow as flow\n'), ((10607, 10624), 'oneflow.randn', 'flow.randn', (['(10)', '(3)'], {}), '(10, 3)\n', (10617, 10624), True, 'import oneflow as flow\n'), ((10662, 10680), 'oneflow.randn', 'flow.randn', (['(10)', '(10)'], {}), '(10, 10)\n', (10672, 10680), True, 'import oneflow as flow\n'), ((10837, 10869), 'oneflow.nn.ParameterDict', 'flow.nn.ParameterDict', (['dict_of_p'], {}), '(dict_of_p)\n', (10858, 10869), True, 'import oneflow as flow\n'), ((10925, 10960), 'oneflow._C.matmul', 'flow._C.matmul', (['x', "self.params['0']"], {}), "(x, self.params['0'])\n", (10939, 10960), True, 'import oneflow as flow\n'), ((11128, 11160), 'oneflow.nn.ParameterDict', 'flow.nn.ParameterDict', 
(['dict_of_p'], {}), '(dict_of_p)\n', (11149, 11160), True, 'import oneflow as flow\n'), ((11214, 11249), 'oneflow._C.matmul', 'flow._C.matmul', (['x', "self.params['0']"], {}), "(x, self.params['0'])\n", (11228, 11249), True, 'import oneflow as flow\n'), ((3932, 3958), 'oneflow.framework.scope_util.current_scope', 'scope_util.current_scope', ([], {}), '()\n', (3956, 3958), True, 'import oneflow.framework.scope_util as scope_util\n'), ((3993, 4031), 'oneflow.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (4024, 4031), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((5535, 5561), 'oneflow.framework.scope_util.current_scope', 'scope_util.current_scope', ([], {}), '()\n', (5559, 5561), True, 'import oneflow.framework.scope_util as scope_util\n'), ((5596, 5634), 'oneflow.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (5627, 5634), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((7595, 7621), 'oneflow.framework.scope_util.current_scope', 'scope_util.current_scope', ([], {}), '()\n', (7619, 7621), True, 'import oneflow.framework.scope_util as scope_util\n'), ((7656, 7694), 'oneflow.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (7687, 7694), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((9698, 9731), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'self.params[i]'], {}), '(x, self.params[i])\n', (9712, 9731), True, 'import oneflow as flow\n'), ((10041, 10074), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'self.params[i]'], {}), '(x, self.params[i])\n', (10055, 10074), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _check_axis
def sum_op(input, dim=None, keepdim=False):
axis_checked = _check_axis(dim, input.shape)
if len(axis_checked) == 0:
return input
return flow._C.reduce_sum(input, axis=axis_checked, keepdims=keepdim)
def mean_op(input, dim=None, keepdim=False):
axis_checked = _check_axis(dim, input.shape)
if len(axis_checked) == 0:
return input
return flow._C.reduce_mean(input, axis=axis_checked, keepdims=keepdim)
def prod_op(input, dim=None, keepdim=False, *, dtype=None):
axis_checked = _check_axis(dim, input.shape)
if len(axis_checked) == 0:
return input
return flow._C.reduce_prod(input, axis_checked, keepdim, dtype=dtype)
def all_op(input, dim=None, keepdim=False):
"""Computes if all elements in the input tensor to true.
Args:
input (oneflow.Tensor): the Input Tensor
dim (int, optional): the dimension to reduce. Default: `None`
keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`
Returns:
Tensor(oneflow.Tensor(dtype=int8)): If :attr:`dim` is `None`, returns
the logical all value of all elements in the `input` tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.arange(0, 3)
>>> flow.all(input)
tensor(False, dtype=oneflow.bool)
"""
axis_checked = _check_axis(dim, input.shape)
if len(axis_checked) == 0:
return input
return flow._C.reduce_all(input, axis=axis_checked, keepdims=keepdim)
def any_op(input, dim=None, keepdim=False):
    """Computes whether any element of the input tensor evaluates to true.

    Args:
        input (oneflow.Tensor): the input tensor
        dim (int, optional): the dimension to reduce. Default: `None`
        keepdim (bool, optional): whether the output tensor has dim retained or not. Default: `False`
    Returns:
        Tensor: the logical-any reduction; when :attr:`dim` is `None` it
        covers every element of `input`.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> input = flow.arange(0, 3)
        >>> flow.any(input)
        tensor(True, dtype=oneflow.bool)
    """
    reduce_axes = _check_axis(dim, input.shape)
    if len(reduce_axes) == 0:
        return input
    return flow._C.reduce_any(input, axis=reduce_axes, keepdims=keepdim)
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings;
    # raise_on_error aborts at the first failing example.
    import doctest
    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.reduce_mean",
"oneflow.nn.modules.utils._check_axis",
"oneflow._C.reduce_sum",
"oneflow._C.reduce_prod",
"oneflow._C.reduce_all",
"oneflow._C.reduce_any"
] | [((885, 914), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['dim', 'input.shape'], {}), '(dim, input.shape)\n', (896, 914), False, 'from oneflow.nn.modules.utils import _check_axis\n'), ((978, 1040), 'oneflow._C.reduce_sum', 'flow._C.reduce_sum', (['input'], {'axis': 'axis_checked', 'keepdims': 'keepdim'}), '(input, axis=axis_checked, keepdims=keepdim)\n', (996, 1040), True, 'import oneflow as flow\n'), ((1107, 1136), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['dim', 'input.shape'], {}), '(dim, input.shape)\n', (1118, 1136), False, 'from oneflow.nn.modules.utils import _check_axis\n'), ((1200, 1263), 'oneflow._C.reduce_mean', 'flow._C.reduce_mean', (['input'], {'axis': 'axis_checked', 'keepdims': 'keepdim'}), '(input, axis=axis_checked, keepdims=keepdim)\n', (1219, 1263), True, 'import oneflow as flow\n'), ((1345, 1374), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['dim', 'input.shape'], {}), '(dim, input.shape)\n', (1356, 1374), False, 'from oneflow.nn.modules.utils import _check_axis\n'), ((1438, 1500), 'oneflow._C.reduce_prod', 'flow._C.reduce_prod', (['input', 'axis_checked', 'keepdim'], {'dtype': 'dtype'}), '(input, axis_checked, keepdim, dtype=dtype)\n', (1457, 1500), True, 'import oneflow as flow\n'), ((2229, 2258), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['dim', 'input.shape'], {}), '(dim, input.shape)\n', (2240, 2258), False, 'from oneflow.nn.modules.utils import _check_axis\n'), ((2322, 2384), 'oneflow._C.reduce_all', 'flow._C.reduce_all', (['input'], {'axis': 'axis_checked', 'keepdims': 'keepdim'}), '(input, axis=axis_checked, keepdims=keepdim)\n', (2340, 2384), True, 'import oneflow as flow\n'), ((3112, 3141), 'oneflow.nn.modules.utils._check_axis', '_check_axis', (['dim', 'input.shape'], {}), '(dim, input.shape)\n', (3123, 3141), False, 'from oneflow.nn.modules.utils import _check_axis\n'), ((3205, 3267), 'oneflow._C.reduce_any', 'flow._C.reduce_any', (['input'], {'axis': 'axis_checked', 
'keepdims': 'keepdim'}), '(input, axis=axis_checked, keepdims=keepdim)\n', (3223, 3267), True, 'import oneflow as flow\n'), ((3321, 3357), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (3336, 3357), False, 'import doctest\n')] |
import json
from oneflow import nn
class BertConfig(object):
    """Configuration container for a BERT model.

    Holds the architecture hyper-parameters (vocabulary size, hidden size,
    layer counts, dropout probabilities, ...) plus a few task-level flags
    pulled from ``**kwargs`` (``num_labels``, ``output_attentions``,
    ``output_hidden_states``, ``problem_type``).
    """

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act=None,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        classifier_dropout=None,
        is_decoder=False,
        add_cross_attention=False,
        **kwargs
    ):
        """Create a config; any parameter left at its default keeps the
        standard bert-base value.  ``hidden_act`` defaults to a fresh
        ``nn.GELU()`` module.

        Bug fix: the previous default ``hidden_act=nn.GELU()`` was evaluated
        once at class-definition time (classic mutable-default-argument
        pitfall), so every config — and thus every model built from one —
        shared a single activation module instance.  Construction is now
        deferred so each config gets its own instance; passing any explicit
        activation behaves exactly as before.
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        # Deferred default: build a new GELU per config instead of sharing one.
        self.hidden_act = nn.GELU() if hidden_act is None else hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pad_token_id = pad_token_id
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.add_cross_attention = add_cross_attention
        # Task-level options; quote style normalized to double quotes.
        self.num_labels = kwargs.pop("num_labels", 2)
        self.output_attentions = kwargs.pop("output_attentions", False)
        self.output_hidden_states = kwargs.pop("output_hidden_states", False)
        self.problem_type = kwargs.pop("problem_type", None)

    def load_from_json(self, config_path):
        """Overwrite attributes from a JSON file at ``config_path``.

        Only keys that already exist as attributes are applied; unknown
        keys are silently ignored.
        """
        with open(config_path, "r", encoding="utf8") as file:
            json_data = json.load(file)
        for k, v in json_data.items():
            if hasattr(self, k):
                setattr(self, k, v)
| [
"oneflow.nn.GELU"
] | [((260, 269), 'oneflow.nn.GELU', 'nn.GELU', ([], {}), '()\n', (267, 269), False, 'from oneflow import nn\n'), ((1826, 1841), 'json.load', 'json.load', (['file'], {}), '(file)\n', (1835, 1841), False, 'import json\n')] |
import math, os
import argparse
import numpy as np
import oneflow as flow
from config import config, default, generate_val_config
import ofrecord_util
import validation_util
from symbols import fresnet100, fmobilefacenet
def get_val_args():
    """Build and parse the command-line flags used by the validation script."""
    parser = argparse.ArgumentParser(description="flags for validation")
    parser.add_argument("-network", default=default.network)
    args, _ = parser.parse_known_args()
    generate_val_config(args.network)
    # One dataset-directory flag per configured validation target.
    for ds in config.val_targets:
        parser.add_argument(
            "--%s_dataset_dir" % ds,
            type=str,
            default=os.path.join(default.val_dataset_dir, ds),
            help="validation dataset dir",
        )
    parser.add_argument(
        "--val_data_part_num",
        type=str,
        default=default.val_data_part_num,
        help="validation dataset dir prefix",
    )
    parser.add_argument(
        "--lfw_total_images_num", type=int, default=12000, required=False
    )
    parser.add_argument(
        "--cfp_fp_total_images_num", type=int, default=14000, required=False
    )
    parser.add_argument(
        "--agedb_30_total_images_num", type=int, default=12000, required=False
    )
    # Distribution configuration.
    parser.add_argument(
        "--device_num_per_node",
        type=int,
        default=default.device_num_per_node,
        required=False,
    )
    parser.add_argument(
        "--num_nodes",
        type=int,
        default=default.num_nodes,
        help="node/machine number for training",
    )
    parser.add_argument(
        "--val_batch_size_per_device",
        default=default.val_batch_size_per_device,
        type=int,
        help="validation batch size per device",
    )
    parser.add_argument(
        "--nrof_folds", default=default.nrof_folds, type=int, help="nrof folds"
    )
    # Model and log locations.
    parser.add_argument(
        "--log_dir", type=str, default=default.log_dir, help="log info save"
    )
    parser.add_argument(
        "--model_load_dir", default=default.model_load_dir, help="path to load model."
    )
    return parser.parse_args()
def flip_data(images):
    """Mirror a batch of images along axis 2 and cast to float32.

    For NHWC batches this is a horizontal flip (test-time augmentation);
    the input must have at least three dimensions.
    """
    mirrored = images[:, :, ::-1]
    return mirrored.astype(np.float32)
def get_val_config():
    """Build the oneflow function config shared by the validation jobs.

    Uses consistent (non-mirrored) logical view and float as the default
    data type.

    Fix: the local variable was previously named ``config``, shadowing the
    module-level ``config`` imported from the project's ``config`` module —
    an error-prone collision; it is renamed to ``func_config``.
    """
    func_config = flow.function_config()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.default_data_type(flow.float)
    return func_config
class Validator(object):
    """Runs embedding extraction for the face-verification validation sets
    (lfw, cfp_fp, agedb_30) using oneflow predict jobs."""
    def __init__(self, args):
        """Registers the dataset-loading and embedding predict jobs.
        NOTE(review): the jobs are only created when
        ``default.do_validation_while_train`` is truthy; calling
        ``do_validation`` without it would fail on the missing attributes.
        """
        self.args = args
        if default.do_validation_while_train:
            function_config = get_val_config()
            # One predict job per dataset: each loads a batch of
            # (issame labels, images) on CPU from OFRecord files.
            @flow.global_function(type="predict", function_config=function_config)
            def get_validation_datset_lfw_job():
                with flow.scope.placement("cpu", "0:0"):
                    issame, images = ofrecord_util.load_lfw_dataset(self.args)
                    return issame, images
            self.get_validation_datset_lfw_fn = get_validation_datset_lfw_job
            @flow.global_function(type="predict", function_config=function_config)
            def get_validation_dataset_cfp_fp_job():
                with flow.scope.placement("cpu", "0:0"):
                    issame, images = ofrecord_util.load_cfp_fp_dataset(self.args)
                    return issame, images
            self.get_validation_dataset_cfp_fp_fn = get_validation_dataset_cfp_fp_job
            @flow.global_function(type="predict", function_config=function_config)
            def get_validation_dataset_agedb_30_job():
                with flow.scope.placement("cpu", "0:0"):
                    issame, images = ofrecord_util.load_agedb_30_dataset(self.args)
                    return issame, images
            self.get_validation_dataset_agedb_30_fn = (
                get_validation_dataset_agedb_30_job
            )
            # Forward job: fixed-size NHWC float batches -> embeddings,
            # through the network selected by config.net_name.
            @flow.global_function(type="predict", function_config=function_config)
            def get_symbol_val_job(
                images: flow.typing.Numpy.Placeholder(
                    (self.args.val_batch_size_per_device, 112, 112, 3)
                )
            ):
                print("val batch data: ", images.shape)
                # NOTE(review): eval() of a config value; acceptable for a
                # trusted config file, but an explicit name->module lookup
                # table would be safer.
                embedding = eval(config.net_name).get_symbol(images)
                return embedding
            self.get_symbol_val_fn = get_symbol_val_job
    def do_validation(self, dataset="lfw"):
        """Computes embeddings (original and horizontally flipped batches)
        and issame labels for ``dataset``.
        Returns:
            (issame_list, [embeddings, embeddings_flipped]) trimmed to the
            dataset's configured total image count.
        """
        print("Validation on [{}]:".format(dataset))
        _issame_list = []
        _em_list = []
        _em_flipped_list = []
        batch_size = self.args.val_batch_size_per_device
        # NOTE(review): if ``dataset`` matches none of the three names below,
        # ``total_images_num``/``val_job`` stay unbound -> UnboundLocalError.
        if dataset == "lfw":
            total_images_num = self.args.lfw_total_images_num
            val_job = self.get_validation_datset_lfw_fn
        if dataset == "cfp_fp":
            total_images_num = self.args.cfp_fp_total_images_num
            val_job = self.get_validation_dataset_cfp_fp_fn
        if dataset == "agedb_30":
            total_images_num = self.args.agedb_30_total_images_num
            val_job = self.get_validation_dataset_agedb_30_fn
        val_iter_num = math.ceil(total_images_num / batch_size)
        for i in range(val_iter_num):
            _issame, images = val_job().get()
            # Test-time augmentation: also embed the mirrored batch.
            images_flipped = flip_data(images.numpy())
            _em = self.get_symbol_val_fn(images.numpy()).get()
            _em_flipped = self.get_symbol_val_fn(images_flipped).get()
            _issame_list.append(_issame.numpy())
            _em_list.append(_em.numpy())
            _em_flipped_list.append(_em_flipped.numpy())
        # Images come in pairs; one label per pair (hence the [0::2] stride).
        issame = np.array(_issame_list).flatten().reshape(-1, 1)[:total_images_num, :]
        issame_list = [bool(x) for x in issame[0::2]]
        embedding_length = _em_list[0].shape[-1]
        embeddings = (np.array(_em_list).flatten().reshape(-1, embedding_length))[
            :total_images_num, :
        ]
        embeddings_flipped = (
            np.array(_em_flipped_list).flatten().reshape(-1, embedding_length)
        )[:total_images_num, :]
        embeddings_list = [embeddings, embeddings_flipped]
        return issame_list, embeddings_list
    def load_checkpoint(self):
        """Restores model variables from ``args.model_load_dir``."""
        flow.load_variables(flow.checkpoint.get(self.args.model_load_dir))
def main():
    """Entry point: parse flags, restore the model, validate every target set."""
    args = get_val_args()
    flow.env.log_dir(args.log_dir)
    flow.config.gpu_device_num(args.device_num_per_node)
    validator = Validator(args)
    validator.load_checkpoint()
    # Run every configured validation dataset and report its metrics.
    for dataset_name in config.val_targets:
        issame_list, embeddings_list = validator.do_validation(dataset=dataset_name)
        validation_util.cal_validation_metrics(
            embeddings_list, issame_list, nrof_folds=args.nrof_folds,
        )
if __name__ == "__main__":
    # Script entry point.
    main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.env.log_dir",
"oneflow.config.gpu_device_num",
"oneflow.function_config",
"oneflow.checkpoint.get",
"oneflow.global_function",
"oneflow.scope.placement"
] | [((260, 319), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for validation"""'}), "(description='flags for validation')\n", (283, 319), False, 'import argparse\n'), ((436, 469), 'config.generate_val_config', 'generate_val_config', (['args.network'], {}), '(args.network)\n', (455, 469), False, 'from config import config, default, generate_val_config\n'), ((2311, 2333), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (2331, 2333), True, 'import oneflow as flow\n'), ((2400, 2436), 'config.config.default_data_type', 'config.default_data_type', (['flow.float'], {}), '(flow.float)\n', (2424, 2436), False, 'from config import config, default, generate_val_config\n'), ((6263, 6293), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (6279, 6293), True, 'import oneflow as flow\n'), ((6298, 6350), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.device_num_per_node'], {}), '(args.device_num_per_node)\n', (6324, 6350), True, 'import oneflow as flow\n'), ((2366, 2394), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2392, 2394), True, 'import oneflow as flow\n'), ((5088, 5128), 'math.ceil', 'math.ceil', (['(total_images_num / batch_size)'], {}), '(total_images_num / batch_size)\n', (5097, 5128), False, 'import math, os\n'), ((6550, 6650), 'validation_util.cal_validation_metrics', 'validation_util.cal_validation_metrics', (['embeddings_list', 'issame_list'], {'nrof_folds': 'args.nrof_folds'}), '(embeddings_list, issame_list,\n nrof_folds=args.nrof_folds)\n', (6588, 6650), False, 'import validation_util\n'), ((2205, 2228), 'numpy.flip', 'np.flip', (['images'], {'axis': '(2)'}), '(images, axis=2)\n', (2212, 2228), True, 'import numpy as np\n'), ((2644, 2713), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'function_config'}), "(type='predict', 
function_config=function_config)\n", (2664, 2713), True, 'import oneflow as flow\n'), ((3034, 3103), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'function_config'}), "(type='predict', function_config=function_config)\n", (3054, 3103), True, 'import oneflow as flow\n'), ((3439, 3508), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'function_config'}), "(type='predict', function_config=function_config)\n", (3459, 3508), True, 'import oneflow as flow\n'), ((3884, 3953), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'function_config'}), "(type='predict', function_config=function_config)\n", (3904, 3953), True, 'import oneflow as flow\n'), ((6172, 6217), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['self.args.model_load_dir'], {}), '(self.args.model_load_dir)\n', (6191, 6217), True, 'import oneflow as flow\n'), ((616, 657), 'os.path.join', 'os.path.join', (['default.val_dataset_dir', 'ds'], {}), '(default.val_dataset_dir, ds)\n', (628, 657), False, 'import math, os\n'), ((2784, 2818), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (2804, 2818), True, 'import oneflow as flow\n'), ((2857, 2898), 'ofrecord_util.load_lfw_dataset', 'ofrecord_util.load_lfw_dataset', (['self.args'], {}), '(self.args)\n', (2887, 2898), False, 'import ofrecord_util\n'), ((3178, 3212), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (3198, 3212), True, 'import oneflow as flow\n'), ((3251, 3295), 'ofrecord_util.load_cfp_fp_dataset', 'ofrecord_util.load_cfp_fp_dataset', (['self.args'], {}), '(self.args)\n', (3284, 3295), False, 'import ofrecord_util\n'), ((3585, 3619), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (3605, 3619), True, 'import oneflow as 
flow\n'), ((3658, 3704), 'ofrecord_util.load_agedb_30_dataset', 'ofrecord_util.load_agedb_30_dataset', (['self.args'], {}), '(self.args)\n', (3693, 3704), False, 'import ofrecord_util\n'), ((4014, 4100), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['(self.args.val_batch_size_per_device, 112, 112, 3)'], {}), '((self.args.val_batch_size_per_device, 112, \n 112, 3))\n', (4043, 4100), True, 'import oneflow as flow\n'), ((5567, 5589), 'numpy.array', 'np.array', (['_issame_list'], {}), '(_issame_list)\n', (5575, 5589), True, 'import numpy as np\n'), ((5762, 5780), 'numpy.array', 'np.array', (['_em_list'], {}), '(_em_list)\n', (5770, 5780), True, 'import numpy as np\n'), ((5909, 5935), 'numpy.array', 'np.array', (['_em_flipped_list'], {}), '(_em_flipped_list)\n', (5917, 5935), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
config = flow.function_config()
def make_job(a_shape, b_shape, trans_a=False, trans_b=False, dtype=flow.float32):
    """Build the baseline matmul job (XLA and TensorRT both disabled).

    Args:
        a_shape, b_shape: static shapes of the two operands.
        trans_a, trans_b: whether to transpose the corresponding operand.
        dtype: element type of both operands.
    Returns:
        The compiled oneflow global function.
    """
    # Mutates the module-level shared `config` before compilation.
    config.use_xla_jit(False)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def matmul_job(
        a=flow.FixedTensorDef(a_shape, dtype=dtype),
        b=flow.FixedTensorDef(b_shape, dtype=dtype),
    ):
        return flow.matmul(a, b, transpose_a=trans_a, transpose_b=trans_b)
    return matmul_job
def make_xla_job(a_shape, b_shape, trans_a=False, trans_b=False, dtype=flow.float32):
    """Build a matmul job compiled with XLA (TensorRT disabled).

    Same arguments and return as `make_job`; only the backend differs.
    """
    # Mutates the module-level shared `config` before compilation.
    config.use_xla_jit(True)
    config.use_tensorrt(False)
    @flow.global_function(config)
    def xla_matmul_job(
        a=flow.FixedTensorDef(a_shape, dtype=dtype),
        b=flow.FixedTensorDef(b_shape, dtype=dtype),
    ):
        return flow.matmul(a, b, transpose_a=trans_a, transpose_b=trans_b)
    return xla_matmul_job
def make_trt_job(a_shape, b_shape, trans_a=False, trans_b=False, dtype=flow.float32):
    """Build a matmul job compiled with TensorRT (XLA disabled).

    Same arguments and return as `make_job`; only the backend differs.
    """
    # Mutates the module-level shared `config` before compilation.
    config.use_xla_jit(False)
    config.use_tensorrt(True)
    @flow.global_function(config)
    def trt_matmul_job(
        a=flow.FixedTensorDef(a_shape, dtype=dtype),
        b=flow.FixedTensorDef(b_shape, dtype=dtype),
    ):
        return flow.matmul(a, b, transpose_a=trans_a, transpose_b=trans_b)
    return trt_matmul_job
class TestMatmul(unittest.TestCase):
    """Checks that the plain, XLA-jitted, and TensorRT matmul jobs agree.

    Each _test_* helper builds the three jobs for one shape/transpose
    combination, runs them on identical operands, and asserts the XLA and
    TensorRT outputs match the baseline within tolerance.
    """

    def make_shape(self, m, n, transpose):
        """Return (m, n), swapped to (n, m) when the operand is transposed."""
        if transpose:
            return (n, m)
        else:
            return (m, n)

    def _test_body(self, a, b, trans_a, trans_b, dtype=np.float32):
        """Run one (a, b) pair through all three jobs and compare outputs."""
        f1 = make_job(a.shape, b.shape, trans_a, trans_b)
        f2 = make_xla_job(a.shape, b.shape, trans_a, trans_b)
        f3 = make_trt_job(a.shape, b.shape, trans_a, trans_b)
        x = f1(a, b).get()
        y = f2(a, b).get()
        z = f3(a, b).get()
        print("without xla: ", x)
        print("with xla: ", y)
        # Bug fix: this line previously printed `y` (the XLA result) again;
        # the TensorRT result is `z`.
        print("with tensorrt: ", z)
        self.assertTrue(np.allclose(x.numpy(), y.numpy(), rtol=0.001, atol=1e-05))
        self.assertTrue(np.allclose(x.numpy(), z.numpy(), rtol=0.001, atol=1e-05))
        flow.clear_default_session()

    def _test_ones_body(self, m, k, n, trans_a, trans_b, dtype=np.float32):
        """All-ones operands for an (m, k) x (k, n) product."""
        shape_a = self.make_shape(m, k, trans_a)
        shape_b = self.make_shape(k, n, trans_b)
        a = np.ones(shape_a, dtype=dtype)
        b = np.ones(shape_b, dtype=dtype)
        self._test_body(a, b, trans_a, trans_b, dtype=dtype)

    def _test_random_body(self, m, k, n, trans_a, trans_b, dtype=np.float32):
        """Uniform-random operands for an (m, k) x (k, n) product."""
        shape_a = self.make_shape(m, k, trans_a)
        shape_b = self.make_shape(k, n, trans_b)
        a = np.random.random(shape_a).astype(dtype)
        b = np.random.random(shape_b).astype(dtype)
        self._test_body(a, b, trans_a, trans_b, dtype=dtype)

    def test_ones1x1x1_input(self):
        print("run test_ones1x1x1_input: ")
        self._test_ones_body(1, 1, 1, False, False)
        self._test_ones_body(1, 1, 1, False, True)
        self._test_ones_body(1, 1, 1, True, False)
        self._test_ones_body(1, 1, 1, True, True)

    def test_random1x1x1_input(self):
        print("test_random1x1x1_input: ")
        self._test_random_body(1, 1, 1, False, False)
        self._test_random_body(1, 1, 1, False, True)
        self._test_random_body(1, 1, 1, True, False)
        self._test_random_body(1, 1, 1, True, True)

    def test_ones1x10x1_input(self):
        print("test_ones1x10x1_input: ")
        self._test_ones_body(1, 10, 1, False, False)
        self._test_ones_body(1, 10, 1, False, True)
        self._test_ones_body(1, 10, 1, True, False)
        self._test_ones_body(1, 10, 1, True, True)

    def test_random1x10x1_input(self):
        print("test_random1x10x1_input: ")
        self._test_random_body(1, 10, 1, False, False)
        self._test_random_body(1, 10, 1, False, True)
        self._test_random_body(1, 10, 1, True, False)
        self._test_random_body(1, 10, 1, True, True)

    def test_ones10x10x2_input(self):
        print("test_ones10x10x2_input: ")
        self._test_ones_body(10, 10, 2, False, False)
        self._test_ones_body(10, 10, 2, False, True)
        self._test_ones_body(10, 10, 2, True, False)
        self._test_ones_body(10, 10, 2, True, True)

    def test_random10x10x2_input(self):
        print("run test_random10x10x2_input: ")
        self._test_random_body(10, 10, 2, False, False)
        self._test_random_body(10, 10, 2, False, True)
        self._test_random_body(10, 10, 2, True, False)
        self._test_random_body(10, 10, 2, True, True)
if __name__ == "__main__":
    # Run the unittest suite when executed as a script.
    unittest.main()
| [
"oneflow.compatible.single_client.function_config",
"oneflow.compatible.single_client.FixedTensorDef",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_client.matmul",
"oneflow.compatible.single_client.global_function"
] | [((740, 762), 'oneflow.compatible.single_client.function_config', 'flow.function_config', ([], {}), '()\n', (760, 762), True, 'from oneflow.compatible import single_client as flow\n'), ((914, 942), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (934, 942), True, 'from oneflow.compatible import single_client as flow\n'), ((1328, 1356), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1348, 1356), True, 'from oneflow.compatible import single_client as flow\n'), ((1750, 1778), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1770, 1778), True, 'from oneflow.compatible import single_client as flow\n'), ((5285, 5300), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5298, 5300), False, 'import unittest\n'), ((973, 1014), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (992, 1014), True, 'from oneflow.compatible import single_client as flow\n'), ((1026, 1067), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1045, 1067), True, 'from oneflow.compatible import single_client as flow\n'), ((1091, 1150), 'oneflow.compatible.single_client.matmul', 'flow.matmul', (['a', 'b'], {'transpose_a': 'trans_a', 'transpose_b': 'trans_b'}), '(a, b, transpose_a=trans_a, transpose_b=trans_b)\n', (1102, 1150), True, 'from oneflow.compatible import single_client as flow\n'), ((1391, 1432), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (1410, 1432), True, 'from oneflow.compatible import single_client as flow\n'), ((1444, 1485), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), 
'(b_shape, dtype=dtype)\n', (1463, 1485), True, 'from oneflow.compatible import single_client as flow\n'), ((1509, 1568), 'oneflow.compatible.single_client.matmul', 'flow.matmul', (['a', 'b'], {'transpose_a': 'trans_a', 'transpose_b': 'trans_b'}), '(a, b, transpose_a=trans_a, transpose_b=trans_b)\n', (1520, 1568), True, 'from oneflow.compatible import single_client as flow\n'), ((1813, 1854), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['a_shape'], {'dtype': 'dtype'}), '(a_shape, dtype=dtype)\n', (1832, 1854), True, 'from oneflow.compatible import single_client as flow\n'), ((1866, 1907), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1885, 1907), True, 'from oneflow.compatible import single_client as flow\n'), ((1931, 1990), 'oneflow.compatible.single_client.matmul', 'flow.matmul', (['a', 'b'], {'transpose_a': 'trans_a', 'transpose_b': 'trans_b'}), '(a, b, transpose_a=trans_a, transpose_b=trans_b)\n', (1942, 1990), True, 'from oneflow.compatible import single_client as flow\n'), ((2795, 2823), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2821, 2823), True, 'from oneflow.compatible import single_client as flow\n'), ((3011, 3040), 'numpy.ones', 'np.ones', (['shape_a'], {'dtype': 'dtype'}), '(shape_a, dtype=dtype)\n', (3018, 3040), True, 'import numpy as np\n'), ((3053, 3082), 'numpy.ones', 'np.ones', (['shape_b'], {'dtype': 'dtype'}), '(shape_b, dtype=dtype)\n', (3060, 3082), True, 'import numpy as np\n'), ((3333, 3358), 'numpy.random.random', 'np.random.random', (['shape_a'], {}), '(shape_a)\n', (3349, 3358), True, 'import numpy as np\n'), ((3385, 3410), 'numpy.random.random', 'np.random.random', (['shape_b'], {}), '(shape_b)\n', (3401, 3410), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import math
import numpy as np
from oneflow.test_utils.automated_test_util import *
from test_util import GenArgList
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow as flow
import oneflow.unittest
def gen_quant_scale_for_min_max_symmetric(weight, quantization_bit):
    """Reference scale for symmetric min-max quantization.

    Returns (scale, zero_point); zero_point is always 0 for the symmetric
    scheme.  The scale maps the absolute peak onto the top quantized level
    2^(bit-1) - 1.
    """
    abs_peak = np.abs(weight).max()
    top_level = 2.0 ** (quantization_bit - 1) - 1
    return (abs_peak / top_level, 0)
def gen_quant_scale_for_min_max_affine(weight, quantization_bit):
    """Reference scale/zero-point for affine (asymmetric) min-max quantization.

    The [min, max] range is spread over 2^bit - 1 levels; the zero point
    shifts the range so the minimum maps to level 0.
    """
    hi = np.max(weight)
    lo = np.min(weight)
    level_count = 2.0 ** quantization_bit - 1
    scale = (hi - lo) / level_count
    zero_point = -np.round(lo / scale)
    return (scale, zero_point)
def gen_quant_scale_for_min_max_cambricon(weight, quantization_bit):
    """Reference shift for cambricon-style (power-of-two) quantization.

    Returns (shift, 0): the base-2 exponent such that 2**shift scales the
    absolute peak into the (bit-1)-bit signed range.
    """
    abs_peak = np.abs(weight).max()
    shift = math.floor(math.log2(abs_peak)) - (quantization_bit - 2)
    return (shift, 0)
def product(tu):
    """Return the product of the entries of shape tuple `tu` as a Python int.

    An empty tuple yields 1 (np.prod of an empty sequence), matching the
    per-layer/scalar case.

    Bug fix: the previous implementation was
    ``np.prod(tu).astype(np.int).item()`` — the deprecated ``np.int`` alias
    was removed in NumPy 1.24, so that code raises AttributeError on modern
    NumPy.  The builtin ``int`` cast is equivalent and version-safe.
    """
    return int(np.prod(tu))
def quant_per_layer_symmetric(input, quantization_bit, scale):
    """Quantize `input` symmetrically: round(input/scale), saturated to
    +/-(2^(bit-1) - 1)."""
    bound = 2.0 ** (quantization_bit - 1) - 1
    quantized = np.rint(input / scale)
    return np.clip(quantized, -bound, bound)
def quant_per_layer_affine(input, quantization_bit, scale, zero_point):
    """Quantize `input` affinely: round(input/scale + zero_point), clamped
    to the unsigned range [0, 2^bit - 1]."""
    top = 2.0 ** quantization_bit - 1
    quantized = np.rint(input / scale + zero_point)
    return np.clip(quantized, 0, top)
def quant_per_layer_cambricon(input, quantization_bit, shift):
    """Quantize `input` with a power-of-two step 2**shift, saturated to
    +/-(2^(bit-1) - 1)."""
    bound = 2.0 ** (quantization_bit - 1) - 1
    step = 2 ** shift
    return np.clip(np.rint(input / step), -bound, bound)
def _check_quantize(
    test_case,
    input,
    out_of,
    quantization_bit,
    quantization_scheme,
    quantization_formula,
    per_layer_quantization,
):
    """Compares `out_of` (the oneflow quantization output, flattened) against
    a numpy reference quantization of `input`.

    For per-layer (or cambricon) quantization the whole tensor shares one
    scale; otherwise axis 0 is the channel axis and each channel gets its
    own scale/zero-point.  Fails via `assert` when the RMSE between the two
    quantized results exceeds 2 levels.
    NOTE(review): `test_case` and `input_diff_np` are currently unused here.
    """
    if per_layer_quantization or quantization_formula == "cambricon":
        outer_num = 1
        inner_num = product(input.shape[0:])
    else:
        outer_num = input.shape[0]
        inner_num = product(input.shape[1:])
    scale_np = np.zeros((outer_num,))
    zero_point_np = np.zeros((outer_num,))
    out_np = np.zeros((inner_num * outer_num,))
    input_flatten = input.flatten()
    input_diff_np = np.full((inner_num * outer_num,), 1.0 / (inner_num * outer_num))
    if quantization_formula == "google":
        if quantization_scheme == "symmetric":
            # Per-channel symmetric: each contiguous inner_num slice is one
            # channel of the flattened input.
            for c in range(outer_num):
                (scale_np[c], zero_point_np[c]) = gen_quant_scale_for_min_max_symmetric(
                    input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit
                )
                out = quant_per_layer_symmetric(
                    input_flatten[c * inner_num : (c + 1) * inner_num],
                    quantization_bit,
                    scale_np[c],
                )
                out_np[c * inner_num : (c + 1) * inner_num] = out
        else:
            # Per-channel affine: same slicing, but with a zero point.
            for c in range(outer_num):
                (scale_np[c], zero_point_np[c]) = gen_quant_scale_for_min_max_affine(
                    input_flatten[c * inner_num : (c + 1) * inner_num], quantization_bit
                )
                out = quant_per_layer_affine(
                    input_flatten[c * inner_num : (c + 1) * inner_num],
                    quantization_bit,
                    scale_np[c],
                    zero_point_np[c],
                )
                out_np[c * inner_num : (c + 1) * inner_num] = out
    else:
        # Cambricon formula is always per-layer: one power-of-two shift.
        (scale_np[0], zero_point_np[0]) = gen_quant_scale_for_min_max_cambricon(
            input_flatten, quantization_bit
        )
        out_np = quant_per_layer_cambricon(input_flatten, quantization_bit, scale_np[0])
    # Tolerate small rounding differences between backends (<= 2 levels RMSE).
    rmse = np.sqrt(np.mean((out_of - out_np) ** 2))
    assert rmse <= 2.0, "quantization op has bug!"
def _run_test_quantize(
    test_case,
    device_type,
    dtype,
    in_shape,
    quantization_bit,
    quantization_scheme,
    quantization_formula,
    per_layer_quantization,
):
    """Quantize one random tensor with oneflow and check it against the
    numpy reference via `_check_quantize`."""
    # Random data centered on zero; local renamed from `input` to avoid
    # shadowing the builtin.
    input_np = (np.random.random(in_shape) - 0.5).astype(type_name_to_np_type[dtype])
    input_tensor = flow.tensor(
        input_np, dtype=flow.float32, device=flow.device(device_type)
    )
    observer = flow.nn.MinMaxObserver(
        quantization_formula=quantization_formula,
        quantization_bit=quantization_bit,
        quantization_scheme=quantization_scheme,
        per_layer_quantization=per_layer_quantization,
    )
    (scale, zero_point) = observer(input_tensor)
    quantizer = flow.nn.Quantization(
        quantization_formula=quantization_formula,
        quantization_bit=quantization_bit,
        quantization_scheme=quantization_scheme,
    )
    quantized = quantizer(input_tensor, scale, zero_point)
    _check_quantize(
        test_case,
        input_np,
        quantized.numpy().flatten(),
        quantization_bit,
        quantization_scheme,
        quantization_formula,
        per_layer_quantization,
    )
class TestQuantize(flow.unittest.TestCase):
    """Sweeps quantization settings and checks each combination against the
    numpy reference implementation."""

    def test_quantize(test_case):
        # Ordered sweep: GenArgList expands the Cartesian product in
        # insertion order, so keep the pairs in this sequence.
        sweep = [
            ("test_case", [test_case]),
            ("device_type", ["cuda", "cpu"]),
            ("dtype", ["float32", "double"]),
            ("in_shape", [(9, 40, 20, 10)]),
            ("quantization_bit", [8, 2]),
            ("quantization_scheme", ["symmetric", "affine"]),
            ("quantization_formula", ["google"]),
            ("per_layer_quantization", [True, False]),
        ]
        arg_dict = OrderedDict(sweep)
        for combo in GenArgList(arg_dict):
            # cambricon only supports per-layer quantization.
            if combo[-2] == "cambricon" and not combo[-1]:
                continue
            _run_test_quantize(*combo)
if __name__ == "__main__":
    # Run the unittest suite when executed as a script.
    unittest.main()
| [
"oneflow.nn.Quantization",
"oneflow.device",
"oneflow.nn.MinMaxObserver"
] | [((1178, 1192), 'numpy.max', 'np.max', (['weight'], {}), '(weight)\n', (1184, 1192), True, 'import numpy as np\n'), ((1210, 1224), 'numpy.min', 'np.min', (['weight'], {}), '(weight)\n', (1216, 1224), True, 'import numpy as np\n'), ((2755, 2777), 'numpy.zeros', 'np.zeros', (['(outer_num,)'], {}), '((outer_num,))\n', (2763, 2777), True, 'import numpy as np\n'), ((2798, 2820), 'numpy.zeros', 'np.zeros', (['(outer_num,)'], {}), '((outer_num,))\n', (2806, 2820), True, 'import numpy as np\n'), ((2834, 2868), 'numpy.zeros', 'np.zeros', (['(inner_num * outer_num,)'], {}), '((inner_num * outer_num,))\n', (2842, 2868), True, 'import numpy as np\n'), ((2925, 2989), 'numpy.full', 'np.full', (['(inner_num * outer_num,)', '(1.0 / (inner_num * outer_num))'], {}), '((inner_num * outer_num,), 1.0 / (inner_num * outer_num))\n', (2932, 2989), True, 'import numpy as np\n'), ((4881, 5078), 'oneflow.nn.MinMaxObserver', 'flow.nn.MinMaxObserver', ([], {'quantization_formula': 'quantization_formula', 'quantization_bit': 'quantization_bit', 'quantization_scheme': 'quantization_scheme', 'per_layer_quantization': 'per_layer_quantization'}), '(quantization_formula=quantization_formula,\n quantization_bit=quantization_bit, quantization_scheme=\n quantization_scheme, per_layer_quantization=per_layer_quantization)\n', (4903, 5078), True, 'import oneflow as flow\n'), ((5185, 5328), 'oneflow.nn.Quantization', 'flow.nn.Quantization', ([], {'quantization_formula': 'quantization_formula', 'quantization_bit': 'quantization_bit', 'quantization_scheme': 'quantization_scheme'}), '(quantization_formula=quantization_formula,\n quantization_bit=quantization_bit, quantization_scheme=quantization_scheme)\n', (5205, 5328), True, 'import oneflow as flow\n'), ((6383, 6398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6396, 6398), False, 'import unittest\n'), ((984, 998), 'numpy.abs', 'np.abs', (['weight'], {}), '(weight)\n', (990, 998), True, 'import numpy as np\n'), ((1341, 1369), 'numpy.round', 
'np.round', (['(weight_min / scale)'], {}), '(weight_min / scale)\n', (1349, 1369), True, 'import numpy as np\n'), ((1496, 1510), 'numpy.abs', 'np.abs', (['weight'], {}), '(weight)\n', (1502, 1510), True, 'import numpy as np\n'), ((1836, 1858), 'numpy.rint', 'np.rint', (['(input / scale)'], {}), '(input / scale)\n', (1843, 1858), True, 'import numpy as np\n'), ((2045, 2080), 'numpy.rint', 'np.rint', (['(input / scale + zero_point)'], {}), '(input / scale + zero_point)\n', (2052, 2080), True, 'import numpy as np\n'), ((2298, 2320), 'numpy.rint', 'np.rint', (['(input / scale)'], {}), '(input / scale)\n', (2305, 2320), True, 'import numpy as np\n'), ((4399, 4430), 'numpy.mean', 'np.mean', (['((out_of - out_np) ** 2)'], {}), '((out_of - out_np) ** 2)\n', (4406, 4430), True, 'import numpy as np\n'), ((5755, 5768), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5766, 5768), False, 'from collections import OrderedDict\n'), ((6206, 6226), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6216, 6226), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((1535, 1556), 'math.log2', 'math.log2', (['weight_max'], {}), '(weight_max)\n', (1544, 1556), False, 'import math\n'), ((4827, 4851), 'oneflow.device', 'flow.device', (['device_type'], {}), '(device_type)\n', (4838, 4851), True, 'import oneflow as flow\n'), ((4683, 4709), 'numpy.random.random', 'np.random.random', (['in_shape'], {}), '(in_shape)\n', (4699, 4709), True, 'import numpy as np\n'), ((1635, 1646), 'numpy.prod', 'np.prod', (['tu'], {}), '(tu)\n', (1642, 1646), True, 'import numpy as np\n')] |
import sys
import math
sys.path.insert(0, '../../')
import oneflow as of
import oneflow.nn as nn
import oneflow.nn.functional as F
# from torch import nn
# import torch.nn.functional as F
# import torch
from libs.components.conv import conv3x3
from libs.components.activation import ReLU20
from libs.components import pooling
class BasicBlock(nn.Module):
    """Standard two-convolution residual block (ResNet "basic" variant).

    Two 3x3 conv + BatchNorm stages with a clipped ReLU activation; the
    input (identity, or projected by ``downsample``) is added back before
    the final activation.
    """

    expansion = 1  # basic blocks keep the channel count unchanged

    def __init__(self, in_channels, channels, stride=1, downsample=None):
        """Input/output channel counts, first-conv stride, optional shortcut projection."""
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(in_channels, channels, stride)
        self.bn1 = nn.BatchNorm2d(channels)
        self.relu = ReLU20(inplace=True)
        self.conv2 = conv3x3(channels, channels)
        self.bn2 = nn.BatchNorm2d(channels)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """Apply conv-bn-relu-conv-bn, add the shortcut, and activate."""
        shortcut = x if self.downsample is None else self.downsample(x)
        y = self.bn1(self.conv1(x))
        y = self.relu(y)
        y = self.bn2(self.conv2(y))
        y += shortcut
        return self.relu(y)
class SpeakerEmbNet(nn.Module):
    """ResNet-style speaker embedding extractor.

    Stages of (stride-2 5x5 conv -> BatchNorm -> residual blocks) shrink the
    input feature map, a configurable pooling layer aggregates over time, and
    a final linear layer projects to the embedding space.
    """

    def __init__(self, opts):
        """Build the network from an options dict.

        Expected keys: 'input_dim', 'hidden_dim' (list), 'residual_block_layers'
        (list, same length as hidden_dim), 'fc_layers', 'embedding_dim',
        'num_head', 'pooling', and — for the 'ASP'/'multi_head_ffa' poolings —
        'attention_hidden_size'.
        """
        super(SpeakerEmbNet, self).__init__()
        input_channel = 1
        input_dim = opts['input_dim']
        hidden_dim = opts['hidden_dim']
        residual_block_layers = opts['residual_block_layers']
        fc_layers = opts['fc_layers']  # read for config validation; not used below
        block = BasicBlock
        embedding_dim = opts['embedding_dim']
        num_head = opts['num_head']
        # BUGFIX: attention_hidden_size was referenced in the 'ASP' and
        # 'multi_head_ffa' branches below without ever being defined, so those
        # configurations crashed with NameError. Read it from the config
        # (key name assumed to be 'attention_hidden_size' — TODO confirm
        # against the yaml configs); None is fine for poolings not using it.
        attention_hidden_size = opts.get('attention_hidden_size')
        self.relu = ReLU20(inplace=True)
        block_layers = []
        for dim, block_layer in zip(hidden_dim, residual_block_layers):
            # Each stage: stride-2 conv halves the resolution, then BN and a
            # run of `block_layer` residual blocks at width `dim`.
            block_layers.append(nn.Conv2d(input_channel, dim, kernel_size = 5, stride = 2, padding = 2, bias = False))
            block_layers.append(nn.BatchNorm2d(dim))
            block_layers.append(self._make_layer(block, dim, block_layer))
            input_channel = dim
        self.residual = nn.Sequential(*block_layers)
        # Frequency bins remaining after the stride-2 stages.
        residual_output_shape = math.ceil(input_dim / (2 ** len(residual_block_layers)))
        if opts['pooling'] == 'STAT':
            self.pooling = pooling.STAT()
            self.fc1 = nn.Linear(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)
        elif opts['pooling'] == 'ASP':
            self.pooling = pooling.AttentiveStatPooling(attention_hidden_size, hidden_dim[-1])
            self.fc1 = nn.Linear(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)
        elif opts['pooling'] == 'multi_head_ffa':
            self.pooling = pooling.MultiHeadFFA(hidden_dim[-1], attention_hidden_size)
            self.fc1 = nn.Linear(hidden_dim[-1] * residual_output_shape, embedding_dim)
        elif opts['pooling'] == 'multi_head_attention':
            self.pooling = pooling.MultiHeadAttentionPooling(residual_output_shape * hidden_dim[-1], num_head = num_head)
            self.fc1 = nn.Linear(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)
        elif opts['pooling'] == 'multi_resolution_attention':
            self.pooling = pooling.MultiResolutionMultiHeadAttentionPooling(residual_output_shape * hidden_dim[-1], num_head = num_head)
            self.fc1 = nn.Linear(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)
        else:
            raise NotImplementedError("Other pooling methods has been not implemented!")

    def _make_layer(self, block, planes, blocks, stride=1):
        """Stack `blocks` residual blocks of type `block` at width `planes`."""
        layers = [block(planes, planes, stride)]
        in_planes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(in_planes, planes))
        return nn.Sequential(*layers)

    def extract_embedding(self, x):
        """Return (embedding, pooling output) for a batch of features.

        Args:
            x: input feature tensor of shape (B, C, T).
        """
        assert len(x.size()) == 3, "the shape of input must be 3 dimensions"
        x = x.unsqueeze(1)  # (B, 1, C, T) so the 2d convs apply
        x = self.residual(x)
        # Fold channels and the reduced frequency axis into one dimension.
        x = x.reshape(x.shape[0], x.shape[1]*x.shape[2], x.shape[3])
        pooling_out = self.pooling(x)
        x = pooling_out.view(pooling_out.size(0), -1)
        x = self.fc1(x)
        return x, pooling_out

    def forward(self, x):
        '''
        params:
            x: input feature, B, C, T
        return:
            output of unnormalized speaker embedding
        '''
        x, _ = self.extract_embedding(x)
        return x
if __name__ == '__main__':
    import yaml
    from yaml import CLoader

    # Use a context manager so the config file is closed even if parsing
    # raises (the original leaked the handle on a yaml error).
    with open('../../conf/model/resnet.yaml', 'r') as f:
        opts = yaml.load(f, Loader=CLoader)
    net = SpeakerEmbNet(opts)
    print(net)
    # summary(net.cuda(), (161, 300))
| [
"oneflow.nn.Sequential",
"oneflow.nn.BatchNorm2d",
"oneflow.nn.Conv2d",
"oneflow.nn.Linear"
] | [((23, 51), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (38, 51), False, 'import sys\n'), ((5317, 5345), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'CLoader'}), '(f, Loader=CLoader)\n', (5326, 5345), False, 'import yaml\n'), ((550, 588), 'libs.components.conv.conv3x3', 'conv3x3', (['in_channels', 'channels', 'stride'], {}), '(in_channels, channels, stride)\n', (557, 588), False, 'from libs.components.conv import conv3x3\n'), ((608, 632), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (622, 632), True, 'import oneflow.nn as nn\n'), ((653, 673), 'libs.components.activation.ReLU20', 'ReLU20', ([], {'inplace': '(True)'}), '(inplace=True)\n', (659, 673), False, 'from libs.components.activation import ReLU20\n'), ((695, 722), 'libs.components.conv.conv3x3', 'conv3x3', (['channels', 'channels'], {}), '(channels, channels)\n', (702, 722), False, 'from libs.components.conv import conv3x3\n'), ((742, 766), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {}), '(channels)\n', (756, 766), True, 'import oneflow.nn as nn\n'), ((1717, 1737), 'libs.components.activation.ReLU20', 'ReLU20', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1723, 1737), False, 'from libs.components.activation import ReLU20\n'), ((2200, 2228), 'oneflow.nn.Sequential', 'nn.Sequential', (['*block_layers'], {}), '(*block_layers)\n', (2213, 2228), True, 'import oneflow.nn as nn\n'), ((4543, 4565), 'oneflow.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (4556, 4565), True, 'import oneflow.nn as nn\n'), ((2384, 2398), 'libs.components.pooling.STAT', 'pooling.STAT', ([], {}), '()\n', (2396, 2398), False, 'from libs.components import pooling\n'), ((2422, 2490), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_dim[-1] * residual_output_shape * 2)', 'embedding_dim'], {}), '(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)\n', (2431, 2490), True, 'import oneflow.nn as nn\n'), ((1869, 1946), 
'oneflow.nn.Conv2d', 'nn.Conv2d', (['input_channel', 'dim'], {'kernel_size': '(5)', 'stride': '(2)', 'padding': '(2)', 'bias': '(False)'}), '(input_channel, dim, kernel_size=5, stride=2, padding=2, bias=False)\n', (1878, 1946), True, 'import oneflow.nn as nn\n'), ((1988, 2007), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['dim'], {}), '(dim)\n', (2002, 2007), True, 'import oneflow.nn as nn\n'), ((2557, 2624), 'libs.components.pooling.AttentiveStatPooling', 'pooling.AttentiveStatPooling', (['attention_hidden_size', 'hidden_dim[-1]'], {}), '(attention_hidden_size, hidden_dim[-1])\n', (2585, 2624), False, 'from libs.components import pooling\n'), ((2648, 2716), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_dim[-1] * residual_output_shape * 2)', 'embedding_dim'], {}), '(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)\n', (2657, 2716), True, 'import oneflow.nn as nn\n'), ((2912, 2971), 'libs.components.pooling.MultiHeadFFA', 'pooling.MultiHeadFFA', (['hidden_dim[-1]', 'attention_hidden_size'], {}), '(hidden_dim[-1], attention_hidden_size)\n', (2932, 2971), False, 'from libs.components import pooling\n'), ((2995, 3059), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_dim[-1] * residual_output_shape)', 'embedding_dim'], {}), '(hidden_dim[-1] * residual_output_shape, embedding_dim)\n', (3004, 3059), True, 'import oneflow.nn as nn\n'), ((3143, 3239), 'libs.components.pooling.MultiHeadAttentionPooling', 'pooling.MultiHeadAttentionPooling', (['(residual_output_shape * hidden_dim[-1])'], {'num_head': 'num_head'}), '(residual_output_shape * hidden_dim[-1],\n num_head=num_head)\n', (3176, 3239), False, 'from libs.components import pooling\n'), ((3261, 3329), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_dim[-1] * residual_output_shape * 2)', 'embedding_dim'], {}), '(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)\n', (3270, 3329), True, 'import oneflow.nn as nn\n'), ((3419, 3530), 'libs.components.pooling.MultiResolutionMultiHeadAttentionPooling', 
'pooling.MultiResolutionMultiHeadAttentionPooling', (['(residual_output_shape * hidden_dim[-1])'], {'num_head': 'num_head'}), '(residual_output_shape *\n hidden_dim[-1], num_head=num_head)\n', (3467, 3530), False, 'from libs.components import pooling\n'), ((3552, 3620), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_dim[-1] * residual_output_shape * 2)', 'embedding_dim'], {}), '(hidden_dim[-1] * residual_output_shape * 2, embedding_dim)\n', (3561, 3620), True, 'import oneflow.nn as nn\n')] |
import os
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import argparse
import numpy as np
import time
import oneflow as flow
from models.resnet50 import resnet50
from utils.imagenet1000_clsidx_to_labels import clsidx_2_labels
from utils.numpy_data_utils import load_image
def _parse_args():
    """Parse command-line flags for the resnet50 inference demo."""
    parser = argparse.ArgumentParser("flags for test resnet50")
    # (flag, dest, default, required, help) for the string-valued options.
    string_flags = [
        ("--model", "model_path", "./resnet50_imagenet_pretrain_model", False, "model path"),
        ("--image", "image_path", None, True, "input image path"),
    ]
    for flag, dest, default, required, help_text in string_flags:
        parser.add_argument(
            flag,
            type=str,
            default=default,
            required=required,
            dest=dest,
            help=help_text,
        )
    parser.add_argument("--graph", action="store_true", help="Run model in graph mode.")
    return parser.parse_args()
class InferGraph(flow.nn.Graph):
    """Static-graph wrapper around the model for graph-mode inference.

    `build` is traced by OneFlow to compile the forward pass; calling the
    instance then runs the compiled graph.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def build(self, image):
        # Inference only: disable gradient tracking during the traced forward.
        with flow.no_grad():
            logits = self.model(image)
            pred = logits.softmax()
        return pred
def main(args):
    """Load the pretrained resnet50, classify one image, and print the result.

    Args:
        args: parsed CLI namespace with `model_path`, `image_path`, `graph`.
    """
    start_t = time.perf_counter()
    print("***** Model Init *****")
    model = resnet50()
    model.load_state_dict(flow.load(args.model_path))
    model = model.to("cuda")
    model.eval()
    end_t = time.perf_counter()
    print(f"***** Model Init Finish, time escapled {end_t - start_t:.6f} s *****")
    if args.graph:
        # Compile the model into a static graph for graph-mode inference.
        model_graph = InferGraph(model)
    # Restart the clock: time only preprocessing + inference below.
    start_t = end_t
    image = load_image(args.image_path)
    image = flow.Tensor(image, device=flow.device("cuda"))
    if args.graph:
        pred = model_graph(image)  # graph already applies softmax in build()
    else:
        pred = model(image).softmax()
    pred = pred.numpy()
    prob = np.max(pred)  # top-1 probability
    clsidx = np.argmax(pred)  # top-1 ImageNet class index
    cls = clsidx_2_labels[clsidx]
    end_t = time.perf_counter()
    print(
        "predict image ({}) prob: {:.5f}, class name: {}, time escapled: {:.6f} s".format(
            os.path.basename(args.image_path), prob, cls, end_t - start_t
        )
    )


if __name__ == "__main__":
    args = _parse_args()
    main(args)
| [
"oneflow.load",
"oneflow.device",
"oneflow.no_grad"
] | [((372, 422), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for test resnet50"""'], {}), "('flags for test resnet50')\n", (395, 422), False, 'import argparse\n'), ((1197, 1216), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1214, 1216), False, 'import time\n'), ((1266, 1276), 'models.resnet50.resnet50', 'resnet50', ([], {}), '()\n', (1274, 1276), False, 'from models.resnet50 import resnet50\n'), ((1389, 1408), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1406, 1408), False, 'import time\n'), ((1585, 1612), 'utils.numpy_data_utils.load_image', 'load_image', (['args.image_path'], {}), '(args.image_path)\n', (1595, 1612), False, 'from utils.numpy_data_utils import load_image\n'), ((1809, 1821), 'numpy.max', 'np.max', (['pred'], {}), '(pred)\n', (1815, 1821), True, 'import numpy as np\n'), ((1835, 1850), 'numpy.argmax', 'np.argmax', (['pred'], {}), '(pred)\n', (1844, 1850), True, 'import numpy as np\n'), ((1898, 1917), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (1915, 1917), False, 'import time\n'), ((1303, 1329), 'oneflow.load', 'flow.load', (['args.model_path'], {}), '(args.model_path)\n', (1312, 1329), True, 'import oneflow as flow\n'), ((72, 97), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n'), ((1054, 1068), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (1066, 1068), True, 'import oneflow as flow\n'), ((1651, 1670), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1662, 1670), True, 'import oneflow as flow\n'), ((2032, 2065), 'os.path.basename', 'os.path.basename', (['args.image_path'], {}), '(args.image_path)\n', (2048, 2065), False, 'import os\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import oneflow as flow
from PIL import Image
from libai.config import instantiate
from libai.data.structures import DistTensorData, Instance
from libai.inference.basic import BasePipeline
class ImageClassificationPipeline(BasePipeline):
    """libai inference pipeline that classifies a single image file."""

    def __init__(
        self,
        config_file,
        data_parallel=None,
        tensor_parallel=None,
        pipeline_parallel=None,
        **kwargs,
    ):
        super().__init__(config_file, data_parallel, tensor_parallel, pipeline_parallel, **kwargs)
        assert "num_classes" in self.cfg.model, "The model's config must contain num_classes"
        # Synthetic label names "Label_0".."Label_{n-1}" and the reverse map.
        self.label2id = {"Label_" + str(i): i for i in range(self.cfg.model.num_classes)}
        self.id2label = {ind: label for label, ind in self.label2id.items()}
        # Re-use the evaluation transform declared in the dataloader config.
        self.transform = instantiate(self.cfg.dataloader.test[0].dataset.transform)

    def _parse_parameters(self, **pipeline_parameters):
        """Route all user-supplied kwargs to the postprocess step."""
        preprocess_params = {}
        forward_params = {}
        postprocess_params = {**pipeline_parameters}
        return preprocess_params, forward_params, postprocess_params

    def preprocess(
        self,
        inputs,
        **kwargs,
    ) -> dict:
        """Load an image path into a dict of batched global tensors for the model."""
        assert os.path.exists(inputs), "inputs must be an existing image path!"
        with open(inputs, "rb") as f:
            img = Image.open(f).convert("RGB")
        img = self.transform(img)
        img = img.unsqueeze(0)  # add batch dimension
        # to global tensor
        model_input = Instance(
            images=DistTensorData(img),
        )
        # Fixed misspelled local name (was `mdoel_input_dict`).
        model_input_dict = {}
        for key, value in model_input.get_fields().items():
            value.to_global()
            model_input_dict[key] = value.tensor
        return model_input_dict

    def forward(self, mdoel_input_dict) -> dict:
        """Run the model on the preprocessed inputs.

        NOTE(review): the misspelled parameter name is kept for backward
        compatibility with any keyword callers.
        """
        model_outputs_dict = self.model(**mdoel_input_dict)
        return model_outputs_dict

    def postprocess(
        self, model_outputs_dict, function_to_apply=None, return_all_scores=False, **kwargs
    ) -> dict:
        """Turn raw logits into {"label", "score"} (or a full score list)."""
        num_labels = self.cfg.model.num_classes
        if function_to_apply is not None:
            function_to_apply = function_to_apply.lower()
            assert function_to_apply in [
                "sigmoid",
                "softmax",
                "none",
            ], f"Unrecognized `function_to_apply` argument: {function_to_apply}"
        else:
            # BUGFIX: these two branches used `==` (a no-op comparison) instead
            # of `=`, so no activation was ever applied by default and raw
            # logits were reported as "scores".
            if num_labels == 1:
                function_to_apply = "sigmoid"
            elif num_labels > 1:
                function_to_apply = "softmax"
        # Logits for the single (first) batch element: shape [num_labels].
        logits = model_outputs_dict["prediction_scores"][0]
        if function_to_apply == "sigmoid":
            scores = flow.sigmoid(logits)
        elif function_to_apply == "softmax":
            scores = flow.softmax(logits)
        else:
            scores = logits
        scores = scores.detach().numpy()
        if return_all_scores:
            return [
                {"label": self.id2label[i], "score": score.item()} for i, score in enumerate(scores)
            ]
        else:
            return {
                "label": self.id2label[scores.argmax().item()],
                "score": scores.max().item(),
            }
| [
"oneflow.softmax",
"oneflow.sigmoid"
] | [((1425, 1483), 'libai.config.instantiate', 'instantiate', (['self.cfg.dataloader.test[0].dataset.transform'], {}), '(self.cfg.dataloader.test[0].dataset.transform)\n', (1436, 1483), False, 'from libai.config import instantiate\n'), ((1821, 1843), 'os.path.exists', 'os.path.exists', (['inputs'], {}), '(inputs)\n', (1835, 1843), False, 'import os\n'), ((3326, 3346), 'oneflow.sigmoid', 'flow.sigmoid', (['logits'], {}), '(logits)\n', (3338, 3346), True, 'import oneflow as flow\n'), ((2115, 2134), 'libai.data.structures.DistTensorData', 'DistTensorData', (['img'], {}), '(img)\n', (2129, 2134), False, 'from libai.data.structures import DistTensorData, Instance\n'), ((3413, 3433), 'oneflow.softmax', 'flow.softmax', (['logits'], {}), '(logits)\n', (3425, 3433), True, 'import oneflow as flow\n'), ((1942, 1955), 'PIL.Image.open', 'Image.open', (['f'], {}), '(f)\n', (1952, 1955), False, 'from PIL import Image\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.unittest
import unittest
class TestAllReduce(flow.unittest.TestCase):
    """Multi-rank tests for `flow.comm.all_reduce` (element-wise sum across ranks)."""

    @flow.unittest.skip_unless_1n2d()
    def test_all_reduce_1n2d(test_case):
        # 1 node x 2 devices: each rank contributes np_arr once -> sum is 2x.
        np_arr = np.array([[1, 2], [3, 4]])
        input = flow.tensor(np_arr, device="cuda")
        out = flow.comm.all_reduce(input)
        test_case.assertTrue(np.allclose(out.numpy(), np_arr * 2))

    @flow.unittest.skip_unless_2n2d()
    def test_all_reduce_2n2d(test_case):
        # 2 nodes x 2 devices: four ranks participate -> sum is 4x.
        np_arr = np.array([[1, 2], [3, 4]])
        input = flow.tensor(np_arr, device="cuda")
        out = flow.comm.all_reduce(input)
        test_case.assertTrue(np.allclose(out.numpy(), np_arr * 4))

    @flow.unittest.skip_unless_1n2d()
    def test_docs(test_case):
        # `import oneflow.unittest` above also binds the top-level name
        # `oneflow`, so this fully-qualified access resolves at runtime.
        oneflow.framework.unittest.check_multi_rank_docstr(oneflow.comm.primitive)


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.comm.all_reduce",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.tensor",
"oneflow.unittest.skip_unless_2n2d"
] | [((725, 757), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (755, 757), True, 'import oneflow as flow\n'), ((1009, 1041), 'oneflow.unittest.skip_unless_2n2d', 'flow.unittest.skip_unless_2n2d', ([], {}), '()\n', (1039, 1041), True, 'import oneflow as flow\n'), ((1293, 1325), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (1323, 1325), True, 'import oneflow as flow\n'), ((1472, 1487), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1485, 1487), False, 'import unittest\n'), ((816, 842), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (824, 842), True, 'import numpy as np\n'), ((859, 893), 'oneflow.tensor', 'flow.tensor', (['np_arr'], {'device': '"""cuda"""'}), "(np_arr, device='cuda')\n", (870, 893), True, 'import oneflow as flow\n'), ((908, 935), 'oneflow.comm.all_reduce', 'flow.comm.all_reduce', (['input'], {}), '(input)\n', (928, 935), True, 'import oneflow as flow\n'), ((1100, 1126), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1108, 1126), True, 'import numpy as np\n'), ((1143, 1177), 'oneflow.tensor', 'flow.tensor', (['np_arr'], {'device': '"""cuda"""'}), "(np_arr, device='cuda')\n", (1154, 1177), True, 'import oneflow as flow\n'), ((1192, 1219), 'oneflow.comm.all_reduce', 'flow.comm.all_reduce', (['input'], {}), '(input)\n', (1212, 1219), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import traceback
from typing import Optional
import oneflow._oneflow_internal
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework import id_util as id_util
from oneflow.compatible.single_client.framework import interpret_util as interpret_util
from oneflow.compatible.single_client.framework import remote_blob as remote_blob_util
from oneflow.compatible.single_client.ops import user_op_builder as user_op_builder
from oneflow.core.operator import op_conf_pb2 as op_conf_util
from oneflow.core.register import logical_blob_id_pb2 as logical_blob_id_util
def build_unary_elemwise_math_op(math_op, x, name=None):
    """Register and run a generic element-wise unary math user op.

    Args:
        math_op: op type name registered with OneFlow (e.g. "abs", "acos").
        x: input Blob.
        name: optional op name; a unique one is generated when omitted.

    Returns:
        The single output Blob ("y") of the op.
    """
    op_name = name if name is not None else id_util.UniqueStr(math_op + "_")
    op = (
        flow.user_op_builder(op_name)
        .Op(math_op)
        .Input("x", [x])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def abs(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise absolute value of a Blob.

    Note: intentionally shadows the builtin ``abs`` within this module.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``|x|`` element-wise,
        e.g. ``[-1, 2, -3] -> [1. 2. 3.]``.
    """
    return build_unary_elemwise_math_op("abs", x, name)
def acos(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise arccosine of a Blob, in radians.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob, values in [-1, 1].
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arccos(x)`` element-wise,
        e.g. ``arccos(0.5) = pi/3 ~= 1.0471976``.
    """
    return build_unary_elemwise_math_op("acos", x, name)
def acosh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise inverse hyperbolic cosine of a Blob.

    The equation is:

    .. math::

        out = log(x+(x^2-1)^\\frac{1}{2})

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob, values in [1, inf).
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arccosh(x)`` element-wise,
        e.g. ``arccosh(2) ~= 1.316958``.
    """
    return build_unary_elemwise_math_op("acosh", x, name)
def asin(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise arcsine of a Blob, in radians.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob, values in [-1, 1].
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arcsin(x)`` element-wise,
        e.g. ``arcsin(0.5) = pi/6 ~= 0.5235988``.
    """
    return build_unary_elemwise_math_op("asin", x, name)
def asinh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise inverse hyperbolic sine of a Blob.

    The equation is:

    .. math::

        out = log(x+(x^2+1)^\\frac{1}{2})

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arcsinh(x)`` element-wise,
        e.g. ``arcsinh(2) ~= 1.4436355``.
    """
    return build_unary_elemwise_math_op("asinh", x, name)
def atan(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise arctangent of a Blob, in radians.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arctan(x)`` element-wise,
        e.g. ``arctan(0.5) ~= 0.4636476``.
    """
    return build_unary_elemwise_math_op("atan", x, name)
def atanh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise inverse hyperbolic tangent of a Blob.

    The equation is:

    .. math::

        out = \\frac{1}{2}*log(\\frac{1+x}{1-x})

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob, values in (-1, 1).
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``arctanh(x)`` element-wise,
        e.g. ``arctanh(0.5) ~= 0.54930615``.
    """
    return build_unary_elemwise_math_op("atanh", x, name)
def ceil(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute, element-wise, the smallest integer not less than the input.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``ceil(x)`` element-wise,
        e.g. ``[1.3, 1.5, 2.7] -> [2. 2. 3.]``.
    """
    return build_unary_elemwise_math_op("ceil", x, name)
def cos(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise cosine of a Blob (input in radians).

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob, in radians.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``cos(x)`` element-wise,
        e.g. ``cos(pi/3) ~= 0.5``.
    """
    return build_unary_elemwise_math_op("cos", x, name)
def cosh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise hyperbolic cosine of a Blob.

    The equation is:

    .. math::

        out = \\frac{e^x+e^{-x}}{2}

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``cosh(x)`` element-wise,
        e.g. ``cosh(1) ~= 1.5430806``.
    """
    return build_unary_elemwise_math_op("cosh", x, name)
def erf(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise Gauss error function of a Blob.

    The equation is:

    .. math ::

        out = \\frac{2}{\\sqrt{\\pi}}*\\int_{0}^{x}e^{-z^2}\\mathrm{d}{z}

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``erf(x)`` element-wise,
        e.g. ``erf(1) ~= 0.8427008``.
    """
    return build_unary_elemwise_math_op("erf", x, name)
def erfc(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise complementary error function, :math:`1-erf(x)`.

    See ``math.erf`` in this module for the definition of ``erf``.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``erfc(x)`` element-wise,
        e.g. ``erfc(1) ~= 1.5729921e-01``.
    """
    return build_unary_elemwise_math_op("erfc", x, name)
def exp(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the element-wise exponential of a Blob.

    The equation is:

    .. math::

        out = e^x

    Args:
        x (oneflow._oneflow_internal.BlobDesc): Input Blob.
        name (Optional[str], optional): Name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: Blob with ``exp(x)`` element-wise,
        e.g. ``exp(1) ~= 2.7182817``.
    """
    return build_unary_elemwise_math_op("exp", x, name)
def expm1(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute :math:`e^x - 1` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``exp(x) - 1``.
    """
    return build_unary_elemwise_math_op("expm1", x, name)
def floor(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute, elementwise, the largest integer not greater than the input.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``floor(x)``.
    """
    return build_unary_elemwise_math_op("floor", x, name)
def lgamma(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the natural log of the Gamma function, :math:`\\log\\Gamma(x)`, elementwise.

    Note: despite the Gamma-integral shown in older docs, this op returns the
    *log*-gamma value (e.g. ``lgamma(1.3) ~= -0.108``, which is ``ln(Gamma(1.3))``).

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``lgamma(x)``.
    """
    return build_unary_elemwise_math_op("lgamma", x, name)
def log(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the natural logarithm :math:`\\ln(x)` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``log(x)``.
    """
    return build_unary_elemwise_math_op("log", x, name)
def log1p(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute :math:`\\log(1 + x)` elementwise.

    Note: older docs described this as ``log(x)+1``; the example output
    (``log1p(1.3) ~= 0.8329 = ln(2.3)``) shows it is ``log(1 + x)``.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``log1p(x)``.
    """
    return build_unary_elemwise_math_op("log1p", x, name)
def log_sigmoid(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the log-sigmoid :math:`\\log\\frac{1}{1 + e^{-x}}` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``log_sigmoid(x)``.
    """
    return build_unary_elemwise_math_op("log_sigmoid", x, name)
def negative(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise negation :math:`-x`.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``-x``.
    """
    return build_unary_elemwise_math_op("negative", x, name)
def reciprocal(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise reciprocal :math:`\\frac{1}{x}`.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``1 / x``.
    """
    return build_unary_elemwise_math_op("reciprocal", x, name)
def reciprocal_no_nan(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute a safe elementwise reciprocal: :math:`1/x`, with zeros mapped to zero.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob where each element is
        ``1 / x`` for nonzero ``x`` and ``0`` where ``x == 0``.
    """
    return build_unary_elemwise_math_op("reciprocal_no_nan", x, name)
def rint(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Round each element to the nearest integer.

    # NOTE(review): presumably follows C ``rint`` tie-breaking (half to even);
    # the documented example does not exercise exact .5 ties -- confirm.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob of nearest-integer values.
    """
    return build_unary_elemwise_math_op("rint", x, name)
def round(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Round each element of the input to the nearest integer.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob of rounded values.
    """
    return build_unary_elemwise_math_op("round", x, name)
def rsqrt(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the reciprocal square root :math:`\\frac{1}{\\sqrt{x}}` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``1 / sqrt(x)``.
    """
    return build_unary_elemwise_math_op("rsqrt", x, name)
def sigmoid_v2(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the logistic sigmoid :math:`\\frac{1}{1 + e^{-x}}` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``sigmoid(x)``.
    """
    return build_unary_elemwise_math_op("sigmoid_v2", x, name)
def sign(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise sign: -1 for negative, 0 for zero, 1 for positive.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``sign(x)``.
    """
    return build_unary_elemwise_math_op("sign", x, name)
def sin(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise sine (input in radians).

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``sin(x)``.
    """
    return build_unary_elemwise_math_op("sin", x, name)
def sinh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the hyperbolic sine :math:`\\frac{e^x - e^{-x}}{2}` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``sinh(x)``.
    """
    return build_unary_elemwise_math_op("sinh", x, name)
def softplus(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the softplus :math:`\\log(e^x + 1)` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``softplus(x)``.
    """
    return build_unary_elemwise_math_op("softplus", x, name)
def sqrt(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise square root :math:`\\sqrt{x}`.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``sqrt(x)``.
    """
    return build_unary_elemwise_math_op("sqrt", x, name)
def square(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise square :math:`x^2`.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``x * x``.
    """
    return build_unary_elemwise_math_op("square", x, name)
def tan(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the elementwise tangent (input in radians).

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``tan(x)``.
    """
    return build_unary_elemwise_math_op("tan", x, name)
def tanh(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Compute the hyperbolic tangent :math:`\\frac{e^x - e^{-x}}{e^x + e^{-x}}` elementwise.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``tanh(x)``.
    """
    return build_unary_elemwise_math_op("tanh", x, name)
def tanh_v2(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Deprecated alias for :func:`tanh`; delegates directly to ``flow.math.tanh``.

    Args:
        x: the input Blob.
        name: optional name for the op. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: a Blob holding ``tanh(x)``.
    """
    # Deprecation is announced on stdout (including the caller's stack frame),
    # matching the project's existing behavior for this op.
    print(
        "WARNING: flow.math.tanh_v2 has been deprecated. Please replace it by flow.math.tanh.\n "
    )
    print(traceback.format_stack()[-2])
    return flow.math.tanh(x, name)
| [
"oneflow.compatible.single_client.math.tanh",
"oneflow.compatible.single_client.user_op_builder",
"oneflow.compatible.single_client.framework.id_util.UniqueStr"
] | [((35709, 35732), 'oneflow.compatible.single_client.math.tanh', 'flow.math.tanh', (['x', 'name'], {}), '(x, name)\n', (35723, 35732), True, 'from oneflow.compatible import single_client as flow\n'), ((1300, 1332), 'oneflow.compatible.single_client.framework.id_util.UniqueStr', 'id_util.UniqueStr', (["(math_op + '_')"], {}), "(math_op + '_')\n", (1317, 1332), True, 'from oneflow.compatible.single_client.framework import id_util as id_util\n'), ((35668, 35692), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (35690, 35692), False, 'import traceback\n'), ((1354, 1380), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1374, 1380), True, 'from oneflow.compatible import single_client as flow\n')] |
import numpy as np
import oneflow as flow
import oneflow.nn as nn
def _make_divisible(v, divisor, min_value=None):
"""
This function is taken from the original tf repo.
It ensures that all layers have a channel number that is divisible by 8
It can be seen here:
https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
:param v:
:param divisor:
:param min_value:
:return:
"""
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine map.

    Normalizes by the mean and (sample) standard deviation of the final axis,
    then scales by ``gamma`` and shifts by ``beta``.
    """

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        # Identity-initialized affine parameters: scale = 1, shift = 0.
        self.gamma = nn.Parameter(flow.ones(features))
        self.beta = nn.Parameter(flow.zeros(features))
        self.eps = eps  # stability floor added to the std before dividing

    def forward(self, x):
        # Per-sample statistics over the last axis, kept for broadcasting.
        mean = x.mean(-1, keepdim=True)
        std = x.std(-1, keepdim=True)
        centered = x - mean
        return self.gamma * centered / (std + self.eps) + self.beta
class ConvBNReLU(nn.Sequential):
    """Conv1d -> BatchNorm1d -> ReLU6 building block with length-preserving padding.

    The padding ``(kernel_size - 1) // 2`` keeps the sequence length unchanged
    for stride 1 and odd kernel sizes.
    """

    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, groups=1):
        same_padding = (kernel_size - 1) // 2
        conv = nn.Conv1d(
            in_planes,
            out_planes,
            kernel_size,
            stride,
            same_padding,
            groups=groups,
            bias=False,  # bias is redundant before batch norm
        )
        super(ConvBNReLU, self).__init__(conv, nn.BatchNorm1d(out_planes), nn.ReLU6())
class InvertedResidual(nn.Module):
    """MobileNetV2 inverted-residual block: 1x1 expand -> depthwise -> 1x1 project.

    Uses an identity shortcut when stride == 1 and input/output channels match.
    """

    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        self.stride = stride
        assert stride in [1, 2]
        hidden_dim = int(round(inp * expand_ratio))
        # The residual shortcut is only valid when the block preserves shape.
        self.use_res_connect = self.stride == 1 and inp == oup
        layers = []
        if expand_ratio != 1:
            # pointwise (1x1) expansion
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        # depthwise convolution (one group per channel)
        layers.append(ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim))
        # linear pointwise projection -- deliberately no activation
        layers.append(nn.Conv1d(hidden_dim, oup, 1, 1, 0, bias=False))
        layers.append(nn.BatchNorm1d(oup))
        self.conv = nn.Sequential(*layers)

    def forward(self, x):
        out = self.conv(x)
        # Add the identity shortcut only when shapes are guaranteed to match.
        return x + out if self.use_res_connect else out
class MobileNetV2(nn.Module):
    def __init__(
        self,
        num_classes=1000,
        width_mult=1.0,
        inverted_residual_setting=None,
        round_nearest=8,
    ):
        """
        MobileNet V2 main class (1-D variant).

        Args:
            num_classes (int or sequence): Number of classes. An int is used
                directly; for backward compatibility a sequence whose first
                element is the class count is also accepted (the previous code
                required ``num_classes[0]`` and crashed on the int default).
            width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount
            inverted_residual_setting: Network structure
            round_nearest (int): Round the number of channels in each layer to be a multiple of this number
            Set to 1 to turn off rounding
        """
        super(MobileNetV2, self).__init__()
        block = InvertedResidual
        input_channel = 32
        last_channel = 1280
        if inverted_residual_setting is None:
            inverted_residual_setting = [
                # t (expand ratio), c (channels), n (repeats), s (stride)
                [1, 16, 1, 1],
                [6, 24, 2, 2],
                [6, 32, 3, 2],
                [6, 64, 4, 2],
                [6, 96, 3, 1],
                [6, 160, 3, 2],
                [6, 320, 1, 1],
            ]
        # only check the first element, assuming user knows t,c,n,s are required
        if (
            len(inverted_residual_setting) == 0
            or len(inverted_residual_setting[0]) != 4
        ):
            raise ValueError(
                "inverted_residual_setting should be non-empty "
                "or a 4-element list, got {}".format(inverted_residual_setting)
            )
        # building first layer
        input_channel = _make_divisible(input_channel * width_mult, round_nearest)
        self.last_channel = _make_divisible(
            last_channel * max(1.0, width_mult), round_nearest
        )
        features = [ConvBNReLU(1, input_channel, stride=2)]
        # building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
            output_channel = _make_divisible(c * width_mult, round_nearest)
            for i in range(n):
                # the configured stride applies only to the first block of a stage
                stride = s if i == 0 else 1
                features.append(
                    block(input_channel, output_channel, stride, expand_ratio=t)
                )
                input_channel = output_channel
        # building last several layers
        features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1))
        # make it nn.Sequential
        self.features = nn.Sequential(*features)
        # BUG FIX: previously ``num_classes[0]`` was used unconditionally, which
        # raises TypeError for the documented int default (1000). Accept both.
        out_features = num_classes if isinstance(num_classes, int) else num_classes[0]
        # building classifier
        self.classifier = nn.Sequential(
            nn.Dropout(0.2),
            nn.Linear(self.last_channel, out_features),
            nn.LogSoftmax(dim=1),
        )
        # NOTE(review): 6420 is a hard-coded raw feature length -- confirm it
        # matches the data pipeline feeding this model.
        self.normalize = nn.BatchNorm1d(6420)
        self.maxpool1d = nn.MaxPool1d(3, stride=2)
        # weight initialization
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm1d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x):
        # Raw 2-D input (batch, features): batch-normalize, insert a channel
        # axis, and downsample before the convolutional trunk. 3-D input is
        # assumed to already be (batch, channel, length).
        if len(x.shape) == 2:
            print("x.size ", x.size())
            x = self.normalize(x)
            x = x.reshape([x.shape[0], 1, x.shape[1]])
            x = self.maxpool1d(x)
        x = self.features(x)
        x = x.mean(2)  # global average pool over the temporal axis
        x = self.classifier(x)
        return x
class AdditiveMarginSoftmax(nn.Module):
    """Additive-margin softmax (AM-Softmax) loss.

    Subtracts a fixed margin ``m`` from the target-class score and scales by
    ``s`` before forming the softmax cross-entropy.
    """

    def __init__(self, margin=0.35, s=30):
        # margin: additive margin m applied to the target-class score.
        # s: logit scale factor.
        super().__init__()
        self.m = margin
        self.s = s
        self.epsilon = 0.000000000001  # guards divisions and log against zero
        print("AMSoftmax m = " + str(margin))

    def forward(self, predicted, target):
        # ------------ AM Softmax ------------ #
        # NOTE(review): normalization is over dim=0 (per column); standard
        # AM-Softmax normalizes per sample/class vector -- confirm intent.
        predicted = predicted / (predicted.norm(dim=0) + self.epsilon)
        # Row indices [0..batch-1] used to pick each sample's target score.
        indexes = flow.Tensor(range(predicted.size(0))).long().to(predicted.device)
        cos_theta_y = predicted[indexes, target]
        cos_theta_y_m = cos_theta_y - self.m  # margin-adjusted target score
        # e**(s*x) expressed as (ones * e) ** (s*x) to broadcast the base.
        exp_s = (flow.ones_like(cos_theta_y_m) * np.e) ** (self.s * cos_theta_y_m)
        # Sum of exp(s * score) over all classes, minus the (un-margined)
        # target term, which is replaced by exp_s in the final ratio.
        sum_cos_theta_j = (
            (flow.ones_like(predicted) * np.e) ** (predicted * self.s)
        ).sum(dim=1) - (
            (flow.ones_like(predicted[indexes, target]) * np.e)
            ** (predicted[indexes, target] * self.s)
        )
        # Mean negative log-probability of the margin-adjusted target class.
        log = -flow.log(exp_s / (exp_s + sum_cos_theta_j + self.epsilon)).mean()
        return log
| [
"oneflow.nn.Sequential",
"oneflow.nn.init.kaiming_normal_",
"oneflow.nn.init.normal_",
"oneflow.nn.ReLU6",
"oneflow.log",
"oneflow.nn.MaxPool1d",
"oneflow.nn.Conv1d",
"oneflow.zeros",
"oneflow.ones",
"oneflow.nn.Dropout",
"oneflow.nn.BatchNorm1d",
"oneflow.nn.LogSoftmax",
"oneflow.nn.init.ze... | [((2412, 2434), 'oneflow.nn.Sequential', 'nn.Sequential', (['*layers'], {}), '(*layers)\n', (2425, 2434), True, 'import oneflow.nn as nn\n'), ((4977, 5001), 'oneflow.nn.Sequential', 'nn.Sequential', (['*features'], {}), '(*features)\n', (4990, 5001), True, 'import oneflow.nn as nn\n'), ((5231, 5251), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['(6420)'], {}), '(6420)\n', (5245, 5251), True, 'import oneflow.nn as nn\n'), ((5277, 5302), 'oneflow.nn.MaxPool1d', 'nn.MaxPool1d', (['(3)'], {'stride': '(2)'}), '(3, stride=2)\n', (5289, 5302), True, 'import oneflow.nn as nn\n'), ((859, 878), 'oneflow.ones', 'flow.ones', (['features'], {}), '(features)\n', (868, 878), True, 'import oneflow as flow\n'), ((913, 933), 'oneflow.zeros', 'flow.zeros', (['features'], {}), '(features)\n', (923, 933), True, 'import oneflow as flow\n'), ((1345, 1439), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['in_planes', 'out_planes', 'kernel_size', 'stride', 'padding'], {'groups': 'groups', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size, stride, padding, groups=\n groups, bias=False)\n', (1354, 1439), True, 'import oneflow.nn as nn\n'), ((1575, 1601), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_planes'], {}), '(out_planes)\n', (1589, 1601), True, 'import oneflow.nn as nn\n'), ((1615, 1625), 'oneflow.nn.ReLU6', 'nn.ReLU6', ([], {}), '()\n', (1623, 1625), True, 'import oneflow.nn as nn\n'), ((5086, 5101), 'oneflow.nn.Dropout', 'nn.Dropout', (['(0.2)'], {}), '(0.2)\n', (5096, 5101), True, 'import oneflow.nn as nn\n'), ((5115, 5159), 'oneflow.nn.Linear', 'nn.Linear', (['self.last_channel', 'num_classes[0]'], {}), '(self.last_channel, num_classes[0])\n', (5124, 5159), True, 'import oneflow.nn as nn\n'), ((5173, 5193), 'oneflow.nn.LogSoftmax', 'nn.LogSoftmax', ([], {'dim': '(1)'}), '(dim=1)\n', (5186, 5193), True, 'import oneflow.nn as nn\n'), ((2282, 2329), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['hidden_dim', 'oup', '(1)', '(1)', '(0)'], {'bias': '(False)'}), 
'(hidden_dim, oup, 1, 1, 0, bias=False)\n', (2291, 2329), True, 'import oneflow.nn as nn\n'), ((2347, 2366), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['oup'], {}), '(oup)\n', (2361, 2366), True, 'import oneflow.nn as nn\n'), ((5426, 5475), 'oneflow.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""'}), "(m.weight, mode='fan_out')\n", (5449, 5475), True, 'import oneflow.nn as nn\n'), ((6753, 6782), 'oneflow.ones_like', 'flow.ones_like', (['cos_theta_y_m'], {}), '(cos_theta_y_m)\n', (6767, 6782), True, 'import oneflow as flow\n'), ((5535, 5557), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (5549, 5557), True, 'import oneflow.nn as nn\n'), ((5622, 5645), 'oneflow.nn.init.ones_', 'nn.init.ones_', (['m.weight'], {}), '(m.weight)\n', (5635, 5645), True, 'import oneflow.nn as nn\n'), ((5662, 5684), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (5676, 5684), True, 'import oneflow.nn as nn\n'), ((6956, 6998), 'oneflow.ones_like', 'flow.ones_like', (['predicted[indexes, target]'], {}), '(predicted[indexes, target])\n', (6970, 6998), True, 'import oneflow as flow\n'), ((7085, 7143), 'oneflow.log', 'flow.log', (['(exp_s / (exp_s + sum_cos_theta_j + self.epsilon))'], {}), '(exp_s / (exp_s + sum_cos_theta_j + self.epsilon))\n', (7093, 7143), True, 'import oneflow as flow\n'), ((5744, 5778), 'oneflow.nn.init.normal_', 'nn.init.normal_', (['m.weight', '(0)', '(0.01)'], {}), '(m.weight, 0, 0.01)\n', (5759, 5778), True, 'import oneflow.nn as nn\n'), ((5795, 5817), 'oneflow.nn.init.zeros_', 'nn.init.zeros_', (['m.bias'], {}), '(m.bias)\n', (5809, 5817), True, 'import oneflow.nn as nn\n'), ((6860, 6885), 'oneflow.ones_like', 'flow.ones_like', (['predicted'], {}), '(predicted)\n', (6874, 6885), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
import os
def gen_gather_test_sample(input_shape, index_shape, dim, is_float=True):
    """Generate a reference sample for testing ``dim_gather``.

    Builds a random input and index array, the expected forward output
    (``np.take_along_axis``) and the expected input gradient (a scatter-add of
    ones back along ``dim``, the adjoint of the gather).

    Args:
        input_shape: shape of the array gathered from.
        index_shape: shape of the index array (and of the gather output).
        dim: axis along which to gather.
        is_float: uniform floats in [0, 1) when True, else ints in [0, 100).

    Returns:
        dict with keys "input" (float32), "index" (int32), "dim" (int),
        "output" (float32) and "grad" (float32).
    """

    def _np_dim_scatter_add(src, dim, index, outshape):
        # Adjoint of take_along_axis: each src element is accumulated at the
        # coordinate obtained by replacing the dim-th component with the
        # matching index value. (The previous version round-tripped through
        # ravel_multi_index/unravel_index for no effect.)
        output = np.zeros(outshape)
        for flat in range(src.size):
            src_coord = np.unravel_index(flat, src.shape)
            dst_coord = list(src_coord)
            dst_coord[dim] = index[np.unravel_index(flat, index.shape)]
            output[tuple(dst_coord)] += src[src_coord]
        return output

    # Named `values` rather than `input` to avoid shadowing the builtin.
    if is_float:
        values = np.random.random(input_shape)
    else:
        values = np.random.randint(0, 100, input_shape)
    index = np.random.randint(0, input_shape[dim], index_shape)
    output = np.take_along_axis(values, index, dim)
    grad = _np_dim_scatter_add(np.ones_like(output), dim, index, input_shape)
    return {
        "input": values.astype(np.float32),
        "index": index.astype(np.int32),
        "dim": dim,
        "output": output.astype(np.float32),
        "grad": grad.astype(np.float32),
    }
def _make_dim_gather_fn(test_case, sample, datashape):
    """Compile a mirrored-view training job running ``flow.dim_gather``.

    The job gathers dynamic input along ``sample["dim"]`` and, through
    ``flow.watch_diff``, asserts the backward gradient equals ``sample["grad"]``.

    Args:
        test_case: unittest.TestCase used for gradient assertions.
        sample: dict from ``gen_gather_test_sample`` (input/index/dim/output/grad).
        datashape: static maximum shape for the ListNumpy placeholders.

    Returns:
        The compiled OneFlow job; call as ``job([input], [index])``.
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    func_config.default_logical_view(flow.scope.mirrored_view())
    func_config.default_placement_scope(flow.scope.placement("gpu", "0:0"))

    def _compare_diff(blob: oft.ListNumpy):
        # Invoked by flow.watch_diff with the gradient w.r.t. x.
        test_case.assertTrue(np.allclose(sample["grad"], blob[0]))

    @flow.global_function(type="train", function_config=func_config)
    def DynamicDimGatherJob(
        params_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.float32),
        index_def: oft.ListNumpy.Placeholder(datashape, dtype=flow.int32),
    ) -> oft.ListNumpy:
        # A scalar trainable variable: gives the optimizer something to train
        # and routes the backward pass through x (= x_var + input).
        x_var = flow.get_variable(
            "input",
            shape=(1,),
            dtype=flow.float32,
            initializer=flow.constant_initializer(0),
        )
        x_var = flow.cast_to_current_logical_view(x_var)
        x = x_var + params_def
        y = flow.dim_gather(x, sample["dim"], index_def)
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
        ).minimize(y)
        flow.watch_diff(x, _compare_diff)
        return y

    return DynamicDimGatherJob
def _compare_dim_gather_with_samples(test_case, inputshape, indexshape, dim, maxshape):
    """Run one dynamic dim_gather case and check the forward output.

    The backward gradient is checked inside the job via ``flow.watch_diff``.
    """
    sample = gen_gather_test_sample(inputshape, indexshape, dim)
    gather_job = _make_dim_gather_fn(test_case, sample, maxshape)
    result = gather_job([sample["input"]], [sample["index"]])[0]
    expected = sample["output"].astype(np.float32)
    test_case.assertTrue(np.allclose(result, expected, 1e-3, 1e-3))
@flow.unittest.skip_unless_1n1d()
class TestDynamicDimGather(flow.unittest.TestCase):
    """Forward/backward tests for flow.dim_gather on dynamic (mirrored) input."""

    # Placement is pinned to GPU in _make_dim_gather_fn, so skip on CPU-only CI.
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_dynamic_dim_gather(test_case):
        # 2-D gather along each axis, then a 3-D case with a reduced middle axis.
        _compare_dim_gather_with_samples(
            test_case, inputshape=(2, 2), indexshape=(2, 2), dim=1, maxshape=(10, 10)
        )
        _compare_dim_gather_with_samples(
            test_case, inputshape=(2, 2), indexshape=(2, 2), dim=0, maxshape=(10, 10)
        )
        _compare_dim_gather_with_samples(
            test_case,
            inputshape=(4, 4, 3),
            indexshape=(4, 1, 3),
            dim=1,
            maxshape=(10, 10, 10),
        )
# Allow running this test module directly with `python`, not only via a runner.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.typing.ListNumpy.Placeholder",
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.watch_diff",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.... | [((3557, 3589), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3587, 3589), True, 'from oneflow.compatible import single_client as flow\n'), ((1492, 1543), 'numpy.random.randint', 'np.random.randint', (['(0)', 'input_shape[dim]', 'index_shape'], {}), '(0, input_shape[dim], index_shape)\n', (1509, 1543), True, 'import numpy as np\n'), ((1557, 1594), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', 'dim'], {}), '(input, index, dim)\n', (1575, 1594), True, 'import numpy as np\n'), ((1958, 1986), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1984, 1986), True, 'from oneflow.compatible import single_client as flow\n'), ((2005, 2026), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2024, 2026), True, 'from oneflow.compatible import single_client as flow\n'), ((2334, 2397), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2354, 2397), True, 'from oneflow.compatible import single_client as flow\n'), ((4274, 4289), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4287, 4289), False, 'import unittest\n'), ((896, 914), 'numpy.zeros', 'np.zeros', (['outshape'], {}), '(outshape)\n', (904, 914), True, 'import numpy as np\n'), ((1385, 1414), 'numpy.random.random', 'np.random.random', (['input_shape'], {}), '(input_shape)\n', (1401, 1414), True, 'import numpy as np\n'), ((1441, 1479), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', 'input_shape'], {}), '(0, 100, input_shape)\n', (1458, 1479), True, 'import numpy as np\n'), ((1626, 1646), 'numpy.ones_like', 'np.ones_like', (['output'], {}), '(output)\n', (1638, 1646), True, 'import numpy as np\n'), ((2112, 2138), 
'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2136, 2138), True, 'from oneflow.compatible import single_client as flow\n'), ((2180, 2214), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0"""'], {}), "('gpu', '0:0')\n", (2200, 2214), True, 'from oneflow.compatible import single_client as flow\n'), ((2796, 2836), 'oneflow.compatible.single_client.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x_var'], {}), '(x_var)\n', (2829, 2836), True, 'from oneflow.compatible import single_client as flow\n'), ((2881, 2925), 'oneflow.compatible.single_client.dim_gather', 'flow.dim_gather', (['x', "sample['dim']", 'index_def'], {}), "(x, sample['dim'], index_def)\n", (2896, 2925), True, 'from oneflow.compatible import single_client as flow\n'), ((3064, 3097), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x', '_compare_diff'], {}), '(x, _compare_diff)\n', (3079, 3097), True, 'from oneflow.compatible import single_client as flow\n'), ((3663, 3697), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3672, 3697), False, 'import os\n'), ((980, 1015), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'src.shape'], {}), '(srcidx, src.shape)\n', (996, 1015), True, 'import numpy as np\n'), ((1152, 1192), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['outcoord', 'outshape'], {}), '(outcoord, outshape)\n', (1172, 1192), True, 'import numpy as np\n'), ((2290, 2326), 'numpy.allclose', 'np.allclose', (["sample['grad']", 'blob[0]'], {}), "(sample['grad'], blob[0])\n", (2301, 2326), True, 'import numpy as np\n'), ((2447, 2503), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['datashape'], {'dtype': 'flow.float32'}), '(datashape, dtype=flow.float32)\n', (2472, 2503), True, 'from oneflow.compatible.single_client import typing as oft\n'), 
((2524, 2578), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['datashape'], {'dtype': 'flow.int32'}), '(datashape, dtype=flow.int32)\n', (2549, 2578), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1085, 1122), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'index.shape'], {}), '(srcidx, index.shape)\n', (1101, 1122), True, 'import numpy as np\n'), ((1212, 1253), 'numpy.unravel_index', 'np.unravel_index', (['output_offset', 'outshape'], {}), '(output_offset, outshape)\n', (1228, 1253), True, 'import numpy as np\n'), ((1279, 1314), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'src.shape'], {}), '(srcidx, src.shape)\n', (1295, 1314), True, 'import numpy as np\n'), ((2740, 2768), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2765, 2768), True, 'from oneflow.compatible import single_client as flow\n'), ((2967, 3021), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (3008, 3021), True, 'from oneflow.compatible import single_client as flow\n')] |
import oneflow as flow
import oneflow.nn as nn
from quantization_ops import *
__all__ = ["QuantizationAlexNet"]
class QuantizationAlexNet(nn.Module):
    """AlexNet with hooks for quantization-aware inference.

    ``quantize`` builds a parallel set of quantized modules wrapping the
    float conv/linear layers, ``quantize_forward`` runs inference through
    them, and ``freeze`` chains each layer's input quantizer to the
    previous quantized layer's output quantizer.
    """

    def __init__(self, num_classes: int = 1000) -> None:
        super(QuantizationAlexNet, self).__init__()
        # Standard AlexNet feature extractor: five conv stages, three max-pools.
        feature_layers = [
            nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(64, 192, kernel_size=5, padding=2),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            nn.Conv2d(192, 384, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(384, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(256, 256, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        ]
        self.features = nn.Sequential(*feature_layers)
        self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
        classifier_layers = [
            nn.Dropout(),
            nn.Linear(256 * 6 * 6, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Linear(4096, num_classes),
        ]
        self.classifier = nn.Sequential(*classifier_layers)

    def forward(self, x: flow.Tensor) -> flow.Tensor:
        """Float (non-quantized) forward pass."""
        out = self.features(x)
        out = self.avgpool(out)
        out = flow.flatten(out, 1)
        return self.classifier(out)

    def quantize(self, quantization_bit=8, quantization_scheme='symmetric', quantization_formula='google', per_layer_quantization=True):
        """Build quantized twins of the conv/linear layers.

        Only the first quantized conv observes its own input (``qi=True``);
        every later layer is wired to reuse the preceding layer's output
        quantizer in :meth:`freeze`.
        """
        # Shared quantizer configuration for every wrapped layer.
        q_kwargs = dict(
            quantization_bit=quantization_bit,
            quantization_scheme=quantization_scheme,
            quantization_formula=quantization_formula,
            per_layer_quantization=per_layer_quantization,
        )
        # Indices 0/3/6/8/10 are the Conv2d modules inside self.features.
        self.q_features = nn.Sequential(
            q_conv(self.features[0], qi=True, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            q_conv(self.features[3], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
            q_conv(self.features[6], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            q_conv(self.features[8], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            q_conv(self.features[10], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=3, stride=2),
        )
        self.q_avgpool = nn.AdaptiveAvgPool2d((6, 6))
        # Indices 1/4/6 are the Linear modules inside self.classifier.
        self.q_classifier = nn.Sequential(
            nn.Dropout(),
            q_linear(self.classifier[1], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            nn.Dropout(),
            q_linear(self.classifier[4], qi=False, qo=True, **q_kwargs),
            nn.ReLU(inplace=True),
            q_linear(self.classifier[6], qi=False, qo=True, **q_kwargs),
        )

    def quantize_forward(self, x):
        """Forward pass through the quantization-aware modules."""
        out = self.q_features(x)
        out = self.q_avgpool(out)
        out = flow.flatten(out, 1)
        return self.q_classifier(out)

    def freeze(self):
        """Freeze quantizer parameters, chaining each layer's input
        quantizer to the previous quantized layer's output quantizer."""
        self.q_features[0].freeze()
        self.q_features[3].freeze(self.q_features[0].qo)
        self.q_features[6].freeze(self.q_features[3].qo)
        self.q_features[8].freeze(self.q_features[6].qo)
        self.q_features[10].freeze(self.q_features[8].qo)
        self.q_classifier[1].freeze(self.q_features[10].qo)
        self.q_classifier[4].freeze(self.q_classifier[1].qo)
        self.q_classifier[6].freeze(self.q_classifier[4].qo)
| [
"oneflow.nn.ReLU",
"oneflow.nn.AdaptiveAvgPool2d",
"oneflow.nn.Dropout",
"oneflow.nn.MaxPool2d",
"oneflow.nn.Conv2d",
"oneflow.nn.Linear",
"oneflow.flatten"
] | [((964, 992), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(6, 6)'], {}), '((6, 6))\n', (984, 992), True, 'import oneflow.nn as nn\n'), ((1409, 1427), 'oneflow.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1421, 1427), True, 'import oneflow as flow\n'), ((3228, 3256), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(6, 6)'], {}), '((6, 6))\n', (3248, 3256), True, 'import oneflow.nn as nn\n'), ((4285, 4303), 'oneflow.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (4297, 4303), True, 'import oneflow as flow\n'), ((313, 366), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(3)', '(64)'], {'kernel_size': '(11)', 'stride': '(4)', 'padding': '(2)'}), '(3, 64, kernel_size=11, stride=4, padding=2)\n', (322, 366), True, 'import oneflow.nn as nn\n'), ((380, 401), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (387, 401), True, 'import oneflow.nn as nn\n'), ((415, 452), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (427, 452), True, 'import oneflow.nn as nn\n'), ((466, 510), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(64)', '(192)'], {'kernel_size': '(5)', 'padding': '(2)'}), '(64, 192, kernel_size=5, padding=2)\n', (475, 510), True, 'import oneflow.nn as nn\n'), ((524, 545), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (531, 545), True, 'import oneflow.nn as nn\n'), ((559, 596), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (571, 596), True, 'import oneflow.nn as nn\n'), ((610, 655), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(192)', '(384)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(192, 384, kernel_size=3, padding=1)\n', (619, 655), True, 'import oneflow.nn as nn\n'), ((669, 690), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (676, 690), True, 'import oneflow.nn as nn\n'), ((704, 749), 
'oneflow.nn.Conv2d', 'nn.Conv2d', (['(384)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(384, 256, kernel_size=3, padding=1)\n', (713, 749), True, 'import oneflow.nn as nn\n'), ((763, 784), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (770, 784), True, 'import oneflow.nn as nn\n'), ((798, 843), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(256)', '(256)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(256, 256, kernel_size=3, padding=1)\n', (807, 843), True, 'import oneflow.nn as nn\n'), ((857, 878), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (864, 878), True, 'import oneflow.nn as nn\n'), ((892, 929), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (904, 929), True, 'import oneflow.nn as nn\n'), ((1046, 1058), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1056, 1058), True, 'import oneflow.nn as nn\n'), ((1072, 1100), 'oneflow.nn.Linear', 'nn.Linear', (['(256 * 6 * 6)', '(4096)'], {}), '(256 * 6 * 6, 4096)\n', (1081, 1100), True, 'import oneflow.nn as nn\n'), ((1114, 1135), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1121, 1135), True, 'import oneflow.nn as nn\n'), ((1149, 1161), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (1159, 1161), True, 'import oneflow.nn as nn\n'), ((1175, 1196), 'oneflow.nn.Linear', 'nn.Linear', (['(4096)', '(4096)'], {}), '(4096, 4096)\n', (1184, 1196), True, 'import oneflow.nn as nn\n'), ((1210, 1231), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1217, 1231), True, 'import oneflow.nn as nn\n'), ((1245, 1273), 'oneflow.nn.Linear', 'nn.Linear', (['(4096)', 'num_classes'], {}), '(4096, num_classes)\n', (1254, 1273), True, 'import oneflow.nn as nn\n'), ((1908, 1929), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1915, 1929), True, 'import oneflow.nn as nn\n'), ((1943, 1980), 
'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (1955, 1980), True, 'import oneflow.nn as nn\n'), ((2236, 2257), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2243, 2257), True, 'import oneflow.nn as nn\n'), ((2271, 2308), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (2283, 2308), True, 'import oneflow.nn as nn\n'), ((2564, 2585), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2571, 2585), True, 'import oneflow.nn as nn\n'), ((2841, 2862), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2848, 2862), True, 'import oneflow.nn as nn\n'), ((3119, 3140), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3126, 3140), True, 'import oneflow.nn as nn\n'), ((3154, 3191), 'oneflow.nn.MaxPool2d', 'nn.MaxPool2d', ([], {'kernel_size': '(3)', 'stride': '(2)'}), '(kernel_size=3, stride=2)\n', (3166, 3191), True, 'import oneflow.nn as nn\n'), ((3312, 3324), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (3322, 3324), True, 'import oneflow.nn as nn\n'), ((3586, 3607), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3593, 3607), True, 'import oneflow.nn as nn\n'), ((3621, 3633), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {}), '()\n', (3631, 3633), True, 'import oneflow.nn as nn\n'), ((3895, 3916), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3902, 3916), True, 'import oneflow.nn as nn\n')] |
import sys
import time
import logging
import yaml
import os
sys.path.insert(0, "../../")
import oneflow as of
from oneflow.utils.data import DataLoader
import oneflow.optim as optim
from oneflow.optim import lr_scheduler
import oneflow.distributed as dist
import libs.dataio.dataset as dataset
from libs.utils.utils import read_config
from libs.utils.config_parser import ArgParser
from libs.components import loss
class BaseTrainer(object):
    """Skeleton trainer for speaker/ASR-style experiments.

    Subclasses supply the model, data loader, loss and optimizer via the
    ``build_*`` hooks; this class wires them together and drives the
    train / checkpoint / resume loop. Calling the instance (``trainer()``)
    snapshots the configs under ``exp/<log_time>/conf`` and starts training.
    """

    def __init__(self):
        # Hook order matters: the model must exist before it is moved to
        # the device and before the optimizer is built over its parameters.
        self.build_model()
        self._move_to_device()
        self.build_dataloader()
        self.build_criterion()
        self.build_optimizer()
        # Resume from a saved checkpoint if the configured path exists.
        if os.path.exists(self.train_opts['resume']):
            self.load(self.train_opts['resume'])

    def build_model(self):
        """Subclass hook. Must set ``self.embedding_dim`` and ``self.model``."""
        raise NotImplementedError("Please implement this function by yourself!")

    def build_criterion(self):
        """Subclass hook. Must set ``self.criterion``."""
        raise NotImplementedError("Please implement this function by yourself!")

    def build_optimizer(self):
        """Subclass hook. Must set ``self.optim`` and ``self.lr_scheduler``."""
        raise NotImplementedError("Please implement this function by yourself!")

    def build_dataloader(self):
        """Subclass hook. Must set ``self.trainloader``."""
        raise NotImplementedError("Please implement this function by yourself!")

    def train_epoch(self):
        """Subclass hook. Train one epoch; return -1 to stop training early."""
        raise NotImplementedError("Please implement this function by yourself!")

    def _dev(self):
        """Subclass hook. Evaluate on the dev set."""
        raise NotImplementedError("Please implement this function by yourself!")

    def _move_to_device(self):
        """Move the model onto a single CUDA device and record device info."""
        # Single-GPU only: the DataParallel / multi-GPU path was commented
        # out in the original and is intentionally not restored here.
        self.device = of.device('cuda')
        device_ids = [0]
        device_num = 1
        self.model.to(self.device)
        logging.info("Using GPU: {}".format(device_ids))
        self.device_num = device_num

    def model_average(self, avg_num=4):
        """Average the weights of the last ``avg_num`` epoch checkpoints.

        The averaged state dict is saved as ``net_avg.pth`` (together with
        the optimizer state of the last checkpoint read) and loaded into
        the current model.
        """
        model_state_dict = {}
        ckpt = None
        for i in range(avg_num):
            suffix = self.current_epoch - i
            ckpt = of.load('exp/{}/net_{}.pth'.format(self.log_time, suffix))
            state_dict = ckpt['state_dict']
            for k, v in state_dict.items():
                if k in model_state_dict:
                    model_state_dict[k] += v
                else:
                    model_state_dict[k] = v
        for k, v in model_state_dict.items():
            model_state_dict[k] = v / avg_num
        of.save({'epoch': 0, 'state_dict': model_state_dict,
                 'optimizer': ckpt['optimizer']},
                'exp/{}/net_avg.pth'.format(self.log_time))
        self.model.load_state_dict(model_state_dict)

    def save(self, filename=None):
        """Persist a full training checkpoint under ``exp/<log_time>/``.

        Defaults to ``net_<epoch>.pth`` when no filename is given.
        """
        # NOTE(review): if the model is ever wrapped in DataParallel again,
        # save ``self.model.module`` instead.
        model = self.model
        ckpt = {
            'epoch': self.current_epoch,
            'state_dict': model.state_dict(),
            'criterion': self.criterion.state_dict(),
            'lr_scheduler': self.lr_scheduler.state_dict(),
            'optimizer': self.optim.state_dict(),
        }
        if filename is None:
            filename = 'net_{}.pth'.format(self.current_epoch)
        of.save(ckpt, 'exp/{}/{}'.format(self.log_time, filename))

    def load(self, resume):
        """Restore model/criterion/scheduler/optimizer state from ``resume``."""
        ckpt = of.load(resume)
        # The original branched on train_opts['device'] here with two
        # identical bodies; the state dict loads the same way either way.
        self.model.load_state_dict(ckpt['state_dict'])
        if 'criterion' in ckpt and isinstance(ckpt['criterion'], dict):
            self.criterion.load_state_dict(ckpt['criterion'])
        else:
            # Older checkpoints stored the criterion object itself.
            self.criterion = ckpt['criterion']
        if 'lr_scheduler' in ckpt:
            self.lr_scheduler.load_state_dict(ckpt['lr_scheduler'])
        self.optim.load_state_dict(ckpt['optimizer'])
        self.current_epoch = ckpt['epoch']

    def train(self):
        """Run the epoch loop from the current epoch, checkpointing each epoch.

        Stops early when :meth:`train_epoch` returns -1.
        """
        start_epoch = self.current_epoch
        self.best_dev_epoch = self.current_epoch
        self.best_dev_loss = 1000
        self.count = 0
        self.dev_check_count = 0
        for epoch in range(start_epoch + 1, self.epoch + 1):
            self.current_epoch = epoch
            logging.info("Epoch {}".format(self.current_epoch))
            stop = self.train_epoch()
            self.save()
            if stop == -1:
                break

    def _reset_opts(self, module, opts):
        """Override entries of one of the option dicts in place.

        ``module`` selects the target ('data', 'model' or 'train');
        unknown module names are silently ignored, matching the
        original behavior.
        """
        attr = {'data': 'data_opts', 'model': 'model_opts', 'train': 'train_opts'}.get(module)
        if attr is not None:
            getattr(self, attr).update(opts)

    def __call__(self):
        """Snapshot the run's configs, then start training."""
        os.makedirs('exp/{}/conf'.format(self.log_time), exist_ok=True)
        # Write each config once; existing snapshots are never overwritten.
        for name, opts in (('data', self.data_opts),
                           ('model', self.model_opts),
                           ('train', self.train_opts)):
            path = 'exp/{}/conf/{}.yaml'.format(self.log_time, name)
            if not os.path.exists(path):
                with open(path, 'w') as f:
                    yaml.dump(opts, f)
        logging.info("start training")
        self.train()
if __name__ == "__main__":
    # Script entry point: parse CLI args, load the three config files and
    # construct a trainer.
    def main():
        parser = ArgParser()
        args = parser.parse_args()
        args = vars(args)  # argparse Namespace -> plain dict
        data_config = read_config("../../conf/data.yaml")
        model_config = read_config("../../conf/model.yaml")
        train_config = read_config("../../conf/train.yaml")
        # NOTE(review): ``NNetTrainer`` is neither defined nor imported in
        # this file, so running the script as-is raises NameError; also the
        # trainer is constructed but never invoked — confirm intent.
        trainer = NNetTrainer(data_config, model_config, train_config, args)
    main()
| [
"oneflow.load",
"oneflow.device"
] | [((60, 88), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../"""'], {}), "(0, '../../')\n", (75, 88), False, 'import sys\n'), ((795, 836), 'os.path.exists', 'os.path.exists', (["self.train_opts['resume']"], {}), "(self.train_opts['resume'])\n", (809, 836), False, 'import os\n'), ((2455, 2472), 'oneflow.device', 'of.device', (['"""cuda"""'], {}), "('cuda')\n", (2464, 2472), True, 'import oneflow as of\n'), ((4826, 4841), 'oneflow.load', 'of.load', (['resume'], {}), '(resume)\n', (4833, 4841), True, 'import oneflow as of\n'), ((6982, 7012), 'logging.info', 'logging.info', (['"""start training"""'], {}), "('start training')\n", (6994, 7012), False, 'import logging\n'), ((7095, 7106), 'libs.utils.config_parser.ArgParser', 'ArgParser', ([], {}), '()\n', (7104, 7106), False, 'from libs.utils.config_parser import ArgParser\n'), ((7190, 7225), 'libs.utils.utils.read_config', 'read_config', (['"""../../conf/data.yaml"""'], {}), "('../../conf/data.yaml')\n", (7201, 7225), False, 'from libs.utils.utils import read_config\n'), ((7249, 7285), 'libs.utils.utils.read_config', 'read_config', (['"""../../conf/model.yaml"""'], {}), "('../../conf/model.yaml')\n", (7260, 7285), False, 'from libs.utils.utils import read_config\n'), ((7309, 7345), 'libs.utils.utils.read_config', 'read_config', (['"""../../conf/train.yaml"""'], {}), "('../../conf/train.yaml')\n", (7320, 7345), False, 'from libs.utils.utils import read_config\n'), ((6533, 6561), 'yaml.dump', 'yaml.dump', (['self.data_opts', 'f'], {}), '(self.data_opts, f)\n', (6542, 6561), False, 'import yaml\n'), ((6738, 6767), 'yaml.dump', 'yaml.dump', (['self.model_opts', 'f'], {}), '(self.model_opts, f)\n', (6747, 6767), False, 'import yaml\n'), ((6944, 6973), 'yaml.dump', 'yaml.dump', (['self.train_opts', 'f'], {}), '(self.train_opts, f)\n', (6953, 6973), False, 'import yaml\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.compatible import single_client as flow
import tensorflow as tf
from test_util import (
Args,
CompareOpWithTensorFlow,
GenArgDict,
test_global_storage,
type_name_to_flow_type,
type_name_to_np_type,
)
from oneflow.compatible.single_client import typing as oft
# Enable on-demand GPU memory growth so TensorFlow does not pre-allocate
# the whole device up front.
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def _test_element_wise_mul_fw_bw(test_case, device, shape, type_name):
    """Check forward and backward of ``flow.math.multiply`` for one dtype.

    For out = x * y the product rule gives d(out)/dx = y and d(out)/dy = x,
    so the captured gradients must satisfy x_diff == out_diff * y and
    y_diff == out_diff * x; the assertions at the bottom verify exactly that.
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    np_type = type_name_to_np_type[type_name]
    flow_type = type_name_to_flow_type[type_name]
    @flow.global_function(type="train", function_config=func_config)
    def test_element_wise_mul_job(
        x: oft.Numpy.Placeholder(shape, dtype=flow.float),
        y: oft.Numpy.Placeholder(shape, dtype=flow.float),
    ):
        with flow.scope.placement(device, "0:0"):
            # Adding zero-initialized trainable variables makes x and y
            # depend on optimizer state so gradients flow back to them.
            x += flow.get_variable(
                name="vx",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            y += flow.get_variable(
                name="vy",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            # Multiply in the dtype under test, then cast back to float
            # for the optimizer.
            x = flow.cast(x, dtype=flow_type)
            y = flow.cast(y, dtype=flow_type)
            out = flow.math.multiply(x, y)
            out = flow.cast(out, dtype=flow.float)
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
        ).minimize(out)
        # Capture tensors and their gradients for the checks below.
        flow.watch(x, test_global_storage.Setter("x"))
        flow.watch_diff(x, test_global_storage.Setter("x_diff"))
        flow.watch(y, test_global_storage.Setter("y"))
        flow.watch_diff(y, test_global_storage.Setter("y_diff"))
        flow.watch(out, test_global_storage.Setter("out"))
        flow.watch_diff(out, test_global_storage.Setter("out_diff"))
        return out
    # Small non-negative integer inputs (as float32) keep the products exact.
    x = np.random.randint(low=0, high=10, size=shape).astype(np.float32)
    y = np.random.randint(low=0, high=10, size=shape).astype(np.float32)
    test_element_wise_mul_job(x, y).get()
    test_case.assertTrue(
        np.allclose(
            test_global_storage.Get("x") * test_global_storage.Get("y"),
            test_global_storage.Get("out"),
        )
    )
    test_case.assertTrue(
        np.allclose(
            test_global_storage.Get("out_diff") * test_global_storage.Get("x"),
            test_global_storage.Get("y_diff"),
        )
    )
    test_case.assertTrue(
        np.allclose(
            test_global_storage.Get("out_diff") * test_global_storage.Get("y"),
            test_global_storage.Get("x_diff"),
        )
    )
@flow.unittest.skip_unless_1n1d()
class TestMultiply(flow.unittest.TestCase):
    """Tests for scalar and element-wise multiply on CPU and GPU."""

    def test_scalar_mul(test_case):
        # Compare flow.math.multiply against tf.math.multiply over the
        # cartesian product of these argument lists.
        scalar_args = [
            Args([1]),
            Args([-1]),
            Args([84223.19348]),
            Args([-3284.139]),
        ]
        arg_dict = OrderedDict(
            [
                ("device_type", ["gpu", "cpu"]),
                ("flow_op", [flow.math.multiply]),
                ("tf_op", [tf.math.multiply]),
                ("input_shape", [(10, 10, 10)]),
                ("op_args", scalar_args),
            ]
        )
        for combo in GenArgDict(arg_dict):
            CompareOpWithTensorFlow(**combo)

    def test_element_wise_mul_fw_bw(test_case):
        # Exercise forward/backward for several dtypes on both devices.
        arg_dict = OrderedDict(
            [
                ("device", ["gpu", "cpu"]),
                ("shape", [(96, 96)]),
                ("type_name", ["float32", "double", "int8", "int32", "int64"]),
            ]
        )
        for combo in GenArgDict(arg_dict):
            _test_element_wise_mul_fw_bw(test_case, **combo)
if __name__ == "__main__":
    # Discover and run the test cases above when executed as a script.
    unittest.main()
| [
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.cast",
"oneflow.compatible.single_client.optimizer.PiecewiseCon... | [((966, 1017), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (1010, 1017), True, 'import tensorflow as tf\n'), ((3542, 3574), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3572, 3574), True, 'from oneflow.compatible import single_client as flow\n'), ((1039, 1090), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (1079, 1090), True, 'import tensorflow as tf\n'), ((1168, 1196), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1194, 1196), True, 'from oneflow.compatible import single_client as flow\n'), ((1215, 1236), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1234, 1236), True, 'from oneflow.compatible import single_client as flow\n'), ((1386, 1449), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1406, 1449), True, 'from oneflow.compatible import single_client as flow\n'), ((4499, 4514), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4512, 4514), False, 'import unittest\n'), ((3674, 3687), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3685, 3687), False, 'from collections import OrderedDict\n'), ((4056, 4076), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (4066, 4076), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((4189, 4202), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4200, 4202), False, 'from collections import OrderedDict\n'), ((4385, 4405), 
'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (4395, 4405), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((1496, 1542), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['shape'], {'dtype': 'flow.float'}), '(shape, dtype=flow.float)\n', (1517, 1542), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1555, 1601), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['shape'], {'dtype': 'flow.float'}), '(shape, dtype=flow.float)\n', (1576, 1601), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1623, 1658), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (1643, 1658), True, 'from oneflow.compatible import single_client as flow\n'), ((2062, 2091), 'oneflow.compatible.single_client.cast', 'flow.cast', (['x'], {'dtype': 'flow_type'}), '(x, dtype=flow_type)\n', (2071, 2091), True, 'from oneflow.compatible import single_client as flow\n'), ((2108, 2137), 'oneflow.compatible.single_client.cast', 'flow.cast', (['y'], {'dtype': 'flow_type'}), '(y, dtype=flow_type)\n', (2117, 2137), True, 'from oneflow.compatible import single_client as flow\n'), ((2156, 2180), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['x', 'y'], {}), '(x, y)\n', (2174, 2180), True, 'from oneflow.compatible import single_client as flow\n'), ((2199, 2231), 'oneflow.compatible.single_client.cast', 'flow.cast', (['out'], {'dtype': 'flow.float'}), '(out, dtype=flow.float)\n', (2208, 2231), True, 'from oneflow.compatible import single_client as flow\n'), ((2799, 2844), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': 'shape'}), '(low=0, high=10, size=shape)\n', (2816, 2844), True, 'import numpy as np\n'), ((2872, 2917), 
'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(10)', 'size': 'shape'}), '(low=0, high=10, size=shape)\n', (2889, 2917), True, 'import numpy as np\n'), ((3111, 3141), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""out"""'], {}), "('out')\n", (3134, 3141), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3298, 3331), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""y_diff"""'], {}), "('y_diff')\n", (3321, 3331), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3488, 3521), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (3511, 3521), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3928, 3937), 'test_util.Args', 'Args', (['[1]'], {}), '([1])\n', (3932, 3937), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3951, 3961), 'test_util.Args', 'Args', (['[-1]'], {}), '([-1])\n', (3955, 3961), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3975, 3994), 'test_util.Args', 'Args', (['[84223.19348]'], {}), '([84223.19348])\n', (3979, 3994), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((4008, 4025), 'test_util.Args', 'Args', (['[-3284.139]'], {}), '([-3284.139])\n', (4012, 4025), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((4090, 4120), 'test_util.CompareOpWithTensorFlow', 
'CompareOpWithTensorFlow', ([], {}), '(**arg)\n', (4113, 4120), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2401, 2432), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (2427, 2432), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2465, 2501), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2491, 2501), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2529, 2560), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""y"""'], {}), "('y')\n", (2555, 2560), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2593, 2629), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""y_diff"""'], {}), "('y_diff')\n", (2619, 2629), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2659, 2692), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""out"""'], {}), "('out')\n", (2685, 2692), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2727, 2765), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""out_diff"""'], {}), "('out_diff')\n", (2753, 2765), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3038, 3066), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (3061, 
3066), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3069, 3097), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""y"""'], {}), "('y')\n", (3092, 3097), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3218, 3253), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""out_diff"""'], {}), "('out_diff')\n", (3241, 3253), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3256, 3284), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (3279, 3284), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3408, 3443), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""out_diff"""'], {}), "('out_diff')\n", (3431, 3443), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3446, 3474), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""y"""'], {}), "('y')\n", (3469, 3474), False, 'from test_util import Args, CompareOpWithTensorFlow, GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((1813, 1837), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1835, 1837), True, 'from oneflow.compatible import single_client as flow\n'), ((2006, 2030), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2028, 2030), True, 'from oneflow.compatible import single_client as flow\n'), ((2280, 2335), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 
'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (2321, 2335), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Modified from https://github.com/clovaai/rexnet/blob/master/rexnetv1_lite.py
"""
import oneflow as flow
import oneflow.nn as nn
from .utils import load_state_dict_from_url
from .registry import ModelCreator
from .helpers import make_divisible
# Download locations for the pretrained checkpoints, keyed by architecture
# name — the same keys that the factory functions below pass to
# `_create_rexnet_lite` when `pretrained=True`.
model_urls = {
    "rexnet_lite_1_0": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/RexNet/rexnet_lite_1_0.zip",
    "rexnet_lite_1_3": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/RexNet/rexnet_lite_1_3.zip",
    "rexnet_lite_1_5": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/RexNet/rexnet_lite_1_5.zip",
    "rexnet_lite_2_0": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/RexNet/rexnet_lite_2_0.zip",
}
def _add_conv(
out,
in_channels,
channels,
kernel=1,
stride=1,
pad=0,
num_group=1,
active=True,
relu6=True,
bn_momentum=0.1,
bn_eps=1e-5,
):
out.append(
nn.Conv2d(
in_channels, channels, kernel, stride, pad, groups=num_group, bias=False
)
)
out.append(nn.BatchNorm2d(channels, momentum=bn_momentum, eps=bn_eps))
if active:
out.append(nn.ReLU6(inplace=True) if relu6 else nn.ReLU(inplace=True))
class LinearBottleneck(nn.Module):
    """Inverted-residual block used by ReXNet-lite.

    Layout: optional pointwise expansion (when ``t != 1``), a depthwise
    convolution, then a linear (no activation) pointwise projection. When the
    stride is 1 and the input width does not exceed the output width, the
    input is added onto the first ``in_channels`` output channels as a
    partial residual connection.
    """

    def __init__(
        self,
        in_channels,
        channels,
        t,
        kernel_size=3,
        stride=1,
        bn_momentum=0.1,
        bn_eps=1e-5,
        **kwargs
    ):
        super(LinearBottleneck, self).__init__(**kwargs)
        self.conv_shortcut = None
        self.use_shortcut = stride == 1 and in_channels <= channels
        self.in_channels = in_channels
        self.out_channels = channels
        layers = []
        dw_channels = in_channels if t == 1 else in_channels * t
        if t != 1:
            # Pointwise expansion to t * in_channels before the depthwise conv.
            _add_conv(
                layers,
                in_channels=in_channels,
                channels=dw_channels,
                bn_momentum=bn_momentum,
                bn_eps=bn_eps,
            )
        # Depthwise convolution (one group per channel).
        _add_conv(
            layers,
            in_channels=dw_channels,
            channels=dw_channels,
            kernel=kernel_size,
            stride=stride,
            pad=kernel_size // 2,
            num_group=dw_channels,
            bn_momentum=bn_momentum,
            bn_eps=bn_eps,
        )
        # Linear pointwise projection — no activation on purpose.
        _add_conv(
            layers,
            in_channels=dw_channels,
            channels=channels,
            active=False,
            bn_momentum=bn_momentum,
            bn_eps=bn_eps,
        )
        self.out = nn.Sequential(*layers)

    def forward(self, x):
        result = self.out(x)
        if self.use_shortcut:
            # Partial residual: only the leading in_channels channels get x.
            result[:, 0 : self.in_channels] += x
        return result
class ReXNetV1_lite(nn.Module):
    """ReXNet-lite backbone with a linearly growing channel schedule.

    The network is a strided stem convolution, a stack of
    ``LinearBottleneck`` blocks whose planned widths grow linearly from
    ``input_ch`` by ``final_ch / (num_blocks - 1)`` per block, and a
    1x1-convolution classification head.

    Args:
        fix_head_stem (bool): if True, keep stem/head widths fixed instead of
            scaling them with ``multiplier``.
        divisible_value (int): round every channel count to a multiple of this.
        input_ch (int): base width of the first bottleneck.
        final_ch (int): total width growth spread across the bottlenecks.
        multiplier (float): global width multiplier.
        classes (int): number of output classes.
        dropout_ratio (float): dropout probability in the head.
        bn_momentum (float): momentum for all BatchNorm layers.
        bn_eps (float): epsilon for all BatchNorm layers.
        kernel_conf (str): one digit per stage giving that stage's depthwise
            kernel size (e.g. "333333").
    """
    def __init__(
        self,
        fix_head_stem=False,
        divisible_value=8,
        input_ch=16,
        final_ch=164,
        multiplier=1.0,
        classes=1000,
        dropout_ratio=0.2,
        bn_momentum=0.1,
        bn_eps=1e-5,
        kernel_conf="333333",
    ):
        super(ReXNetV1_lite, self).__init__()
        # Number of bottleneck blocks per stage and the stride of each
        # stage's first block.
        layers = [1, 2, 2, 3, 3, 5]
        strides = [1, 2, 2, 2, 1, 2]
        kernel_sizes = [int(element) for element in kernel_conf]
        # Expand the per-stage strides to per-block strides: only the first
        # block of a stage uses the stage stride, the rest use 1.
        strides = sum(
            [
                [element] + [1] * (layers[idx] - 1)
                for idx, element in enumerate(strides)
            ],
            [],
        )
        # Expansion ratio t: 1 for the first stage, 6 for all later blocks.
        ts = [1] * layers[0] + [6] * sum(layers[1:])
        # Expand per-stage kernel sizes to per-block kernel sizes.
        kernel_sizes = sum(
            [[element] * layers[idx] for idx, element in enumerate(kernel_sizes)], []
        )
        self.num_convblocks = sum(layers[:])
        features = []
        # Pre-divide so that the later `round(inplanes * multiplier)` lands
        # back near input_ch when the multiplier shrinks the network.
        inplanes = input_ch / multiplier if multiplier < 1.0 else input_ch
        first_channel = 32 / multiplier if multiplier < 1.0 or fix_head_stem else 32
        first_channel = make_divisible(
            int(round(first_channel * multiplier)), divisible_value
        )
        in_channels_group = []
        channels_group = []
        # Stem: 3x3 stride-2 convolution from RGB to first_channel.
        _add_conv(
            features,
            3,
            first_channel,
            kernel=3,
            stride=2,
            pad=1,
            bn_momentum=bn_momentum,
            bn_eps=bn_eps,
        )
        # Plan the (in_channels, out_channels) pair of every bottleneck,
        # growing the planned width linearly toward final_ch.
        for i in range(self.num_convblocks):
            inplanes_divisible = make_divisible(
                int(round(inplanes * multiplier)), divisible_value
            )
            if i == 0:
                in_channels_group.append(first_channel)
                channels_group.append(inplanes_divisible)
            else:
                in_channels_group.append(inplanes_divisible)
                # Note: `1 * 1.0` evaluates to 1.0, so the divisor is
                # num_convblocks - 1 (growth spread over the remaining blocks).
                inplanes += final_ch / (self.num_convblocks - 1 * 1.0)
                inplanes_divisible = make_divisible(
                    int(round(inplanes * multiplier)), divisible_value
                )
                channels_group.append(inplanes_divisible)
        for block_idx, (in_c, c, t, k, s) in enumerate(
            zip(in_channels_group, channels_group, ts, kernel_sizes, strides)
        ):
            features.append(
                LinearBottleneck(
                    in_channels=in_c,
                    channels=c,
                    t=t,
                    kernel_size=k,
                    stride=s,
                    bn_momentum=bn_momentum,
                    bn_eps=bn_eps,
                )
            )
        pen_channels = (
            int(1280 * multiplier) if multiplier > 1 and not fix_head_stem else 1280
        )
        # `c` is deliberately reused from the loop above: it is the output
        # width of the last bottleneck feeding this penultimate conv.
        _add_conv(features, c, pen_channels, bn_momentum=bn_momentum, bn_eps=bn_eps)
        self.features = nn.Sequential(*features)
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        # Classification head implemented with 1x1 convolutions on the
        # pooled 1x1 feature map.
        self.output = nn.Sequential(
            nn.Conv2d(pen_channels, 1024, 1, bias=True),
            nn.BatchNorm2d(1024, momentum=bn_momentum, eps=bn_eps),
            nn.ReLU6(inplace=True),
            nn.Dropout(dropout_ratio),
            nn.Conv2d(1024, classes, 1, bias=True),
        )
    def forward(self, x):
        # Backbone -> global average pool -> conv head, flattened to (N, classes).
        x = self.features(x)
        x = self.avgpool(x)
        x = self.output(x).flatten(1)
        return x
def _create_rexnet_lite(arch, pretrained=False, progress=True, **model_kwargs):
    """Instantiate a ReXNet-lite model and optionally load pretrained weights.

    ``arch`` selects the checkpoint URL in ``model_urls``; ``model_kwargs``
    are forwarded verbatim to ``ReXNetV1_lite``.
    """
    net = ReXNetV1_lite(**model_kwargs)
    if not pretrained:
        return net
    checkpoint = load_state_dict_from_url(model_urls[arch], progress=progress)
    net.load_state_dict(checkpoint)
    return net
@ModelCreator.register_model
def rexnet_lite_1_0(pretrained=False, progress=True, **kwargs):
    """
    ReXNet-lite with a width multiplier of 1.0.
    .. note::
        ReXNet-lite model with width multiplier of 1.0 from the `Rethinking Channel Dimensions for Efficient Model Design <https://arxiv.org/pdf/2007.00992.pdf>`_ paper.
    Args:
        pretrained (bool): If ``True``, downloads weights pre-trained on ImageNet. Default: ``False``
        progress (bool): If ``True``, shows a download progress bar on stderr. Default: ``True``
    For example:
    .. code-block:: python
        >>> import flowvision
        >>> rexnet_lite_1_0 = flowvision.models.rexnet_lite_1_0(pretrained=False, progress=True)
    """
    return _create_rexnet_lite(
        "rexnet_lite_1_0",
        pretrained=pretrained,
        progress=progress,
        multiplier=1.0,
        **kwargs,
    )
@ModelCreator.register_model
def rexnet_lite_1_3(pretrained=False, progress=True, **kwargs):
    """
    ReXNet-lite with a width multiplier of 1.3.
    .. note::
        ReXNet-lite model with width multiplier of 1.3 from the `Rethinking Channel Dimensions for Efficient Model Design <https://arxiv.org/pdf/2007.00992.pdf>`_ paper.
    Args:
        pretrained (bool): If ``True``, downloads weights pre-trained on ImageNet. Default: ``False``
        progress (bool): If ``True``, shows a download progress bar on stderr. Default: ``True``
    For example:
    .. code-block:: python
        >>> import flowvision
        >>> rexnet_lite_1_3 = flowvision.models.rexnet_lite_1_3(pretrained=False, progress=True)
    """
    return _create_rexnet_lite(
        "rexnet_lite_1_3",
        pretrained=pretrained,
        progress=progress,
        multiplier=1.3,
        **kwargs,
    )
@ModelCreator.register_model
def rexnet_lite_1_5(pretrained=False, progress=True, **kwargs):
    """
    ReXNet-lite with a width multiplier of 1.5.
    .. note::
        ReXNet-lite model with width multiplier of 1.5 from the `Rethinking Channel Dimensions for Efficient Model Design <https://arxiv.org/pdf/2007.00992.pdf>`_ paper.
    Args:
        pretrained (bool): If ``True``, downloads weights pre-trained on ImageNet. Default: ``False``
        progress (bool): If ``True``, shows a download progress bar on stderr. Default: ``True``
    For example:
    .. code-block:: python
        >>> import flowvision
        >>> rexnet_lite_1_5 = flowvision.models.rexnet_lite_1_5(pretrained=False, progress=True)
    """
    return _create_rexnet_lite(
        "rexnet_lite_1_5",
        pretrained=pretrained,
        progress=progress,
        multiplier=1.5,
        **kwargs,
    )
@ModelCreator.register_model
def rexnet_lite_2_0(pretrained=False, progress=True, **kwargs):
    """
    ReXNet-lite with a width multiplier of 2.0.
    .. note::
        ReXNet-lite model with width multiplier of 2.0 from the `Rethinking Channel Dimensions for Efficient Model Design <https://arxiv.org/pdf/2007.00992.pdf>`_ paper.
    Args:
        pretrained (bool): If ``True``, downloads weights pre-trained on ImageNet. Default: ``False``
        progress (bool): If ``True``, shows a download progress bar on stderr. Default: ``True``
    For example:
    .. code-block:: python
        >>> import flowvision
        >>> rexnet_lite_2_0 = flowvision.models.rexnet_lite_2_0(pretrained=False, progress=True)
    """
    return _create_rexnet_lite(
        "rexnet_lite_2_0",
        pretrained=pretrained,
        progress=progress,
        multiplier=2.0,
        **kwargs,
    )
| [
"oneflow.nn.Sequential",
"oneflow.nn.ReLU6",
"oneflow.nn.ReLU",
"oneflow.nn.AdaptiveAvgPool2d",
"oneflow.nn.BatchNorm2d",
"oneflow.nn.Dropout",
"oneflow.nn.Conv2d"
] | [((1038, 1125), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_channels', 'channels', 'kernel', 'stride', 'pad'], {'groups': 'num_group', 'bias': '(False)'}), '(in_channels, channels, kernel, stride, pad, groups=num_group,\n bias=False)\n', (1047, 1125), True, 'import oneflow.nn as nn\n'), ((1165, 1223), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['channels'], {'momentum': 'bn_momentum', 'eps': 'bn_eps'}), '(channels, momentum=bn_momentum, eps=bn_eps)\n', (1179, 1223), True, 'import oneflow.nn as nn\n'), ((2664, 2683), 'oneflow.nn.Sequential', 'nn.Sequential', (['*out'], {}), '(*out)\n', (2677, 2683), True, 'import oneflow.nn as nn\n'), ((5704, 5728), 'oneflow.nn.Sequential', 'nn.Sequential', (['*features'], {}), '(*features)\n', (5717, 5728), True, 'import oneflow.nn as nn\n'), ((5752, 5775), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', (['(1)'], {}), '(1)\n', (5772, 5775), True, 'import oneflow.nn as nn\n'), ((5826, 5869), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['pen_channels', '(1024)', '(1)'], {'bias': '(True)'}), '(pen_channels, 1024, 1, bias=True)\n', (5835, 5869), True, 'import oneflow.nn as nn\n'), ((5883, 5937), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['(1024)'], {'momentum': 'bn_momentum', 'eps': 'bn_eps'}), '(1024, momentum=bn_momentum, eps=bn_eps)\n', (5897, 5937), True, 'import oneflow.nn as nn\n'), ((5951, 5973), 'oneflow.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (5959, 5973), True, 'import oneflow.nn as nn\n'), ((5987, 6012), 'oneflow.nn.Dropout', 'nn.Dropout', (['dropout_ratio'], {}), '(dropout_ratio)\n', (5997, 6012), True, 'import oneflow.nn as nn\n'), ((6026, 6064), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(1024)', 'classes', '(1)'], {'bias': '(True)'}), '(1024, classes, 1, bias=True)\n', (6035, 6064), True, 'import oneflow.nn as nn\n'), ((1259, 1281), 'oneflow.nn.ReLU6', 'nn.ReLU6', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1267, 1281), True, 'import oneflow.nn as nn\n'), ((1296, 1317), 
'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1303, 1317), True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.python.nn.module import Module
from oneflow.compatible.single_client.python.oneflow_export import (
oneflow_export,
experimental_api,
)
@oneflow_export("nn.AdaptiveAvgPool2d")
@experimental_api
class AdaptiveAvgPool2d(Module):
r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
The output is of size H x W, for any input size.
The number of output features is equal to the number of input planes.
Args:
output_size: the target output size of the image of the form H x W.
Can be a tuple (H, W) or a single H for a square image H x H.
H and W can be either a ``int``, or ``None`` which means the size will
be the same as that of the input.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.compatible.single_client.experimental as flow
>>> import oneflow.compatible.single_client.experimental.nn as nn
>>> flow.enable_eager_execution()
>>> m = nn.AdaptiveAvgPool2d((5,7))
>>> input = flow.Tensor(np.random.randn(1, 64, 8, 9))
>>> output = m(input)
>>> output.size()
flow.Size([1, 64, 5, 7])
>>> m = nn.AdaptiveAvgPool2d(7)
>>> input = flow.Tensor(np.random.randn(1, 64, 10, 9))
>>> output = m(input)
>>> output.size()
flow.Size([1, 64, 7, 7])
>>> m = nn.AdaptiveAvgPool2d((None, 7))
>>> input = flow.Tensor(np.random.randn(1, 64, 10, 9))
>>> output = m(input)
>>> output.size()
flow.Size([1, 64, 10, 7])
"""
def __init__(self, output_size) -> None:
super().__init__()
self.output_size = output_size
self._op = (
flow.builtin_op("adaptive_avg_pool2d")
.Input("x")
.Attr("output_size", [])
.Output("y")
.Build()
)
def forward(self, x):
new_output_size = []
assert len(x.shape) == 4
if isinstance(self.output_size, int):
new_output_size.append(self.output_size)
new_output_size.append(self.output_size)
elif isinstance(self.output_size, tuple):
new_output_size = list(self.output_size)
if self.output_size[0] is None:
new_output_size[0] = x.shape[2]
if self.output_size[1] is None:
new_output_size[1] = x.shape[3]
else:
raise NotImplementedError("output_size param wrong, please check!")
new_output_size = tuple(new_output_size)
assert (
new_output_size[0] <= x.shape[2]
), f"output_size param wrong, please check!"
assert (
new_output_size[1] <= x.shape[3]
), f"output_size param wrong, please check!"
return self._op(x, output_size=new_output_size)[0]
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.compatible.single_client.python.oneflow_export.oneflow_export",
"oneflow.compatible.single_client.builtin_op"
] | [((828, 866), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.AdaptiveAvgPool2d"""'], {}), "('nn.AdaptiveAvgPool2d')\n", (842, 866), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, experimental_api\n'), ((3649, 3685), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (3664, 3685), False, 'import doctest\n'), ((2477, 2515), 'oneflow.compatible.single_client.builtin_op', 'flow.builtin_op', (['"""adaptive_avg_pool2d"""'], {}), "('adaptive_avg_pool2d')\n", (2492, 2515), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Callable, Dict, Iterator, List, Union, Tuple
import math
import oneflow as flow
from oneflow.nn.optimizer.optimizer import Optimizer
from oneflow.nn.parameter import Parameter
class LAMB(Optimizer):
    """Implements LAMB algorithm.
    LAMB was proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
    The equation of parameters updating is:
    .. math::
        & V_t = \\beta_1*V_{t-1} + (1-\\beta_1)*grad
        & S_t = \\beta_2*S_{t-1} + (1-\\beta_2)*{grad} \\odot {grad}
        & \\hat{u} = \\frac{{V_t}}{\\sqrt{{S_t}}+\\epsilon}
        & \\hat{r} = learning\\_rate * \\frac{||param_{old}||_2}{||\\hat{u}||_2}
        & param_{new} = param_{old} - \\hat{r} * \\hat{u}
    Args:
        parameters (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
        adam_w_mode (bool, optional): apply L2 regularization or weight decay True for
            decoupled weight decay (also known as AdamW) (default: True)
        do_bias_correction (bool, optional): whether to do bias correction (default: True)
        amsgrad (bool, optional): whether to use the AMSGrad variant of this algorithm.
            NOT SUPPORTED now! (default: False)
    .. _Large Batch Optimization for Deep Learning\\: Training BERT in 76 minutes:
        https://arxiv.org/abs/1904.00962
    For example:
    Example 1:
    .. code-block:: python
        # Assume net is a custom model.
        lamb = flow.optim.LAMB(net.parameters(), lr=1e-3)
        for epoch in range(epochs):
            # Read data, Compute the loss and so on.
            # ...
            loss.backward()
            lamb.step()
            lamb.zero_grad()
    Example 2:
    .. code-block:: python
        # Assume net is a custom model.
        lamb = flow.optim.LAMB(
            [
                {
                    "params": net.parameters(),
                    "lr": learning_rate,
                    "clip_grad_max_norm": 0.5,
                    "clip_grad_norm_type": 2.0,
                }
            ],
        )
        for epoch in range(epochs):
            # Read data, Compute the loss and so on.
            # ...
            loss.backward()
            lamb.clip_grad()
            lamb.step()
            lamb.zero_grad()
    If you want to use clip_grad, you can refer this example.
    For more details of `clip_grad_max_norm` and `clip_grad_norm_type`, you can refer to :func:`oneflow.nn.utils.clip_grad_norm_`.
    """
    def __init__(
        self,
        parameters: Union[Iterator[Parameter], List[Dict]],
        lr: float = 0.001,
        betas: Tuple[float, float] = (0.9, 0.999),
        eps: float = 1e-08,
        weight_decay: float = 0,
        adam_w_mode: bool = True,
        do_bias_correction: bool = True,
        amsgrad: bool = False,
    ):
        if amsgrad:
            # TODO: supported amsgrad in Lamb
            raise RuntimeError("LAMB does not support AMSGrad variant.")
        # Validate hyper-parameters before any optimizer state is allocated.
        assert lr >= 0.0, f"Invalid learning rate: {lr}"
        assert eps >= 0.0, f"Invalid epsilon value: {eps}"
        assert (
            betas[0] >= 0.0 and betas[0] < 1.0
        ), f"Invalid beta parameter at index 0: {betas[0]}"
        assert (
            betas[1] >= 0.0 and betas[1] < 1.0
        ), f"Invalid beta parameter at index 1: {betas[1]}"
        assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
        # Defaults applied to every parameter group; a group dict may
        # override any of them individually.
        options = dict()
        options["lr"] = lr
        options["eps"] = eps
        options["betas"] = betas
        options["weight_decay"] = weight_decay
        options["amsgrad"] = amsgrad
        options["adam_w_mode"] = adam_w_mode
        # Bias-correction factors are recomputed every step (see step());
        # 1.0 means "no correction" until the first step runs.
        options["bias_correction1"] = 1.0
        options["bias_correction2"] = 1.0
        options["do_bias_correction"] = do_bias_correction
        super().__init__(parameters, options)
        for param_group in self.param_groups:
            for param in param_group.parameters:
                assert param.is_leaf, "parameters must be leaf tensor"
                # Per-parameter state (exp_avg / exp_avg_sq) is filled in
                # lazily on the first optimization step.
                self._state[param] = dict()
        # Stateful oneflow op that performs the fused LAMB update.
        self._op = (
            flow.stateful_op("lamb_update")
            .Input("model")
            .Input("model_diff")
            .Input("m")
            .Input("v")
            .Build()
        )
    def step(self, closure: Callable = None):
        """Performs a single optimization step.
        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
        """
        with flow.no_grad():
            loss = None
            if closure is not None:
                loss = closure()
            for param_group in self.param_groups:
                if param_group["do_bias_correction"]:
                    # Adam-style bias correction: 1 - beta^(step + 1),
                    # recomputed from the shared step counter each call.
                    param_group["bias_correction1"] = 1.0 - math.pow(
                        param_group["betas"][0], self._state["step"] + 1
                    )
                    param_group["bias_correction2"] = 1.0 - math.pow(
                        param_group["betas"][1], self._state["step"] + 1
                    )
                kwargs = {
                    "learning_rate": param_group["lr"],
                    "bias_correction1": param_group["bias_correction1"],
                    "bias_correction2": param_group["bias_correction2"],
                    "beta1": param_group["betas"][0],
                    "beta2": param_group["betas"][1],
                    "epsilon": param_group["eps"],
                    "do_bias_correction": param_group["do_bias_correction"],
                }
                if param_group["adam_w_mode"]:
                    # Decoupled weight decay (AdamW style): decay is applied
                    # to the weights directly, not folded into the gradient.
                    kwargs["weight_decay"] = param_group["weight_decay"]
                    kwargs["l2"] = 0.0
                else:
                    # Classic L2 regularization via the gradient.
                    kwargs["l2"] = param_group["weight_decay"]
                    kwargs["weight_decay"] = 0.0
                for param in param_group.parameters:
                    if param.grad is None:
                        continue
                    # Lazily allocate the first/second moment buffers.
                    if "exp_avg" not in self._state[param]:
                        self._state[param]["exp_avg"] = flow.zeros_like(param)
                    if "exp_avg_sq" not in self._state[param]:
                        self._state[param]["exp_avg_sq"] = flow.zeros_like(param)
                    m_tensor = self._state[param]["exp_avg"]
                    v_tensor = self._state[param]["exp_avg_sq"]
                    # Fused in-place LAMB update of param, m and v.
                    flow._C.dispatch_lamb_update(
                        self._op, (param, param.grad, m_tensor, v_tensor), **kwargs
                    )
            self._state["step"] += 1
            return loss
    def _generate_conf_for_graph(self, train_conf, vars_conf):
        # Translate this optimizer's settings into graph-mode (nn.Graph)
        # optimizer protobuf configuration, one conf per parameter group.
        new_opt_confs = []
        for param_group in self.param_groups:
            optimizer_conf = train_conf.mutable_optimizer_conf().Add()
            # Prefer the group's pre-scheduler learning rate when a LR
            # scheduler has already rewritten "lr".
            lr = (
                param_group["initial_lr"]
                if "initial_lr" in param_group
                else param_group["lr"]
            )
            adam_w_mode = param_group["adam_w_mode"]
            weight_decay = param_group["weight_decay"]
            beta1 = param_group["betas"][0]
            beta2 = param_group["betas"][1]
            do_bias_correction = param_group["do_bias_correction"]
            epsilon = param_group["eps"]
            optimizer_conf.set_base_learning_rate(lr)
            optimizer_conf.mutable_lamb_conf().set_beta1(beta1)
            optimizer_conf.mutable_lamb_conf().set_beta2(beta2)
            optimizer_conf.mutable_lamb_conf().set_epsilon(epsilon)
            optimizer_conf.mutable_lamb_conf().set_do_bias_correction(
                do_bias_correction
            )
            self._generate_grad_clip_conf_for_optim_conf(param_group, optimizer_conf)
            if adam_w_mode:
                # Decoupled decay goes through the weight-decay conf.
                optimizer_conf.mutable_weight_decay_conf().set_weight_decay_rate(
                    weight_decay
                )
            else:
                optimizer_conf.mutable_weight_decay_conf().set_weight_decay_rate(0.0)
            for param in param_group.parameters:
                if not adam_w_mode:
                    # Set l2 penalty as weight decay if **NOT** using adam_w_mode
                    vars_conf[param].l2 = weight_decay
                if param.requires_grad:
                    optimizer_conf.add_variable_op_names(vars_conf[param].name)
            new_opt_confs.append(optimizer_conf)
        return new_opt_confs
| [
"oneflow.zeros_like",
"oneflow.stateful_op",
"oneflow._C.dispatch_lamb_update",
"oneflow.no_grad"
] | [((5580, 5594), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (5592, 5594), True, 'import oneflow as flow\n'), ((7460, 7553), 'oneflow._C.dispatch_lamb_update', 'flow._C.dispatch_lamb_update', (['self._op', '(param, param.grad, m_tensor, v_tensor)'], {}), '(self._op, (param, param.grad, m_tensor,\n v_tensor), **kwargs)\n', (7488, 7553), True, 'import oneflow as flow\n'), ((5854, 5912), 'math.pow', 'math.pow', (["param_group['betas'][0]", "(self._state['step'] + 1)"], {}), "(param_group['betas'][0], self._state['step'] + 1)\n", (5862, 5912), False, 'import math\n'), ((6019, 6077), 'math.pow', 'math.pow', (["param_group['betas'][1]", "(self._state['step'] + 1)"], {}), "(param_group['betas'][1], self._state['step'] + 1)\n", (6027, 6077), False, 'import math\n'), ((7146, 7168), 'oneflow.zeros_like', 'flow.zeros_like', (['param'], {}), '(param)\n', (7161, 7168), True, 'import oneflow as flow\n'), ((7291, 7313), 'oneflow.zeros_like', 'flow.zeros_like', (['param'], {}), '(param)\n', (7306, 7313), True, 'import oneflow as flow\n'), ((5156, 5187), 'oneflow.stateful_op', 'flow.stateful_op', (['"""lamb_update"""'], {}), "('lamb_update')\n", (5172, 5187), True, 'import oneflow as flow\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import tempfile
import unittest
import oneflow as flow
import oneflow.unittest
from flowvision.loss.cross_entropy import SoftTargetCrossEntropy
import libai.utils.distributed as dist
from configs.common.models.vit.vit_small_patch16_224 import model
from libai.config import LazyCall, LazyConfig
from libai.data.datasets import CIFAR10Dataset
from libai.engine import DefaultTrainer, hooks
from libai.engine.default import _check_batch_size
from libai.utils.file_utils import get_data_from_cache
from libai.utils.logger import setup_logger
DATA_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/cifar10/cifar-10-python.tar.gz" # noqa
DATA_MD5 = "c58f30108f718f92721af3b95e74349a"
setup_logger(distributed_rank=dist.get_rank())
class TestViTModel(flow.unittest.TestCase):
    """Smoke-tests ViT training on CIFAR-10 under several parallelism layouts."""
    def setUp(self) -> None:
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "vit_data")
        cfg = LazyConfig.load("configs/vit_imagenet.py")
        # set model: shrink the ViT (6 layers, 10 classes) so training is fast
        cfg.model = model
        cfg.model.num_classes = 10
        cfg.model.depth = 6
        cfg.model.loss_func = LazyCall(SoftTargetCrossEntropy)()
        # prepare data path: only local rank 0 downloads; the other ranks
        # wait at the barrier, then read the now-cached archive.
        if dist.get_local_rank() == 0:
            get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
        dist.synchronize()
        data_path = get_data_from_cache(DATA_URL, cache_dir, md5=DATA_MD5)
        # Swap the configured dataset for CIFAR-10 rooted at the cache dir.
        cfg.dataloader.train.dataset[0]._target_ = CIFAR10Dataset
        cfg.dataloader.train.dataset[0].root = "/".join(data_path.split("/")[:-1])
        cfg.dataloader.train.dataset[0].download = True
        cfg.dataloader.train.num_workers = 0
        # refine mixup cfg to match the reduced class count
        cfg.dataloader.train.mixup_func.num_classes = 10
        del cfg.dataloader.test
        # set training config: a 10-iteration run with evaluation disabled
        cfg.train.train_epoch = 0
        cfg.train.train_iter = 10
        cfg.train.eval_period = 1000  # no test now
        cfg.train.log_period = 1
        cfg.train.train_micro_batch_size = 8
        cfg.train.num_accumulation_steps = 1
        cfg.train.resume = False
        # Write checkpoints/logs to a throwaway directory.
        cfg.train.output_dir = tempfile.mkdtemp()
        cfg.train.recompute_grad.enabled = True
        cfg.train.amp.enabled = True
        self.cfg = cfg
    def build_hooks(self):
        # Replacement for DefaultTrainer.build_hooks (patched in below):
        # keeps only the cheap hooks so the smoke test stays fast.
        ret = [
            hooks.IterationTimer(),
            hooks.LRScheduler(),
        ]
        if dist.is_main_process():
            # run writers in the end, so that evaluation metrics are written
            ret.append(hooks.PeriodicWriter(self.build_writers(), self.cfg.train.log_period))
        return ret
    @classmethod
    def test(cls, cfg, test_loaders, model, evaluator=None):
        # Replacement for DefaultTrainer.test (patched in below): skip
        # evaluation entirely and report no metrics.
        # NOTE(review): the name "test" matches unittest's default test-method
        # prefix — verify discovery does not attempt to run it as a test case.
        return {}
    # Monkeypatch DefaultTrainer so every test below uses the lightweight
    # hooks and the no-op evaluation defined above.
    DefaultTrainer.build_hooks = build_hooks
    DefaultTrainer.test = test
    @flow.unittest.skip_unless_1n4d()
    def test_vit_eager_with_data_tensor_parallel(self):
        # set distributed config: 2-way data x 2-way tensor parallel, eager mode
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        # pipeline parallelism not supported in eager global now!
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = False
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
    @flow.unittest.skip_unless_1n4d()
    def test_vit_graph_with_data_tensor_parallel(self):
        # Same 2x2 layout as above, but compiled with nn.Graph.
        self.cfg.train.num_accumulation_steps = 1
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        self.cfg.train.dist.tensor_parallel_size = 2
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
    @flow.unittest.skip_unless_1n4d()
    def test_vit_graph_with_data_tensor_pipeline_parallel(self):
        # Graph mode with pipeline parallelism; accumulation feeds the pipeline.
        self.cfg.train.num_accumulation_steps = 4
        # set distributed config
        self.cfg.train.dist.data_parallel_size = 2
        # change to 2 when 2d sbp bugfix
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 2
        self.cfg.train.dist.pipeline_num_layers = self.cfg.model.depth
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
    @flow.unittest.skip_unless_1n4d()
    @unittest.skip("There are still bugs in ZeRO")
    def test_vit_with_zero(self):
        # set distributed config: pure data parallel so ZeRO can shard states
        self.cfg.train.dist.data_parallel_size = 4
        self.cfg.train.dist.tensor_parallel_size = 1
        self.cfg.train.dist.pipeline_parallel_size = 1
        dist.setup_dist_util(self.cfg.train.dist)
        _check_batch_size(self.cfg)
        self.cfg.graph.enabled = True
        self.cfg.train.zero_optimization.enabled = True
        self.cfg.train.zero_optimization.stage = 3
        trainer = DefaultTrainer(self.cfg)
        trainer.train()
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n4d"
] | [((3457, 3489), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (3487, 3489), True, 'import oneflow as flow\n'), ((4004, 4036), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (4034, 4036), True, 'import oneflow as flow\n'), ((4535, 4567), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (4565, 4567), True, 'import oneflow as flow\n'), ((5186, 5218), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (5216, 5218), True, 'import oneflow as flow\n'), ((5224, 5269), 'unittest.skip', 'unittest.skip', (['"""There are still bugs in ZeRO"""'], {}), "('There are still bugs in ZeRO')\n", (5237, 5269), False, 'import unittest\n'), ((5829, 5844), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5842, 5844), False, 'import unittest\n'), ((1377, 1392), 'libai.utils.distributed.get_rank', 'dist.get_rank', ([], {}), '()\n', (1390, 1392), True, 'import libai.utils.distributed as dist\n'), ((1581, 1623), 'libai.config.LazyConfig.load', 'LazyConfig.load', (['"""configs/vit_imagenet.py"""'], {}), "('configs/vit_imagenet.py')\n", (1596, 1623), False, 'from libai.config import LazyCall, LazyConfig\n'), ((1942, 1960), 'libai.utils.distributed.synchronize', 'dist.synchronize', ([], {}), '()\n', (1958, 1960), True, 'import libai.utils.distributed as dist\n'), ((1982, 2036), 'libai.utils.file_utils.get_data_from_cache', 'get_data_from_cache', (['DATA_URL', 'cache_dir'], {'md5': 'DATA_MD5'}), '(DATA_URL, cache_dir, md5=DATA_MD5)\n', (2001, 2036), False, 'from libai.utils.file_utils import get_data_from_cache\n'), ((2744, 2762), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (2760, 2762), False, 'import tempfile\n'), ((3813, 3854), 'libai.utils.distributed.setup_dist_util', 'dist.setup_dist_util', (['self.cfg.train.dist'], {}), '(self.cfg.train.dist)\n', (3833, 3854), True, 'import libai.utils.distributed as 
dist\n'), ((3863, 3890), 'libai.engine.default._check_batch_size', '_check_batch_size', (['self.cfg'], {}), '(self.cfg)\n', (3880, 3890), False, 'from libai.engine.default import _check_batch_size\n'), ((3949, 3973), 'libai.engine.DefaultTrainer', 'DefaultTrainer', (['self.cfg'], {}), '(self.cfg)\n', (3963, 3973), False, 'from libai.engine import DefaultTrainer, hooks\n'), ((4345, 4386), 'libai.utils.distributed.setup_dist_util', 'dist.setup_dist_util', (['self.cfg.train.dist'], {}), '(self.cfg.train.dist)\n', (4365, 4386), True, 'import libai.utils.distributed as dist\n'), ((4395, 4422), 'libai.engine.default._check_batch_size', '_check_batch_size', (['self.cfg'], {}), '(self.cfg)\n', (4412, 4422), False, 'from libai.engine.default import _check_batch_size\n'), ((4480, 4504), 'libai.engine.DefaultTrainer', 'DefaultTrainer', (['self.cfg'], {}), '(self.cfg)\n', (4494, 4504), False, 'from libai.engine import DefaultTrainer, hooks\n'), ((4996, 5037), 'libai.utils.distributed.setup_dist_util', 'dist.setup_dist_util', (['self.cfg.train.dist'], {}), '(self.cfg.train.dist)\n', (5016, 5037), True, 'import libai.utils.distributed as dist\n'), ((5046, 5073), 'libai.engine.default._check_batch_size', '_check_batch_size', (['self.cfg'], {}), '(self.cfg)\n', (5063, 5073), False, 'from libai.engine.default import _check_batch_size\n'), ((5131, 5155), 'libai.engine.DefaultTrainer', 'DefaultTrainer', (['self.cfg'], {}), '(self.cfg)\n', (5145, 5155), False, 'from libai.engine import DefaultTrainer, hooks\n'), ((5505, 5546), 'libai.utils.distributed.setup_dist_util', 'dist.setup_dist_util', (['self.cfg.train.dist'], {}), '(self.cfg.train.dist)\n', (5525, 5546), True, 'import libai.utils.distributed as dist\n'), ((5555, 5582), 'libai.engine.default._check_batch_size', '_check_batch_size', (['self.cfg'], {}), '(self.cfg)\n', (5572, 5582), False, 'from libai.engine.default import _check_batch_size\n'), ((5747, 5771), 'libai.engine.DefaultTrainer', 'DefaultTrainer', (['self.cfg'], {}), 
'(self.cfg)\n', (5761, 5771), False, 'from libai.engine import DefaultTrainer, hooks\n'), ((1502, 1552), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CACHE_DIR"""', '"""./data_test"""'], {}), "('ONEFLOW_TEST_CACHE_DIR', './data_test')\n", (1511, 1552), False, 'import os\n'), ((1764, 1796), 'libai.config.LazyCall', 'LazyCall', (['SoftTargetCrossEntropy'], {}), '(SoftTargetCrossEntropy)\n', (1772, 1796), False, 'from libai.config import LazyCall, LazyConfig\n'), ((1839, 1860), 'libai.utils.distributed.get_local_rank', 'dist.get_local_rank', ([], {}), '()\n', (1858, 1860), True, 'import libai.utils.distributed as dist\n'), ((1879, 1933), 'libai.utils.file_utils.get_data_from_cache', 'get_data_from_cache', (['DATA_URL', 'cache_dir'], {'md5': 'DATA_MD5'}), '(DATA_URL, cache_dir, md5=DATA_MD5)\n', (1898, 1933), False, 'from libai.utils.file_utils import get_data_from_cache\n'), ((3031, 3053), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (3051, 3053), True, 'import libai.utils.distributed as dist\n'), ((2940, 2962), 'libai.engine.hooks.IterationTimer', 'hooks.IterationTimer', ([], {}), '()\n', (2960, 2962), False, 'from libai.engine import DefaultTrainer, hooks\n'), ((2980, 2999), 'libai.engine.hooks.LRScheduler', 'hooks.LRScheduler', ([], {}), '()\n', (2997, 2999), False, 'from libai.engine import DefaultTrainer, hooks\n')] |
# https://github.com/Kenneth111/TransformerDemo/blob/master/predict_odd_numbers.py
import sys
import argparse
import os
import shutil
import numpy as np
import oneflow as flow
import oneflow.nn as nn
sys.path.append("../")
from model import TransformerModel
# Global switch: when True, to_cuda() moves tensors/models onto the GPU.
TO_CUDA = True
# Command-line hyperparameters for the odd-number prediction demo.
parser = argparse.ArgumentParser()
parser.add_argument("--batch_size", type=int, default=32)
parser.add_argument("--n_epochs", type=int, default=15)
parser.add_argument("--lr", type=float, default=1e-4)  # Adam learning rate
parser.add_argument("--vocab_sz", type=int, default=50000)
# Transformer architecture knobs (see model.TransformerModel).
parser.add_argument("--d_model", type=int, default=512)
parser.add_argument("--dropout", type=float, default=0.1)
parser.add_argument("--n_head", type=int, default=8)
parser.add_argument("--n_encoder_layers", type=int, default=6)
parser.add_argument("--n_decoder_layers", type=int, default=6)
parser.add_argument("--dim_feedforward", type=int, default=1024)
# "." means train from scratch; otherwise load a checkpoint from this dir.
parser.add_argument("--load_dir", type=str, default=".")
parser.add_argument("--save_dir", type=str, default="./best_model")
args = parser.parse_args()
def to_cuda(tensor, flag=TO_CUDA, where="cuda"):
    """Move *tensor* to device *where* when *flag* is set; otherwise return it unchanged."""
    return tensor.to(where) if flag else tensor
def get_numbers(x, y, inp_len=3, out_len=3):
    """Build shuffled, aligned (input, target) training windows.

    Each input row is a length-``inp_len`` sliding window over ``x``; each
    target row is a leading 0 (start token) followed by a length-``out_len``
    window over ``y``.  Both arrays are shuffled with the same permutation so
    corresponding rows stay paired.
    """
    n_in = len(x) - inp_len + 1
    n_out = len(y) - out_len + 1
    data_x = np.array([[x[i + j] for j in range(inp_len)] for i in range(n_in)])
    data_y = np.array([[0] + [y[i + j] for j in range(out_len)] for i in range(n_out)])
    # Shuffle rows of both arrays with one shared index permutation.
    idx = np.arange(len(data_x))
    np.random.shuffle(idx)
    return data_x[idx], data_y[idx]
def shuffle_batch(data, label, batch_size):
    """Shuffle data/label with one shared permutation and pack into int64 batch tensors.

    Returns a pair of tensors shaped (n_batches, batch_size, seq_len); any
    trailing remainder that does not fill a whole batch is dropped.
    """
    order = np.random.permutation(len(data))
    data, label = data[order], label[order]
    n_batches = len(data) // batch_size

    def _to_batches(arr):
        # Slice consecutive full batches out of the shuffled array.
        return np.array(
            [arr[b * batch_size : (b + 1) * batch_size] for b in range(n_batches)],
            dtype=np.int32,
        )

    return (
        flow.tensor(_to_batches(data), dtype=flow.int64),
        flow.tensor(_to_batches(label), dtype=flow.int64),
    )
def train(model, criterion, optimizer, train_x, train_y):
    """Run one training epoch over shuffled batches; returns mean per-batch loss."""
    model.train()
    total_loss = 0
    train_x, train_y = shuffle_batch(train_x, train_y, args.batch_size)
    for src, tgt in zip(train_x, train_y):
        # Batches arrive batch-major; the model wants sequence-major.
        src = to_cuda(src.transpose(1, 0))
        tgt = to_cuda(tgt.transpose(1, 0))
        tgt_len = tgt.shape[0]
        # Teacher forcing: feed target without its last token, score against
        # the target shifted left by one.
        output = model(src, tgt[: tgt_len - 1, :])
        loss = criterion(output.permute(1, 2, 0), tgt[1:, :].permute(1, 0))
        loss.backward()
        optimizer.step()
        total_loss += loss.numpy()
        optimizer.zero_grad()
    return total_loss / train_x.shape[0]
def validation(model, criterion, val_x, val_y):
    """Evaluate one epoch without gradients; returns mean per-batch loss."""
    model.eval()
    total_loss = 0
    val_x, val_y = shuffle_batch(val_x, val_y, args.batch_size)
    with flow.no_grad():
        for src, tgt in zip(val_x, val_y):
            # Batch-major -> sequence-major, same layout as in train().
            src = to_cuda(src.transpose(1, 0))
            tgt = to_cuda(tgt.transpose(1, 0))
            tgt_len = tgt.shape[0]
            output = model(src, tgt[: tgt_len - 1, :])
            loss = criterion(output.permute(1, 2, 0), tgt[1:, :].permute(1, 0))
            total_loss += loss.numpy()
    return total_loss / val_x.shape[0]
def test(model, max_len=3, test_times=1, display=False):
    """Greedy-decode random even-number sequences; returns exact-match accuracy.

    Each trial picks a random start, feeds [start*2, ...] as the source and
    decodes token by token, comparing against the expected odd successors.
    """
    model.eval()
    successes = []
    with flow.no_grad():
        for _ in range(test_times):
            start = np.random.randint(1, 4998)
            cpu_src = [(start + j) * 2 for j in range(max_len)]
            src = to_cuda(flow.Tensor(cpu_src, dtype=flow.int64).unsqueeze(1))
            # Target begins with the 0 start token, then the odd numbers.
            tgt = [0] + [(start + j) * 2 + 1 for j in range(max_len)]
            pred = [0]
            correct = 1
            for j in range(max_len):
                inp = to_cuda(flow.Tensor(pred, dtype=flow.int64).unsqueeze(1))
                output = model(src, inp)
                pred.append(output.argmax(2)[-1].numpy()[0])
                if pred[j + 1] != tgt[j + 1]:
                    correct = 0
            successes.append(correct)
            if display:
                print("input: ", cpu_src)
                print("target: ", tgt)
                print("predict: ", pred)
    return sum(successes) / test_times
def main():
    """Train the transformer to map even numbers to their odd successors.

    Checkpoints the model to ``args.save_dir`` whenever the training loss
    improves, and prints greedy-decoding accuracy every third epoch.
    """
    print("Generating data...", end="")
    voc_size = args.vocab_sz
    inp = np.arange(2, voc_size, 2)  # source: even numbers
    tgt = np.arange(3, voc_size, 2)  # target: the odd successors
    data_x, data_y = get_numbers(inp, tgt)
    train_len = int(len(data_x) * 0.9)  # 90/10 train/validation split
    train_x, val_x = data_x[:train_len], data_x[train_len:]
    train_y, val_y = data_y[:train_len], data_y[train_len:]
    print("Done")
    print("Setting model...", end="")
    model = TransformerModel(
        input_sz=voc_size,
        output_sz=voc_size,
        d_model=args.d_model,
        nhead=args.n_head,
        num_encoder_layers=args.n_encoder_layers,
        num_decoder_layers=args.n_decoder_layers,
        dim_feedforward=args.dim_feedforward,
        dropout=args.dropout,
    )
    if args.load_dir != ".":
        model.load_state_dict(flow.load(args.load_dir))
    model = to_cuda(model)
    criterion = to_cuda(nn.CrossEntropyLoss())
    optimizer = flow.optim.Adam(model.parameters(), lr=args.lr)
    print("Done")
    print("Training...")
    min_loss = 100
    for i in range(1, args.n_epochs + 1):
        epoch_loss = train(model, criterion, optimizer, train_x, train_y)
        epoch_loss_val = validation(model, criterion, val_x, val_y)
        print("epoch: {} train loss: {}".format(i, epoch_loss))
        print("epoch: {} val loss: {}".format(i, epoch_loss_val))
        if epoch_loss < min_loss:
            # BUG FIX: min_loss was never updated, so any epoch with loss < 100
            # overwrote the "best" checkpoint; track the actual best loss.
            min_loss = epoch_loss
            if not os.path.exists(args.save_dir):
                os.mkdir(args.save_dir)
            else:
                # BUG FIX: the assert and second mkdir were outside this else,
                # so the very first save (fresh mkdir -> assert not exists)
                # raised AssertionError. Recreate the dir only after rmtree.
                shutil.rmtree(args.save_dir)
                assert not os.path.exists(args.save_dir)
                os.mkdir(args.save_dir)
            flow.save(model.state_dict(), args.save_dir)
        if i % 3 == 2:
            print(test(model, test_times=10))
# Script entry point: train, validate, and periodically evaluate the model.
if __name__ == "__main__":
    main()
| [
"oneflow.load",
"oneflow.Tensor",
"oneflow.nn.CrossEntropyLoss",
"oneflow.tensor",
"oneflow.no_grad"
] | [((202, 224), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (217, 224), False, 'import sys\n'), ((286, 311), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (309, 311), False, 'import argparse\n'), ((1506, 1528), 'numpy.random.shuffle', 'np.random.shuffle', (['idx'], {}), '(idx)\n', (1523, 1528), True, 'import numpy as np\n'), ((4547, 4572), 'numpy.arange', 'np.arange', (['(2)', 'voc_size', '(2)'], {}), '(2, voc_size, 2)\n', (4556, 4572), True, 'import numpy as np\n'), ((4583, 4608), 'numpy.arange', 'np.arange', (['(3)', 'voc_size', '(2)'], {}), '(3, voc_size, 2)\n', (4592, 4608), True, 'import numpy as np\n'), ((4880, 5134), 'model.TransformerModel', 'TransformerModel', ([], {'input_sz': 'voc_size', 'output_sz': 'voc_size', 'd_model': 'args.d_model', 'nhead': 'args.n_head', 'num_encoder_layers': 'args.n_encoder_layers', 'num_decoder_layers': 'args.n_decoder_layers', 'dim_feedforward': 'args.dim_feedforward', 'dropout': 'args.dropout'}), '(input_sz=voc_size, output_sz=voc_size, d_model=args.\n d_model, nhead=args.n_head, num_encoder_layers=args.n_encoder_layers,\n num_decoder_layers=args.n_decoder_layers, dim_feedforward=args.\n dim_feedforward, dropout=args.dropout)\n', (4896, 5134), False, 'from model import TransformerModel\n'), ((2043, 2081), 'oneflow.tensor', 'flow.tensor', (['x_batch'], {'dtype': 'flow.int64'}), '(x_batch, dtype=flow.int64)\n', (2054, 2081), True, 'import oneflow as flow\n'), ((2091, 2129), 'oneflow.tensor', 'flow.tensor', (['y_batch'], {'dtype': 'flow.int64'}), '(y_batch, dtype=flow.int64)\n', (2102, 2129), True, 'import oneflow as flow\n'), ((2988, 3002), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3000, 3002), True, 'import oneflow as flow\n'), ((3574, 3588), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3586, 3588), True, 'import oneflow as flow\n'), ((5328, 5349), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (5347, 5349), True, 'import 
oneflow.nn as nn\n'), ((3642, 3668), 'numpy.random.randint', 'np.random.randint', (['(1)', '(4998)'], {}), '(1, 4998)\n', (3659, 3668), True, 'import numpy as np\n'), ((5251, 5275), 'oneflow.load', 'flow.load', (['args.load_dir'], {}), '(args.load_dir)\n', (5260, 5275), True, 'import oneflow as flow\n'), ((5847, 5876), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (5861, 5876), False, 'import os\n'), ((5894, 5917), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (5902, 5917), False, 'import os\n'), ((5952, 5980), 'shutil.rmtree', 'shutil.rmtree', (['args.save_dir'], {}), '(args.save_dir)\n', (5965, 5980), False, 'import shutil\n'), ((6054, 6077), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (6062, 6077), False, 'import os\n'), ((6008, 6037), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (6022, 6037), False, 'import os\n'), ((3755, 3793), 'oneflow.Tensor', 'flow.Tensor', (['cpu_src'], {'dtype': 'flow.int64'}), '(cpu_src, dtype=flow.int64)\n', (3766, 3793), True, 'import oneflow as flow\n'), ((3985, 4020), 'oneflow.Tensor', 'flow.Tensor', (['pred'], {'dtype': 'flow.int64'}), '(pred, dtype=flow.int64)\n', (3996, 4020), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from test_util import GenArgList
def _test_randint(test_case, device, shape, low, high):
    """Two independent draws should differ and carry the requested shape."""
    first = flow.randint(low, high, shape, device=flow.device(device))
    second = flow.randint(low, high, shape, device=flow.device(device))
    # Independent samples are overwhelmingly unlikely to match elementwise.
    test_case.assertFalse(np.allclose(first.numpy(), second.numpy(), atol=1e-4, rtol=1e-4))
    test_case.assertTrue(shape == first.shape)
def _test_0d_randint(test_case, device, shape, low, high):
    """Zero-sized shapes yield empty tensors, which trivially compare equal."""
    first = flow.randint(low, high, shape, device=flow.device(device))
    second = flow.randint(low, high, shape, device=flow.device(device))
    # Both tensors are empty ([] vs []), so allclose holds.
    test_case.assertTrue(np.allclose(first.numpy(), second.numpy(), atol=1e-4, rtol=1e-4))
    test_case.assertTrue(shape == first.shape)
def _test_different_dtype(test_case, device, shape, low, high):
    """randint should honor every supported integer and float dtype."""
    supported = [
        flow.uint8,
        flow.int8,
        flow.int32,
        flow.int64,
        flow.float32,
        flow.float64,
    ]
    for dtype in supported:
        out = flow.randint(low, high, shape, dtype=dtype, device=flow.device(device))
        test_case.assertTrue(out.dtype == dtype)
        test_case.assertTrue(out.shape == shape)
def _test_with_generator(test_case, device, shape, low, high):
    """Re-seeding the same generator must reproduce identical samples."""
    gen = flow.Generator()
    draws = []
    for _ in range(2):
        gen.manual_seed(0)
        draws.append(
            flow.randint(
                low, high, shape, dtype=flow.float32, device=flow.device(device), generator=gen
            )
        )
    test_case.assertTrue(np.allclose(draws[0].numpy(), draws[1].numpy(), atol=1e-4, rtol=1e-4))
def _test_high(test_case, device, shape, low, high):
    """The functional one-bound form samples from [0, high); `low` is unused here."""
    first = flow._C.randint(high, shape, device=flow.device(device))
    second = flow._C.randint(high, shape, device=flow.device(device))
    # Independent samples are overwhelmingly unlikely to match elementwise.
    test_case.assertFalse(np.allclose(first.numpy(), second.numpy(), atol=1e-4, rtol=1e-4))
    test_case.assertTrue(shape == first.shape)
def _test_0rank(test_case, device, shape, low, high):
    """A scalar (rank-0) shape must be accepted and preserved."""
    out = flow.randint(low, high, shape, device=flow.device(device))
    test_case.assertTrue(out.shape == shape)
@flow.unittest.skip_unless_1n1d()
class TestRandint(flow.unittest.TestCase):
    """Eager and consistent-mode randint coverage driven by GenArgList sweeps."""

    def test_consistent_naive(test_case):
        placement = flow.placement("cpu", {0: [0]})
        sbp = (flow.sbp.broadcast,)
        out = flow.randint(0, 16, (10, 1), placement=placement, sbp=sbp)
        test_case.assertEqual(out.sbp, sbp)
        test_case.assertEqual(out.placement, placement)

    def test_consistent_different_types(test_case):
        for dtype in (flow.int8, flow.int32, flow.int64, flow.float32, flow.float64):
            placement = flow.placement("cpu", {0: [0]})
            sbp = (flow.sbp.broadcast,)
            out = flow.randint(0, 16, (10, 1), placement=placement, sbp=sbp, dtype=dtype)
            test_case.assertEqual(out.dtype, dtype)
            test_case.assertEqual(out.sbp, sbp)
            test_case.assertEqual(out.placement, placement)

    def test_randint(test_case):
        arg_dict = OrderedDict(
            [
                ("test_fun", [_test_randint, _test_different_dtype, _test_with_generator]),
                ("device", ["cpu", "cuda"]),
                ("shape", [(2, 3), (2, 3, 4), (2, 3, 4, 5)]),
                ("low", list(range(10))),
                ("high", [10 + np.random.randint(10, 20) for _ in range(10)]),
            ]
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_0d_randint(test_case):
        arg_dict = OrderedDict(
            [
                ("test_fun", [_test_0d_randint]),
                ("device", ["cpu", "cuda"]),
                ("shape", [(2, 0, 4), (2, 0, 2)]),
                ("low", list(range(10))),
                ("high", [10 + np.random.randint(1, 20) for _ in range(10)]),
            ]
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_high_randint(test_case):
        arg_dict = OrderedDict(
            [
                ("test_fun", [_test_high]),
                ("device", ["cpu", "cuda"]),
                ("shape", [(2, 3, 4), (2, 5, 2)]),
                ("low", list(range(10))),
                ("high", [10 + np.random.randint(10, 20) for _ in range(10)]),
            ]
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_0rank_randint(test_case):
        arg_dict = OrderedDict(
            [
                ("test_fun", [_test_0rank]),
                ("device", ["cpu", "cuda"]),
                ("shape", [()]),
                ("low", list(range(10))),
                ("high", [1000 + np.random.randint(1, 10) for _ in range(10)]),
            ]
        )
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
# Entry point: run this file's unittest suite.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.placement",
"oneflow.Generator",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.randint",
"oneflow.device"
] | [((2770, 2802), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2800, 2802), True, 'import oneflow as flow\n'), ((1902, 1918), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1916, 1918), True, 'import oneflow as flow\n'), ((5497, 5512), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5510, 5512), False, 'import unittest\n'), ((2908, 2941), 'oneflow.placement', 'flow.placement', (['"""cpu"""', '{(0): [0]}'], {}), "('cpu', {(0): [0]})\n", (2922, 2941), True, 'import oneflow as flow\n'), ((2988, 3046), 'oneflow.randint', 'flow.randint', (['(0)', '(16)', '(10, 1)'], {'placement': 'placement', 'sbp': 'sbp'}), '(0, 16, (10, 1), placement=placement, sbp=sbp)\n', (3000, 3046), True, 'import oneflow as flow\n'), ((3744, 3757), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3755, 3757), False, 'from collections import OrderedDict\n'), ((4151, 4171), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4161, 4171), False, 'from test_util import GenArgList\n'), ((4269, 4282), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4280, 4282), False, 'from collections import OrderedDict\n'), ((4575, 4595), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4585, 4595), False, 'from test_util import GenArgList\n'), ((4695, 4708), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4706, 4708), False, 'from collections import OrderedDict\n'), ((4996, 5016), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5006, 5016), False, 'from test_util import GenArgList\n'), ((5117, 5130), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5128, 5130), False, 'from collections import OrderedDict\n'), ((5402, 5422), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5412, 5422), False, 'from test_util import GenArgList\n'), ((858, 877), 'oneflow.device', 'flow.device', (['device'], {}), 
'(device)\n', (869, 877), True, 'import oneflow as flow\n'), ((926, 945), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (937, 945), True, 'import oneflow as flow\n'), ((1184, 1203), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1195, 1203), True, 'import oneflow as flow\n'), ((1252, 1271), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1263, 1271), True, 'import oneflow as flow\n'), ((2018, 2037), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2029, 2037), True, 'import oneflow as flow\n'), ((2158, 2177), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2169, 2177), True, 'import oneflow as flow\n'), ((2383, 2402), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2394, 2402), True, 'import oneflow as flow\n'), ((2449, 2468), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2460, 2468), True, 'import oneflow as flow\n'), ((2702, 2721), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2713, 2721), True, 'import oneflow as flow\n'), ((3377, 3410), 'oneflow.placement', 'flow.placement', (['"""cpu"""', '{(0): [0]}'], {}), "('cpu', {(0): [0]})\n", (3391, 3410), True, 'import oneflow as flow\n'), ((3465, 3536), 'oneflow.randint', 'flow.randint', (['(0)', '(16)', '(10, 1)'], {'placement': 'placement', 'sbp': 'sbp', 'dtype': 'dtype'}), '(0, 16, (10, 1), placement=placement, sbp=sbp, dtype=dtype)\n', (3477, 3536), True, 'import oneflow as flow\n'), ((1712, 1731), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1723, 1731), True, 'import oneflow as flow\n'), ((4086, 4111), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], {}), '(10, 20)\n', (4103, 4111), True, 'import numpy as np\n'), ((4511, 4535), 'numpy.random.randint', 'np.random.randint', (['(1)', '(20)'], {}), '(1, 20)\n', (4528, 4535), True, 'import numpy as np\n'), ((4931, 4956), 'numpy.random.randint', 'np.random.randint', (['(10)', '(20)'], 
{}), '(10, 20)\n', (4948, 4956), True, 'import numpy as np\n'), ((5338, 5362), 'numpy.random.randint', 'np.random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (5355, 5362), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestReLUModule(flow.unittest.TestCase):
    """nn.ReLU must agree with np.maximum(0, x) on random input."""

    def test_relu(test_case):
        relu = flow.nn.ReLU()
        data = np.random.randn(2, 3, 4, 5)
        expected = np.maximum(0, data)
        actual = relu(flow.Tensor(data))
        test_case.assertTrue(np.allclose(actual.numpy(), expected, rtol=1e-05))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestTanhModule(flow.unittest.TestCase):
    """flow tanh (module, functional, and tensor-method forms) vs np.tanh."""

    def _test_body_tanh(test_case, input_arr):
        # Module form: nn.Tanh()
        out = flow.nn.Tanh()(flow.Tensor(input_arr))
        ref = np.tanh(input_arr)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-4, atol=1e-4))

    def _test_ones_body_tanh(self, shape):
        self._test_body_tanh(np.ones(shape, dtype=np.float32))

    def _test_random_body_tanh(self, shape):
        self._test_body_tanh(np.random.random(shape).astype(np.float32))

    def test_ones_input_tanh(self):
        # NOTE(review): (1) is the int 1, not a 1-tuple; np.ones accepts both.
        for shape in [(1), (1, 10), (2, 10, 2), (2, 5, 2, 2)]:
            self._test_ones_body_tanh(shape)

    def test_random_input_tanh(self):
        for shape in [(1), (1, 10), (2, 10, 2), (2, 5, 2, 2)]:
            self._test_random_body_tanh(shape)

    def _test_body_tanh_v2(test_case, input_arr):
        # Functional form: flow.tanh(x)
        out = flow.tanh(flow.Tensor(input_arr))
        ref = np.tanh(input_arr)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-4, atol=1e-4))

    def _test_body_tanh_v3(test_case, input_arr):
        # Tensor-method form: x.tanh()
        out = flow.Tensor(input_arr).tanh()
        ref = np.tanh(input_arr)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-4, atol=1e-4))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestGeLU(flow.unittest.TestCase):
    """GELU checked against precomputed values for the inputs [-0.5, 0, 0.5]."""

    def test_gelu_v1(test_case):
        # Module form: flow.nn.GELU()
        x = flow.Tensor(np.array([-0.5, 0, 0.5]).astype(np.float32))
        y = flow.nn.GELU()(x)
        expected = np.array([-0.15426877, 0.0, 0.34573123])
        test_case.assertTrue(np.allclose(y.numpy(), expected, rtol=1e-4, atol=1e-4))

    def test_gelu_v2(test_case):
        # Functional form: flow.gelu(x)
        x = flow.Tensor(np.array([-0.5, 0, 0.5]).astype(np.float32))
        y = flow.gelu(x)
        expected = np.array([-0.15426877, 0.0, 0.34573123])
        test_case.assertTrue(np.allclose(y.numpy(), expected, rtol=1e-4, atol=1e-4))

    def test_gelu_v3(test_case):
        # Tensor-method form: x.gelu()
        x = flow.Tensor(np.array([-0.5, 0, 0.5]).astype(np.float32))
        y = x.gelu()
        expected = np.array([-0.15426877, 0.0, 0.34573123])
        test_case.assertTrue(np.allclose(y.numpy(), expected, rtol=1e-4, atol=1e-4))
def numpy_sigmoid(x):
    """Reference sigmoid 1 / (1 + exp(-x)); works elementwise on arrays."""
    return 1.0 / (np.exp(-x) + 1)
def numpy_softmax(x, axis):
    """Numerically stable reference softmax along *axis* (max-shift trick)."""
    shifted = x - x.max(axis=axis, keepdims=True)
    exp = np.exp(shifted)
    return exp / exp.sum(axis=axis, keepdims=True)
def numpy_logsoftmax(x, dim):
    """Reference log-softmax along *dim*, using the max-shift trick for stability."""
    exp_shifted = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return np.log(exp_shifted / exp_shifted.sum(axis=dim, keepdims=True))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSigmoidModule(flow.unittest.TestCase):
    """Sigmoid via module, functional, and tensor-method forms vs numpy_sigmoid."""

    def test_sigmoid(test_case):
        data = np.random.randn(2, 3, 4, 5)
        x = flow.Tensor(data)
        expected = numpy_sigmoid(data)
        # All three call forms must produce the same values.
        for result in (flow.nn.Sigmoid()(x), flow.sigmoid(x), x.sigmoid()):
            test_case.assertTrue(np.allclose(result.numpy(), expected, rtol=1e-05))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSoftmaxModule(flow.unittest.TestCase):
    """nn.Softmax along each axis of a 4-d input, compared with numpy_softmax."""

    def test_softmax(test_case):
        axis = 0
        m = flow.nn.Softmax(dim=axis)
        arr = np.random.randn(2, 3, 4, 5)
        x = flow.Tensor(arr)
        y = m(x)
        output = numpy_softmax(arr, axis)
        test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05))

    def test_softmax_dim_1(test_case):
        axis = 1
        m = flow.nn.Softmax(dim=axis)
        arr = np.random.randn(9, 7, 8, 16)
        x = flow.Tensor(arr)
        y = m(x)
        output = numpy_softmax(arr, axis)
        test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05))

    def test_softmax_dim_2(test_case):
        axis = 2
        m = flow.nn.Softmax(dim=axis)
        arr = np.random.randn(2, 5, 6, 3)
        x = flow.Tensor(arr)
        y = m(x)
        output = numpy_softmax(arr, axis)
        test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05))

    def test_softmax_dim_3(test_case):
        axis = 3
        m = flow.nn.Softmax(dim=axis)
        arr = np.random.randn(1, 3, 4, 7)
        x = flow.Tensor(arr)
        y = m(x)
        output = numpy_softmax(arr, axis)
        test_case.assertTrue(np.allclose(y.numpy(), output, rtol=1e-05))
        # BUG FIX: the original declared axis2 = -1 and m2, but then reused
        # `m` and `axis`, so dim=-1 was never actually exercised.
        axis2 = -1
        m2 = flow.nn.Softmax(dim=axis2)
        y2 = m2(x)
        output2 = numpy_softmax(arr, axis2)
        test_case.assertTrue(np.allclose(y2.numpy(), output2, rtol=1e-05))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLogSoftmaxModule(flow.unittest.TestCase):
    """nn.LogSoftmax against the numpy reference for several dims."""

    def test_logsoftmax(test_case):
        dim = 1
        data = np.random.randn(4, 7)
        out = flow.nn.LogSoftmax(dim)(flow.Tensor(data))
        ref = numpy_logsoftmax(data, dim)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-05))

    def test_logsoftmax_dim_2(test_case):
        dim = 2
        data = np.random.randn(3, 4, 5)
        out = flow.nn.LogSoftmax(dim)(flow.Tensor(data))
        ref = numpy_logsoftmax(data, dim)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-05))

    def test_logsoftmax_dim_3(test_case):
        dim = 3
        data = np.random.randn(8, 9, 7, 3)
        out = flow.nn.LogSoftmax(dim)(flow.Tensor(data))
        ref = numpy_logsoftmax(data, dim)
        test_case.assertTrue(np.allclose(out.numpy(), ref, rtol=1e-05))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestHardtanhModule(flow.unittest.TestCase):
    """Hardtanh with default and custom clipping bounds vs np.clip."""

    def test_hardtanh(test_case):
        data = np.random.randn(2, 3, 4, 5)
        expected = np.clip(data, -1, 1)  # default bounds are [-1, 1]
        out = flow.nn.Hardtanh()(flow.Tensor(data))
        test_case.assertTrue(np.allclose(out.numpy(), expected, rtol=1e-05))

    def test_hardtanh_min_max(test_case):
        data = np.random.randn(2, 3, 4, 5)
        expected = np.clip(data, -2.0, 2.3)
        out = flow.nn.Hardtanh(min_val=-2.0, max_val=2.3)(flow.Tensor(data))
        test_case.assertTrue(np.allclose(out.numpy(), expected, rtol=1e-05))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLeakyReLUModule(flow.unittest.TestCase):
    """LeakyReLU: identity for x > 0, negative_slope * x otherwise."""

    def test_leaky_relu(test_case):
        slope = 0.2
        data = np.random.randn(2, 3, 4, 5)
        # Equivalent to max(0, x) + slope * min(0, x).
        expected = np.where(data > 0, data, slope * data)
        out = flow.nn.LeakyReLU(negative_slope=slope)(flow.Tensor(data))
        test_case.assertTrue(np.allclose(out.numpy(), expected, rtol=1e-05))
# Entry point: run this file's unittest suite.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.Softmax",
"oneflow.experimental.nn.ReLU",
"oneflow.experimental.nn.LeakyReLU",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.nn.LogSoftmax",
"oneflow.experimental.nn.GELU",
"oneflow.experimental.gelu",
"oneflow.experimental.tanh",
"oneflo... | [((3816, 3825), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (3822, 3825), True, 'import numpy as np\n'), ((8676, 8691), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8689, 8691), False, 'import unittest\n'), ((866, 880), 'oneflow.experimental.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (878, 880), True, 'import oneflow.experimental as flow\n'), ((895, 922), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (910, 922), True, 'import numpy as np\n'), ((941, 959), 'numpy.maximum', 'np.maximum', (['(0)', 'arr'], {}), '(0, arr)\n', (951, 959), True, 'import numpy as np\n'), ((972, 988), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (983, 988), True, 'import oneflow.experimental as flow\n'), ((689, 732), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (730, 732), True, 'import oneflow.experimental as flow\n'), ((1311, 1333), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (1322, 1333), True, 'import oneflow.experimental as flow\n'), ((1350, 1364), 'oneflow.experimental.nn.Tanh', 'flow.nn.Tanh', ([], {}), '()\n', (1362, 1364), True, 'import oneflow.experimental as flow\n'), ((1397, 1415), 'numpy.tanh', 'np.tanh', (['input_arr'], {}), '(input_arr)\n', (1404, 1415), True, 'import numpy as np\n'), ((1551, 1583), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'np.float32'}), '(shape, dtype=np.float32)\n', (1558, 1583), True, 'import numpy as np\n'), ((2248, 2270), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (2259, 2270), True, 'import oneflow.experimental as flow\n'), ((2284, 2296), 'oneflow.experimental.tanh', 'flow.tanh', (['x'], {}), '(x)\n', (2293, 2296), True, 'import oneflow.experimental as flow\n'), ((2309, 2327), 'numpy.tanh', 'np.tanh', (['input_arr'], {}), '(input_arr)\n', (2316, 2327), True, 'import numpy as np\n'), ((2470, 
2492), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (2481, 2492), True, 'import oneflow.experimental as flow\n'), ((2527, 2545), 'numpy.tanh', 'np.tanh', (['input_arr'], {}), '(input_arr)\n', (2534, 2545), True, 'import numpy as np\n'), ((1117, 1160), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (1158, 1160), True, 'import oneflow.experimental as flow\n'), ((2891, 2913), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (2902, 2913), True, 'import oneflow.experimental as flow\n'), ((2930, 2944), 'oneflow.experimental.nn.GELU', 'flow.nn.GELU', ([], {}), '()\n', (2942, 2944), True, 'import oneflow.experimental as flow\n'), ((2977, 3017), 'numpy.array', 'np.array', (['[-0.15426877, 0.0, 0.34573123]'], {}), '([-0.15426877, 0.0, 0.34573123])\n', (2985, 3017), True, 'import numpy as np\n'), ((3207, 3229), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (3218, 3229), True, 'import oneflow.experimental as flow\n'), ((3243, 3255), 'oneflow.experimental.gelu', 'flow.gelu', (['x'], {}), '(x)\n', (3252, 3255), True, 'import oneflow.experimental as flow\n'), ((3268, 3308), 'numpy.array', 'np.array', (['[-0.15426877, 0.0, 0.34573123]'], {}), '([-0.15426877, 0.0, 0.34573123])\n', (3276, 3308), True, 'import numpy as np\n'), ((3498, 3520), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (3509, 3520), True, 'import oneflow.experimental as flow\n'), ((3556, 3596), 'numpy.array', 'np.array', (['[-0.15426877, 0.0, 0.34573123]'], {}), '([-0.15426877, 0.0, 0.34573123])\n', (3564, 3596), True, 'import numpy as np\n'), ((2653, 2696), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2694, 2696), True, 'import oneflow.experimental as flow\n'), ((4231, 4248), 'oneflow.experimental.nn.Sigmoid', 
'flow.nn.Sigmoid', ([], {}), '()\n', (4246, 4248), True, 'import oneflow.experimental as flow\n'), ((4269, 4296), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (4284, 4296), True, 'import numpy as np\n'), ((4309, 4331), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (4320, 4331), True, 'import oneflow.experimental as flow\n'), ((4363, 4378), 'oneflow.experimental.sigmoid', 'flow.sigmoid', (['x'], {}), '(x)\n', (4375, 4378), True, 'import oneflow.experimental as flow\n'), ((4048, 4091), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (4089, 4091), True, 'import oneflow.experimental as flow\n'), ((4896, 4921), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (4911, 4921), True, 'import oneflow.experimental as flow\n'), ((4936, 4963), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (4951, 4963), True, 'import numpy as np\n'), ((4976, 4992), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (4987, 4992), True, 'import oneflow.experimental as flow\n'), ((5194, 5219), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (5209, 5219), True, 'import oneflow.experimental as flow\n'), ((5234, 5262), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(8)', '(16)'], {}), '(9, 7, 8, 16)\n', (5249, 5262), True, 'import numpy as np\n'), ((5275, 5291), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (5286, 5291), True, 'import oneflow.experimental as flow\n'), ((5493, 5518), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (5508, 5518), True, 'import oneflow.experimental as flow\n'), ((5533, 5560), 'numpy.random.randn', 'np.random.randn', (['(2)', '(5)', '(6)', '(3)'], {}), '(2, 5, 6, 3)\n', (5548, 
5560), True, 'import numpy as np\n'), ((5573, 5589), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (5584, 5589), True, 'import oneflow.experimental as flow\n'), ((5791, 5816), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (5806, 5816), True, 'import oneflow.experimental as flow\n'), ((5831, 5858), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(4)', '(7)'], {}), '(1, 3, 4, 7)\n', (5846, 5858), True, 'import numpy as np\n'), ((5871, 5887), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (5882, 5887), True, 'import oneflow.experimental as flow\n'), ((6053, 6078), 'oneflow.experimental.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (6068, 6078), True, 'import oneflow.experimental as flow\n'), ((4696, 4739), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (4737, 4739), True, 'import oneflow.experimental as flow\n'), ((6448, 6471), 'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (6466, 6471), True, 'import oneflow.experimental as flow\n'), ((6492, 6513), 'numpy.random.randn', 'np.random.randn', (['(4)', '(7)'], {}), '(4, 7)\n', (6507, 6513), True, 'import numpy as np\n'), ((6526, 6548), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (6537, 6548), True, 'import oneflow.experimental as flow\n'), ((6760, 6783), 'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (6778, 6783), True, 'import oneflow.experimental as flow\n'), ((6804, 6828), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (6819, 6828), True, 'import numpy as np\n'), ((6841, 6863), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (6852, 6863), True, 'import oneflow.experimental as flow\n'), ((7075, 7098), 
'oneflow.experimental.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (7093, 7098), True, 'import oneflow.experimental as flow\n'), ((7119, 7146), 'numpy.random.randn', 'np.random.randn', (['(8)', '(9)', '(7)', '(3)'], {}), '(8, 9, 7, 3)\n', (7134, 7146), True, 'import numpy as np\n'), ((7159, 7181), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (7170, 7181), True, 'import oneflow.experimental as flow\n'), ((6243, 6286), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (6284, 6286), True, 'import oneflow.experimental as flow\n'), ((7535, 7553), 'oneflow.experimental.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {}), '()\n', (7551, 7553), True, 'import oneflow.experimental as flow\n'), ((7568, 7595), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (7583, 7595), True, 'import numpy as np\n'), ((7660, 7676), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (7671, 7676), True, 'import oneflow.experimental as flow\n'), ((7832, 7875), 'oneflow.experimental.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {'min_val': '(-2.0)', 'max_val': '(2.3)'}), '(min_val=-2.0, max_val=2.3)\n', (7848, 7875), True, 'import oneflow.experimental as flow\n'), ((7890, 7917), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (7905, 7917), True, 'import numpy as np\n'), ((7986, 8002), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (7997, 8002), True, 'import oneflow.experimental as flow\n'), ((7350, 7393), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (7391, 7393), True, 'import oneflow.experimental as flow\n'), ((8348, 8396), 'oneflow.experimental.nn.LeakyReLU', 'flow.nn.LeakyReLU', ([], {'negative_slope': 'negative_slope'}), '(negative_slope=negative_slope)\n', 
(8365, 8396), True, 'import oneflow.experimental as flow\n'), ((8411, 8438), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (8426, 8438), True, 'import numpy as np\n'), ((8526, 8542), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {}), '(arr)\n', (8537, 8542), True, 'import oneflow.experimental as flow\n'), ((8131, 8174), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (8172, 8174), True, 'import oneflow.experimental as flow\n'), ((3722, 3732), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (3728, 3732), True, 'import numpy as np\n'), ((3926, 3960), 'numpy.max', 'np.max', (['x'], {'axis': 'dim', 'keepdims': '(True)'}), '(x, axis=dim, keepdims=True)\n', (3932, 3960), True, 'import numpy as np\n'), ((7628, 7646), 'numpy.minimum', 'np.minimum', (['(1)', 'arr'], {}), '(1, arr)\n', (7638, 7646), True, 'import numpy as np\n'), ((7952, 7972), 'numpy.minimum', 'np.minimum', (['(2.3)', 'arr'], {}), '(2.3, arr)\n', (7962, 7972), True, 'import numpy as np\n'), ((8457, 8475), 'numpy.maximum', 'np.maximum', (['(0)', 'arr'], {}), '(0, arr)\n', (8467, 8475), True, 'import numpy as np\n'), ((1674, 1697), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1690, 1697), True, 'import numpy as np\n'), ((2835, 2859), 'numpy.array', 'np.array', (['[-0.5, 0, 0.5]'], {}), '([-0.5, 0, 0.5])\n', (2843, 2859), True, 'import numpy as np\n'), ((3151, 3175), 'numpy.array', 'np.array', (['[-0.5, 0, 0.5]'], {}), '([-0.5, 0, 0.5])\n', (3159, 3175), True, 'import numpy as np\n'), ((3442, 3466), 'numpy.array', 'np.array', (['[-0.5, 0, 0.5]'], {}), '([-0.5, 0, 0.5])\n', (3450, 3466), True, 'import numpy as np\n'), ((8495, 8513), 'numpy.minimum', 'np.minimum', (['(0)', 'arr'], {}), '(0, arr)\n', (8505, 8513), True, 'import numpy as np\n')] |
"""
"""
import os
from os.path import abspath, expanduser
from typing import Any, Callable, List, Dict, Optional, Tuple, Union
import oneflow as flow
from PIL import Image
from .utils import (
download_file_from_google_drive,
download_and_extract_archive,
extract_archive,
verify_str_arg,
)
from .vision import VisionDataset
class WIDERFace(VisionDataset):
"""`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset.
Args:
root (string): Root directory where images and annotations are downloaded to.
Expects the following folder structure if download=False:
.. code::
<root>
└── widerface
├── wider_face_split ('wider_face_split.zip' if compressed)
├── WIDER_train ('WIDER_train.zip' if compressed)
├── WIDER_val ('WIDER_val.zip' if compressed)
└── WIDER_test ('WIDER_test.zip' if compressed)
split (string): The dataset split to use. One of {``train``, ``val``, ``test``}.
Defaults to ``train``.
transform (callable, optional): A function/transform that takes in a PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
BASE_FOLDER = "widerface"
FILE_LIST = [
# File ID MD5 Hash Filename
(
"15hGDLhsx8bLgLcIRD5DhYt5iBxnjNF1M",
"3fedf70df600953d25982bcd13d91ba2",
"WIDER_train.zip",
),
(
"1GUCogbp16PMGa39thoMMeWxp7Rp5oM8Q",
"dfa7d7e790efa35df3788964cf0bbaea",
"WIDER_val.zip",
),
(
"1HIfDbVEWKmsYKJZm4lchTBDLW5N7dY5T",
"e5d8f4248ed24c334bbd12f49c29dd40",
"WIDER_test.zip",
),
]
ANNOTATIONS_FILE = (
"http://shuoyang1213.me/WIDERFACE/support/bbx_annotation/wider_face_split.zip",
"0e3767bcf0e326556d407bf5bff5d27c",
"wider_face_split.zip",
)
def __init__(
self,
root: str,
split: str = "train",
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super(WIDERFace, self).__init__(
root=os.path.join(root, self.BASE_FOLDER),
transform=transform,
target_transform=target_transform,
)
# check arguments
self.split = verify_str_arg(split, "split", ("train", "val", "test"))
if download:
self.download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted. "
+ "You can use download=True to download and prepare it"
)
self.img_info: List[Dict[str, Union[str, Dict[str, flow.Tensor]]]] = []
if self.split in ("train", "val"):
self.parse_train_val_annotations_file()
else:
self.parse_test_annotations_file()
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is a dict of annotations for all faces in the image.
target=None for the test split.
"""
# stay consistent with other datasets and return a PIL Image
img = Image.open(self.img_info[index]["img_path"])
if self.transform is not None:
img = self.transform(img)
target = None if self.split == "test" else self.img_info[index]["annotations"]
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.img_info)
def extra_repr(self) -> str:
lines = ["Split: {split}"]
return "\n".join(lines).format(**self.__dict__)
def parse_train_val_annotations_file(self) -> None:
filename = (
"wider_face_train_bbx_gt.txt"
if self.split == "train"
else "wider_face_val_bbx_gt.txt"
)
filepath = os.path.join(self.root, "wider_face_split", filename)
with open(filepath, "r") as f:
lines = f.readlines()
file_name_line, num_boxes_line, box_annotation_line = True, False, False
num_boxes, box_counter = 0, 0
labels = []
for line in lines:
line = line.rstrip()
if file_name_line:
img_path = os.path.join(
self.root, "WIDER_" + self.split, "images", line
)
img_path = abspath(expanduser(img_path))
file_name_line = False
num_boxes_line = True
elif num_boxes_line:
num_boxes = int(line)
num_boxes_line = False
box_annotation_line = True
elif box_annotation_line:
box_counter += 1
line_split = line.split(" ")
line_values = [int(x) for x in line_split]
labels.append(line_values)
if box_counter >= num_boxes:
box_annotation_line = False
file_name_line = True
labels_tensor = flow.tensor(labels)
self.img_info.append(
{
"img_path": img_path,
"annotations": {
"bbox": labels_tensor[
:, 0:4
], # x, y, width, height
"blur": labels_tensor[:, 4],
"expression": labels_tensor[:, 5],
"illumination": labels_tensor[:, 6],
"occlusion": labels_tensor[:, 7],
"pose": labels_tensor[:, 8],
"invalid": labels_tensor[:, 9],
},
}
)
box_counter = 0
labels.clear()
else:
raise RuntimeError(
"Error parsing annotation file {}".format(filepath)
)
def parse_test_annotations_file(self) -> None:
filepath = os.path.join(
self.root, "wider_face_split", "wider_face_test_filelist.txt"
)
filepath = abspath(expanduser(filepath))
with open(filepath, "r") as f:
lines = f.readlines()
for line in lines:
line = line.rstrip()
img_path = os.path.join(self.root, "WIDER_test", "images", line)
img_path = abspath(expanduser(img_path))
self.img_info.append({"img_path": img_path})
def _check_integrity(self) -> bool:
# Allow original archive to be deleted (zip). Only need the extracted images
all_files = self.FILE_LIST.copy()
all_files.append(self.ANNOTATIONS_FILE)
for (_, md5, filename) in all_files:
file, ext = os.path.splitext(filename)
extracted_dir = os.path.join(self.root, file)
if not os.path.exists(extracted_dir):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
# download and extract image data
for (file_id, md5, filename) in self.FILE_LIST:
download_file_from_google_drive(file_id, self.root, filename, md5)
filepath = os.path.join(self.root, filename)
extract_archive(filepath)
# download and extract annotation files
download_and_extract_archive(
url=self.ANNOTATIONS_FILE[0],
download_root=self.root,
md5=self.ANNOTATIONS_FILE[1],
)
| [
"oneflow.tensor"
] | [((3785, 3829), 'PIL.Image.open', 'Image.open', (["self.img_info[index]['img_path']"], {}), "(self.img_info[index]['img_path'])\n", (3795, 3829), False, 'from PIL import Image\n'), ((4542, 4595), 'os.path.join', 'os.path.join', (['self.root', '"""wider_face_split"""', 'filename'], {}), "(self.root, 'wider_face_split', filename)\n", (4554, 4595), False, 'import os\n'), ((6984, 7059), 'os.path.join', 'os.path.join', (['self.root', '"""wider_face_split"""', '"""wider_face_test_filelist.txt"""'], {}), "(self.root, 'wider_face_split', 'wider_face_test_filelist.txt')\n", (6996, 7059), False, 'import os\n'), ((7109, 7129), 'os.path.expanduser', 'expanduser', (['filepath'], {}), '(filepath)\n', (7119, 7129), False, 'from os.path import abspath, expanduser\n'), ((7756, 7782), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (7772, 7782), False, 'import os\n'), ((7811, 7840), 'os.path.join', 'os.path.join', (['self.root', 'file'], {}), '(self.root, file)\n', (7823, 7840), False, 'import os\n'), ((8288, 8321), 'os.path.join', 'os.path.join', (['self.root', 'filename'], {}), '(self.root, filename)\n', (8300, 8321), False, 'import os\n'), ((2681, 2717), 'os.path.join', 'os.path.join', (['root', 'self.BASE_FOLDER'], {}), '(root, self.BASE_FOLDER)\n', (2693, 2717), False, 'import os\n'), ((7299, 7352), 'os.path.join', 'os.path.join', (['self.root', '"""WIDER_test"""', '"""images"""', 'line'], {}), "(self.root, 'WIDER_test', 'images', line)\n", (7311, 7352), False, 'import os\n'), ((7860, 7889), 'os.path.exists', 'os.path.exists', (['extracted_dir'], {}), '(extracted_dir)\n', (7874, 7889), False, 'import os\n'), ((4955, 5017), 'os.path.join', 'os.path.join', (['self.root', "('WIDER_' + self.split)", '"""images"""', 'line'], {}), "(self.root, 'WIDER_' + self.split, 'images', line)\n", (4967, 5017), False, 'import os\n'), ((7388, 7408), 'os.path.expanduser', 'expanduser', (['img_path'], {}), '(img_path)\n', (7398, 7408), False, 'from os.path import 
abspath, expanduser\n'), ((5103, 5123), 'os.path.expanduser', 'expanduser', (['img_path'], {}), '(img_path)\n', (5113, 5123), False, 'from os.path import abspath, expanduser\n'), ((5804, 5823), 'oneflow.tensor', 'flow.tensor', (['labels'], {}), '(labels)\n', (5815, 5823), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from random import randint
from random import choice
import numpy as np
import oneflow as flow
from test_xrt import *
class TestXrtReshape(flow.unittest.TestCase):
def test_xrt_reshape(test_case):
x = np.random.random((1, 10, 2)).astype(np.float32)
x_cpu = flow.tensor(x, dtype=flow.float32, device=flow.device("cpu"))
x_cuda = flow.tensor(x, dtype=flow.float32, device=flow.device("cuda"))
reshape = lambda arg: flow.reshape(arg, shape=[2, 2, -1])
reshape_g = generate_graph(reshape)
out = reshape_g(x_cpu)
test_xrt_openvino(test_case, generate_graph(reshape), x_cpu, out)
test_xrt_tensorrt(test_case, generate_graph(reshape), x_cuda, out)
test_xrt_xla(test_case, generate_graph(reshape), x_cuda, out)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.reshape",
"oneflow.device"
] | [((1425, 1440), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1438, 1440), False, 'import unittest\n'), ((1061, 1096), 'oneflow.reshape', 'flow.reshape', (['arg'], {'shape': '[2, 2, -1]'}), '(arg, shape=[2, 2, -1])\n', (1073, 1096), True, 'import oneflow as flow\n'), ((824, 852), 'numpy.random.random', 'np.random.random', (['(1, 10, 2)'], {}), '((1, 10, 2))\n', (840, 852), True, 'import numpy as np\n'), ((930, 948), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (941, 948), True, 'import oneflow as flow\n'), ((1009, 1028), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1020, 1028), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
def compare_with_numpy_adam(
test_case,
device,
x_shape,
learning_rate,
train_iters,
betas,
weight_decay,
eps,
do_bias_correction,
amsgrad,
):
random_grad_seq = []
for _ in range(train_iters):
random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
init_value = np.random.uniform(size=x_shape).astype(np.float32)
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.para0 = flow.nn.Parameter(
flow.Tensor(init_value, device=flow.device(device))
)
def forward(self, mask):
return self.para0 * mask
simp_module = CustomModule()
simp_module.to(device)
simp_module.train()
adam0 = flow.optim.Adam(
[
{
"params": simp_module.parameters(),
"lr": learning_rate,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"do_bias_correction": do_bias_correction,
"amsgrad": amsgrad,
}
]
)
class CustomAdamGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = simp_module
self.add_optimizer(adam0)
def build(self, mask_tensor):
loss = flow.sum(self.m(mask_tensor))
loss.backward()
return loss
of_res_list = []
adam_graph = CustomAdamGraph()
for i in range(train_iters):
mask_tensor = flow.tensor(
random_grad_seq[i],
dtype=flow.float32,
requires_grad=False,
device=flow.device(device),
)
adam_x = adam_graph(mask_tensor)
of_res_list.append(simp_module.para0.numpy())
np_res_list = []
def train_by_numpy():
x = init_value
vt = np.zeros_like(x)
st = np.zeros_like(x)
max_st = np.zeros_like(x)
beta1 = betas[0]
beta2 = betas[1]
def np_train_one_iter(step, grad):
grad = grad + weight_decay * x
bias_correction1 = 1.0
bias_correction2 = 1.0
if do_bias_correction:
bias_correction1 = 1.0 - np.power(beta1, step)
bias_correction2 = 1.0 - np.power(beta2, step)
v = beta1 * vt + (1 - beta1) * grad
s = beta2 * st + (1 - beta2) * grad * grad
max_s = np.zeros_like(x)
if amsgrad:
max_s = np.maximum(s, max_st)
denom = np.sqrt(max_s) / np.sqrt(bias_correction2) + eps
else:
denom = np.sqrt(s) / np.sqrt(bias_correction2) + eps
param = x - ((learning_rate / bias_correction1) * v / denom)
return (param, v, s, max_s)
for i in range(1, train_iters + 1):
(x, vt, st, max_st) = np_train_one_iter(i, random_grad_seq[i - 1])
np_res_list.append(x)
return x
train_by_numpy()
test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=0.001, atol=0.001))
def compare_with_numpy_adam_clip_grad(
test_case,
device,
x_shape,
learning_rate,
train_iters,
betas,
weight_decay,
eps,
do_bias_correction,
amsgrad,
clip_grad_max_norm,
clip_grad_norm_type,
):
random_grad_seq = []
for _ in range(train_iters):
random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
init_value = np.random.uniform(size=x_shape).astype(np.float32)
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.para0 = flow.nn.Parameter(
flow.tensor(init_value, device=flow.device(device))
)
def forward(self, mask):
return self.para0 * mask
simp_module = CustomModule()
simp_module.to(device)
simp_module.train()
adam0 = flow.optim.Adam(
[
{
"params": simp_module.parameters(),
"lr": learning_rate,
"betas": betas,
"eps": eps,
"weight_decay": weight_decay,
"do_bias_correction": do_bias_correction,
"amsgrad": amsgrad,
"clip_grad_max_norm": clip_grad_max_norm,
"clip_grad_norm_type": clip_grad_norm_type,
}
]
)
class CustomAdamGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = simp_module
self.add_optimizer(adam0)
def build(self, mask_tensor):
loss = flow.sum(self.m(mask_tensor))
loss.backward()
return loss
of_res_list = []
adam_graph = CustomAdamGraph()
for i in range(train_iters):
mask_tensor = flow.tensor(
random_grad_seq[i], requires_grad=False, device=flow.device(device)
)
adam_x = adam_graph(mask_tensor)
of_res_list.append(simp_module.para0.numpy())
np_res_list = []
def train_by_numpy():
x = init_value
vt = np.zeros_like(x)
st = np.zeros_like(x)
max_st = np.zeros_like(x)
beta1 = betas[0]
beta2 = betas[1]
def np_train_one_iter(step, grad):
total_norm, grad = clip_grad_norm_np(
grad, clip_grad_max_norm, clip_grad_norm_type
)
grad = grad + weight_decay * x
bias_correction1 = 1.0
bias_correction2 = 1.0
if do_bias_correction:
bias_correction1 = 1.0 - np.power(beta1, step)
bias_correction2 = 1.0 - np.power(beta2, step)
v = beta1 * vt + (1 - beta1) * grad
s = beta2 * st + (1 - beta2) * grad * grad
max_s = np.zeros_like(x)
if amsgrad:
max_s = np.maximum(s, max_st)
denom = np.sqrt(max_s) / np.sqrt(bias_correction2) + eps
else:
denom = np.sqrt(s) / np.sqrt(bias_correction2) + eps
param = x - ((learning_rate / bias_correction1) * v / denom)
return (param, v, s, max_s)
for i in range(1, train_iters + 1):
(x, vt, st, max_st) = np_train_one_iter(i, random_grad_seq[i - 1])
np_res_list.append(x)
return x
train_by_numpy()
test_case.assertTrue(np.allclose(of_res_list, np_res_list, rtol=1e-3, atol=1e-3))
@flow.unittest.skip_unless_1n1d()
class TestAdam(flow.unittest.TestCase):
def test_adam(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["x_shape"] = [(10,)]
arg_dict["learning_rate"] = [1, 1e-3]
arg_dict["train_iters"] = [10]
arg_dict["betas"] = [(0.99, 0.9)]
arg_dict["weight_decay"] = [0.001, 0.0]
arg_dict["eps"] = [1e-8]
arg_dict["do_bias_correction"] = [True, False]
arg_dict["amsgrad"] = [True, False]
for arg in GenArgList(arg_dict):
compare_with_numpy_adam(test_case, *arg)
def test_adam_clip_grad(test_case):
arg_dict = OrderedDict()
arg_dict["device"] = ["cpu", "cuda"]
arg_dict["x_shape"] = [(10,)]
arg_dict["learning_rate"] = [1, 1e-3]
arg_dict["train_iters"] = [10]
arg_dict["betas"] = [(0.99, 0.9)]
arg_dict["weight_decay"] = [0.0, 0.9]
arg_dict["eps"] = [1e-8]
arg_dict["do_bias_correction"] = [True, False]
arg_dict["amsgrad"] = [True, False]
arg_dict["clip_grad_max_norm"] = [1.0]
arg_dict["clip_grad_norm_type"] = [2.0]
for arg in GenArgList(arg_dict):
compare_with_numpy_adam_clip_grad(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device"
] | [((7293, 7325), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7323, 7325), True, 'import oneflow as flow\n'), ((8608, 8623), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8621, 8623), False, 'import unittest\n'), ((2685, 2701), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2698, 2701), True, 'import numpy as np\n'), ((2715, 2731), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2728, 2731), True, 'import numpy as np\n'), ((2749, 2765), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2762, 2765), True, 'import numpy as np\n'), ((3845, 3906), 'numpy.allclose', 'np.allclose', (['of_res_list', 'np_res_list'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(of_res_list, np_res_list, rtol=0.001, atol=0.001)\n', (3856, 3906), True, 'import numpy as np\n'), ((5944, 5960), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5957, 5960), True, 'import numpy as np\n'), ((5974, 5990), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (5987, 5990), True, 'import numpy as np\n'), ((6008, 6024), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6021, 6024), True, 'import numpy as np\n'), ((7229, 7290), 'numpy.allclose', 'np.allclose', (['of_res_list', 'np_res_list'], {'rtol': '(0.001)', 'atol': '(0.001)'}), '(of_res_list, np_res_list, rtol=0.001, atol=0.001)\n', (7240, 7290), True, 'import numpy as np\n'), ((7415, 7428), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7426, 7428), False, 'from collections import OrderedDict\n'), ((7839, 7859), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7849, 7859), False, 'from test_util import GenArgList\n'), ((7974, 7987), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7985, 7987), False, 'from collections import OrderedDict\n'), ((8490, 8510), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8500, 8510), False, 'from test_util import 
GenArgList\n'), ((1112, 1143), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1129, 1143), True, 'import numpy as np\n'), ((3260, 3276), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (3273, 3276), True, 'import numpy as np\n'), ((4310, 4341), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4327, 4341), True, 'import numpy as np\n'), ((6150, 6214), 'optimizer_test_util.clip_grad_norm_np', 'clip_grad_norm_np', (['grad', 'clip_grad_max_norm', 'clip_grad_norm_type'], {}), '(grad, clip_grad_max_norm, clip_grad_norm_type)\n', (6167, 6214), False, 'from optimizer_test_util import clip_grad_norm_np\n'), ((6645, 6661), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6658, 6661), True, 'import numpy as np\n'), ((2473, 2492), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2484, 2492), True, 'import oneflow as flow\n'), ((3326, 3347), 'numpy.maximum', 'np.maximum', (['s', 'max_st'], {}), '(s, max_st)\n', (3336, 3347), True, 'import numpy as np\n'), ((5733, 5752), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5744, 5752), True, 'import oneflow as flow\n'), ((6711, 6732), 'numpy.maximum', 'np.maximum', (['s', 'max_st'], {}), '(s, max_st)\n', (6721, 6732), True, 'import numpy as np\n'), ((1043, 1074), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1060, 1074), True, 'import numpy as np\n'), ((3051, 3072), 'numpy.power', 'np.power', (['beta1', 'step'], {}), '(beta1, step)\n', (3059, 3072), True, 'import numpy as np\n'), ((3114, 3135), 'numpy.power', 'np.power', (['beta2', 'step'], {}), '(beta2, step)\n', (3122, 3135), True, 'import numpy as np\n'), ((4241, 4272), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4258, 4272), True, 'import numpy as np\n'), ((6436, 6457), 'numpy.power', 'np.power', (['beta1', 'step'], {}), '(beta1, step)\n', 
(6444, 6457), True, 'import numpy as np\n'), ((6499, 6520), 'numpy.power', 'np.power', (['beta2', 'step'], {}), '(beta2, step)\n', (6507, 6520), True, 'import numpy as np\n'), ((1354, 1373), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1365, 1373), True, 'import oneflow as flow\n'), ((3372, 3386), 'numpy.sqrt', 'np.sqrt', (['max_s'], {}), '(max_s)\n', (3379, 3386), True, 'import numpy as np\n'), ((3389, 3414), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (3396, 3414), True, 'import numpy as np\n'), ((3463, 3473), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (3470, 3473), True, 'import numpy as np\n'), ((3476, 3501), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (3483, 3501), True, 'import numpy as np\n'), ((4552, 4571), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4563, 4571), True, 'import oneflow as flow\n'), ((6757, 6771), 'numpy.sqrt', 'np.sqrt', (['max_s'], {}), '(max_s)\n', (6764, 6771), True, 'import numpy as np\n'), ((6774, 6799), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (6781, 6799), True, 'import numpy as np\n'), ((6848, 6858), 'numpy.sqrt', 'np.sqrt', (['s'], {}), '(s)\n', (6855, 6858), True, 'import numpy as np\n'), ((6861, 6886), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (6868, 6886), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow.core.common.device_type_pb2 as device_type_pb
from oneflow.python.oneflow_export import oneflow_export
_device_tag_2_device_type = {
"cpu": device_type_pb.kCPU,
"gpu": device_type_pb.kGPU,
"dummy": device_type_pb.kFAKEDEVICE,
"cambricon": device_type_pb.kCambricon,
}
_device_type_2_device_tag = {
device_type_pb.kCPU: "cpu",
device_type_pb.kGPU: "gpu",
device_type_pb.kFAKEDEVICE: "dummy",
device_type_pb.kCambricon: "cambricon",
}
@oneflow_export("is_valid_device_tag")
def is_valid_device_tag(device_tag: str):
if not isinstance(device_tag, str):
return False
return device_tag in _device_tag_2_device_type
| [
"oneflow.python.oneflow_export.oneflow_export"
] | [((1077, 1114), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""is_valid_device_tag"""'], {}), "('is_valid_device_tag')\n", (1091, 1114), False, 'from oneflow.python.oneflow_export import oneflow_export\n')] |
from .lstm_oneflow import LSTM
import oneflow.nn as nn
class lstm_ser(nn.Module):
    """Speech-emotion-recognition head: an LSTM encoder followed by a small MLP."""

    def __init__(self, input_dim, hidden_dim, output_dim, batch_size):
        super(lstm_ser, self).__init__()
        # Assemble the layer stack first, then wrap it in a Sequential.
        layers = [
            LSTM(input_dim, hidden_dim, batch_size),
            nn.Dropout(0.5),  # regularize the LSTM features
            nn.Linear(hidden_dim, 32),
            nn.ReLU(),
            nn.Linear(32, output_dim),  # per-class logits
        ]
        self.classifier = nn.Sequential(*layers)

    def forward(self, x):
        """Return raw (unsoftmaxed) class logits for the input batch `x`."""
        return self.classifier(x)
| [
"oneflow.nn.Linear",
"oneflow.nn.Dropout",
"oneflow.nn.ReLU"
] | [((302, 317), 'oneflow.nn.Dropout', 'nn.Dropout', (['(0.5)'], {}), '(0.5)\n', (312, 317), True, 'import oneflow.nn as nn\n'), ((331, 356), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_dim', '(32)'], {}), '(hidden_dim, 32)\n', (340, 356), True, 'import oneflow.nn as nn\n'), ((370, 379), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (377, 379), True, 'import oneflow.nn as nn\n'), ((393, 418), 'oneflow.nn.Linear', 'nn.Linear', (['(32)', 'output_dim'], {}), '(32, output_dim)\n', (402, 418), True, 'import oneflow.nn as nn\n')] |
import os
from os import mkdir
import oneflow.typing as tp
import onnx
import onnxruntime as ort
import numpy as np
from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check
import oneflow as flow
import logging
from easydict import EasyDict as edict
from backbones import get_model
from utils.utils_config import get_config
import argparse
def convert_func(cfg, model_path, out_path):
    """Build the OneFlow inference graph for `cfg.network` and export it to ONNX.

    Args:
        cfg: config object; `cfg.network` selects the backbone via get_model.
        model_path: NOTE(review): currently unused here — weights are not
            loaded (flow_weight_dir=None below); confirm whether loading
            was intended.
        out_path: destination path for the exported ONNX model.
    """
    @flow.global_function()
    def InferenceNet(images: tp.Numpy.Placeholder((1, 3, 112, 112))):
        # Fixed single-image 3x112x112 input (typical face-recognition size).
        logits = get_model(cfg.network, images, cfg)
        return logits
    print(convert_to_onnx_and_check(InferenceNet,
          flow_weight_dir=None, onnx_model_path=out_path))
def main(args):
    """Entry point: configure logging, load the config, and run the ONNX export.

    Args:
        args: argparse.Namespace with `config`, `model_path` and `out_path`.
    """
    logging.basicConfig(level=logging.NOTSET)
    logging.info(args.model_path)
    cfg = get_config(args.config)
    # makedirs(exist_ok=True) replaces the exists()/mkdir() pair: it avoids
    # the check-then-create race and also creates missing parent directories
    # (backward compatible: same end state when the directory already exists).
    os.makedirs(args.out_path, exist_ok=True)
    convert_func(cfg, args.model_path, args.out_path)
if __name__ == "__main__":
    # CLI: positional config file plus optional model/output paths.
    arg_parser = argparse.ArgumentParser(description='OneFlow ArcFace val')
    arg_parser.add_argument('config', type=str, help='py config file')
    arg_parser.add_argument('--model_path', type=str, help='model path')
    arg_parser.add_argument('--out_path', type=str,
                            default="onnx_model", help='out path')
    main(arg_parser.parse_args())
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder"
] | [((405, 427), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (425, 427), True, 'import oneflow as flow\n'), ((705, 746), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.NOTSET'}), '(level=logging.NOTSET)\n', (724, 746), False, 'import logging\n'), ((751, 780), 'logging.info', 'logging.info', (['args.model_path'], {}), '(args.model_path)\n', (763, 780), False, 'import logging\n'), ((791, 814), 'utils.utils_config.get_config', 'get_config', (['args.config'], {}), '(args.config)\n', (801, 814), False, 'from utils.utils_config import get_config\n'), ((983, 1041), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""OneFlow ArcFace val"""'}), "(description='OneFlow ArcFace val')\n", (1006, 1041), False, 'import argparse\n'), ((516, 551), 'backbones.get_model', 'get_model', (['cfg.network', 'images', 'cfg'], {}), '(cfg.network, images, cfg)\n', (525, 551), False, 'from backbones import get_model\n'), ((584, 675), 'oneflow_onnx.oneflow2onnx.util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['InferenceNet'], {'flow_weight_dir': 'None', 'onnx_model_path': 'out_path'}), '(InferenceNet, flow_weight_dir=None,\n onnx_model_path=out_path)\n', (609, 675), False, 'from oneflow_onnx.oneflow2onnx.util import convert_to_onnx_and_check\n'), ((826, 855), 'os.path.exists', 'os.path.exists', (['args.out_path'], {}), '(args.out_path)\n', (840, 855), False, 'import os\n'), ((865, 885), 'os.mkdir', 'mkdir', (['args.out_path'], {}), '(args.out_path)\n', (870, 885), False, 'from os import mkdir\n'), ((457, 495), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, 112, 112)'], {}), '((1, 3, 112, 112))\n', (477, 495), True, 'import oneflow.typing as tp\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
from contextlib import contextmanager
import oneflow
import oneflow._oneflow_internal
import oneflow._oneflow_internal.oneflow.core.common.shape as shape_proto_cfg
import oneflow._oneflow_internal.oneflow.core.job.placement as placement_cfg
import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb
import oneflow.core.operator.op_attribute_pb2 as op_attribute_pb
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.eager.boxing_hob as boxing_hob
import oneflow.eager.boxing_middle as boxing_middle
import oneflow.eager.op_infer_util as op_infer_util
import oneflow.eager.symbol as symbol_util
import oneflow.framework.balanced_splitter as balanced_splitter
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.id_util as id_util
import oneflow.support.enable_if as enable_if
import oneflow.support.high_order_bool as high_order_bool
from oneflow.eager.boxing_hob import BoxingHobContext
def BoxingTo(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Box `produced_blob_object` so it satisfies the consumer's placement/SBP.

    Dispatches to the unique method in `conditional_function_table` whose
    condition-hob matches; raises NotImplementedError when none does.
    """
    hob_context = BoxingHobContext(produced_blob_object, consumer_op_arg_parallel_attr)
    # Fast path: producer and consumer already agree, nothing to do.
    if enable_if.get_condition_hob(NoBoxing)(hob_context):
        return produced_blob_object
    producer_opt_mirrored_parallel = (
        produced_blob_object.op_arg_parallel_attr.opt_mirrored_parallel
    )
    consumer_opt_mirrored_parallel = consumer_op_arg_parallel_attr.opt_mirrored_parallel
    # Boxing never crosses the mirrored/non-mirrored boundary.
    assert producer_opt_mirrored_parallel == consumer_opt_mirrored_parallel, (
        "\nproducer_op_arg_parallel_attr: %s\nconsumer_op_arg_parallel_attr: %s"
        % (produced_blob_object.op_arg_parallel_attr, consumer_op_arg_parallel_attr)
    )
    def default(get_failed_info, *args, **kwargs):
        # Fallback invoked by enable_if.unique when no boxing method matches.
        raise NotImplementedError(
            "%s\nno boxing method found.\nlogical_blob_name: %s\nx_arg_attribute: %s\nconsumer_op_arg_parallel_attr: %s\n"
            % (
                get_failed_info(),
                produced_blob_object.op_arg_blob_attr.logical_blob_name,
                produced_blob_object.op_arg_parallel_attr,
                consumer_op_arg_parallel_attr,
            )
        )
    global conditional_function_table
    function = enable_if.unique(
        conditional_function_table,
        context=BoxingHobContext(produced_blob_object, consumer_op_arg_parallel_attr),
        default=default,
    )
    return function(builder, produced_blob_object, consumer_op_arg_parallel_attr)
def boxing_condition(hob_expr, verbose=False):
    """Decorator factory: tag the decorated boxing method with its hob condition."""

    def decorate(func):
        func.__oneflow_condition_hob__ = hob_expr
        if verbose:
            return func
        # Default: label the condition with the method's name for readable
        # failure messages.
        hob_expr.__debug_str__ = GetBoxingDebugString(func)
        return func

    return decorate
def FirstMatchedBoxing(*boxing_methods):
    """Combine boxing methods into one that runs the first whose hob matches.

    The combined method's condition is the OR of all component conditions.
    """
    hob_expr = enable_if.get_condition_hob(boxing_methods[0])
    for boxing_method in boxing_methods[1:]:
        hob_expr = hob_expr | enable_if.get_condition_hob(boxing_method)
    @enable_if.condition(hob_expr)
    def FirstMatched(builder, produced_blob_object, consumer_op_arg_parallel_attr):
        ctx = BoxingHobContext(produced_blob_object, consumer_op_arg_parallel_attr)
        # First-match-wins over the declaration order of boxing_methods.
        for boxing_method in boxing_methods:
            hob_expr = enable_if.get_condition_hob(boxing_method)
            if not hob_expr(ctx):
                continue
            return boxing_method(
                builder, produced_blob_object, consumer_op_arg_parallel_attr
            )
    boxing_methods_names = [GetBoxingDebugString(m) for m in boxing_methods]
    FirstMatched.__debug_str__ = "(%s)" % " | ".join(boxing_methods_names)
    return FirstMatched
def OptionalBoxing(boxing_method):
    """Wrap `boxing_method` so it degrades to a no-op when no boxing is needed."""
    optional = FirstMatchedBoxing(boxing_method, NoBoxing)
    optional.__debug_str__ = "Optional(%s)" % GetBoxingDebugString(boxing_method)
    return optional
def ComposeBoxing(
    lhs_boxing, rhs_boxing, get_middle_op_arg_parallel_attr, middle_verbose_str=None
):
    """Chain two boxing methods through an intermediate parallel attribute.

    `get_middle_op_arg_parallel_attr` computes the attribute of the blob
    between the two steps; the composed condition is built by ComposeHob.
    """
    composed_hob = boxing_hob.ComposeHob(
        enable_if.get_condition_hob(lhs_boxing),
        enable_if.get_condition_hob(rhs_boxing),
        get_middle_op_arg_parallel_attr=get_middle_op_arg_parallel_attr,
        middle_verbose_str=middle_verbose_str,
    )
    @enable_if.condition(composed_hob)
    def Composed(builder, produced_blob_object, consumer_op_arg_parallel_attr):
        tmp_op_arg_parallel_attr = get_middle_op_arg_parallel_attr(
            builder, produced_blob_object, consumer_op_arg_parallel_attr
        )
        # lhs boxes into the middle attribute, rhs boxes on to the consumer.
        tmp = lhs_boxing(builder, produced_blob_object, tmp_op_arg_parallel_attr)
        return rhs_boxing(builder, tmp, consumer_op_arg_parallel_attr)
    Composed.__debug_str__ = "%s->%s" % (
        GetBoxingDebugString(lhs_boxing),
        GetBoxingDebugString(rhs_boxing),
    )
    Composed.__left_debug_str__ = GetBoxingLeftDebugString(lhs_boxing)
    Composed.__right_debug_str__ = GetBoxingRightDebugString(rhs_boxing)
    return Composed
def GetBoxingDebugString(boxing_method):
    """Return the method's debug label, falling back to its function name."""
    return getattr(boxing_method, "__debug_str__", boxing_method.__name__)
def GetBoxingLeftDebugString(boxing_method):
    """Debug label for the left operand of a composed boxing, if annotated."""
    try:
        return boxing_method.__left_debug_str__
    except AttributeError:
        return GetBoxingDebugString(boxing_method)
def GetBoxingRightDebugString(boxing_method):
    """Debug label for the right operand of a composed boxing, if annotated."""
    try:
        return boxing_method.__right_debug_str__
    except AttributeError:
        return GetBoxingDebugString(boxing_method)
def Sequential(*boxing_methods, exclude=tuple(), middle_verbose=False):
    """Right-fold BoxingToMiddle steps into one composed boxing method.

    All but the last argument must be BoxingToMiddle instances; the last is a
    plain boxing method. `exclude` lists methods whose conditions are masked
    out of the composed condition.
    """
    assert not isinstance(boxing_methods[-1], boxing_middle.BoxingToMiddle)
    composed = boxing_methods[-1]
    # Compose from the right so each middle feeds the step after it.
    for boxing_to_middle in boxing_methods[-2::-1]:
        assert isinstance(boxing_to_middle, boxing_middle.BoxingToMiddle)
        if middle_verbose:
            middle_verbose_str = "middle op_arg_parallel_attr of %s->%s:" % (
                GetBoxingDebugString(boxing_to_middle.boxing_method),
                GetBoxingLeftDebugString(composed),
            )
        else:
            middle_verbose_str = None
        composed = ComposeBoxing(
            boxing_to_middle.boxing_method,
            composed,
            boxing_to_middle.get_middle_op_arg_parallel_attr,
            middle_verbose_str=middle_verbose_str,
        )
    if len(exclude) > 0:
        # Mask out the excluded methods' conditions from the composed hob.
        exclude_hob = enable_if.get_condition_hob(exclude[0])
        for method in exclude[1:]:
            exclude_hob = exclude_hob | enable_if.get_condition_hob(method)
        old_hob = enable_if.get_condition_hob(composed)
        enable_if.set_condition_hob(composed, old_hob & ~exclude_hob)
    return composed
# Host-to-device copy applies when both sides use the same device lists, the
# SBP is unchanged (or the producer is single-device), and the tag goes
# cpu -> gpu.
MatchCopyH2D = (
    (
        boxing_hob.producer_parallel_desc.machine_id2device_id_list
        == boxing_hob.consumer_parallel_desc.machine_id2device_id_list
    )
    & (
        (boxing_hob.producer_sbp_parallel == boxing_hob.consumer_sbp_parallel)
        | (boxing_hob.producer_parallel_desc.parallel_num == 1)
    )
    & (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "gpu")
)
@boxing_condition(MatchCopyH2D)
def CopyH2D(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Copy a cpu blob to the consumer's gpu placement (delegates to CopyHD)."""
    return CopyHD(builder, produced_blob_object, consumer_op_arg_parallel_attr)
# Device-to-host copy: mirror of MatchCopyH2D with the tags gpu -> cpu.
MatchCopyD2H = (
    (
        boxing_hob.producer_parallel_desc.machine_id2device_id_list
        == boxing_hob.consumer_parallel_desc.machine_id2device_id_list
    )
    & (
        (boxing_hob.producer_sbp_parallel == boxing_hob.consumer_sbp_parallel)
        | (boxing_hob.producer_parallel_desc.parallel_num == 1)
    )
    & (boxing_hob.producer_parallel_desc.device_tag == "gpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
)
@boxing_condition(MatchCopyD2H)
def CopyD2H(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Copy a gpu blob to the consumer's cpu placement (delegates to CopyHD)."""
    return CopyHD(builder, produced_blob_object, consumer_op_arg_parallel_attr)
def CopyHD(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Copy a blob between host and device, toward the consumer's device tag."""
    target_device_tag = (
        consumer_op_arg_parallel_attr.parallel_desc_symbol.device_tag
    )
    return BuildCopyHdInstruction(builder, produced_blob_object, target_device_tag)
# Reusable hob fragments for the SBP state of producer and consumer.
BlobIsPartialSum = boxing_hob.producer_sbp_parallel.HasField("partial_sum_parallel")
OpArgIsBroadcast = boxing_hob.consumer_sbp_parallel.HasField("broadcast_parallel")
# Multi-machine case: one cpu producer broadcast to many cpu consumers.
MatchInterNodeOneToMany = (
    ~boxing_hob.SingleMachine
    & (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.producer_parallel_desc.parallel_num == 1)
    & (boxing_hob.consumer_parallel_desc.parallel_num > 1)
    & OpArgIsBroadcast
)
@boxing_condition(MatchInterNodeOneToMany)
def InterNodeOneToMany(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Broadcast one cpu blob across machines via per-device 1-to-1 copies,
    then pack the physical copies into a single logical blob."""
    out_blobs = []
    consumer_dev_ids = (
        consumer_op_arg_parallel_attr.parallel_desc_symbol.machine_id2device_id_list
    )
    for (machine_id, device_ids) in consumer_dev_ids.items():
        for device_id in device_ids:
            # Build a single-device cpu placement for each target device.
            parallel_conf = placement_cfg.ParallelConf()
            parallel_conf.set_device_tag("cpu")
            parallel_conf.add_device_name("@%s:%s" % (machine_id, device_id))
            parallel_desc_symbol = builder.GetParallelDescSymbol(parallel_conf)
            out_blob = builder.Build121To(produced_blob_object, parallel_desc_symbol)
            out_blobs.append(out_blob)
    return PackPhysicalBoxingBlobObjectsToLogical(
        builder,
        out_blobs,
        consumer_op_arg_parallel_attr,
        produced_blob_object.op_arg_blob_attr,
    )
# cpu->cpu move between distinct placements of equal parallel size that are
# NOT a per-machine one-to-one device match (that case is handled by
# MatchCpuBroadcastOneToOne below).
MatchInterNodeOneToOne = (
    (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.producer_parallel_desc != boxing_hob.consumer_parallel_desc)
    & (
        boxing_hob.producer_parallel_desc.parallel_num
        == boxing_hob.consumer_parallel_desc.parallel_num
    )
    & ~boxing_hob.MatchDeviceOneToOnePerMachine
    & (
        (boxing_hob.producer_sbp_parallel == boxing_hob.consumer_sbp_parallel)
        | (boxing_hob.producer_parallel_desc.parallel_num == 1)
    )
)
@boxing_condition(MatchInterNodeOneToOne)
def InterNodeOneToOne(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """1-to-1 transfer of a cpu blob to the consumer's parallel desc."""
    return builder.Build121To(
        produced_blob_object, consumer_op_arg_parallel_attr.parallel_desc_symbol
    )
# cpu->cpu move where producer and consumer devices pair up one-to-one on
# each machine; handled by reference, not by a copy.
MatchCpuBroadcastOneToOne = (
    (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.producer_parallel_desc != boxing_hob.consumer_parallel_desc)
    & boxing_hob.MatchDeviceOneToOnePerMachine
    & (
        (boxing_hob.producer_sbp_parallel == boxing_hob.consumer_sbp_parallel)
        | (boxing_hob.producer_parallel_desc.parallel_num == 1)
    )
)
@boxing_condition(MatchCpuBroadcastOneToOne)
def CpuBroadcastOneToOne(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Re-reference each physical blob onto the consumer's matching device.

    Uses the generic scatter/ref/pack scope with an identity transform: the
    physical blobs themselves are unchanged, only their placement refs move.
    """
    def get_identity_physical_in_blob_objects(
        builder,
        produced_blob_object,
        consumer_op_arg_parallel_attr,
        physical_in_blob_objects,
        boxing_parallel_desc_symbol,
        out_parallel_num,
    ):
        # Identity: no data movement, the ref step in the scope does the work.
        return physical_in_blob_objects
    return NaiveCpuRefPhysicalBlobObjectsScope(
        builder,
        produced_blob_object,
        consumer_op_arg_parallel_attr,
        get_physical_out_blob_objects=get_identity_physical_in_blob_objects,
    )
# Producer and consumer agree on placement and (effectively) on SBP: no work.
MatchNoBoxing = (
    boxing_hob.producer_parallel_desc == boxing_hob.consumer_parallel_desc
) & (
    (boxing_hob.producer_sbp_parallel == boxing_hob.consumer_sbp_parallel)
    | (boxing_hob.producer_parallel_desc.parallel_num == 1)
)
@boxing_condition(MatchNoBoxing)
def NoBoxing(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Identity boxing: the produced blob already satisfies the consumer."""
    return produced_blob_object
@boxing_condition(boxing_hob.Verbose & MatchNoBoxing)
def VerboseNoBoxing(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Identity boxing variant that only matches when verbose mode is on."""
    return produced_blob_object
def VerboseOptionalBoxing(boxing_method):
    """Like OptionalBoxing, but the no-op branch also requires verbose mode."""
    optional = FirstMatchedBoxing(boxing_method, VerboseNoBoxing)
    optional.__debug_str__ = "VerboseOptional(%s)" % GetBoxingDebugString(
        boxing_method
    )
    return optional
# Single-machine gpu partial-sum -> broadcast on the same placement: NCCL
# all-reduce applies.
MatchNcclAllReduce = (
    boxing_hob.SingleMachine
    & (boxing_hob.producer_parallel_desc.device_tag == "gpu")
    & (boxing_hob.producer_parallel_desc == boxing_hob.consumer_parallel_desc)
    & (boxing_hob.consumer_parallel_desc.parallel_num > 1)
    & BlobIsPartialSum
    & OpArgIsBroadcast
)
@boxing_condition(MatchNcclAllReduce)
def GpuNcclAllReduce(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Turn a partial-sum gpu blob into a broadcast one via an eager NCCL
    all-reduce op executed on the consumer's placement."""
    parallel_conf = consumer_op_arg_parallel_attr.parallel_desc_symbol.parallel_conf
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["in_0"] = produced_blob_object
    op_attribute = _GetEagerNcclAllReduce(parallel_conf, bn_in_op2blob_object)
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    builder.NoBoxingStatelessCall(cfg_op_attribute, parallel_conf, bn_in_op2blob_object)
    y_blob_object = bn_in_op2blob_object["out_0"]
    # The output adopts the consumer's parallel attribute (broadcast).
    y_blob_object.op_arg_parallel_attr.Assign(consumer_op_arg_parallel_attr)
    return y_blob_object
# Split-axis boxing fragments: 1 -> many split, many -> 1 concat, and the
# general many -> many concat+split (which must actually change something).
MatchSplitOneToMany = (
    (boxing_hob.producer_parallel_desc.parallel_num == 1)
    & (boxing_hob.consumer_parallel_desc.parallel_num > 1)
    & boxing_hob.consumer_sbp_parallel.HasField("split_parallel")
)
MatchConcatManyToOne = (
    (boxing_hob.consumer_parallel_desc.parallel_num == 1)
    & (boxing_hob.producer_parallel_desc.parallel_num > 1)
    & boxing_hob.producer_sbp_parallel.HasField("split_parallel")
)
MatchConcatManyToSplitMany = (
    (boxing_hob.producer_parallel_desc.parallel_num > 1)
    & (boxing_hob.consumer_parallel_desc.parallel_num > 1)
    & boxing_hob.producer_sbp_parallel.HasField("split_parallel")
    & boxing_hob.consumer_sbp_parallel.HasField("split_parallel")
    & (
        (boxing_hob.producer_sbp_parallel != boxing_hob.consumer_sbp_parallel)
        | (
            boxing_hob.producer_parallel_desc.parallel_num
            != boxing_hob.consumer_parallel_desc.parallel_num
        )
    )
)
# Any of the three split patterns on cpu is handled by the naive boxing op.
MatchNaiveCpuSplitToSplit = (
    (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & (MatchSplitOneToMany | MatchConcatManyToOne | MatchConcatManyToSplitMany)
)
@boxing_condition(MatchNaiveCpuSplitToSplit)
def NaiveCpuSplitToSplit(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Re-split a cpu blob via the naive boxing op (concat then split)."""
    return NaiveCpuRefPhysicalBlobObjectsScope(
        builder,
        produced_blob_object,
        consumer_op_arg_parallel_attr,
        get_physical_out_blob_objects=NaiveBoxingToPhysicalBlobObjects,
    )
# cpu partial-sum producer reduced into one blob or re-split blobs.
MatchNaiveCpuPartialSumToSplit = (
    (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.producer_parallel_desc.parallel_num > 1)
    & boxing_hob.producer_sbp_parallel.HasField("partial_sum_parallel")
    & (
        (boxing_hob.consumer_parallel_desc.parallel_num == 1)
        | boxing_hob.consumer_sbp_parallel.HasField("split_parallel")
    )
)
@boxing_condition(MatchNaiveCpuPartialSumToSplit)
def NaiveCpuPartialSumToSplit(
    builder, produced_blob_object, consumer_op_arg_parallel_attr
):
    """Sum a cpu partial-sum blob and split it via the naive boxing op."""
    return NaiveCpuRefPhysicalBlobObjectsScope(
        builder,
        produced_blob_object,
        consumer_op_arg_parallel_attr,
        get_physical_out_blob_objects=NaiveBoxingToPhysicalBlobObjects,
    )
def NaiveCpuRefPhysicalBlobObjectsScope(
    builder,
    produced_blob_object,
    consumer_op_arg_parallel_attr,
    get_physical_out_blob_objects,
):
    """Unpack -> transform -> re-ref -> pack pipeline for cpu boxing.

    The logical input is unpacked into physical blobs, transformed by
    `get_physical_out_blob_objects`, re-referenced onto the consumer's
    physical placements, and packed back into one logical blob.
    """
    physical_in_blob_objects = UnpackLogicalBoxingBlobObjectToPhysical(
        builder, produced_blob_object
    )
    consumer_parallel_desc_symbol = consumer_op_arg_parallel_attr.parallel_desc_symbol
    out_parallel_num = consumer_parallel_desc_symbol.parallel_num
    # The boxing placement must be wide enough for both sides.
    boxing_parallel_desc_symbol = GetConcatSplitBoxingParallelDescSymbol(
        builder,
        consumer_parallel_desc_symbol,
        max(len(physical_in_blob_objects), out_parallel_num),
    )
    physical_output_blob_objects = get_physical_out_blob_objects(
        builder=builder,
        produced_blob_object=produced_blob_object,
        consumer_op_arg_parallel_attr=consumer_op_arg_parallel_attr,
        physical_in_blob_objects=physical_in_blob_objects,
        boxing_parallel_desc_symbol=boxing_parallel_desc_symbol,
        out_parallel_num=out_parallel_num,
    )
    phy_parallel_desc_symbols = builder.GetPhysicalParallelDescSymbols(
        consumer_op_arg_parallel_attr.parallel_desc_symbol
    )
    physical_output_blob_objects = RefBlobObjectWithParallelDesc(
        builder, physical_output_blob_objects, phy_parallel_desc_symbols
    )
    return PackPhysicalBoxingBlobObjectsToLogical(
        builder,
        physical_output_blob_objects,
        consumer_op_arg_parallel_attr,
        produced_blob_object.op_arg_blob_attr,
    )
def NaiveBoxingToPhysicalBlobObjects(
    builder,
    produced_blob_object,
    consumer_op_arg_parallel_attr,
    physical_in_blob_objects,
    boxing_parallel_desc_symbol,
    out_parallel_num,
):
    """Run the naive boxing op over the physical blobs and return its outputs."""
    op_attribute = ConstructNaiveBoxingOpConf(
        produced_blob_object,
        consumer_op_arg_parallel_attr,
        len(physical_in_blob_objects),
        out_parallel_num,
    )
    return BuildNaiveCpuBoxing(
        builder,
        op_attribute,
        physical_in_blob_objects,
        boxing_parallel_desc_symbol,
        out_parallel_num,
    )
def RefBlobObjectWithParallelDesc(
    builder, physical_blob_objects, phy_parallel_desc_symbols
):
    """Re-anchor each physical blob onto its paired target parallel desc.

    A blob already on the right desc is returned unchanged; otherwise a new
    reference is created via builder.BroadcastBlobReference.
    """
    assert len(physical_blob_objects) == len(
        phy_parallel_desc_symbols
    ), "%s v.s. %s" % (len(physical_blob_objects), len(phy_parallel_desc_symbols))
    result = []
    for (blob_object, target_symbol) in zip(
        physical_blob_objects, phy_parallel_desc_symbols
    ):
        if blob_object.parallel_desc_symbol == target_symbol:
            result.append(blob_object)
        else:
            result.append(
                builder.BroadcastBlobReference(blob_object, target_symbol)
            )
    return result
def PackPhysicalBoxingBlobObjectsToLogical(
    builder, physical_blob_objects, op_arg_parallel_attr, op_arg_blob_attr
):
    """Merge per-device physical blobs into one logical blob.

    A single physical blob is already logical, so it is returned as-is.
    """
    if len(physical_blob_objects) != 1:
        return builder.PackPhysicalBlobsToLogicalBlob(
            physical_blob_objects, op_arg_parallel_attr, op_arg_blob_attr
        )
    return physical_blob_objects[0]
def BuildNaiveCpuBoxing(
    builder,
    op_attribute,
    physical_in_blob_objects,
    boxing_parallel_desc_symbol,
    out_parallel_num,
):
    """Execute the prepared naive boxing op and collect its `out_*` blobs."""
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    # Bind inputs as in_0..in_{n-1}, matching ConstructNaiveBoxingOpConf.
    for i in range(len(physical_in_blob_objects)):
        bn_in_op2blob_object["in_%s" % i] = physical_in_blob_objects[i]
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    builder.NoBoxingStatelessCall(
        cfg_op_attribute,
        boxing_parallel_desc_symbol.parallel_conf,
        bn_in_op2blob_object,
    )
    return [bn_in_op2blob_object["out_%s" % i] for i in range(out_parallel_num)]
def ConstructNaiveBoxingOpConf(
    produced_blob_object,
    consumer_op_arg_parallel_attr,
    in_parallel_num,
    out_parallel_num,
):
    """Build and infer the op conf for the cpu boxing (concat/add + split) op.

    The input side concatenates (split SBP) or adds (partial-sum SBP); the
    output side splits along the consumer's split axis with balanced parts.
    """
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "undefined_boxing_op_name"
    op_conf.device_tag = "cpu"
    op_conf.boxing_conf.lbi.op_name = "undefined_boxing_op_name"
    op_conf.boxing_conf.lbi.blob_name = "undefined_boxing_blob_name"
    op_conf.boxing_conf.in_num = in_parallel_num
    op_conf.boxing_conf.out_num = out_parallel_num
    in_sbp_parallel = produced_blob_object.op_arg_parallel_attr.sbp_parallel
    if in_sbp_parallel.has_split_parallel():
        # Split input: concat back along the producer's split axis.
        op_conf.boxing_conf.concat_box.axis = in_sbp_parallel.split_parallel().axis()
    elif in_parallel_num == 1:
        op_conf.boxing_conf.concat_box.axis = 0
    else:
        # Partial-sum input: element-wise add of the physical blobs.
        assert in_sbp_parallel.has_partial_sum_parallel()
        op_conf.boxing_conf.add_box.SetInParent()
    out_sbp_parallel = consumer_op_arg_parallel_attr.sbp_parallel
    if out_sbp_parallel.has_split_parallel():
        out_axis = out_sbp_parallel.split_parallel().axis()
    else:
        # Only a single output may be produced without a split SBP.
        assert out_parallel_num == 1
        out_axis = 0
    op_conf.boxing_conf.split_box.axis = out_axis
    shape = produced_blob_object.op_arg_blob_attr.shape
    # Balanced part sizes along the output split axis.
    op_conf.boxing_conf.split_box.part_num.extend(
        balanced_splitter.BalancedPartNums(shape[out_axis], out_parallel_num)
    )
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    for i in range(in_parallel_num):
        bn_in_op2blob_object["in_%s" % i] = produced_blob_object
    return op_infer_util.Infer(op_conf, bn_in_op2blob_object)
def GetConcatSplitBoxingParallelDescSymbol(
    builder, blob_parallel_desc_symbol, max_parallel_num
):
    """Pick one cpu "rank" per machine to host the concat/split boxing op.

    NOTE(review): the rank id is chosen with `random.randint`, so the chosen
    placement is nondeterministic across runs — presumably for load spreading;
    confirm this is intentional.
    """
    random_rank_id = random.randint(0, max_parallel_num - 1)
    parallel_conf = placement_cfg.ParallelConf()
    parallel_conf.set_device_tag("cpu")
    for (machine_id, _) in blob_parallel_desc_symbol.machine_id2device_id_list.items():
        parallel_conf.add_device_name("@%s:%s" % (machine_id, random_rank_id))
    return builder.GetParallelDescSymbol(parallel_conf)
def UnpackLogicalBoxingBlobObjectToPhysical(builder, produced_blob_object):
    """Split a logical blob into its per-device physical blobs."""
    parallel_num = produced_blob_object.parallel_desc_symbol.parallel_num
    if parallel_num != 1:
        return builder.UnpackLogicalBlobToPhysicalBlobs(produced_blob_object)
    # Single device: the logical blob is already physical.
    return [produced_blob_object]
# Single machine, one cpu producer whose device is among the consumer's
# devices, broadcast to many cpu consumers.
MatchCpuBroadcastOneToMany = (
    boxing_hob.SingleMachine
    & (boxing_hob.producer_parallel_desc.device_tag == "cpu")
    & (boxing_hob.consumer_parallel_desc.device_tag == "cpu")
    & boxing_hob.ProducerDevicesContainedInConsumerDevices
    & (boxing_hob.producer_parallel_desc.parallel_num == 1)
    & (boxing_hob.consumer_parallel_desc.parallel_num > 1)
    & boxing_hob.consumer_sbp_parallel.HasField("broadcast_parallel")
)
@boxing_condition(MatchCpuBroadcastOneToMany)
def CpuBroadcastOneToMany(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Broadcast a single cpu blob to the consumer's multi-device placement."""
    return CpuOneToManyBroadcastBlobReference(
        builder,
        produced_blob_object,
        consumer_op_arg_parallel_attr.parallel_desc_symbol,
    )
# Broadcast producer shrunk to one consumer device that the producer already
# covers: pick the matching physical blob, no data movement needed.
MatchBroadcastManyToOne = (
    (
        boxing_hob.producer_parallel_desc.device_tag
        == boxing_hob.consumer_parallel_desc.device_tag
    )
    & boxing_hob.ConsumerDevicesContainedInProducerDevices
    & (boxing_hob.producer_parallel_desc.parallel_num > 1)
    & (boxing_hob.consumer_parallel_desc.parallel_num == 1)
    & boxing_hob.producer_sbp_parallel.HasField("broadcast_parallel")
)
@boxing_condition(MatchBroadcastManyToOne)
def BroadcastManyToOne(builder, produced_blob_object, consumer_op_arg_parallel_attr):
    """Select the physical copy that already lives on the consumer's device."""
    y_blob_objects = builder.UnpackLogicalBlobToPhysicalBlobs(produced_blob_object)
    for y in y_blob_objects:
        if y.parallel_desc_symbol == consumer_op_arg_parallel_attr.parallel_desc_symbol:
            return y
    raise NotImplementedError("op_arg's devices is not contained in blob's devices")
def Assign(builder, ref_blob_object, value_blob_object):
    """Emit an assign op writing `value_blob_object` into `ref_blob_object`."""
    return BuildAssignInstruction(
        builder, ref_blob_object, value_blob_object, _AssignOpConf()
    )
def CpuOneToManyBroadcastBlobReference(
    builder, produced_blob_object, to_parallel_desc_symbol
):
    """Broadcast a single-device cpu blob onto every device of the target desc."""
    src_symbol = produced_blob_object.parallel_desc_symbol
    src_machine_ids = list(dict(src_symbol.machine_id2device_id_list).keys())
    dst_machine_ids = list(
        dict(to_parallel_desc_symbol.machine_id2device_id_list).keys()
    )
    # Both placements must span exactly the same machines.
    assert src_machine_ids == dst_machine_ids, (src_machine_ids, dst_machine_ids)
    first_machine_device_ids = src_symbol.machine_id2device_id_list[
        src_machine_ids[0]
    ]
    # The producer must live on exactly one device.
    assert len(first_machine_device_ids) == 1, first_machine_device_ids
    if src_symbol == to_parallel_desc_symbol:
        return produced_blob_object
    return builder.BroadcastBlobReference(produced_blob_object, to_parallel_desc_symbol)
def BuildCopyHdInstruction(builder, produced_blob_object, to_device_tag):
    """Emit a copy_hd op moving the blob toward `to_device_tag` ("cpu"/"gpu")."""
    (op_conf, lbi) = _MakeCopyHdOpConfAndRetLbi()
    return _BuildCopyInstruction(builder, produced_blob_object, op_conf, to_device_tag)
def _MakeCopyHdOpConfAndRetLbi():
    """Build the fixed copy_hd op conf plus its output logical blob id."""
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "copy_hd"
    op_conf.device_tag = "gpu"
    # "in" is a Python keyword, so the field is set via setattr.
    setattr(op_conf.copy_conf, "in", "%s/in" % op_conf.name)
    op_conf.copy_conf.out = "out"
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return (op_conf, lbi)
@contextmanager
def _CudaHostPinBlob(build, blob_object):
    """Pin the blob's host memory for the duration of the with-block.

    Unregistration always runs, even when the body raises.
    """
    build.CudaHostRegisterBlob(blob_object)
    try:
        yield
    finally:
        build.CudaHostUnregisterBlob(blob_object)
def _BuildCopyInstruction(builder, produced_blob_object, op_conf, to_device_tag):
    """Emit the host<->device copy call and return the output blob object.

    Raises NotImplementedError for any device-tag pair other than
    cpu<->gpu; the output inherits the producer's SBP.
    """
    x_devices = produced_blob_object.parallel_desc_symbol.machine_id2device_id_list
    x_device_tag = produced_blob_object.parallel_desc_symbol.device_tag
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["in"] = produced_blob_object
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object)
    assert to_device_tag != x_device_tag, (to_device_tag, x_device_tag)
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    if to_device_tag == "cpu" and x_device_tag == "gpu":
        x_parallel_conf = produced_blob_object.parallel_desc_symbol.parallel_conf
        builder.NoBoxingCudaD2HStatelessCall(
            cfg_op_attribute, x_parallel_conf, bn_in_op2blob_object, TryReplaceDeviceTag
        )
    elif to_device_tag == "gpu" and x_device_tag == "cpu":
        out_parallel_desc_symbol = TryReplaceDeviceTag(
            builder, produced_blob_object.parallel_desc_symbol, to_device_tag
        )
        out_parallel_conf = out_parallel_desc_symbol.parallel_conf
        # Pin host memory while the H2D copy op runs.
        with _CudaHostPinBlob(builder, produced_blob_object):
            builder.NoBoxingCudaH2DStatelessCall(
                cfg_op_attribute, out_parallel_conf, bn_in_op2blob_object
            )
    else:
        raise NotImplementedError(
            "invalid device found. to_device_tag: %s, x_device_tag: %s"
            % (to_device_tag, x_device_tag)
        )
    # Copy keeps the SBP of the input on the output blob.
    sbp_parallel = bn_in_op2blob_object["out"].op_arg_parallel_attr.sbp_parallel
    sbp_parallel.CopyFrom(produced_blob_object.op_arg_parallel_attr.sbp_parallel)
    return bn_in_op2blob_object["out"]
def _AssignOpConf():
    """Build an assign op conf placed on the current scope's device tag."""
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = "assign"
    op_conf.assign_conf.ref = "assign/ref"
    op_conf.assign_conf.value = "assign/value"
    device_tag = oneflow.current_scope().device_parallel_desc_symbol.device_tag
    op_conf.device_tag = device_tag
    return op_conf
def BuildAssignInstruction(builder, ref_blob_object, value_blob_object, op_conf):
    """Emit an assign call writing `value_blob_object` into `ref_blob_object`.

    Same-device assigns run directly; cpu<->gpu pairs go through the D2H/H2D
    copy paths. Any other device-tag pair raises NotImplementedError.
    """
    ref_parallel_conf = ref_blob_object.parallel_desc_symbol.parallel_conf
    ref_devices = ref_blob_object.parallel_desc_symbol.machine_id2device_id_list
    value_devices = value_blob_object.parallel_desc_symbol.machine_id2device_id_list
    # Ref and value must span exactly the same devices.
    assert ref_devices == value_devices, "\nref_devices: %s\nvalue_devices: %s" % (
        ref_devices,
        value_devices,
    )
    ref_device_tag = ref_blob_object.parallel_desc_symbol.device_tag
    value_device_tag = value_blob_object.parallel_desc_symbol.device_tag
    bn_in_op2blob_object = oneflow._oneflow_internal.deprecated.BnInOp2BlobObject()
    bn_in_op2blob_object["ref"] = ref_blob_object
    bn_in_op2blob_object["value"] = value_blob_object
    op_attribute = op_infer_util.Infer(op_conf, bn_in_op2blob_object)
    cfg_op_attribute = oneflow._oneflow_internal.deprecated.MakeOpAttributeByString(
        str(op_attribute)
    )
    if ref_device_tag == value_device_tag:
        builder.NoBoxingStatelessCall(
            cfg_op_attribute, ref_parallel_conf, bn_in_op2blob_object
        )
    elif ref_device_tag == "cpu" and value_device_tag == "gpu":
        value_parallel_conf = value_blob_object.parallel_desc_symbol.parallel_conf
        builder.NoBoxingCudaD2HStatelessCall(
            cfg_op_attribute,
            value_parallel_conf,
            bn_in_op2blob_object,
            TryReplaceDeviceTag,
        )
    elif ref_device_tag == "gpu" and value_device_tag == "cpu":
        # Pin the host-side value while the H2D assign runs.
        with _CudaHostPinBlob(builder, value_blob_object):
            builder.NoBoxingCudaH2DStatelessCall(
                cfg_op_attribute, ref_parallel_conf, bn_in_op2blob_object
            )
    else:
        raise NotImplementedError(
            "invalid device found. ref_device_tag: %s, value_device_tag: %s"
            % (ref_device_tag, value_device_tag)
        )
def TryReplaceDeviceTag(builder, parallel_desc_symbol, device_tag):
    """Thin wrapper over boxing_middle.TryReplaceDeviceTag (positional builder)."""
    return boxing_middle.TryReplaceDeviceTag(builder, parallel_desc_symbol, device_tag)
def ReplaceDeviceTag(parallel_desc_symbol, device_tag, builder=None):
    """Thin wrapper over boxing_middle.ReplaceDeviceTag (builder is optional)."""
    return boxing_middle.ReplaceDeviceTag(
        parallel_desc_symbol, device_tag, builder=builder
    )
def _GetEagerNcclAllReduce(parallel_conf, ibn2blob_object):
    """Build and infer the op attribute of an ``eager_nccl_all_reduce`` user op.

    The op is placed on GPU and carries the target parallel conf as a string
    attribute so the kernel knows which ranks participate.
    """
    op_name = "eager_nccl_all_reduce"
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = op_name
    op_conf.device_tag = "gpu"
    op_conf.user_conf.op_type_name = op_name
    op_conf.user_conf.input["in"].s.append("%s/in_0" % op_name)
    op_conf.user_conf.output["out"].s.append("%s/out_0" % op_name)
    op_conf.user_conf.attr["parallel_conf"].at_string = str(parallel_conf)
    return op_infer_util.Infer(op_conf, ibn2blob_object)
# Composite boxing strategies assembled from the primitive boxing methods
# defined above.  Each boxing_middle.BoxingToMiddle(...) hop bundles
# (boxing method, middle parallel-desc chooser, middle sbp chooser); a
# Sequential(...) chains hops so a blob is moved step by step toward the
# consumer's placement.  `exclude=` removes cases already handled by the
# listed simpler strategies from a composite's applicability condition.

# GPU NCCL all-reduce on the producer's placement, then an optional
# device-to-host copy if the consumer lives on CPU.
NcclAllReduce = Sequential(
    boxing_middle.BoxingToMiddle(
        GpuNcclAllReduce,
        boxing_middle.ProducerParallelDesc,
        boxing_middle.BroadcastParallel,
    ),
    OptionalBoxing(CopyD2H),
)
# One-to-one transfer within a single node, staged through CPU on both ends.
BoxingIntraNodeOneToOne = Sequential(
    boxing_middle.BoxingToMiddle(
        OptionalBoxing(CopyD2H),
        boxing_middle.ReplaceProducerDeviceTag("cpu"),
        boxing_middle.ProducerSbpParallel,
    ),
    boxing_middle.BoxingToMiddle(
        CpuBroadcastOneToOne,
        boxing_middle.ReplaceConsumerDeviceTag("cpu"),
        boxing_middle.ConsumerSbpParallel,
    ),
    OptionalBoxing(CopyH2D),
)
# One-to-one transfer across nodes, staged through CPU on both ends.
BoxingInterNodeOneToOne = Sequential(
    boxing_middle.BoxingToMiddle(
        OptionalBoxing(CopyD2H),
        boxing_middle.ReplaceProducerDeviceTag("cpu"),
        boxing_middle.ProducerSbpParallel,
    ),
    boxing_middle.BoxingToMiddle(
        InterNodeOneToOne,
        boxing_middle.ReplaceConsumerDeviceTag("cpu"),
        boxing_middle.ConsumerSbpParallel,
    ),
    OptionalBoxing(CopyH2D),
)
# One-to-many transfer across nodes, staged through CPU on both ends.
BoxingInterNodeOneToMany = Sequential(
    boxing_middle.BoxingToMiddle(
        OptionalBoxing(CopyD2H),
        boxing_middle.ReplaceProducerDeviceTag("cpu"),
        boxing_middle.ProducerSbpParallel,
    ),
    boxing_middle.BoxingToMiddle(
        InterNodeOneToMany,
        boxing_middle.ReplaceConsumerDeviceTag("cpu"),
        boxing_middle.ConsumerSbpParallel,
    ),
    OptionalBoxing(CopyH2D),
)
# Candidate boxing strategies, from simple primitives to multi-hop fallbacks.
# NOTE(review): ordering appears significant (cheap strategies first,
# broad CPU-staged fallbacks last) — presumably the first applicable entry
# wins; confirm against the dispatcher that consumes this table.
conditional_function_table = [
    CopyH2D,
    CopyD2H,
    NoBoxing,
    BoxingIntraNodeOneToOne,
    BoxingInterNodeOneToOne,
    BoxingInterNodeOneToMany,
    BroadcastManyToOne,
    # Generic broadcast fallback: gather to one device per machine, stage
    # through CPU, then fan out one-to-many on the consumer side.
    Sequential(
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(BroadcastManyToOne),
            boxing_middle.ProducerRandomParallelIdPerMachine(),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CpuBroadcastOneToOne),
            boxing_middle.ConsumerRandomParallelIdPerMachine("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CpuBroadcastOneToMany),
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        OptionalBoxing(CopyH2D),
        exclude=(
            BroadcastManyToOne,
            CopyH2D,
            CopyD2H,
            NoBoxing,
            BoxingIntraNodeOneToOne,
        ),
    ),
    # Broadcast source reduced to one device, then split via naive CPU boxing.
    Sequential(
        boxing_middle.BoxingToMiddle(
            BroadcastManyToOne,
            boxing_middle.ProducerRandomParallelIdPerMachine(),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            NaiveCpuSplitToSplit,
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.ConsumerSbpParallel,
        ),
        OptionalBoxing(CopyH2D),
    ),
    NcclAllReduce,
    # partial_sum -> broadcast via CPU reduce-then-broadcast (non-NCCL path).
    Sequential(
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            NaiveCpuPartialSumToSplit,
            boxing_middle.ConsumerRandomParallelIdPerMachine("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        boxing_middle.BoxingToMiddle(
            CpuBroadcastOneToMany,
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        OptionalBoxing(CopyH2D),
        exclude=(NcclAllReduce,),
    ),
    # partial_sum -> split via CPU.
    Sequential(
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            NaiveCpuPartialSumToSplit,
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.ConsumerSbpParallel,
        ),
        OptionalBoxing(CopyH2D),
    ),
    # split -> broadcast via CPU gather-then-broadcast (non-NCCL path).
    Sequential(
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            NaiveCpuSplitToSplit,
            boxing_middle.ConsumerRandomParallelIdPerMachine("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        boxing_middle.BoxingToMiddle(
            CpuBroadcastOneToMany,
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.BroadcastParallel,
        ),
        OptionalBoxing(CopyH2D),
        exclude=(NcclAllReduce,),
    ),
    # split -> split via CPU.
    Sequential(
        boxing_middle.BoxingToMiddle(
            OptionalBoxing(CopyD2H),
            boxing_middle.ReplaceProducerDeviceTag("cpu"),
            boxing_middle.ProducerSbpParallel,
        ),
        boxing_middle.BoxingToMiddle(
            NaiveCpuSplitToSplit,
            boxing_middle.ReplaceConsumerDeviceTag("cpu"),
            boxing_middle.ConsumerSbpParallel,
        ),
        OptionalBoxing(CopyH2D),
    ),
]
class BoxingUtil(oneflow._oneflow_internal.deprecated.ForeignBoxingUtil):
    """Facade handed to the C++ runtime, forwarding boxing callbacks to the
    module-level Python implementations in this file."""

    def __init__(self):
        super().__init__()

    def BoxingTo(self, builder, blob_object, op_arg_parallel_attr):
        # Resolves to the module-level BoxingTo function, not this method.
        return BoxingTo(builder, blob_object, op_arg_parallel_attr)

    def TryReplaceDeviceTag(self, builder, parallel_desc_symbol, device_tag):
        # Resolves to the module-level TryReplaceDeviceTag function.
        return TryReplaceDeviceTag(builder, parallel_desc_symbol, device_tag)

    def Assign(self, builder, target_blob_object, source_blob_object):
        # Resolves to the module-level Assign function.
        return Assign(builder, target_blob_object, source_blob_object)


# Singleton instance registered with the runtime.
_global_boxing_util = BoxingUtil()
| [
"oneflow.eager.boxing_middle.BoxingToMiddle",
"oneflow._oneflow_internal.deprecated.ForeignBoxingUtil.__init__",
"oneflow.eager.boxing_middle.ReplaceProducerDeviceTag",
"oneflow.support.enable_if.condition",
"oneflow.eager.boxing_hob.producer_sbp_parallel.HasField",
"oneflow._oneflow_internal.deprecated.B... | [((8970, 9035), 'oneflow.eager.boxing_hob.producer_sbp_parallel.HasField', 'boxing_hob.producer_sbp_parallel.HasField', (['"""partial_sum_parallel"""'], {}), "('partial_sum_parallel')\n", (9011, 9035), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((9055, 9118), 'oneflow.eager.boxing_hob.consumer_sbp_parallel.HasField', 'boxing_hob.consumer_sbp_parallel.HasField', (['"""broadcast_parallel"""'], {}), "('broadcast_parallel')\n", (9096, 9118), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((1704, 1773), 'oneflow.eager.boxing_hob.BoxingHobContext', 'BoxingHobContext', (['produced_blob_object', 'consumer_op_arg_parallel_attr'], {}), '(produced_blob_object, consumer_op_arg_parallel_attr)\n', (1720, 1773), False, 'from oneflow.eager.boxing_hob import BoxingHobContext\n'), ((3410, 3456), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['boxing_methods[0]'], {}), '(boxing_methods[0])\n', (3437, 3456), True, 'import oneflow.support.enable_if as enable_if\n'), ((3581, 3610), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['hob_expr'], {}), '(hob_expr)\n', (3600, 3610), True, 'import oneflow.support.enable_if as enable_if\n'), ((4883, 4916), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['composed_hob'], {}), '(composed_hob)\n', (4902, 4916), True, 'import oneflow.support.enable_if as enable_if\n'), ((13619, 13675), 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', ([], {}), '()\n', (13673, 13675), False, 'import oneflow\n'), ((14318, 14377), 'oneflow.eager.boxing_hob.consumer_sbp_parallel.HasField', 'boxing_hob.consumer_sbp_parallel.HasField', (['"""split_parallel"""'], {}), "('split_parallel')\n", (14359, 14377), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((14528, 14587), 'oneflow.eager.boxing_hob.producer_sbp_parallel.HasField', 
'boxing_hob.producer_sbp_parallel.HasField', (['"""split_parallel"""'], {}), "('split_parallel')\n", (14569, 14587), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((19752, 19808), 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', ([], {}), '()\n', (19806, 19808), False, 'import oneflow\n'), ((20433, 20458), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (20456, 20458), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((21683, 21739), 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', ([], {}), '()\n', (21737, 21739), False, 'import oneflow\n'), ((21853, 21903), 'oneflow.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'bn_in_op2blob_object'], {}), '(op_conf, bn_in_op2blob_object)\n', (21872, 21903), True, 'import oneflow.eager.op_infer_util as op_infer_util\n'), ((22031, 22070), 'random.randint', 'random.randint', (['(0)', '(max_parallel_num - 1)'], {}), '(0, max_parallel_num - 1)\n', (22045, 22070), False, 'import random\n'), ((22091, 22119), 'oneflow._oneflow_internal.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (22117, 22119), True, 'import oneflow._oneflow_internal.oneflow.core.job.placement as placement_cfg\n'), ((23011, 23074), 'oneflow.eager.boxing_hob.consumer_sbp_parallel.HasField', 'boxing_hob.consumer_sbp_parallel.HasField', (['"""broadcast_parallel"""'], {}), "('broadcast_parallel')\n", (23052, 23074), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((23709, 23772), 'oneflow.eager.boxing_hob.producer_sbp_parallel.HasField', 'boxing_hob.producer_sbp_parallel.HasField', (['"""broadcast_parallel"""'], {}), "('broadcast_parallel')\n", (23750, 23772), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((25439, 25464), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 
'op_conf_pb.OperatorConf', ([], {}), '()\n', (25462, 25464), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((25630, 25666), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (25664, 25666), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((26207, 26263), 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', ([], {}), '()\n', (26261, 26263), False, 'import oneflow\n'), ((26337, 26387), 'oneflow.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'bn_in_op2blob_object'], {}), '(op_conf, bn_in_op2blob_object)\n', (26356, 26387), True, 'import oneflow.eager.op_infer_util as op_infer_util\n'), ((27741, 27766), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (27764, 27766), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((28648, 28704), 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', 'oneflow._oneflow_internal.deprecated.BnInOp2BlobObject', ([], {}), '()\n', (28702, 28704), False, 'import oneflow\n'), ((28828, 28878), 'oneflow.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'bn_in_op2blob_object'], {}), '(op_conf, bn_in_op2blob_object)\n', (28847, 28878), True, 'import oneflow.eager.op_infer_util as op_infer_util\n'), ((30014, 30090), 'oneflow.eager.boxing_middle.TryReplaceDeviceTag', 'boxing_middle.TryReplaceDeviceTag', (['builder', 'parallel_desc_symbol', 'device_tag'], {}), '(builder, parallel_desc_symbol, device_tag)\n', (30047, 30090), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((30174, 30260), 'oneflow.eager.boxing_middle.ReplaceDeviceTag', 'boxing_middle.ReplaceDeviceTag', (['parallel_desc_symbol', 'device_tag'], {'builder': 'builder'}), '(parallel_desc_symbol, device_tag, builder=\n builder)\n', (30204, 30260), True, 'import oneflow.eager.boxing_middle as 
boxing_middle\n'), ((30346, 30371), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (30369, 30371), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((30742, 30787), 'oneflow.eager.op_infer_util.Infer', 'op_infer_util.Infer', (['op_conf', 'ibn2blob_object'], {}), '(op_conf, ibn2blob_object)\n', (30761, 30787), True, 'import oneflow.eager.op_infer_util as op_infer_util\n'), ((30822, 30942), 'oneflow.eager.boxing_middle.BoxingToMiddle', 'boxing_middle.BoxingToMiddle', (['GpuNcclAllReduce', 'boxing_middle.ProducerParallelDesc', 'boxing_middle.BroadcastParallel'], {}), '(GpuNcclAllReduce, boxing_middle.\n ProducerParallelDesc, boxing_middle.BroadcastParallel)\n', (30850, 30942), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((1781, 1818), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['NoBoxing'], {}), '(NoBoxing)\n', (1808, 1818), True, 'import oneflow.support.enable_if as enable_if\n'), ((3709, 3778), 'oneflow.eager.boxing_hob.BoxingHobContext', 'BoxingHobContext', (['produced_blob_object', 'consumer_op_arg_parallel_attr'], {}), '(produced_blob_object, consumer_op_arg_parallel_attr)\n', (3725, 3778), False, 'from oneflow.eager.boxing_hob import BoxingHobContext\n'), ((4661, 4700), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['lhs_boxing'], {}), '(lhs_boxing)\n', (4688, 4700), True, 'import oneflow.support.enable_if as enable_if\n'), ((4710, 4749), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['rhs_boxing'], {}), '(rhs_boxing)\n', (4737, 4749), True, 'import oneflow.support.enable_if as enable_if\n'), ((7074, 7113), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['exclude[0]'], {}), '(exclude[0])\n', (7101, 7113), True, 'import oneflow.support.enable_if as enable_if\n'), ((7243, 7280), 'oneflow.support.enable_if.get_condition_hob', 
'enable_if.get_condition_hob', (['composed'], {}), '(composed)\n', (7270, 7280), True, 'import oneflow.support.enable_if as enable_if\n'), ((7289, 7350), 'oneflow.support.enable_if.set_condition_hob', 'enable_if.set_condition_hob', (['composed', '(old_hob & ~exclude_hob)'], {}), '(composed, old_hob & ~exclude_hob)\n', (7316, 7350), True, 'import oneflow.support.enable_if as enable_if\n'), ((14809, 14868), 'oneflow.eager.boxing_hob.consumer_sbp_parallel.HasField', 'boxing_hob.consumer_sbp_parallel.HasField', (['"""split_parallel"""'], {}), "('split_parallel')\n", (14850, 14868), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((15912, 15977), 'oneflow.eager.boxing_hob.producer_sbp_parallel.HasField', 'boxing_hob.producer_sbp_parallel.HasField', (['"""partial_sum_parallel"""'], {}), "('partial_sum_parallel')\n", (15953, 15977), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((16058, 16117), 'oneflow.eager.boxing_hob.consumer_sbp_parallel.HasField', 'boxing_hob.consumer_sbp_parallel.HasField', (['"""split_parallel"""'], {}), "('split_parallel')\n", (16099, 16117), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((21580, 21649), 'oneflow.framework.balanced_splitter.BalancedPartNums', 'balanced_splitter.BalancedPartNums', (['shape[out_axis]', 'out_parallel_num'], {}), '(shape[out_axis], out_parallel_num)\n', (21614, 21649), True, 'import oneflow.framework.balanced_splitter as balanced_splitter\n'), ((31114, 31159), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (31152, 31159), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((31283, 31328), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (31321, 31328), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((31524, 31569), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 
'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (31562, 31569), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((31690, 31735), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (31728, 31735), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((31932, 31977), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (31970, 31977), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((32099, 32144), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (32137, 32144), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((36421, 36490), 'oneflow._oneflow_internal.deprecated.ForeignBoxingUtil.__init__', 'oneflow._oneflow_internal.deprecated.ForeignBoxingUtil.__init__', (['self'], {}), '(self)\n', (36484, 36490), False, 'import oneflow\n'), ((2914, 2983), 'oneflow.eager.boxing_hob.BoxingHobContext', 'BoxingHobContext', (['produced_blob_object', 'consumer_op_arg_parallel_attr'], {}), '(produced_blob_object, consumer_op_arg_parallel_attr)\n', (2930, 2983), False, 'from oneflow.eager.boxing_hob import BoxingHobContext\n'), ((3532, 3574), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['boxing_method'], {}), '(boxing_method)\n', (3559, 3574), True, 'import oneflow.support.enable_if as enable_if\n'), ((3847, 3889), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['boxing_method'], {}), '(boxing_method)\n', (3874, 3889), True, 'import oneflow.support.enable_if as enable_if\n'), ((9838, 9866), 'oneflow._oneflow_internal.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (9864, 9866), True, 'import oneflow._oneflow_internal.oneflow.core.job.placement as 
placement_cfg\n'), ((14743, 14802), 'oneflow.eager.boxing_hob.producer_sbp_parallel.HasField', 'boxing_hob.producer_sbp_parallel.HasField', (['"""split_parallel"""'], {}), "('split_parallel')\n", (14784, 14802), True, 'import oneflow.eager.boxing_hob as boxing_hob\n'), ((27902, 27925), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (27923, 27925), False, 'import oneflow\n'), ((32524, 32574), 'oneflow.eager.boxing_middle.ProducerRandomParallelIdPerMachine', 'boxing_middle.ProducerRandomParallelIdPerMachine', ([], {}), '()\n', (32572, 32574), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((32721, 32766), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (32759, 32766), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((32926, 32981), 'oneflow.eager.boxing_middle.ConsumerRandomParallelIdPerMachine', 'boxing_middle.ConsumerRandomParallelIdPerMachine', (['"""cpu"""'], {}), "('cpu')\n", (32974, 32981), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((33140, 33185), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (33178, 33185), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((33543, 33593), 'oneflow.eager.boxing_middle.ProducerRandomParallelIdPerMachine', 'boxing_middle.ProducerRandomParallelIdPerMachine', ([], {}), '()\n', (33591, 33593), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((33740, 33785), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (33778, 33785), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((33929, 33974), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (33967, 33974), True, 'import 
oneflow.eager.boxing_middle as boxing_middle\n'), ((34196, 34241), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (34234, 34241), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((34390, 34445), 'oneflow.eager.boxing_middle.ConsumerRandomParallelIdPerMachine', 'boxing_middle.ConsumerRandomParallelIdPerMachine', (['"""cpu"""'], {}), "('cpu')\n", (34438, 34445), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((34588, 34633), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (34626, 34633), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((34868, 34913), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (34906, 34913), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((35062, 35107), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (35100, 35107), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((35310, 35355), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (35348, 35355), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((35499, 35554), 'oneflow.eager.boxing_middle.ConsumerRandomParallelIdPerMachine', 'boxing_middle.ConsumerRandomParallelIdPerMachine', (['"""cpu"""'], {}), "('cpu')\n", (35547, 35554), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((35697, 35742), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (35735, 35742), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((35977, 36022), 'oneflow.eager.boxing_middle.ReplaceProducerDeviceTag', 
'boxing_middle.ReplaceProducerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (36015, 36022), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((36166, 36211), 'oneflow.eager.boxing_middle.ReplaceConsumerDeviceTag', 'boxing_middle.ReplaceConsumerDeviceTag', (['"""cpu"""'], {}), "('cpu')\n", (36204, 36211), True, 'import oneflow.eager.boxing_middle as boxing_middle\n'), ((7189, 7224), 'oneflow.support.enable_if.get_condition_hob', 'enable_if.get_condition_hob', (['method'], {}), '(method)\n', (7216, 7224), True, 'import oneflow.support.enable_if as enable_if\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import numpy as np
import unittest
from collections import OrderedDict
import oneflow as flow
from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type
def _random_inputs(x_shape, x_dtype, index_shape, index_dtype):
assert isinstance(x_shape, (tuple, list))
assert isinstance(index_shape, (tuple, list))
assert index_dtype == np.int32 or index_dtype == np.int64
if x_dtype == np.float32 or x_dtype == np.double:
x = np.random.rand(*x_shape).astype(x_dtype)
elif x_dtype == np.int32 or x_dtype == np.int64 or x_dtype == np.int8:
x = np.random.randint(low=0, high=100, size=x_shape).astype(x_dtype)
else:
raise NotImplementedError("{}".format(x_dtype))
index = []
index_rows = np.prod(index_shape[:-1])
index_cols = index_shape[-1]
for col in range(index_cols):
index_col = np.random.randint(
low=0, high=x_shape[col], size=(index_rows,), dtype=index_dtype
).reshape(index_shape[:-1])
index.append(index_col)
index = np.stack(index, axis=len(index_shape) - 1)
return x, index
def _make_gather_nd_fn(
    x_shape,
    index_shape,
    x_dtype,
    index_type,
    device_type,
    device_num,
    dynamic,
    need_grad,
    comp_diff_fn,
):
    """Compile a OneFlow global function computing ``gather_nd(x, index)``.

    Args:
        x_shape / index_shape: static shapes of the two input placeholders.
        x_dtype / index_type: OneFlow dtypes of the inputs.
        device_type: "gpu" or "cpu".
        device_num: number of devices to place the job on (>= 1).
        dynamic: if True, build a mirrored-view job with per-device
            ListNumpy placeholders; otherwise a consistent-view job with
            plain Numpy placeholders.
        need_grad: if True, make the job a "train" job and attach an SGD
            optimizer so the backward pass actually runs.
        comp_diff_fn: optional callback passed to flow.watch_diff to receive
            the gradient of x.

    Returns:
        The compiled gather_nd function (call with (x, index)).
    """
    assert device_num >= 1
    # Only "train" jobs compute gradients.
    fn_type = "train" if need_grad else "predict"
    if device_type == "gpu":
        flow.config.gpu_device_num(device_num)
    elif device_type == "cpu":
        flow.config.cpu_device_num(device_num)
    else:
        raise ValueError
    func_config = flow.FunctionConfig()
    func_config.default_data_type(x_dtype)
    # Place the whole job on devices 0..device_num-1 of machine 0.
    func_config.default_placement_scope(
        flow.scope.placement(device_type, "0:0-{}".format(device_num - 1))
    )
    if dynamic:
        func_config.default_logical_view(flow.scope.mirrored_view())
    else:
        func_config.default_logical_view(flow.scope.consistent_view())
    def do_gather_nd(x, index):
        # Add a zero-initialized (1,)-shaped variable to x (broadcast add,
        # values unchanged) — presumably so x participates in autograd and
        # the train job has a trainable variable; confirm against OneFlow's
        # single-client training requirements.
        x_var = flow.get_variable(
            "params",
            shape=(1,),
            dtype=x_dtype,
            initializer=flow.constant_initializer(0, x_dtype),
        )
        x = x + flow.cast_to_current_logical_view(x_var)
        y = flow.gather_nd(x, index)
        if need_grad:
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(y)
            if callable(comp_diff_fn):
                # Observe dL/dx so the caller can compare it with NumPy.
                flow.watch_diff(x, comp_diff_fn)
        return y
    if dynamic:
        # Mirrored (dynamic) job: inputs/outputs are per-device lists.
        @flow.global_function(type=fn_type, function_config=func_config)
        def gather_nd_fn(
            x: flow.typing.ListNumpy.Placeholder(x_shape, dtype=x_dtype),
            index: flow.typing.ListNumpy.Placeholder(index_shape, dtype=index_type),
        ) -> flow.typing.ListNumpy:
            return do_gather_nd(x, index)
    else:
        # Consistent (static) job: inputs/outputs are single ndarrays.
        @flow.global_function(type=fn_type, function_config=func_config)
        def gather_nd_fn(
            x: flow.typing.Numpy.Placeholder(x_shape, dtype=x_dtype),
            index: flow.typing.Numpy.Placeholder(index_shape, dtype=index_type),
        ) -> flow.typing.Numpy:
            return do_gather_nd(x, index)
    return gather_nd_fn
def _gather_nd_np(x, index, require_grad=False, init_grad_value=1.0):
ndim = index.shape[-1]
assert ndim <= x.ndim
indices = []
for dim in range(ndim):
indices.append(index[..., dim])
y = x[tuple(indices)]
dy = None
dx = None
if require_grad:
dy = np.zeros(shape=y.shape, dtype=np.float32)
dy.fill(init_grad_value)
dx = np.zeros(shape=x.shape, dtype=np.float32)
flat_index = index.reshape(-1, ndim)
flat_dy = dy.reshape(-1, *y.shape[(index.ndim - 1) :])
for i, nd_index in enumerate(flat_index):
if dx.ndim == ndim:
ravel_index = np.ravel_multi_index(nd_index, dx.shape)
dx_partial = np.zeros(shape=dx.shape, dtype=np.float32)
np.put(dx_partial, ravel_index, flat_dy[i])
dx += dx_partial
else:
dx[tuple(nd_index)] += flat_dy[i]
return y, dx
def _is_floating_dtype(dtype):
if dtype in ("float32", "double", "float16"):
return True
return False
def _compare_with_np(
    test_case,
    shape,
    index_shape,
    dynamic_shape=None,
    dynamic_index_shape=None,
    dtype="float32",
    index_dtype="int32",
    device_type="gpu",
    device_num=1,
    dynamic=False,
):
    """Run gather_nd in OneFlow and compare forward (and backward) with NumPy.

    Args:
        test_case: the unittest TestCase used for assertions.
        shape / index_shape: static placeholder shapes.
        dynamic_shape / dynamic_index_shape: actual runtime shapes; supplying
            either forces mirrored (dynamic) mode.
        dtype / index_dtype: type *names* (strings) looked up in the
            test_util type tables.
        device_type, device_num, dynamic: forwarded to _make_gather_nd_fn.
    """
    x_is_floating = _is_floating_dtype(dtype)
    # The backward pass is only checked for floating-point inputs.
    need_grad = True if x_is_floating else False
    x_of_dtype = type_name_to_flow_type[dtype]
    index_of_dtype = type_name_to_flow_type[index_dtype]
    # NOTE: the string parameters are rebound to numpy types from here on.
    x_dtype = type_name_to_np_type[dtype]
    index_dtype = type_name_to_np_type[index_dtype]
    if dynamic_shape is None:
        dynamic_shape = shape
    else:
        dynamic = True
    if dynamic_index_shape is None:
        dynamic_index_shape = index_shape
    else:
        dynamic = True
    if dynamic:
        # Mirrored mode: one input/expected-output per device.
        x, index, y, dx = [], [], [], []
        for _ in range(device_num):
            x_, index_ = _random_inputs(
                dynamic_shape, x_dtype, dynamic_index_shape, index_dtype
            )
            y_, dx_ = _gather_nd_np(x_, index_, need_grad)
            x.append(x_)
            index.append(index_)
            y.append(y_)
            dx.append(dx_)
        def comp_diff(dx_blob: flow.typing.ListNumpy):
            # Compare the watched gradient of each device against NumPy.
            for dx_blob_, dx_ in zip(dx_blob, dx):
                test_case.assertTrue(np.array_equal(dx_blob_, dx_))
    else:
        x, index = _random_inputs(
            dynamic_shape, x_dtype, dynamic_index_shape, index_dtype
        )
        y, dx = _gather_nd_np(x, index, need_grad)
        def comp_diff(dx_blob: flow.typing.Numpy):
            test_case.assertTrue(np.array_equal(dx_blob, dx))
    flow.clear_default_session()
    gather_nd_fn = _make_gather_nd_fn(
        shape,
        index_shape,
        x_of_dtype,
        index_of_dtype,
        device_type,
        device_num,
        dynamic,
        need_grad,
        # The gradient callback is only wired up for single-device runs.
        comp_diff if device_num == 1 else None,
    )
    ret_y = gather_nd_fn(x, index)
    if dynamic:
        for ret_y_, y_ in zip(ret_y, y):
            test_case.assertTrue(np.array_equal(ret_y_, y_))
    else:
        test_case.assertTrue(np.array_equal(ret_y, y))
@flow.unittest.skip_unless_1n1d()
class TestGatherNd(flow.unittest.TestCase):
    """Single-device gather_nd tests comparing OneFlow against NumPy."""

    def test_gather_nd(test_case):
        """Small 1-D gather across dtypes, devices and dynamic modes."""
        arg_dict = OrderedDict(
            [
                ("shape", [(10,)]),
                ("index_shape", [(5, 1)]),
                ("dtype", ["float32", "int32", "double"]),
                ("index_dtype", ["int32", "int64"]),
                ("device_type", ["gpu", "cpu"]),
                ("dynamic", [False, True]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_gather_nd_case_1(test_case):
        """Rank-5 input with a partial rank-3 index, GPU only."""
        arg_dict = OrderedDict(
            [
                ("shape", [(20, 10, 10, 3, 3)]),
                ("index_shape", [(2, 3, 3)]),
                ("device_type", ["gpu"]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    def test_gather_nd_case_2(test_case):
        """Dynamic-mode gather with a rank-2 index."""
        arg_dict = OrderedDict(
            [
                ("shape", [(10, 8, 4)]),
                ("index_shape", [(2, 2)]),
                ("dtype", ["float32", "int32"]),
                ("index_dtype", ["int32", "int64"]),
                ("device_type", ["cpu", "gpu"]),
                ("dynamic", [True]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_gather_nd_case_3(test_case):
        """Large input with many index rows, GPU only."""
        arg_dict = OrderedDict(
            [
                ("shape", [(32, 60, 80, 25)]),
                ("index_shape", [(128, 2)]),
                ("device_type", ["gpu"]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_gather_nd_case_4(test_case):
        """Large dynamic-mode gather, GPU only."""
        arg_dict = OrderedDict(
            [
                ("shape", [(128, 64, 2, 16, 7)]),
                ("index_shape", [(30, 10, 3)]),
                ("device_type", ["gpu"]),
                ("dynamic", [True]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    def test_with_dynamic_x(test_case):
        """x's runtime shape is smaller than its static placeholder shape."""
        arg_dict = OrderedDict(
            [
                ("shape", [(32, 16)]),
                ("dynamic_shape", [(30, 15)]),
                ("index_shape", [(12, 1)]),
                ("device_type", ["cpu", "gpu"]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    def test_with_dynamic_index(test_case):
        """index's runtime shape is smaller than its static placeholder shape."""
        arg_dict = OrderedDict(
            [
                ("shape", [(25, 10)]),
                ("index_shape", [(15, 1)]),
                ("dynamic_index_shape", [(11, 1)]),
                ("device_type", ["cpu", "gpu"]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)

    def test_with_empty_index(test_case):
        """An index whose runtime shape has a zero-sized dimension."""
        arg_dict = OrderedDict(
            [
                ("shape", [(12, 13, 7)]),
                ("index_shape", [(5, 10, 2)]),
                ("dynamic_index_shape", [(5, 0, 2)]),
                ("device_type", ["cpu", "gpu"]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)
# @flow.unittest.skip_unless_1n4d()
# TODO(zhangwenxiao, jiangxuefei): refine in multi-client
@unittest.skipIf(True, "skip for now because of single-client tensor_list removed")
class TestGatherNdParallel(flow.unittest.TestCase):
    """Four-device gather_nd tests (currently skipped; see decorator)."""

    def test_case_1(test_case):
        arg_dict = OrderedDict(
            [
                ("shape", [(12, 5)]),
                ("index_shape", [(4, 8, 2)]),
                ("dtype", ["float32", "int32", "double"]),
                ("index_dtype", ["int32", "int64"]),
                ("device_type", ["gpu", "cpu"]),
                ("device_num", [4]),
                ("dynamic", [True, False]),
            ]
        )
        for kwargs in GenArgDict(arg_dict):
            _compare_with_np(test_case, **kwargs)
if __name__ == "__main__":
    # Allow running this test file directly with `python <file>`.
    unittest.main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.constant_initializer",
"oneflow.clear_default_session",
"oneflow.config.cpu_device_num",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.scope.mirrored_view",
"oneflow.cast_to_current_... | [((6957, 6989), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6987, 6989), True, 'import oneflow as flow\n'), ((10234, 10320), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now because of single-client tensor_list removed"""'], {}), "(True,\n 'skip for now because of single-client tensor_list removed')\n", (10249, 10320), False, 'import unittest\n'), ((1357, 1382), 'numpy.prod', 'np.prod', (['index_shape[:-1]'], {}), '(index_shape[:-1])\n', (1364, 1382), True, 'import numpy as np\n'), ((2161, 2182), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2180, 2182), True, 'import oneflow as flow\n'), ((6456, 6484), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (6482, 6484), True, 'import oneflow as flow\n'), ((10881, 10896), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10894, 10896), False, 'import unittest\n'), ((1990, 2028), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_num'], {}), '(device_num)\n', (2016, 2028), True, 'import oneflow as flow\n'), ((2797, 2821), 'oneflow.gather_nd', 'flow.gather_nd', (['x', 'index'], {}), '(x, index)\n', (2811, 2821), True, 'import oneflow as flow\n'), ((3116, 3179), 'oneflow.global_function', 'flow.global_function', ([], {'type': 'fn_type', 'function_config': 'func_config'}), '(type=fn_type, function_config=func_config)\n', (3136, 3179), True, 'import oneflow as flow\n'), ((3464, 3527), 'oneflow.global_function', 'flow.global_function', ([], {'type': 'fn_type', 'function_config': 'func_config'}), '(type=fn_type, function_config=func_config)\n', (3484, 3527), True, 'import oneflow as flow\n'), ((4103, 4144), 'numpy.zeros', 'np.zeros', ([], {'shape': 'y.shape', 'dtype': 'np.float32'}), '(shape=y.shape, dtype=np.float32)\n', (4111, 4144), True, 'import numpy as np\n'), ((4191, 4232), 'numpy.zeros', 'np.zeros', ([], {'shape': 'x.shape', 'dtype': 'np.float32'}), 
'(shape=x.shape, dtype=np.float32)\n', (4199, 4232), True, 'import numpy as np\n'), ((7088, 7101), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7099, 7101), False, 'from collections import OrderedDict\n'), ((7405, 7425), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (7415, 7425), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((7616, 7629), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7627, 7629), False, 'from collections import OrderedDict\n'), ((7786, 7806), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (7796, 7806), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((7496, 7530), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (7505, 7530), False, 'import os\n'), ((7917, 7930), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7928, 7930), False, 'from collections import OrderedDict\n'), ((8222, 8242), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (8232, 8242), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((8433, 8446), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8444, 8446), False, 'from collections import OrderedDict\n'), ((8600, 8620), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (8610, 8620), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((8313, 8347), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (8322, 8347), False, 'import os\n'), ((8811, 8824), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8822, 8824), False, 'from collections import OrderedDict\n'), ((9021, 9041), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (9031, 9041), False, 'from test_util import GenArgDict, 
type_name_to_flow_type, type_name_to_np_type\n'), ((8691, 8725), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (8700, 8725), False, 'import os\n'), ((9150, 9163), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9161, 9163), False, 'from collections import OrderedDict\n'), ((9362, 9382), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (9372, 9382), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((9495, 9508), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9506, 9508), False, 'from collections import OrderedDict\n'), ((9712, 9732), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (9722, 9732), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((9843, 9856), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9854, 9856), False, 'from collections import OrderedDict\n'), ((10068, 10088), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (10078, 10088), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((10420, 10433), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (10431, 10433), False, 'from collections import OrderedDict\n'), ((10779, 10799), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (10789, 10799), False, 'from test_util import GenArgDict, type_name_to_flow_type, type_name_to_np_type\n'), ((2068, 2106), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_num'], {}), '(device_num)\n', (2094, 2106), True, 'import oneflow as flow\n'), ((2405, 2431), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2429, 2431), True, 'import oneflow as flow\n'), ((2484, 2512), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2510, 2512), True, 'import oneflow as flow\n'), 
((2744, 2784), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x_var'], {}), '(x_var)\n', (2777, 2784), True, 'import oneflow as flow\n'), ((6928, 6952), 'numpy.array_equal', 'np.array_equal', (['ret_y', 'y'], {}), '(ret_y, y)\n', (6942, 6952), True, 'import numpy as np\n'), ((1065, 1089), 'numpy.random.rand', 'np.random.rand', (['*x_shape'], {}), '(*x_shape)\n', (1079, 1089), True, 'import numpy as np\n'), ((1470, 1557), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'x_shape[col]', 'size': '(index_rows,)', 'dtype': 'index_dtype'}), '(low=0, high=x_shape[col], size=(index_rows,), dtype=\n index_dtype)\n', (1487, 1557), True, 'import numpy as np\n'), ((2679, 2716), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)', 'x_dtype'], {}), '(0, x_dtype)\n', (2704, 2716), True, 'import oneflow as flow\n'), ((3039, 3071), 'oneflow.watch_diff', 'flow.watch_diff', (['x', 'comp_diff_fn'], {}), '(x, comp_diff_fn)\n', (3054, 3071), True, 'import oneflow as flow\n'), ((3221, 3278), 'oneflow.typing.ListNumpy.Placeholder', 'flow.typing.ListNumpy.Placeholder', (['x_shape'], {'dtype': 'x_dtype'}), '(x_shape, dtype=x_dtype)\n', (3254, 3278), True, 'import oneflow as flow\n'), ((3299, 3363), 'oneflow.typing.ListNumpy.Placeholder', 'flow.typing.ListNumpy.Placeholder', (['index_shape'], {'dtype': 'index_type'}), '(index_shape, dtype=index_type)\n', (3332, 3363), True, 'import oneflow as flow\n'), ((3569, 3622), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'x_dtype'}), '(x_shape, dtype=x_dtype)\n', (3598, 3622), True, 'import oneflow as flow\n'), ((3643, 3703), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['index_shape'], {'dtype': 'index_type'}), '(index_shape, dtype=index_type)\n', (3672, 3703), True, 'import oneflow as flow\n'), ((4453, 4493), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['nd_index', 'dx.shape'], {}), '(nd_index, 
dx.shape)\n', (4473, 4493), True, 'import numpy as np\n'), ((4523, 4565), 'numpy.zeros', 'np.zeros', ([], {'shape': 'dx.shape', 'dtype': 'np.float32'}), '(shape=dx.shape, dtype=np.float32)\n', (4531, 4565), True, 'import numpy as np\n'), ((4582, 4625), 'numpy.put', 'np.put', (['dx_partial', 'ravel_index', 'flat_dy[i]'], {}), '(dx_partial, ravel_index, flat_dy[i])\n', (4588, 4625), True, 'import numpy as np\n'), ((6422, 6449), 'numpy.array_equal', 'np.array_equal', (['dx_blob', 'dx'], {}), '(dx_blob, dx)\n', (6436, 6449), True, 'import numpy as np\n'), ((6861, 6887), 'numpy.array_equal', 'np.array_equal', (['ret_y_', 'y_'], {}), '(ret_y_, y_)\n', (6875, 6887), True, 'import numpy as np\n'), ((1193, 1241), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)', 'size': 'x_shape'}), '(low=0, high=100, size=x_shape)\n', (1210, 1241), True, 'import numpy as np\n'), ((6130, 6159), 'numpy.array_equal', 'np.array_equal', (['dx_blob_', 'dx_'], {}), '(dx_blob_, dx_)\n', (6144, 6159), True, 'import numpy as np\n'), ((2892, 2946), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (2933, 2946), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def nll_loss_1d(logs, targets, reduction="none"):
    """Numpy reference NLL loss for (N, C) log-probs with 1D integer targets.

    Returns per-sample losses for reduction="none", their total for "sum",
    and total / N for "mean" (implicitly None for any other value).
    """
    n_samples = logs.shape[0]
    # Negative log-probability of the target class, one entry per sample.
    losses = np.array(
        [-logs[idx][targets[idx]] for idx in range(n_samples)], dtype=np.float64
    )
    if reduction == "sum":
        return np.sum(losses)
    elif reduction == "mean":
        return losses.sum() / n_samples
    elif reduction == "none":
        return losses
def nll_loss_2d(logs, targets, reduction="none"):
    """Numpy reference NLL loss for (N, C, H, W) log-probs with (N, H, W) targets.

    Returns per-position losses for reduction="none", their total for "sum",
    and total / (N * H * W) for "mean" (implicitly None otherwise).
    """
    targets = np.asarray(targets)
    n_idx, h_idx, w_idx = np.indices(targets.shape)
    # Gather -logs[n, targets[n, h, w], h, w] for every spatial position at once.
    losses = (-logs[n_idx, targets, h_idx, w_idx]).astype(np.float64)
    if reduction == "sum":
        return np.sum(losses)
    elif reduction == "mean":
        return losses.sum() / losses.size
    elif reduction == "none":
        return losses
def nll_loss_bert(logs, targets, reduction="none"):
    """Numpy reference NLL loss for (N, C, H) log-probs with (N, H) targets.

    Returns per-position losses for reduction="none", their total for "sum",
    and total / (N * H) for "mean" (implicitly None otherwise).
    """
    targets = np.asarray(targets)
    n_idx, h_idx = np.indices(targets.shape)
    # Gather -logs[n, targets[n, h], h] for every position at once.
    losses = (-logs[n_idx, targets, h_idx]).astype(np.float64)
    if reduction == "sum":
        return np.sum(losses)
    elif reduction == "mean":
        return losses.sum() / losses.size
    elif reduction == "none":
        return losses
def _test_nllloss_none(test_case, device):
    """Check flow.nn.NLLLoss(reduction="none") on 1D targets against the numpy reference."""
    x = np.array(
        [
            [0.88103855, 0.9908683, 0.6226845],
            [0.53331435, 0.07999352, 0.8549948],
            [0.25879037, 0.39530203, 0.698465],
            [0.73427284, 0.63575995, 0.18827209],
            [0.05689114, 0.0862954, 0.6325046],
        ]
    ).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([0, 2, 1, 1, 0]).astype(np.int64)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="none")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_1d(input.numpy(), target.numpy())
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_mean(test_case, device):
    """Check flow.nn.NLLLoss(reduction="mean") on 1D targets against the numpy reference."""
    x = np.array(
        [
            [0.88103855, 0.9908683, 0.6226845],
            [0.53331435, 0.07999352, 0.8549948],
            [0.25879037, 0.39530203, 0.698465],
            [0.73427284, 0.63575995, 0.18827209],
            [0.05689114, 0.0862954, 0.6325046],
        ]
    ).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([0, 2, 1, 1, 0]).astype(np.int64)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="mean")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_1d(input.numpy(), target.numpy(), reduction="mean")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_sum(test_case, device):
    """Check flow.nn.NLLLoss(reduction="sum") on 1D targets against the numpy reference."""
    x = np.array(
        [
            [0.88103855, 0.9908683, 0.6226845],
            [0.53331435, 0.07999352, 0.8549948],
            [0.25879037, 0.39530203, 0.698465],
            [0.73427284, 0.63575995, 0.18827209],
            [0.05689114, 0.0862954, 0.6325046],
        ]
    ).astype(np.float32)
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([0, 2, 1, 1, 0]).astype(np.int64)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="sum")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_1d(input.numpy(), target.numpy(), reduction="sum")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_segmentation_none(test_case, device):
    """Check flow.nn.NLLLoss(reduction="none") on 2D (segmentation) targets."""
    x = np.array(
        [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
    ).astype(np.float32)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[[1, 0], [0, 1]]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="none")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_2d(input.numpy(), target.numpy())
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_segmentation_mean(test_case, device):
    """Check flow.nn.NLLLoss(reduction="mean") on 2D (segmentation) targets."""
    x = np.array(
        [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
    ).astype(np.float32)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[[1, 0], [0, 1]]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="mean")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_2d(input.numpy(), target.numpy(), reduction="mean")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_segmentation_sum(test_case, device):
    """Check flow.nn.NLLLoss(reduction="sum") on 2D (segmentation) targets."""
    x = np.array(
        [[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]
    ).astype(np.float32)
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[[1, 0], [0, 1]]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="sum")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_2d(input.numpy(), target.numpy(), reduction="sum")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_bert_none(test_case, device):
    """Check flow.nn.NLLLoss(reduction="none") on (N, C, H) BERT-style inputs."""
    x = np.array([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]).astype(
        np.float32
    )
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[1, 0, 0, 1]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="none")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_bert(input.numpy(), target.numpy())
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_bert_mean(test_case, device):
    """Check flow.nn.NLLLoss(reduction="mean") on (N, C, H) BERT-style inputs."""
    x = np.array([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]).astype(
        np.float32
    )
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[1, 0, 0, 1]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="mean")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_bert(input.numpy(), target.numpy(), reduction="mean")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
def _test_nllloss_bert_sum(test_case, device):
    """Check flow.nn.NLLLoss(reduction="sum") on (N, C, H) BERT-style inputs."""
    x = np.array([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]).astype(
        np.float32
    )
    input = flow.Tensor(x, dtype=flow.float32, device=flow.device(device))
    # np.int was removed in NumPy 1.24; np.int64 matches the flow.int64 target dtype.
    y = np.array([[1, 0, 0, 1]]).astype(np.int64)
    target = flow.Tensor(y, dtype=flow.int64, device=flow.device(device))
    nll_loss = flow.nn.NLLLoss(reduction="sum")
    nll_loss = nll_loss.to(device)
    of_out = nll_loss(input, target)
    np_out = nll_loss_bert(input.numpy(), target.numpy(), reduction="sum")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestNLLLossModule(flow.unittest.TestCase):
    def test_nllloss(test_case):
        """Run every NLLLoss reference check on both cpu and cuda devices."""
        test_funcs = [
            _test_nllloss_none,
            _test_nllloss_mean,
            _test_nllloss_sum,
            _test_nllloss_segmentation_none,
            _test_nllloss_segmentation_mean,
            _test_nllloss_segmentation_sum,
            _test_nllloss_bert_none,
            _test_nllloss_bert_mean,
            _test_nllloss_bert_sum,
        ]
        arg_dict = OrderedDict(
            [("test_fun", test_funcs), ("device", ["cpu", "cuda"])]
        )
        for fn, dev in GenArgList(arg_dict):
            fn(test_case, dev)
# Standard unittest entry point so this file can be run directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.NLLLoss",
"oneflow.experimental.device",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((2961, 2994), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (2976, 2994), True, 'import oneflow.experimental as flow\n'), ((3771, 3804), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (3786, 3804), True, 'import oneflow.experimental as flow\n'), ((4598, 4630), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (4613, 4630), True, 'import oneflow.experimental as flow\n'), ((5241, 5274), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (5256, 5274), True, 'import oneflow.experimental as flow\n'), ((5868, 5901), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (5883, 5901), True, 'import oneflow.experimental as flow\n'), ((6512, 6544), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (6527, 6544), True, 'import oneflow.experimental as flow\n'), ((7135, 7168), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""none"""'}), "(reduction='none')\n", (7150, 7168), True, 'import oneflow.experimental as flow\n'), ((7744, 7777), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (7759, 7777), True, 'import oneflow.experimental as flow\n'), ((8370, 8402), 'oneflow.experimental.nn.NLLLoss', 'flow.nn.NLLLoss', ([], {'reduction': '"""sum"""'}), "(reduction='sum')\n", (8385, 8402), True, 'import oneflow.experimental as flow\n'), ((9416, 9431), 'unittest.main', 'unittest.main', ([], {}), '()\n', (9429, 9431), False, 'import unittest\n'), ((1101, 1112), 'numpy.sum', 'np.sum', (['out'], {}), '(out)\n', (1107, 1112), True, 'import numpy as np\n'), ((1710, 1721), 'numpy.sum', 'np.sum', (['out'], {}), '(out)\n', (1716, 
1721), True, 'import numpy as np\n'), ((2244, 2255), 'numpy.sum', 'np.sum', (['out'], {}), '(out)\n', (2250, 2255), True, 'import numpy as np\n'), ((8843, 8856), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8854, 8856), False, 'from collections import OrderedDict\n'), ((9316, 9336), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9326, 9336), False, 'from test_util import GenArgList\n'), ((8648, 8691), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (8689, 8691), True, 'import oneflow.experimental as flow\n'), ((897, 919), 'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (910, 919), True, 'import numpy as np\n'), ((1404, 1426), 'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (1417, 1426), True, 'import numpy as np\n'), ((1991, 2013), 'numpy.zeros_like', 'np.zeros_like', (['targets'], {}), '(targets)\n', (2004, 2013), True, 'import numpy as np\n'), ((2436, 2638), 'numpy.array', 'np.array', (['[[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, 0.8549948], [\n 0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995, 0.18827209],\n [0.05689114, 0.0862954, 0.6325046]]'], {}), '([[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, \n 0.8549948], [0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995,\n 0.18827209], [0.05689114, 0.0862954, 0.6325046]])\n', (2444, 2638), True, 'import numpy as np\n'), ((2751, 2776), 'numpy.array', 'np.array', (['[0, 2, 1, 1, 0]'], {}), '([0, 2, 1, 1, 0])\n', (2759, 2776), True, 'import numpy as np\n'), ((2847, 2866), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2858, 2866), True, 'import oneflow.experimental as flow\n'), ((2924, 2943), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2935, 2943), True, 'import oneflow.experimental as flow\n'), ((3246, 3448), 'numpy.array', 'np.array', 
(['[[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, 0.8549948], [\n 0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995, 0.18827209],\n [0.05689114, 0.0862954, 0.6325046]]'], {}), '([[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, \n 0.8549948], [0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995,\n 0.18827209], [0.05689114, 0.0862954, 0.6325046]])\n', (3254, 3448), True, 'import numpy as np\n'), ((3561, 3586), 'numpy.array', 'np.array', (['[0, 2, 1, 1, 0]'], {}), '([0, 2, 1, 1, 0])\n', (3569, 3586), True, 'import numpy as np\n'), ((3657, 3676), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3668, 3676), True, 'import oneflow.experimental as flow\n'), ((3734, 3753), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3745, 3753), True, 'import oneflow.experimental as flow\n'), ((4073, 4275), 'numpy.array', 'np.array', (['[[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, 0.8549948], [\n 0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995, 0.18827209],\n [0.05689114, 0.0862954, 0.6325046]]'], {}), '([[0.88103855, 0.9908683, 0.6226845], [0.53331435, 0.07999352, \n 0.8549948], [0.25879037, 0.39530203, 0.698465], [0.73427284, 0.63575995,\n 0.18827209], [0.05689114, 0.0862954, 0.6325046]])\n', (4081, 4275), True, 'import numpy as np\n'), ((4388, 4413), 'numpy.array', 'np.array', (['[0, 2, 1, 1, 0]'], {}), '([0, 2, 1, 1, 0])\n', (4396, 4413), True, 'import numpy as np\n'), ((4484, 4503), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4495, 4503), True, 'import oneflow.experimental as flow\n'), ((4561, 4580), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (4572, 4580), True, 'import oneflow.experimental as flow\n'), ((4912, 4985), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, 
-0.96]]]])\n', (4920, 4985), True, 'import numpy as np\n'), ((5076, 5095), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5087, 5095), True, 'import oneflow.experimental as flow\n'), ((5106, 5134), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]]]'], {}), '([[[1, 0], [0, 1]]])\n', (5114, 5134), True, 'import numpy as np\n'), ((5204, 5223), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5215, 5223), True, 'import oneflow.experimental as flow\n'), ((5539, 5612), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]])\n', (5547, 5612), True, 'import numpy as np\n'), ((5703, 5722), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5714, 5722), True, 'import oneflow.experimental as flow\n'), ((5733, 5761), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]]]'], {}), '([[[1, 0], [0, 1]]])\n', (5741, 5761), True, 'import numpy as np\n'), ((5831, 5850), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (5842, 5850), True, 'import oneflow.experimental as flow\n'), ((6183, 6256), 'numpy.array', 'np.array', (['[[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]]'], {}), '([[[[0.12, 0.36], [0.22, 0.66]], [[0.13, 0.34], [0.52, -0.96]]]])\n', (6191, 6256), True, 'import numpy as np\n'), ((6347, 6366), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (6358, 6366), True, 'import oneflow.experimental as flow\n'), ((6377, 6405), 'numpy.array', 'np.array', (['[[[1, 0], [0, 1]]]'], {}), '([[[1, 0], [0, 1]]])\n', (6385, 6405), True, 'import numpy as np\n'), ((6475, 6494), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (6486, 6494), True, 'import oneflow.experimental as flow\n'), ((6818, 6883), 'numpy.array', 'np.array', (['[[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]'], {}), 
'([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]])\n', (6826, 6883), True, 'import numpy as np\n'), ((6974, 6993), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (6985, 6993), True, 'import oneflow.experimental as flow\n'), ((7004, 7028), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1]])\n', (7012, 7028), True, 'import numpy as np\n'), ((7098, 7117), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (7109, 7117), True, 'import oneflow.experimental as flow\n'), ((7427, 7492), 'numpy.array', 'np.array', (['[[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]'], {}), '([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]])\n', (7435, 7492), True, 'import numpy as np\n'), ((7583, 7602), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (7594, 7602), True, 'import oneflow.experimental as flow\n'), ((7613, 7637), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1]])\n', (7621, 7637), True, 'import numpy as np\n'), ((7707, 7726), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (7718, 7726), True, 'import oneflow.experimental as flow\n'), ((8053, 8118), 'numpy.array', 'np.array', (['[[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]]'], {}), '([[[0.12, 0.36, 0.22, 0.66], [0.13, 0.34, 0.52, -0.96]]])\n', (8061, 8118), True, 'import numpy as np\n'), ((8209, 8228), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (8220, 8228), True, 'import oneflow.experimental as flow\n'), ((8239, 8263), 'numpy.array', 'np.array', (['[[1, 0, 0, 1]]'], {}), '([[1, 0, 0, 1]])\n', (8247, 8263), True, 'import numpy as np\n'), ((8333, 8352), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (8344, 8352), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
from functools import reduce
from typing import Any, Optional, Sequence, Union
import numpy as np
import oneflow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.operator.interface_blob_conf_pb2 as inter_face_blob_conf_util
import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.compile_context as compile_context
import oneflow.python.framework.distribute as distribute_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util
import oneflow._oneflow_internal
from functools import reduce
import traceback
class ArgBlobDef(object):
    """Base class describing one input blob of a global function (job).

    Holds the logical blob id, static shape, dtype and distribute strategy;
    subclasses define how the blob's op is added to the job and how host
    ndarrays are validated and pushed to the runtime.
    """

    def __init__(
        self,
        shape,
        dtype,
        name=None,
        # NOTE(review): this default is evaluated once at class-definition time,
        # so every call without an explicit `distribute` shares the same
        # AutoDistribute object — confirm that is intended.
        distribute=oneflow._oneflow_internal.distribute.auto(),
    ):
        # Each input blob is backed by a logical blob id "op_name/out".
        lbi = lbi_util.LogicalBlobId()
        if name is None:
            name = id_util.UniqueStr("Input_")
        lbi.set_op_name(name)
        lbi.set_blob_name("out")
        self.lbi_ = lbi
        # Shape must be a tuple of strictly positive ints (static shape).
        assert type(shape) is tuple
        for dim in shape:
            assert type(dim) is int
            assert dim > 0
        self.shape_ = shape
        self.dtype_ = dtype
        self.distribute_ = distribute
    @property
    def lbi(self):
        # Logical blob id of this input.
        return self.lbi_
    @property
    def op_name(self):
        return self.lbi_.op_name()
    @property
    def blob_name(self):
        return self.lbi_.blob_name()
    @property
    def unique_name(self):
        # "op/blob" plus a distribute suffix ("", ":S<axis>" or ":B").
        return self.op_name + "/" + self.blob_name + self._Distribute2Str()
    @property
    def shape(self):
        return self.shape_
    @property
    def dtype(self):
        return self.dtype_
    @property
    def is_dynamic(self):
        # Subclasses decide whether the blob's shape may vary at runtime.
        raise NotImplementedError
    def with_distribute(self, distribute):
        # NOTE(review): the `distribute` argument is not forwarded to the new
        # instance (subclass constructors do not accept it), so the returned
        # clone always uses the default distribute — verify this is intended.
        return type(self)(shape=self.shape_, dtype=self.dtype_, name=self.op_name,)
    def Clone(self, op_name=None):
        # Copy with the same shape/dtype under a (possibly new) op name.
        return type(self)(shape=self.shape_, dtype=self.dtype_, name=op_name,)
    def AddAndInferOp(self, op_conf):
        # Subclasses add the input op to the current job and infer its output.
        raise NotImplementedError
    def EagerAddAndInferOp(self, op_conf):
        raise NotImplementedError
    def CheckAndAsyncPush(self, session, arg_ndarray):
        # Validate the host ndarray(s) then asynchronously push to the runtime.
        self._CheckNdarray(arg_ndarray)
        self._AsyncPush(session, arg_ndarray)
    def _CheckNdarray(self, ndarray):
        raise NotImplementedError
    def _AsyncPush(self, session, arg_ndarray):
        raise NotImplementedError
    def ToInterfaceBlobConf(self):
        """Build the InterfaceBlobConf proto describing this input blob."""
        interface_blob_conf = inter_face_blob_conf_util.InterfaceBlobConf()
        interface_blob_conf.shape.dim.extend(self.shape_)
        interface_blob_conf.data_type = oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(
            self.dtype_
        )
        interface_blob_conf.is_dynamic = self.is_dynamic
        # NOTE(chengcheng): rm batch_axis, so set split_axis always = 0 for safe. will support
        # set sbp in future, or will delete in multi-client
        sbp_parallel = sbp_parallel_pb.SbpParallel()
        sbp_parallel.split_parallel.axis = 0
        interface_blob_conf.parallel_distribution.sbp_parallel.extend([sbp_parallel])
        return interface_blob_conf
    def _Distribute2Str(self):
        # Map the distribute strategy to the suffix used in unique_name.
        if (
            type(self.distribute_)
            is oneflow._oneflow_internal.distribute.AutoDistribute
        ):
            return ""
        elif (
            type(self.distribute_)
            is oneflow._oneflow_internal.distribute.SplitDistribute
        ):
            return ":S" + str(self.distribute_.axis)
        elif (
            type(self.distribute_)
            is oneflow._oneflow_internal.distribute.BroadcastDistribute
        ):
            return ":B"
        else:
            raise NotImplementedError
class FixedTensorDef(ArgBlobDef):
    """Input blob definition with a fixed (static) shape, run as a consistent op."""

    def __init__(
        self,
        shape: Sequence[int],
        dtype: oneflow.dtype = oneflow.float,
        name: Optional[str] = None,
    ) -> None:
        ArgBlobDef.__init__(self, shape, dtype=dtype, name=name)

    @property
    def is_dynamic(self) -> bool:
        # Fixed tensors never change shape at runtime.
        return False

    def AddAndInferOp(self, op_conf: op_conf_util.OperatorConf) -> Any:
        return compile_context.CurJobAddConsistentOp(op_conf)

    def EagerAddAndInferOp(self, op_conf: op_conf_util.OperatorConf) -> Any:
        # Pin the op to a single device: keep gpu only when the current scope
        # is exactly one gpu on machine 0; otherwise fall back to cpu @0:0.
        parallel_symbol = oneflow.current_scope().device_parallel_desc_symbol
        machine2devices = dict(parallel_symbol.machine_id2device_id_list)
        single_gpu_on_machine0 = (
            parallel_symbol.device_tag == "gpu"
            and list(machine2devices.keys()) == [0]
            and parallel_symbol.parallel_num == 1
        )
        if single_gpu_on_machine0:
            device_tag = "gpu"
            device_ids = "@0:%s" % (parallel_symbol.machine_id2device_id_list[0][0])
        else:
            device_tag = "cpu"
            device_ids = "@0:0"
        with oneflow.scope.placement(device_tag, device_ids):
            return compile_context.CurJobAddConsistentOp(op_conf)

    def _CheckNdarray(self, ndarray: np.ndarray) -> None:
        # The pushed ndarray must match the declared static shape exactly.
        assert isinstance(ndarray, np.ndarray)
        assert ndarray.shape == self.shape

    def _AsyncPush(self, session: object, arg_ndarray: np.ndarray) -> None:
        session.AsyncPush(self.op_name, _MakePushNdarrayCallback(arg_ndarray))
class MirroredTensorDef(ArgBlobDef):
    """Dynamic-shaped input blob definition, mirrored across devices."""

    def __init__(
        self,
        shape: Sequence[int],
        dtype: oneflow.dtype = oneflow.float,
        name: Optional[str] = None,
    ) -> None:
        assert type(shape) is tuple
        ArgBlobDef.__init__(self, shape, dtype=dtype, name=name)
        # Filled in by _AddAndInferMirroredOp, one ConsistentBlob per sub-lbi.
        self.sub_consistent_blob_list_ = []

    @property
    def is_dynamic(self) -> bool:
        # Mirrored tensors may carry per-device shapes smaller than declared.
        return True

    def AddAndInferOp(self, op_conf: op_conf_util.OperatorConf) -> None:
        _AddAndInferMirroredOp(
            self.unique_name, op_conf, self.sub_consistent_blob_list_
        )

    def EagerAddAndInferOp(self, op_conf: op_conf_util.OperatorConf) -> Any:
        return compile_context.CurJobAddMirroredOp(op_conf)

    def _CheckNdarray(self, ndarray_list: Sequence[np.ndarray]) -> None:
        # One ndarray per sub-blob; each must have the declared rank and fit
        # within the declared static capacity.
        assert isinstance(ndarray_list, (list, tuple))
        assert len(self.sub_consistent_blob_list_) == len(ndarray_list)
        capacity = reduce(lambda a, b: a * b, self.shape, 1)
        for nd in ndarray_list:
            assert type(nd) is np.ndarray
            assert len(nd.shape) == len(self.shape)
            assert reduce(lambda a, b: a * b, nd.shape, 1) <= capacity

    def _AsyncPush(self, session: object, ndarray_list: Sequence[np.ndarray]) -> None:
        # Push each ndarray to its matching sub-blob's op.
        for sub_blob, nd in zip(self.sub_consistent_blob_list_, ndarray_list):
            session.AsyncPush(sub_blob.op_name, _MakePushNdarrayCallback(nd))
def _AddAndInferMirroredOp(mirrored_lbn, op_conf, sub_consistent_blob_list):
    """Add a mirrored op to the current job and collect its consistent sub-blobs.

    After inference the mirrored logical blob `mirrored_lbn` is split into
    per-device sub-lbis; a ConsistentBlob wrapper for each is appended to
    `sub_consistent_blob_list`.
    """
    compile_context.CurJobAddMirroredOp(op_conf)
    job_name = oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName()
    num_sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi(
        job_name, mirrored_lbn
    )
    for idx in range(num_sub_lbi):
        sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi(
            job_name, mirrored_lbn, idx
        )
        lbi = lbi_util.LogicalBlobId()
        lbi.set_op_name(sub_lbi.op_name)
        lbi.set_blob_name(sub_lbi.blob_name)
        consistent_blob = oneflow._oneflow_internal.ConsistentBlob(
            lbi, "", oneflow._oneflow_internal.distribute.auto()
        )
        sub_consistent_blob_list.append(consistent_blob)
def _MakePushNdarrayCallback(ndarray):
copied = np.copy(ndarray)
def Copy(ofblob):
capacity = reduce(lambda x, y: x * y, ofblob.static_shape, 1)
elem_cnt = reduce(lambda x, y: x * y, copied.shape, 1)
assert elem_cnt <= capacity, "%s v.s. %s" % (copied.shape, ofblob.static_shape)
ofblob.CopyFromNdarray(copied)
return Copy
@oneflow_export("FixedTensorDef")
class DeprecatedFixedTensorDef(FixedTensorDef):
    """Deprecated alias of FixedTensorDef; warns unless created internally."""

    def __init__(self, *args, **kwargs):
        # The second-to-last stack frame names the script that instantiated
        # this class; internal uses originate from input_blob_def.py.
        caller_script = traceback.format_stack()[-2].split(",")[0].split(" ")[3]
        if not caller_script.endswith('input_blob_def.py"'):
            print(
                "WARNING: oneflow.FixedTensorDef has been deprecated. "
                "Please use oneflow.typing.Numpy.Placeholder instead."
            )
            print(
                """For instance:
            - def job_func(images=oneflow.FixedTensorDef((32, 1, 28, 28), dtype=flow.float))
            + def job_func(images:oneflow.typing.Numpy.Placeholder((32, 1, 28, 28), dtype=flow.float))"""
            )
            print(traceback.format_stack()[-2])
        super().__init__(*args, **kwargs)
@oneflow_export("MirroredTensorDef")
class DeprecatedMirroredTensorDef(MirroredTensorDef):
    """Deprecated alias of MirroredTensorDef; warns unless created internally."""

    def __init__(self, *args, **kwargs):
        # The second-to-last stack frame names the script that instantiated
        # this class; internal uses originate from input_blob_def.py.
        caller_script = traceback.format_stack()[-2].split(",")[0].split(" ")[3]
        if not caller_script.endswith('input_blob_def.py"'):
            print(
                "WARNING: oneflow.MirroredTensorDef has been deprecated. "
                "Please use oneflow.typing.ListNumpy.Placeholder instead."
            )
            print(
                """For instance:
            - def job_func(images=oneflow.MirroredTensorDef((32, 1, 28, 28), dtype=flow.float))
            + def job_func(images:oneflow.typing.ListNumpy.Placeholder((32, 1, 28, 28), dtype=flow.float))"""
            )
            print(traceback.format_stack()[-2])
        super().__init__(*args, **kwargs)
| [
"oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi",
"oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName",
"oneflow._oneflow_internal.distribute.auto",
"oneflow.current_scope",
"oneflow.python.framework.compile_context.CurJobAddMirroredOp",
"oneflow.core.operator.i... | [((8929, 8961), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""FixedTensorDef"""'], {}), "('FixedTensorDef')\n", (8943, 8961), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9730, 9765), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""MirroredTensorDef"""'], {}), "('MirroredTensorDef')\n", (9744, 9765), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((7851, 7895), 'oneflow.python.framework.compile_context.CurJobAddMirroredOp', 'compile_context.CurJobAddMirroredOp', (['op_conf'], {}), '(op_conf)\n', (7886, 7895), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((7911, 7976), 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (7974, 7976), False, 'import oneflow\n'), ((7995, 8074), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', (['job_name', 'mirrored_lbn'], {}), '(job_name, mirrored_lbn)\n', (8050, 8074), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8609, 8625), 'numpy.copy', 'np.copy', (['ndarray'], {}), '(ndarray)\n', (8616, 8625), True, 'import numpy as np\n'), ((1670, 1713), 'oneflow._oneflow_internal.distribute.auto', 'oneflow._oneflow_internal.distribute.auto', ([], {}), '()\n', (1711, 1713), False, 'import oneflow\n'), ((1736, 1760), 'oneflow._oneflow_internal.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (1758, 1760), True, 'import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3425, 3470), 'oneflow.core.operator.interface_blob_conf_pb2.InterfaceBlobConf', 'inter_face_blob_conf_util.InterfaceBlobConf', ([], {}), '()\n', (3468, 3470), True, 'import oneflow.core.operator.interface_blob_conf_pb2 as 
inter_face_blob_conf_util\n'), ((3569, 3640), 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', (['self.dtype_'], {}), '(self.dtype_)\n', (3627, 3640), False, 'import oneflow\n'), ((3902, 3931), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (3929, 3931), True, 'import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb\n'), ((5110, 5156), 'oneflow.python.framework.compile_context.CurJobAddConsistentOp', 'compile_context.CurJobAddConsistentOp', (['op_conf'], {}), '(op_conf)\n', (5147, 5156), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((6836, 6880), 'oneflow.python.framework.compile_context.CurJobAddMirroredOp', 'compile_context.CurJobAddMirroredOp', (['op_conf'], {}), '(op_conf)\n', (6871, 6880), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((8140, 8219), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', (['job_name', 'mirrored_lbn', 'i'], {}), '(job_name, mirrored_lbn, i)\n', (8192, 8219), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8256, 8280), 'oneflow._oneflow_internal.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (8278, 8280), True, 'import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util\n'), ((8668, 8718), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'ofblob.static_shape', '(1)'], {}), '(lambda x, y: x * y, ofblob.static_shape, 1)\n', (8674, 8718), False, 'from functools import reduce\n'), ((8738, 8781), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'copied.shape', '(1)'], {}), '(lambda x, y: x * y, copied.shape, 1)\n', (8744, 8781), False, 'from functools import reduce\n'), ((1805, 1832), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', 
(['"""Input_"""'], {}), "('Input_')\n", (1822, 1832), True, 'import oneflow.python.framework.id_util as id_util\n'), ((5261, 5284), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (5282, 5284), False, 'import oneflow\n'), ((5725, 5772), 'oneflow.scope.placement', 'oneflow.scope.placement', (['device_tag', 'device_ids'], {}), '(device_tag, device_ids)\n', (5748, 5772), False, 'import oneflow\n'), ((5793, 5839), 'oneflow.python.framework.compile_context.CurJobAddConsistentOp', 'compile_context.CurJobAddConsistentOp', (['op_conf'], {}), '(op_conf)\n', (5830, 5839), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((7133, 7169), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'shape', '(1)'], {}), '(lambda x, y: x * y, shape, 1)\n', (7139, 7169), False, 'from functools import reduce\n'), ((8487, 8530), 'oneflow._oneflow_internal.distribute.auto', 'oneflow._oneflow_internal.distribute.auto', ([], {}), '()\n', (8528, 8530), False, 'import oneflow\n'), ((9654, 9678), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (9676, 9678), False, 'import traceback\n'), ((10478, 10502), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (10500, 10502), False, 'import traceback\n'), ((9076, 9100), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (9098, 9100), False, 'import traceback\n'), ((9886, 9910), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (9908, 9910), False, 'import traceback\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional, Union
import oneflow as flow
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _single
def bernoulli(input, *, generator=None, out=None):
    """This operator returns a Tensor with binaray random numbers (0 / 1) from a Bernoulli distribution.
    Args:
        input(Tensor) - the input tensor of probability values for the Bernoulli distribution
        generator: (optional) – a pseudorandom number generator for sampling
        out (Tensor, optional) – the output tensor.
    Shape:
        - Input: :math:`(*)`. Input can be of any shape
        - Output: :math:`(*)`. Output is of the same shape as input
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> arr = np.array(
        ...    [
        ...        [1.0, 1.0, 1.0],
        ...        [1.0, 1.0, 1.0],
        ...        [1.0, 1.0, 1.0],
        ...    ]
        ... )
        >>> x = flow.Tensor(arr)
        >>> y = flow.bernoulli(x)
        >>> y
        tensor([[1., 1., 1.],
                [1., 1., 1.],
                [1., 1., 1.]], dtype=oneflow.float32)
    """
    # FIX: `out` used to be accepted and silently ignored; reject it
    # explicitly, mirroring the identical guard in `randn_op` below.
    assert out is None, "out not supported yet"
    return flow.F.bernoulli(input, flow.float32, generator)
class RandN(Module):
    """Module that samples a standard-normal tensor of shape ``size``.

    Used by :func:`randn_op`. Produces either a local tensor (via ``device``)
    or a consistent tensor (via ``placement`` + ``sbp``); the two modes are
    mutually exclusive.
    """

    def __init__(
        self,
        size,
        generator=None,
        dtype=None,
        layout=None,
        device=None,
        placement=None,
        sbp=None,
        requires_grad=False,
    ) -> None:
        super().__init__()
        assert size is not None, "shape must not be None!"
        assert isinstance(
            size, (int, tuple, list, flow.Size)
        ), "shape should be int or tuple int!"
        self.device = device
        if isinstance(self.device, str):
            # Allow the device to be given as a string such as "cuda:0".
            self.device = flow.device(self.device)
        self.requires_grad = requires_grad
        size = _single(size)
        if generator is None:
            generator = flow.Generator()
        self.generator = generator
        self.placement = placement
        self.sbp = sbp
        if placement is not None:
            # Consistent mode: sbp is required and, after normalization to a
            # tuple, must match the placement hierarchy length.
            assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
            if isinstance(self.sbp, flow.sbp.sbp):
                self.sbp = (self.sbp,)
            else:
                for elem in sbp:
                    assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
            assert len(self.sbp) == len(placement.hierarchy)
        else:
            # Local mode: sbp is meaningless without a placement.
            assert sbp is None, "sbp: %s" % sbp
        self.size = size
        self.dtype = dtype

    def forward(self):
        if self.placement is not None:
            res = flow.F.consistent_randn(
                self.size, self.placement, self.sbp, self.dtype, self.generator
            )
        else:
            res = flow.F.randn(self.size, self.dtype, self.device, self.generator)
        res.requires_grad = self.requires_grad
        return res
def randn_op(
    *size,
    out=None,
    generator=None,
    dtype: Optional[flow.dtype] = None,
    layout=None,
    device: Union[flow.device, str, None] = None,
    placement: flow.placement = None,
    sbp: flow._oneflow_internal.sbp.sbp = None,
    requires_grad: bool = False
):
    """
    Returns a tensor filled with random numbers from a normal distribution with mean 0 and variance 1 (also called the standard normal distribution).
    The shape of the tensor is defined by the variable argument ``size``.
    Args:
        size (int... or flow.Size): Defining the shape of the output tensor.
          Can be a variable number of arguments or a collection like a list or tuple or flow.Size.
        out (optional): The output tensor.
        dtype (flow.dtype, optional): The desired data type of returned tensor. Default: ``flow.float32``.
        layout (optional): The desired layout of returned Tensor.
        generator (flow.Generator, optional) – a pseudorandom number generator for sampling
        device (torch.device, optional): The desired device of returned local tensor. If None, uses the
          current device.
        placement (flow.placement, optional): The desired device of returned consistent tensor. If None, will
          construct local tensor.
        sbp (flow.sbp, optional): The desired sbp of returned consistent tensor. It must be equal with the
          numbers of placement.
        requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> x = flow.randn(3,3)
        >>> x.shape
        flow.Size([3, 3])
        >>> x.is_consistent
        False
        >>> placement = flow.placement("cpu", {0:[0]})
        >>> sbp = flow.sbp.broadcast
        >>> x = flow.randn(3,3,placement=placement,sbp=sbp)
        >>> x.is_consistent
        True
    """
    # `out` and `layout` exist only for torch-API parity.
    assert out is None, "out not supported yet"
    assert layout is None, "layout not supported yet"
    if generator is None:
        generator = flow.default_generator()
    # Build a one-shot RandN module and invoke it immediately.
    return RandN(
        size, generator, dtype, layout, device, placement, sbp, requires_grad
    )()
if __name__ == "__main__":
    import doctest
    # Verify the doctest examples embedded in the docstrings above.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.nn.modules.utils._single",
"oneflow.default_generator",
"oneflow.F.consistent_randn",
"oneflow.Generator",
"oneflow.F.bernoulli",
"oneflow.F.randn",
"oneflow.device"
] | [((1781, 1829), 'oneflow.F.bernoulli', 'flow.F.bernoulli', (['input', 'flow.float32', 'generator'], {}), '(input, flow.float32, generator)\n', (1797, 1829), True, 'import oneflow as flow\n'), ((5795, 5831), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (5810, 5831), False, 'import doctest\n'), ((2458, 2471), 'oneflow.nn.modules.utils._single', '_single', (['size'], {}), '(size)\n', (2465, 2471), False, 'from oneflow.nn.modules.utils import _single\n'), ((5613, 5637), 'oneflow.default_generator', 'flow.default_generator', ([], {}), '()\n', (5635, 5637), True, 'import oneflow as flow\n'), ((2375, 2399), 'oneflow.device', 'flow.device', (['self.device'], {}), '(self.device)\n', (2386, 2399), True, 'import oneflow as flow\n'), ((2527, 2543), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (2541, 2543), True, 'import oneflow as flow\n'), ((3224, 3316), 'oneflow.F.consistent_randn', 'flow.F.consistent_randn', (['self.size', 'self.placement', 'self.sbp', 'self.dtype', 'self.generator'], {}), '(self.size, self.placement, self.sbp, self.dtype,\n self.generator)\n', (3247, 3316), True, 'import oneflow as flow\n'), ((3375, 3439), 'oneflow.F.randn', 'flow.F.randn', (['self.size', 'self.dtype', 'self.device', 'self.generator'], {}), '(self.size, self.dtype, self.device, self.generator)\n', (3387, 3439), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the reference documentation to oneflow.searchsorted.
add_docstr(
    oneflow.searchsorted,
    """
    searchsorted() -> oneflow.Tensor
    The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.searchsorted.html?highlight=searchsorted
    Find the indices from the innermost dimension of sorted_sequence such that, if the corresponding values
    in values were inserted before the indices, the order of the corresponding innermost dimension within
    sorted_sequence would be preserved. Return a new tensor with the same size as values. If right is False
    (default), then the left boundary of sorted_sequence is closed. More formally, the returned index
    satisfies the following rules:
    ================= ========= ==========================================================================
    sorted_sequence   right     returned index satisfies
    ================= ========= ==========================================================================
    1-D               False     sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]
    1-D               True      sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]
    N-D               False     sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x]
                                <= sorted_sequence[m][n]...[l][i]
    N-D               True      sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x]
                                < sorted_sequence[m][n]...[l][i]
    ================= ========= ==========================================================================
    Args:
        sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the
                                  innermost dimension.
        values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).
        out_int32 (bool optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.
                                   Default value is False, i.e. default output data type is torch.int64.
        right (bool optional): if False, return the first suitable location that is found. If True, return the
                               last such index. If no suitable index found, return 0 for non-numerical value
                               (eg. nan, inf) or the size of innermost dimension within sorted_sequence (one
                               pass the last index of the innermost dimension). In other words, if False, gets
                               the lower bound index for each value in values on the corresponding innermost
                               dimension of the sorted_sequence. If True, gets the upper bound index instead.
                               Default value is False.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> sorted_sequence = flow.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])
        >>> sorted_sequence
        tensor([[ 1,  3,  5,  7,  9],
                [ 2,  4,  6,  8, 10]], dtype=oneflow.int64)
        >>> values = flow.tensor([[3, 6, 9], [3, 6, 9]])
        >>> values
        tensor([[3, 6, 9],
                [3, 6, 9]], dtype=oneflow.int64)
        >>> flow.searchsorted(sorted_sequence, values)
        tensor([[1, 3, 4],
                [1, 2, 4]], dtype=oneflow.int64)
        >>> flow.searchsorted(sorted_sequence, values, right=True)
        tensor([[2, 3, 5],
                [1, 3, 4]], dtype=oneflow.int64)
        >>> sorted_sequence_1d = flow.tensor([1, 3, 5, 7, 9])
        >>> sorted_sequence_1d
        tensor([1, 3, 5, 7, 9], dtype=oneflow.int64)
        >>> flow.searchsorted(sorted_sequence_1d, values)
        tensor([[1, 3, 4],
                [1, 3, 4]], dtype=oneflow.int64)
    """,
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 4474), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.searchsorted', '"""\n searchsorted() -> oneflow.Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.searchsorted.html?highlight=searchsorted\n\n Find the indices from the innermost dimension of sorted_sequence such that, if the corresponding values\n in values were inserted before the indices, the order of the corresponding innermost dimension within\n sorted_sequence would be preserved. Return a new tensor with the same size as values. If right is False\n (default), then the left boundary of sorted_sequence is closed. More formally, the returned index\n satisfies the following rules:\n\n ================= ========= ==========================================================================\n sorted_sequence right returned index satisfies\n ================= ========= ==========================================================================\n 1-D False sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]\n 1-D True sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]\n N-D False sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] \n <= sorted_sequence[m][n]...[l][i]\n N-D True sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] \n sorted_sequence[m][n]...[l][i]\n ================= ========= ==========================================================================\n\n Args:\n sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the\n innermost dimension.\n values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).\n out_int32 (bool optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.\n Default value is False, i.e. default output data type is torch.int64.\n right (bool optional): if False, return the first suitable location that is found. If True, return the\n last such index. 
If no suitable index found, return 0 for non-numerical value\n (eg. nan, inf) or the size of innermost dimension within sorted_sequence (one\n pass the last index of the innermost dimension). In other words, if False, gets\n the lower bound index for each value in values on the corresponding innermost\n dimension of the sorted_sequence. If True, gets the upper bound index instead.\n Default value is False.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> sorted_sequence = flow.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])\n >>> sorted_sequence\n tensor([[ 1, 3, 5, 7, 9],\n [ 2, 4, 6, 8, 10]], dtype=oneflow.int64)\n >>> values = flow.tensor([[3, 6, 9], [3, 6, 9]])\n >>> values\n tensor([[3, 6, 9],\n [3, 6, 9]], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence, values)\n tensor([[1, 3, 4],\n [1, 2, 4]], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence, values, right=True)\n tensor([[2, 3, 5],\n [1, 3, 4]], dtype=oneflow.int64)\n >>> sorted_sequence_1d = flow.tensor([1, 3, 5, 7, 9])\n >>> sorted_sequence_1d\n tensor([1, 3, 5, 7, 9], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence_1d, values)\n tensor([[1, 3, 4],\n [1, 3, 4]], dtype=oneflow.int64)\n\n """'], {}), '(oneflow.searchsorted,\n """\n searchsorted() -> oneflow.Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.searchsorted.html?highlight=searchsorted\n\n Find the indices from the innermost dimension of sorted_sequence such that, if the corresponding values\n in values were inserted before the indices, the order of the corresponding innermost dimension within\n sorted_sequence would be preserved. Return a new tensor with the same size as values. If right is False\n (default), then the left boundary of sorted_sequence is closed. 
More formally, the returned index\n satisfies the following rules:\n\n ================= ========= ==========================================================================\n sorted_sequence right returned index satisfies\n ================= ========= ==========================================================================\n 1-D False sorted_sequence[i-1] < values[m][n]...[l][x] <= sorted_sequence[i]\n 1-D True sorted_sequence[i-1] <= values[m][n]...[l][x] < sorted_sequence[i]\n N-D False sorted_sequence[m][n]...[l][i-1] < values[m][n]...[l][x] \n <= sorted_sequence[m][n]...[l][i]\n N-D True sorted_sequence[m][n]...[l][i-1] <= values[m][n]...[l][x] \n sorted_sequence[m][n]...[l][i]\n ================= ========= ==========================================================================\n\n Args:\n sorted_sequence (Tensor): N-D or 1-D tensor, containing monotonically increasing sequence on the\n innermost dimension.\n values (Tensor or Scalar): N-D tensor or a Scalar containing the search value(s).\n out_int32 (bool optional): indicate the output data type. torch.int32 if True, torch.int64 otherwise.\n Default value is False, i.e. default output data type is torch.int64.\n right (bool optional): if False, return the first suitable location that is found. If True, return the\n last such index. If no suitable index found, return 0 for non-numerical value\n (eg. nan, inf) or the size of innermost dimension within sorted_sequence (one\n pass the last index of the innermost dimension). In other words, if False, gets\n the lower bound index for each value in values on the corresponding innermost\n dimension of the sorted_sequence. If True, gets the upper bound index instead.\n Default value is False.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> sorted_sequence = flow.tensor([[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]])\n >>> sorted_sequence\n tensor([[ 1, 3, 5, 7, 9],\n [ 2, 4, 6, 8, 10]], dtype=oneflow.int64)\n >>> values = flow.tensor([[3, 6, 9], [3, 6, 9]])\n >>> values\n tensor([[3, 6, 9],\n [3, 6, 9]], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence, values)\n tensor([[1, 3, 4],\n [1, 2, 4]], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence, values, right=True)\n tensor([[2, 3, 5],\n [1, 3, 4]], dtype=oneflow.int64)\n >>> sorted_sequence_1d = flow.tensor([1, 3, 5, 7, 9])\n >>> sorted_sequence_1d\n tensor([1, 3, 5, 7, 9], dtype=oneflow.int64)\n >>> flow.searchsorted(sorted_sequence_1d, values)\n tensor([[1, 3, 4],\n [1, 3, 4]], dtype=oneflow.int64)\n\n """\n )\n', (670, 4474), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Modified from https://github.com/pytorch/vision/blob/main/torchvision/ops/stochastic_depth.py
"""
import oneflow as flow
import oneflow.nn as nn
from oneflow import Tensor
def stochastic_depth(
    input: Tensor, p: float, mode: str, training: bool = True
) -> Tensor:
    """
    Implements the Stochastic Depth from `"Deep Networks with Stochastic Depth"
    <https://arxiv.org/abs/1603.09382>`_ used for randomly dropping residual
    branches of residual architectures.
    Args:
        input (Tensor[N, ...]): The input tensor or arbitrary dimensions with the first one
                    being its batch i.e. a batch with ``N`` rows.
        p (float): probability of the input to be zeroed.
        mode (str): ``"batch"`` or ``"row"``.
                    ``"batch"`` randomly zeroes the entire input, ``"row"`` zeroes
                    randomly selected rows from the batch.
        training: apply stochastic depth if is ``True``. Default: ``True``
    Returns:
        Tensor[N, ...]: The randomly zeroed tensor.
    """
    if p < 0.0 or p > 1.0:
        raise ValueError(f"drop probability has to be between 0 and 1, but got {p}")
    if mode not in ["batch", "row"]:
        raise ValueError(f"mode has to be either 'batch' or 'row', but got {mode}")
    if not training or p == 0.0:
        return input
    survival_rate = 1.0 - p
    if mode == "row":  # randomly samples some data of one batch and set them zero
        size = [input.shape[0]] + [1] * (input.ndim - 1)
    else:  # zeros the entire input
        size = [1] * input.ndim
    # noise = flow.empty(size, dtype=input.dtype, device=input.device)
    # TODO: add tensor.bernoulli_() method
    noise = flow.ones(size, dtype=input.dtype) * survival_rate
    # TODO: for now bernoulli only has cpu implementation in oneflow
    noise = flow.bernoulli(noise).to(input.device)
    if survival_rate > 0.0:
        # BUG FIX: Tensor.div is out-of-place and the original code discarded
        # its result, so surviving activations were never rescaled by
        # 1 / survival_rate (torchvision uses in-place div_). Rebind instead.
        noise = noise.div(survival_rate)
    return input * noise
class StochasticDepth(nn.Module):
    """
    See :func:`stochastic_depth`.
    """

    def __init__(self, p: float, mode: str) -> None:
        super().__init__()
        self.p = p
        self.mode = mode

    def forward(self, input: Tensor) -> Tensor:
        return stochastic_depth(input, self.p, self.mode, self.training)

    def __repr__(self) -> str:
        # Produces exactly the same text as the original string concatenation.
        return f"{self.__class__.__name__}(p={self.p}, mode={self.mode})"
| [
"oneflow.ones",
"oneflow.bernoulli"
] | [((1692, 1726), 'oneflow.ones', 'flow.ones', (['size'], {'dtype': 'input.dtype'}), '(size, dtype=input.dtype)\n', (1701, 1726), True, 'import oneflow as flow\n'), ((1824, 1845), 'oneflow.bernoulli', 'flow.bernoulli', (['noise'], {}), '(noise)\n', (1838, 1845), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
# Shared function configuration reused by every job built in the tests below;
# each make_job/make_xla_job call flips its XLA/TensorRT flags before use.
config = flow.function_config()
class TestScalarOp(unittest.TestCase):
    """Base harness comparing a plain job against its XLA-compiled twin.

    Subclasses implement make_job/make_xla_job and set `run_test` to True.
    """

    # The base class defines no ops, so it never runs the comparison itself.
    run_test = False

    def _test_body(self, x, scalar, dtype=np.float32):
        if not self.run_test:
            return
        ref_job = self.make_job(x.shape, scalar, dtype=flow.float32)
        xla_job = self.make_xla_job(x.shape, scalar, dtype=flow.float32)
        ref_out = ref_job(x).get()
        xla_out = xla_job(x).get()
        print("without xla: ", ref_out)
        print("with xla", xla_out)
        self.assertTrue(np.allclose(ref_out.numpy(), xla_out.numpy(), rtol=1e-03, atol=1e-05))
        flow.clear_default_session()

    def _test_ones_body(self, x_shape, scalar, dtype=np.float32):
        self._test_body(np.ones(x_shape, dtype=dtype), scalar, dtype=dtype)

    def _test_random_body(self, x_shape, scalar, dtype=np.float32):
        self._test_body(np.random.random(x_shape).astype(dtype), scalar, dtype=dtype)

    def test_ones_input(self):
        for shape in ((1, 10), (2, 10, 2), (2, 5, 2, 2)):
            self._test_ones_body(shape, 2.0)

    def test_random_input(self):
        for shape in ((1, 10), (2, 10, 2), (2, 5, 2, 2)):
            self._test_random_body(shape, 2.0)
class TestScalarAddOp(TestScalarOp):
    # Enable the base-class plain-vs-XLA comparison for the scalar-add op.
    run_test = True
    def make_job(self, x_shape, scalar, dtype=flow.float32):
        # Plain (non-XLA) reference job.
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def scalar_add_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.add(x, scalar)
        return scalar_add_job
    def make_xla_job(self, x_shape, scalar, dtype=flow.float32):
        # Same computation compiled through the XLA JIT.
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_scalar_add_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.add(x, scalar)
        return xla_scalar_add_job
class TestScalarMulOp(TestScalarOp):
    # Enable the base-class plain-vs-XLA comparison for the scalar-multiply op.
    run_test = True
    def make_job(self, x_shape, scalar, dtype=flow.float32):
        # Plain (non-XLA) reference job.
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def scalar_mul_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.multiply(x, scalar)
        return scalar_mul_job
    def make_xla_job(self, x_shape, scalar, dtype=flow.float32):
        # Same computation compiled through the XLA JIT.
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_scalar_mul_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            return flow.math.multiply(x, scalar)
        return xla_scalar_mul_job
if __name__ == "__main__":
    # Discover and run the unittest cases defined above.
    unittest.main()
| [
"oneflow.compatible.single_client.function_config",
"oneflow.compatible.single_client.FixedTensorDef",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.math.multiply",
"oneflow.compatible.single_client.math.add"... | [((689, 711), 'oneflow.compatible.single_client.function_config', 'flow.function_config', ([], {}), '()\n', (709, 711), True, 'from oneflow.compatible import single_client as flow\n'), ((3370, 3385), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3383, 3385), False, 'import unittest\n'), ((1214, 1242), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1240, 1242), True, 'from oneflow.compatible import single_client as flow\n'), ((1322, 1351), 'numpy.ones', 'np.ones', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1329, 1351), True, 'import numpy as np\n'), ((2115, 2143), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (2135, 2143), True, 'from oneflow.compatible import single_client as flow\n'), ((2436, 2464), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (2456, 2464), True, 'from oneflow.compatible import single_client as flow\n'), ((2821, 2849), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (2841, 2849), True, 'from oneflow.compatible import single_client as flow\n'), ((3147, 3175), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (3167, 3175), True, 'from oneflow.compatible import single_client as flow\n'), ((2173, 2214), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2192, 2214), True, 'from oneflow.compatible import single_client as flow\n'), ((2236, 2260), 'oneflow.compatible.single_client.math.add', 'flow.math.add', (['x', 'scalar'], {}), '(x, scalar)\n', (2249, 2260), True, 'from oneflow.compatible import single_client as flow\n'), ((2498, 2539), 'oneflow.compatible.single_client.FixedTensorDef', 
'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2517, 2539), True, 'from oneflow.compatible import single_client as flow\n'), ((2561, 2585), 'oneflow.compatible.single_client.math.add', 'flow.math.add', (['x', 'scalar'], {}), '(x, scalar)\n', (2574, 2585), True, 'from oneflow.compatible import single_client as flow\n'), ((2879, 2920), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2898, 2920), True, 'from oneflow.compatible import single_client as flow\n'), ((2942, 2971), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['x', 'scalar'], {}), '(x, scalar)\n', (2960, 2971), True, 'from oneflow.compatible import single_client as flow\n'), ((3209, 3250), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (3228, 3250), True, 'from oneflow.compatible import single_client as flow\n'), ((3272, 3301), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['x', 'scalar'], {}), '(x, scalar)\n', (3290, 3301), True, 'from oneflow.compatible import single_client as flow\n'), ((1481, 1506), 'numpy.random.random', 'np.random.random', (['x_shape'], {}), '(x_shape)\n', (1497, 1506), True, 'import numpy as np\n')] |
from typing import Sequence
import oneflow.experimental as flow
import argparse
import numpy as np
import os
import time
import sys
import oneflow.experimental.nn as nn
import json
from tqdm import tqdm
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "model_compress/distil_new_api/src")))
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "./src")))
import config as configs
from data_util import OFRecordDataLoader
from bert_model.bert import BERT
from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score
from util import getdirsize
from knowledge_distill_util import layer_distill, pred_distill
def _parse_args():
    """Build and parse the command-line arguments for BERT-PKD distillation.

    Starts from the shared parser returned by ``configs.get_parser()`` and
    adds task, data, model-size, and distillation options.

    Returns:
        argparse.Namespace: the parsed arguments.
    """
    def str2bool(v):
        # Accept the common textual spellings of booleans for CLI flags.
        if v.lower() in ('yes', 'true', 't', 'y', '1'):
            return True
        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
            return False
        else:
            raise argparse.ArgumentTypeError('Unsupported value encountered.')
    parser = configs.get_parser()
    parser.add_argument("--task_name", type=str, default='CoLA')
    parser.add_argument("--teacher_model", default=None, type=str, help="The teacher model dir.")
    parser.add_argument("--student_model", default=None, type=str, help="The student model dir.")
    parser.add_argument("--total_model", default=None, type=str, help="The student model dir.")
    parser.add_argument('--num_epochs', type=int, default=3, help='number of epochs')
    # Training-set OFRecord location and sizing.
    parser.add_argument("--train_data_dir", type=str, default='/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord_test/SST-2/train/')
    parser.add_argument("--train_data_prefix", type=str, default='train.of_record-')
    parser.add_argument("--train_example_num", type=int, default=67349,
                        help="example number in dataset")
    parser.add_argument("--batch_size_per_device", type=int, default=8)
    parser.add_argument("--train_data_part_num", type=int, default=1,
                        help="data part number in dataset")
    # Evaluation-set OFRecord location and sizing.
    parser.add_argument("--eval_data_dir", type=str, default='/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord_test/SST-2/eval/')
    parser.add_argument("--eval_data_prefix", type=str, default='eval.of_record-')
    parser.add_argument("--eval_example_num", type=int, default=872,
                        help="example number in dataset")
    parser.add_argument("--eval_batch_size_per_device", type=int, default=12)
    parser.add_argument("--eval_data_part_num", type=int, default=1,
                        help="data part number in dataset")
    parser.add_argument("--result_dir", type=str, default="", help="the save directory of results")
    # Student / teacher BERT architecture hyper-parameters.
    parser.add_argument("--student_num_hidden_layers", type=int, default=3)
    parser.add_argument("--student_num_attention_heads", type=int, default=12)
    parser.add_argument("--student_max_position_embeddings", type=int, default=512)
    parser.add_argument("--student_type_vocab_size", type=int, default=2)
    parser.add_argument("--student_vocab_size", type=int, default=30522)
    parser.add_argument("--student_attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--student_hidden_dropout_prob", type=float, default=0.1)
    parser.add_argument("--student_hidden_size_per_head", type=int, default=64)
    parser.add_argument("--student_hidden_size", type=int, default=768)
    parser.add_argument("--teacher_num_hidden_layers", type=int, default=12)
    parser.add_argument("--teacher_num_attention_heads", type=int, default=16)
    parser.add_argument("--teacher_max_position_embeddings", type=int, default=512)
    parser.add_argument("--teacher_type_vocab_size", type=int, default=2)
    parser.add_argument("--teacher_vocab_size", type=int, default=30522)
    parser.add_argument("--teacher_attention_probs_dropout_prob", type=float, default=0.1)
    parser.add_argument("--teacher_hidden_dropout_prob", type=float, default=0.1)
    parser.add_argument("--teacher_hidden_size_per_head", type=int, default=64)
    parser.add_argument("--teacher_hidden_size", type=int, default=768)
    # Distillation loss weights: kd_alpha mixes CE with the soft-label loss,
    # kd_beta scales the patient (layer) loss.
    parser.add_argument("--kd_alpha", type=float, default=0.2)
    parser.add_argument("--kd_beta", type=float, default=10, help='the proposed loss {10,100,500,1000}')
    parser.add_argument('--from_scratch', type=str2bool, nargs='?', const=False, help='train the student model from scratch or initialize from teacher layers')
    parser.add_argument('--temperature', type=float, default=1.)
    parser.add_argument('--aug_train', type=str2bool, nargs='?', const=False, help='using augmented training set?')
    parser.add_argument('--serve_for_online', type=str2bool, nargs='?', const=False,
                        help='if serve for online, then after training, will delete the teacher params and optimizer parmas from model_save_dir')
    return parser.parse_args()
class bert_pkd(nn.Module):
    """Student/teacher BERT pair for patient knowledge distillation.

    Holds a student BERT and a teacher BERT, each followed by its own
    two-way linear head and softmax. Attribute names are part of the
    saved state_dict and must not change.
    """

    def __init__(self, student_vocab_size,
                 student_hidden,
                 student_n_layers,
                 student_attn_heads,
                 student_dropout,
                 teacher_vocab_size,
                 teacher_hidden,
                 teacher_n_layers,
                 teacher_attn_heads,
                 teacher_dropout):
        super().__init__()
        self.student_model = BERT(
            student_vocab_size,
            student_hidden,
            student_n_layers,
            student_attn_heads,
            student_dropout,
        )
        self.teacher_model = BERT(
            teacher_vocab_size,
            teacher_hidden,
            teacher_n_layers,
            teacher_attn_heads,
            teacher_dropout,
        )
        # Separate 2-class heads and softmaxes for each branch.
        self.student_output_layer = nn.Linear(student_hidden, 2)
        self.teacher_output_layer = nn.Linear(teacher_hidden, 2)
        self.student_softmax = nn.Softmax(dim=1)
        self.teacher_softmax = nn.Softmax(dim=1)

    def eval_forward(self, x, segment_info):
        """Run only the student branch and return its softmax output."""
        s_out, _, _ = self.student_model(x, segment_info)
        head_out = self.student_output_layer(s_out[:, 0])
        return self.student_softmax(head_out)

    def forward(self, x, segment_info):
        """Run both branches.

        Returns (student_logits, student_sequence_out,
                 teacher_logits, teacher_sequence_out).
        """
        s_out, s_seq, _ = self.student_model(x, segment_info)
        s_logits = self.student_softmax(self.student_output_layer(s_out[:, 0]))
        t_out, t_seq, _ = self.teacher_model(x, segment_info)
        t_logits = self.teacher_softmax(self.teacher_output_layer(t_out[:, 0]))
        return s_logits, s_seq, t_logits, t_seq
def eval(model, dataloader, desc="train"):
    """Evaluate the student branch of `model` on `dataloader`.

    NOTE: intentionally shadows the builtin ``eval`` — the name is kept
    because callers in this script invoke it directly. Relies on the
    module-level ``args`` namespace (model_save_dir, result_dir,
    eval_example_num).

    Side effects: prints timing/metrics and writes
    ``results_<desc>.json`` under ``args.result_dir``.

    Returns:
        dict: accuracy, matthews_corrcoef, precision, recall, f1 (floats).
    """
    model.eval()
    labels = []
    predictions = []
    start_time = time.time()
    with flow.no_grad():
        for b in tqdm(range(len(dataloader))):
            blob_confs = dataloader.get_batch()
            input_ids = blob_confs['input_ids'].to("cuda")
            segment_ids = blob_confs['segment_ids'].to("cuda")
            label_ids = blob_confs['label_ids'].squeeze(-1)
            student_logits = model.eval_forward(input_ids, segment_ids)
            predictions.extend(student_logits.detach().to('cpu').numpy().argmax(axis=1).tolist())
            labels.extend(label_ids.tolist())
    end_time = time.time()
    cost_time = end_time - start_time
    print('cost time: {} s'.format(cost_time))
    model_size = getdirsize(args.model_save_dir)
    print('model_size: %d Mbytes' % (model_size / 1024 / 1024))  # Mbytes
    accuracy = accuracy_score(labels, predictions)
    mcc = matthews_corrcoef(labels, predictions)
    precision = precision_score(labels, predictions)
    recall = recall_score(labels, predictions)
    f_1 = f1_score(labels, predictions)
    save_dict = {"accuracy": "%.2f" % accuracy,
                 "MCC": "%.2f" % mcc,
                 "precision": "%.2f" % precision,
                 "recall": "%.2f" % recall,
                 "f_1": "%.2f" % f_1,
                 "modelSize": "%d" % (model_size / 1024 / 1024),
                 "reasoningTime": "%.2f" % (args.eval_example_num / cost_time)}  # sample/second
    if args.result_dir == "":
        args.result_dir = args.model_save_dir
    # exist_ok avoids the race between the existence check and creation.
    os.makedirs(args.result_dir, exist_ok=True)
    with open(os.path.join(args.result_dir, 'results_{}.json'.format(desc)), "w") as f:
        json.dump(save_dict, f)
    # Build the return dict directly; the previous nested metric_fn closure
    # was pure indirection over these same locals.
    metric_dict = {
        "accuracy": accuracy,
        "matthews_corrcoef": mcc,
        "precision": precision,
        "recall": recall,
        "f1": f_1,
    }
    print(desc, ', '.join('{}: {:.3f}'.format(k, v) for k, v in metric_dict.items()))
    return metric_dict
def main(args):
    """Distill a teacher BERT into a student with BERT-PKD.

    Builds the data loaders and the student/teacher pair, then optionally
    trains with the combined CE + soft-label + patient losses
    (``args.do_train``) and/or evaluates the student (``args.do_eval``).

    Fixes over the original:
      * When both do_train and do_eval were set, ``model.load_state_dict``
        ran with ``model_dict`` unbound (NameError); loading is now done
        only when the model was not just trained.
      * check-then-``makedirs`` races replaced with ``exist_ok=True``.
      * ``os.path.join`` with a single argument (a no-op) removed.
    """
    acc_tasks = ["mnli", "mrpc", "sst-2", "qqp", "qnli", "rte"]
    corr_tasks = ["sts-b"]
    mcc_tasks = ["cola"]
    task_name = args.task_name.lower()
    flow.enable_eager_execution()
    flow.InitEagerGlobalSession()
    train_data_loader = OFRecordDataLoader(
        args.train_data_dir,
        args.batch_size_per_device,
        args.train_data_part_num,
        args.seq_length,
        args.train_data_prefix,
        args.train_example_num,
    )
    eval_data_loader = OFRecordDataLoader(
        args.eval_data_dir,
        args.eval_batch_size_per_device,
        args.eval_data_part_num,
        args.seq_length,
        args.eval_data_prefix,
        args.eval_example_num,
    )
    model = bert_pkd(
        args.student_vocab_size,
        args.student_hidden_size,
        args.student_num_hidden_layers,
        args.student_num_attention_heads,
        args.student_hidden_dropout_prob,
        args.teacher_vocab_size,
        args.teacher_hidden_size,
        args.teacher_num_hidden_layers,
        args.teacher_num_attention_heads,
        args.teacher_hidden_dropout_prob,
    )
    model.to('cuda')
    os.makedirs(args.model_save_dir, exist_ok=True)
    if args.do_train:
        of_cross_entropy = flow.nn.CrossEntropyLoss(reduction='mean')
        of_cross_entropy.to("cuda")
        of_sgd = flow.optim.SGD(model.parameters(), lr=args.learning_rate)
        of_losses = []  # kept for loss-curve inspection/debugging
        print_interval = 10
        best_dev_acc = 0.0
        for epoch in range(args.num_epochs):
            model.train()
            for b in range(len(train_data_loader)):
                blob_confs = train_data_loader.get_batch()
                start_t = time.time()
                input_ids = blob_confs['input_ids'].to("cuda")
                segment_ids = blob_confs['segment_ids'].to("cuda")
                label_ids = blob_confs['label_ids'].squeeze(-1).to("cuda")
                student_logits, student_sequence_out, teacher_logits, teacher_sequence_out = model(input_ids, segment_ids)
                # Patient (per-layer) loss and soft-label distillation loss.
                pt_loss = layer_distill(args, student_sequence_out, teacher_sequence_out)
                ds_loss = pred_distill(args, student_logits, teacher_logits)
                loss_ce = of_cross_entropy(student_logits, label_ids)
                # (1 - alpha) * CE + alpha * soft-label + beta * patient.
                loss_pkd = loss_ce * (1 - args.kd_alpha) + args.kd_alpha * ds_loss + args.kd_beta * pt_loss
                loss_pkd.backward()
                of_sgd.step()
                of_sgd.zero_grad()
                end_t = time.time()
                if b % print_interval == 0:
                    l = loss_pkd.numpy()[0]
                    of_losses.append(l)
                    print(
                        "epoch {} train iter {} oneflow loss {}, train time : {}".format(
                            epoch, b, l, end_t - start_t
                        )
                    )
            print('EvalValJob...')
            result = eval(model, eval_data_loader, desc='eval')
            save_model = False
            if task_name in acc_tasks and result['accuracy'] > best_dev_acc:
                best_dev_acc = result['accuracy']
                save_model = True
            # NOTE(review): corr_tasks (sts-b) currently have no best-model
            # criterion; regression metrics are not produced by eval().
            if task_name in mcc_tasks and result['matthews_corrcoef'] > best_dev_acc:
                best_dev_acc = result['matthews_corrcoef']
                save_model = True
            print('Best result:', result)
            if save_model:
                # Replace any previous snapshot with a fresh directory.
                if os.path.exists(args.model_save_dir):
                    import shutil
                    shutil.rmtree(args.model_save_dir)
                os.makedirs(args.model_save_dir, exist_ok=True)
                snapshot_save_path = args.model_save_dir
                print("Saving best model to {}".format(snapshot_save_path))
                flow.save(model.state_dict(), snapshot_save_path)
    if args.do_eval:
        print('Loading model...')
        print(args.model_save_dir)
        if not args.do_train:
            # Only reload from disk when the in-memory model was not just
            # trained (the original unconditionally called load_state_dict
            # and crashed with an unbound model_dict after training).
            model_dict = flow.load(args.model_save_dir)
            print('successful')
            model.load_state_dict(model_dict)
        print('Evaluation...')
        result = eval(model, eval_data_loader, desc='eval')
if __name__ == "__main__":
    # Script entry point: parse CLI options, then train and/or evaluate.
    args = _parse_args()
    main(args)
"oneflow.experimental.InitEagerGlobalSession",
"oneflow.experimental.nn.Softmax",
"oneflow.experimental.no_grad",
"oneflow.experimental.nn.Linear",
"oneflow.experimental.load",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.nn.CrossEntropyLoss"
] | [((326, 351), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (341, 351), False, 'import os\n'), ((364, 386), 'os.path.split', 'os.path.split', (['curPath'], {}), '(curPath)\n', (377, 386), False, 'import os\n'), ((1063, 1083), 'config.get_parser', 'configs.get_parser', ([], {}), '()\n', (1081, 1083), True, 'import config as configs\n'), ((7194, 7205), 'time.time', 'time.time', ([], {}), '()\n', (7203, 7205), False, 'import time\n'), ((7739, 7750), 'time.time', 'time.time', ([], {}), '()\n', (7748, 7750), False, 'import time\n'), ((7854, 7885), 'util.getdirsize', 'getdirsize', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (7864, 7885), False, 'from util import getdirsize\n'), ((7976, 8011), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (7990, 8011), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((8022, 8060), 'sklearn.metrics.matthews_corrcoef', 'matthews_corrcoef', (['labels', 'predictions'], {}), '(labels, predictions)\n', (8039, 8060), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((8077, 8113), 'sklearn.metrics.precision_score', 'precision_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (8092, 8113), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((8127, 8160), 'sklearn.metrics.recall_score', 'recall_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (8139, 8160), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, f1_score\n'), ((8171, 8200), 'sklearn.metrics.f1_score', 'f1_score', (['labels', 'predictions'], {}), '(labels, predictions)\n', (8179, 8200), False, 'from sklearn.metrics import accuracy_score, matthews_corrcoef, precision_score, recall_score, 
f1_score\n'), ((9431, 9460), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (9458, 9460), True, 'import oneflow.experimental as flow\n'), ((9465, 9494), 'oneflow.experimental.InitEagerGlobalSession', 'flow.InitEagerGlobalSession', ([], {}), '()\n', (9492, 9494), True, 'import oneflow.experimental as flow\n'), ((9520, 9688), 'data_util.OFRecordDataLoader', 'OFRecordDataLoader', (['args.train_data_dir', 'args.batch_size_per_device', 'args.train_data_part_num', 'args.seq_length', 'args.train_data_prefix', 'args.train_example_num'], {}), '(args.train_data_dir, args.batch_size_per_device, args.\n train_data_part_num, args.seq_length, args.train_data_prefix, args.\n train_example_num)\n', (9538, 9688), False, 'from data_util import OFRecordDataLoader\n'), ((9928, 10096), 'data_util.OFRecordDataLoader', 'OFRecordDataLoader', (['args.eval_data_dir', 'args.eval_batch_size_per_device', 'args.eval_data_part_num', 'args.seq_length', 'args.eval_data_prefix', 'args.eval_example_num'], {}), '(args.eval_data_dir, args.eval_batch_size_per_device,\n args.eval_data_part_num, args.seq_length, args.eval_data_prefix, args.\n eval_example_num)\n', (9946, 10096), False, 'from data_util import OFRecordDataLoader\n'), ((5509, 5608), 'bert_model.bert.BERT', 'BERT', (['student_vocab_size', 'student_hidden', 'student_n_layers', 'student_attn_heads', 'student_dropout'], {}), '(student_vocab_size, student_hidden, student_n_layers,\n student_attn_heads, student_dropout)\n', (5513, 5608), False, 'from bert_model.bert import BERT\n'), ((5774, 5873), 'bert_model.bert.BERT', 'BERT', (['teacher_vocab_size', 'teacher_hidden', 'teacher_n_layers', 'teacher_attn_heads', 'teacher_dropout'], {}), '(teacher_vocab_size, teacher_hidden, teacher_n_layers,\n teacher_attn_heads, teacher_dropout)\n', (5778, 5873), False, 'from bert_model.bert import BERT\n'), ((6042, 6070), 'oneflow.experimental.nn.Linear', 'nn.Linear', (['student_hidden', '(2)'], {}), 
'(student_hidden, 2)\n', (6051, 6070), True, 'import oneflow.experimental.nn as nn\n'), ((6106, 6134), 'oneflow.experimental.nn.Linear', 'nn.Linear', (['teacher_hidden', '(2)'], {}), '(teacher_hidden, 2)\n', (6115, 6134), True, 'import oneflow.experimental.nn as nn\n'), ((6165, 6182), 'oneflow.experimental.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6175, 6182), True, 'import oneflow.experimental.nn as nn\n'), ((6214, 6231), 'oneflow.experimental.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (6224, 6231), True, 'import oneflow.experimental.nn as nn\n'), ((7215, 7229), 'oneflow.experimental.no_grad', 'flow.no_grad', ([], {}), '()\n', (7227, 7229), True, 'import oneflow.experimental as flow\n'), ((8669, 8700), 'os.path.exists', 'os.path.exists', (['args.result_dir'], {}), '(args.result_dir)\n', (8683, 8700), False, 'import os\n'), ((8710, 8738), 'os.makedirs', 'os.makedirs', (['args.result_dir'], {}), '(args.result_dir)\n', (8721, 8738), False, 'import os\n'), ((8835, 8858), 'json.dump', 'json.dump', (['save_dict', 'f'], {}), '(save_dict, f)\n', (8844, 8858), False, 'import json\n'), ((10886, 10921), 'os.path.exists', 'os.path.exists', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (10900, 10921), False, 'import os\n'), ((10931, 10963), 'os.makedirs', 'os.makedirs', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (10942, 10963), False, 'import os\n'), ((11014, 11056), 'oneflow.experimental.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (11038, 11056), True, 'import oneflow.experimental as flow\n'), ((248, 259), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (257, 259), False, 'import os\n'), ((435, 446), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (444, 446), False, 'import os\n'), ((14190, 14220), 'oneflow.experimental.load', 'flow.load', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (14199, 14220), True, 'import oneflow.experimental as 
flow\n'), ((988, 1048), 'argparse.ArgumentTypeError', 'argparse.ArgumentTypeError', (['"""Unsupported value encountered."""'], {}), "('Unsupported value encountered.')\n", (1014, 1048), False, 'import argparse\n'), ((11579, 11590), 'time.time', 'time.time', ([], {}), '()\n', (11588, 11590), False, 'import time\n'), ((11964, 12027), 'knowledge_distill_util.layer_distill', 'layer_distill', (['args', 'student_sequence_out', 'teacher_sequence_out'], {}), '(args, student_sequence_out, teacher_sequence_out)\n', (11977, 12027), False, 'from knowledge_distill_util import layer_distill, pred_distill\n'), ((12053, 12103), 'knowledge_distill_util.pred_distill', 'pred_distill', (['args', 'student_logits', 'teacher_logits'], {}), '(args, student_logits, teacher_logits)\n', (12065, 12103), False, 'from knowledge_distill_util import layer_distill, pred_distill\n'), ((12406, 12417), 'time.time', 'time.time', ([], {}), '()\n', (12415, 12417), False, 'import time\n'), ((13592, 13627), 'os.path.exists', 'os.path.exists', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (13606, 13627), False, 'import os\n'), ((13868, 13901), 'os.path.join', 'os.path.join', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (13880, 13901), False, 'import os\n'), ((13683, 13717), 'shutil.rmtree', 'shutil.rmtree', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (13696, 13717), False, 'import shutil\n'), ((13741, 13776), 'os.path.exists', 'os.path.exists', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (13755, 13776), False, 'import os\n'), ((13798, 13830), 'os.makedirs', 'os.makedirs', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (13809, 13830), False, 'import os\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# RUN: python3 %s
import os
import sys
sys.path.append(os.path.abspath(os.path.dirname(__file__)))
sys.path.append(os.path.abspath(os.path.dirname(__file__)) + "/..")
os.environ["ONEFLOW_MLIR_ENABLE_ROUND_TRIP"] = "1"
os.environ["ONEFLOW_MLIR_ENABLE_CODEGEN_FUSERS"] = "1"
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.core.job import job_pb2 as job_pb
from networks.resnet50 import resnet50
class InferGraph(flow.nn.Graph):
    """Inference graph: ResNet-50 on CUDA followed by a softmax."""

    def __init__(self, placement_arg=None):
        super().__init__()
        net = resnet50()
        if placement_arg is not None:
            # A "placement" key selects global (SBP) placement; otherwise
            # the argument is a plain device move.
            if "placement" in placement_arg:
                net.to_global(**placement_arg)
            else:
                net.to(**placement_arg)
        self.model = net

    def build(self, image):
        scores = self.model(image.to("cuda"))
        return scores.softmax()
@unittest.skipIf(not flow.sysconfig.with_mlir(), "only test with mlir")
@flow.unittest.skip_unless_1n1d()
class GraphSaveTestCase(flow.unittest.TestCase):
    def test_save_and_load(self):
        """Compile an inference graph, save it, and reload the job from MLIR.

        Checks only that the serialized job parses back into a Job proto;
        executing the reloaded job is still a TODO.
        """
        placement_arg = {
            "placement": flow.placement("cuda", ranks=[0]),
            "sbp": flow.sbp.broadcast,
        }
        graph = InferGraph(placement_arg)
        image_placeholder = flow.empty(
            (1, 3, 224, 224),
            dtype=flow.float32,
            placement=flow.placement("cpu", ranks=[0]),
            sbp=flow.sbp.broadcast,
        )
        graph._compile(image_placeholder)
        saved_path = os.path.join("saved_model", graph.name)
        # exist_ok avoids the check-then-create race of the original
        # exists()/makedirs() pair.
        os.makedirs(saved_path, exist_ok=True)
        flow.save(graph, saved_path)
        saved_ir_path = os.path.join(saved_path, "model.mlir")
        serialized_job = oneflow._oneflow_internal.nn.graph.LoadSerializedJobFromIR(
            saved_ir_path
        )
        job = job_pb.Job()
        job.ParseFromString(serialized_job)
        # TODO: run loaded job as graph and original graph, compare the result
if __name__ == "__main__":
    # Run the MLIR graph save/load test case.
    unittest.main()
| [
"oneflow.save",
"oneflow.sysconfig.with_mlir",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.core.job.job_pb2.Job",
"oneflow.placement"
] | [((1571, 1603), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1601, 1603), True, 'import oneflow as flow\n'), ((2656, 2671), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2669, 2671), False, 'import unittest\n'), ((663, 688), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (678, 688), False, 'import os\n'), ((1139, 1149), 'networks.resnet50.resnet50', 'resnet50', ([], {}), '()\n', (1147, 1149), False, 'from networks.resnet50 import resnet50\n'), ((2131, 2170), 'os.path.join', 'os.path.join', (['"""saved_model"""', 'graph.name'], {}), "('saved_model', graph.name)\n", (2143, 2170), False, 'import os\n'), ((2258, 2286), 'oneflow.save', 'flow.save', (['graph', 'saved_path'], {}), '(graph, saved_path)\n', (2267, 2286), True, 'import oneflow as flow\n'), ((2312, 2350), 'os.path.join', 'os.path.join', (['saved_path', '"""model.mlir"""'], {}), "(saved_path, 'model.mlir')\n", (2324, 2350), False, 'import os\n'), ((2486, 2498), 'oneflow.core.job.job_pb2.Job', 'job_pb.Job', ([], {}), '()\n', (2496, 2498), True, 'from oneflow.core.job import job_pb2 as job_pb\n'), ((1519, 1545), 'oneflow.sysconfig.with_mlir', 'flow.sysconfig.with_mlir', ([], {}), '()\n', (1543, 1545), True, 'import oneflow as flow\n'), ((723, 748), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (738, 748), False, 'import os\n'), ((1738, 1771), 'oneflow.placement', 'flow.placement', (['"""cuda"""'], {'ranks': '[0]'}), "('cuda', ranks=[0])\n", (1752, 1771), True, 'import oneflow as flow\n'), ((2186, 2212), 'os.path.exists', 'os.path.exists', (['saved_path'], {}), '(saved_path)\n', (2200, 2212), False, 'import os\n'), ((2226, 2249), 'os.makedirs', 'os.makedirs', (['saved_path'], {}), '(saved_path)\n', (2237, 2249), False, 'import os\n'), ((1988, 2020), 'oneflow.placement', 'flow.placement', (['"""cpu"""'], {'ranks': '[0]'}), "('cpu', ranks=[0])\n", (2002, 2020), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
# Enable on-demand GPU memory growth for TensorFlow so it does not reserve
# all device memory up front (this file runs OneFlow jobs on the same GPUs).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def compare_with_tensorflow_rmsprop(
    device_type, x_shape, centered, decay_rate, learning_rate, train_iters
):
    """Train one variable with OneFlow RMSProp and tf.keras RMSprop on
    identical masked-mean losses and assert the results agree."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testRmsprop(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.RMSProp(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                decay_rate=decay_rate,
                epsilon=0,
                centered=centered,
            ).minimize(loss)
            return x

    # Pre-generate one mask per step so both frameworks see identical data.
    random_masks_seq = [
        np.random.uniform(size=x_shape).astype(np.float32)
        for _ in range(train_iters + 1)
    ]
    init_value = None
    for step in range(train_iters + 1):
        of_x = testRmsprop(random_masks_seq[step])
        if step == 0:
            init_value = np.copy(of_x)

    tf_var = tf.Variable(init_value)
    tf_opt = tf.keras.optimizers.RMSprop(
        learning_rate=learning_rate,
        rho=decay_rate,
        momentum=0.0,
        epsilon=0,
        centered=centered,
    )
    for step in range(train_iters):
        with tf.GradientTape() as tape:
            mask_var = tf.Variable(random_masks_seq[step])
            tf_loss = tf.reduce_mean(tf_var * mask_var)
        grad = tape.gradient(tf_loss, tf_var)
        tf_opt.apply_gradients([(grad, tf_var)])

    # On mismatch, show the elementwise difference.
    assert np.allclose(
        of_x.flatten(), tf_var.numpy().flatten(), rtol=5e-3, atol=5e-3
    ), (of_x.flatten() - tf_var.numpy().flatten())
def compare_with_tensorflow_adam(
    device_type, x_shape, beta1, beta2, epsilon, learning_rate, train_iters
):
    """Train one variable with OneFlow Adam and tf.keras Adam on identical
    masked-mean losses and assert the results agree."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testAdam(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                do_bias_correction=True,
            ).minimize(loss)
            return x

    # Pre-generate one mask per step so both frameworks see identical data.
    random_masks_seq = [
        np.random.uniform(size=x_shape).astype(np.float32)
        for _ in range(train_iters + 1)
    ]
    init_value = None
    for step in range(train_iters + 1):
        of_x = testAdam(random_masks_seq[step])
        if step == 0:
            init_value = np.copy(of_x)

    tf_var = tf.Variable(init_value)
    tf_opt = tf.keras.optimizers.Adam(
        learning_rate=learning_rate,
        beta_1=beta1,
        beta_2=beta2,
        epsilon=epsilon,
        amsgrad=False,
    )
    for step in range(train_iters):
        with tf.GradientTape() as tape:
            mask_var = tf.Variable(random_masks_seq[step])
            tf_loss = tf.reduce_mean(tf_var * mask_var)
        grad = tape.gradient(tf_loss, tf_var)
        tf_opt.apply_gradients([(grad, tf_var)])

    assert np.allclose(of_x.flatten(), tf_var.numpy().flatten(), rtol=1e-4, atol=1e-4)
def compare_with_numpy_adamw(
    device_type,
    x_shape,
    beta1,
    beta2,
    epsilon,
    weight_decay,
    learning_rate,
    train_iters,
):
    """Train one variable with OneFlow AdamW and replay the same updates
    with a NumPy reference implementation; assert the results agree."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testAdamW(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.AdamW(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                weight_decay=weight_decay,
                do_bias_correction=True,
            ).minimize(loss)
            return x

    # Pre-generate one mask per step so both sides see identical data.
    random_masks_seq = [
        np.random.uniform(size=x_shape).astype(np.float32)
        for _ in range(train_iters + 1)
    ]
    init_value = None
    for step in range(train_iters + 1):
        of_x = testAdamW(random_masks_seq[step])
        if step == 0:
            init_value = np.copy(of_x)

    def adamw_update_numpy(
        param,
        gradient,
        iter,
        m,
        v,
        lr=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        weight_decay=0.9,
    ):
        # Bias-corrected step size, then Adam moments with decoupled decay.
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        m_t = beta1 * m + (1 - beta1) * gradient
        v_t = beta2 * v + (1 - beta2) * gradient * gradient
        param_t = param - lr_t * (m_t / (np.sqrt(v_t) + epsilon) + weight_decay * param)
        return param_t, m_t, v_t

    param = init_value
    # Gradient of mean(x * mask) w.r.t. x is mask / N.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    for step in range(train_iters):
        param, m, v = adamw_update_numpy(
            param,
            gradient * random_masks_seq[step],
            step,
            m,
            v,
            learning_rate,
            beta1,
            beta2,
            epsilon,
            weight_decay,
        )
    assert np.allclose(of_x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4)
def compare_with_numpy_lazy_adam(
    device_type, x_shape, beta1, beta2, epsilon, learning_rate, train_iters,
):
    """Train one variable with OneFlow LazyAdam and replay the updates with
    a NumPy Adam reference; assert the results agree.

    The loss is a plain mean of the variable, so the gradient is a dense
    constant 1/N and the dense Adam reference matches.

    Cleanup over the original: the NumPy helper built ``np.copy``
    intermediates (m_t, v_t, param_t) that were immediately overwritten —
    those dead stores are removed; the numeric results are unchanged.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testLazyAdam() -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x)
            flow.optimizer.LazyAdam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
            ).minimize(loss)
            return x

    init_value = None
    for i in range(train_iters + 1):
        x = testLazyAdam()
        if i == 0:
            init_value = np.copy(x)

    def lazy_adam_update_numpy(
        param, gradient, iter, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7,
    ):
        # One bias-corrected Adam step; returns (param, m, v).
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        m_t = beta1 * m + (1 - beta1) * gradient
        v_t = beta2 * v + (1 - beta2) * gradient * gradient
        param_t = param - lr_t * m_t / (np.sqrt(v_t) + epsilon)
        return param_t, m_t, v_t

    param = init_value
    # d(mean(x))/dx is 1/N everywhere.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    for i in range(train_iters):
        param, m, v = lazy_adam_update_numpy(
            param, gradient, i, m, v, learning_rate, beta1, beta2, epsilon
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4)
def compare_with_numpy_lars(
    device_type,
    x_shape,
    momentum_beta,
    epsilon,
    lars_coefficient,
    learning_rate,
    weight_decay,
    train_iters,
):
    """Train one variable with OneFlow LARS and replay the same updates
    with a NumPy reference implementation; assert the results agree."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)

    @flow.global_function(type="train", function_config=func_config)
    def testLars(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.LARS(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum_beta=momentum_beta,
                epsilon=epsilon,
                lars_coefficient=lars_coefficient,
                weight_decay=weight_decay,
            ).minimize(loss)
            return x

    # Pre-generate one mask per step so both sides see identical data.
    random_masks_seq = [
        np.random.uniform(size=x_shape).astype(np.float32)
        for _ in range(train_iters + 1)
    ]
    init_value = None
    for step in range(train_iters + 1):
        of_x = testLars(random_masks_seq[step])
        if step == 0:
            init_value = np.copy(of_x)

    def lars_update_numpy(
        param,
        gradient,
        momentum,
        learning_rate,
        momentum_beta,
        weight_decay,
        epsilon,
        lars_coefficient,
    ):
        import math
        model_norm = math.sqrt(np.sum(param * param))
        model_diff_norm = math.sqrt(np.sum(gradient * gradient))
        # Trust ratio; falls back to 1.0 when either norm is degenerate.
        if model_norm <= 0 or model_diff_norm <= 0:
            lars = 1.0
        else:
            lars = (
                lars_coefficient
                * model_norm
                / (model_diff_norm + weight_decay * model_norm + epsilon)
            )
        local_learning_rate = learning_rate * lars
        momentum_t = momentum_beta * momentum - local_learning_rate * gradient
        param_t = param + momentum_t - local_learning_rate * weight_decay * param
        return param_t, momentum_t

    param = init_value
    # Gradient of mean(x * mask) w.r.t. x is mask / N.
    gradient = np.full(param.shape, 1.0 / np.prod(param.shape))
    momentum = np.zeros(param.shape)
    for step in range(train_iters):
        param, momentum = lars_update_numpy(
            param,
            gradient * random_masks_seq[step],
            momentum,
            learning_rate,
            momentum_beta,
            weight_decay,
            epsilon,
            lars_coefficient,
        )
    assert np.allclose(of_x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4)
def compare_with_tensorflow_sgd(
    device_type, x_shape, momentum, learning_rate, train_iters
):
    """Train one variable with OneFlow's (momentum) SGD and compare the result
    against tf.keras.optimizers.SGD driven by the same random masks.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    @flow.global_function(type="train", function_config=func_config)
    def testSGD(
        random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name="x",
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
                trainable=True,
            )
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum=momentum,
            ).minimize(loss)
            return x
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = None
    for i in range(train_iters + 1):
        x = testSGD(random_masks_seq[i])
        if i == 0:
            # First call returns the freshly initialized variable value.
            init_value = np.copy(x)
    # Replay the identical masks through TensorFlow starting from the same
    # initial value. NOTE(review): `tf` is presumably imported at module top,
    # outside this view — confirm.
    var = tf.Variable(init_value)
    opt = tf.keras.optimizers.SGD(
        learning_rate=learning_rate, momentum=momentum, nesterov=False
    )
    for i in range(train_iters):
        with tf.GradientTape() as tape:
            random_mask = tf.Variable(random_masks_seq[i])
            loss = tf.reduce_mean(var * random_mask)
        gradients = tape.gradient(loss, var)
        opt.apply_gradients(zip([gradients], [var]))
    assert np.allclose(x.flatten(), var.numpy().flatten(), rtol=1e-4, atol=1e-4,)
def unique_grads(sparse_ids, sparse_grads):
    """Sum gradient slices that belong to the same sparse id.

    Args:
        sparse_ids: Integer ndarray of gather indices (any shape).
        sparse_grads: Gradient ndarray whose leading dims match sparse_ids;
            the trailing dims form the per-id slice shape.

    Returns:
        Dict mapping each id to its accumulated gradient slice (ndarray).
    """
    flat_ids = sparse_ids.flatten()
    slice_shape = sparse_grads.shape[len(sparse_ids.shape) :]
    flat_grads = sparse_grads.reshape((flat_ids.size,) + slice_shape)
    accumulated = {}
    for pos, key in enumerate(flat_ids):
        if key in accumulated:
            accumulated[key] += flat_grads[pos].copy()
        else:
            accumulated[key] = flat_grads[pos].copy()
    return accumulated
def compare_with_numpy_indexed_slices_sgd(
    device_type,
    model_shape,
    ids_shape,
    grad_shape,
    momentum_beta,
    learning_rate,
    train_iters,
    mul_scalar,
):
    """Train an embedding table via indexed-slices (sparse) momentum SGD and
    compare against a NumPy reference that updates only the gathered rows.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    # Route gradients of the "embeddings" variable through the indexed-slices
    # (sparse) optimizer path.
    func_config.indexed_slices_optimizer_conf(
        dict(include_op_names=dict(op_name=["embeddings"]))
    )
    @flow.global_function(type="train", function_config=func_config)
    def testIndexedSlicesSGD(
        sparse_ids: flow.typing.Numpy.Placeholder(ids_shape, dtype=flow.int32),
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            embedding_table = flow.get_variable(
                name="embeddings",
                shape=model_shape,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            embedding = flow.gather(
                params=embedding_table * mul_scalar, indices=sparse_ids
            )
            loss = flow.math.reduce_mean(embedding)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum=momentum_beta,
            ).minimize(loss)
            return embedding_table
    sparse_ids = np.random.randint(model_shape[0], size=ids_shape).astype(np.int32)
    init_value = None
    for i in range(train_iters + 1):
        x = testIndexedSlicesSGD(sparse_ids)
        if i == 0:
            # First call returns the freshly initialized table.
            init_value = np.copy(x)
    def indexed_slices_update_numpy(
        param, unique_dict, iter, momentum, lr=0.001, momentum_beta=0,
    ):
        # One sparse momentum-SGD step: only rows present in unique_dict move.
        param_t = np.copy(param)
        momentum_t = np.copy(momentum)
        for ids in unique_dict.keys():
            next_momentum = momentum_beta * momentum_t[ids] - lr * unique_dict[ids]
            momentum_t[ids] = next_momentum
            param_t_o = param[ids] + next_momentum
            param_t[ids] = param_t_o
        return param_t, momentum_t
    param = init_value
    # Gradient of mean(gathered embedding) w.r.t. each gathered element,
    # scaled by mul_scalar from the table multiplication.
    gradient = np.full(grad_shape, float(mul_scalar) / np.prod(grad_shape))
    momentum = np.zeros(param.shape)
    unique_dict = unique_grads(sparse_ids, gradient)
    for i in range(train_iters):
        param, momentum = indexed_slices_update_numpy(
            param, unique_dict, i, momentum, learning_rate, momentum_beta
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_indexed_slices_sgdw(
    device_type,
    model_shape,
    ids_shape,
    grad_shape,
    momentum_beta,
    learning_rate,
    train_iters,
    mul_scalar,
    weight_decay,
):
    """Train an embedding table via indexed-slices SGDW (momentum SGD with
    decoupled weight decay) and compare against a NumPy reference.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    # Route gradients of the "embeddings" variable through the indexed-slices
    # (sparse) optimizer path.
    func_config.indexed_slices_optimizer_conf(
        dict(include_op_names=dict(op_name=["embeddings"]))
    )
    @flow.global_function(type="train", function_config=func_config)
    def testIndexedSlicesSGDW(
        sparse_ids: flow.typing.Numpy.Placeholder(ids_shape, dtype=flow.int32),
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            embedding_table = flow.get_variable(
                name="embeddings",
                shape=model_shape,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            embedding = flow.gather(
                params=embedding_table * mul_scalar, indices=sparse_ids
            )
            loss = flow.math.reduce_mean(embedding)
            flow.optimizer.SGDW(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum=momentum_beta,
                weight_decay=weight_decay,
            ).minimize(loss)
            return embedding_table
    sparse_ids = np.random.randint(model_shape[0], size=ids_shape).astype(np.int32)
    init_value = None
    for i in range(train_iters + 1):
        x = testIndexedSlicesSGDW(sparse_ids)
        if i == 0:
            # First call returns the freshly initialized table.
            init_value = np.copy(x)
    def indexed_slices_update_numpy(
        param, unique_dict, iter, momentum, lr=0.001, momentum_beta=0, weight_decay=0.9,
    ):
        # One sparse SGDW step: momentum update plus decoupled weight decay,
        # applied only to rows present in unique_dict.
        param_t = np.copy(param)
        momentum_t = np.copy(momentum)
        for ids in unique_dict.keys():
            next_momentum = momentum_beta * momentum_t[ids] - lr * unique_dict[ids]
            momentum_t[ids] = next_momentum
            param_t_o = param[ids] + next_momentum - lr * weight_decay * param[ids]
            param_t[ids] = param_t_o
        return param_t, momentum_t
    param = init_value
    # Gradient of mean(gathered embedding), scaled by mul_scalar.
    gradient = np.full(grad_shape, float(mul_scalar) / np.prod(grad_shape))
    momentum = np.zeros(param.shape)
    unique_dict = unique_grads(sparse_ids, gradient)
    for i in range(train_iters):
        param, momentum = indexed_slices_update_numpy(
            param, unique_dict, i, momentum, learning_rate, momentum_beta, weight_decay
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_indexed_slices_adam(
    device_type,
    model_shape,
    ids_shape,
    grad_shape,
    beta1,
    beta2,
    epsilon,
    learning_rate,
    train_iters,
    mul_scalar,
):
    """Train an embedding table via indexed-slices (sparse) Adam and compare
    the result against a NumPy reference that updates only the gathered rows.

    Args:
        device_type: "gpu" or "cpu".
        model_shape: Shape of the embedding table variable.
        ids_shape: Shape of the integer gather indices.
        grad_shape: Shape of the gathered embedding output.
        beta1, beta2, epsilon: Adam hyper-parameters.
        learning_rate: Constant learning rate.
        train_iters: Number of training steps.
        mul_scalar: Scalar the table is multiplied by before the gather,
            which scales the gradient accordingly.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    # Route gradients of the "embeddings" variable through the indexed-slices
    # (sparse) optimizer path.
    func_config.indexed_slices_optimizer_conf(
        dict(include_op_names=dict(op_name=["embeddings"]))
    )

    @flow.global_function(type="train", function_config=func_config)
    def testIndexedSlicesAdam(
        sparse_ids: flow.typing.Numpy.Placeholder(ids_shape, dtype=flow.int32),
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            embedding_table = flow.get_variable(
                name="embeddings",
                shape=model_shape,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            embedding = flow.gather(
                params=embedding_table * mul_scalar, indices=sparse_ids
            )
            loss = flow.math.reduce_mean(embedding)
            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                do_bias_correction=True,
            ).minimize(loss)
            return embedding_table

    sparse_ids = np.random.randint(model_shape[0], size=ids_shape).astype(np.int32)
    init_value = None
    for i in range(train_iters + 1):
        x = testIndexedSlicesAdam(sparse_ids)
        if i == 0:
            # First call returns the freshly initialized table.
            init_value = np.copy(x)

    def indexed_slices_update_numpy(
        param, unique_dict, iter, m, v, lr=0.001, beta1=0.9, beta2=0.999, epsilon=1e-7,
    ):
        # One sparse Adam step: only rows present in unique_dict move.
        param_t = np.copy(param)
        m_t = np.copy(m)
        v_t = np.copy(v)
        # The bias-corrected learning rate depends only on loop-invariant
        # values, so compute it once instead of once per id.
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        for ids in unique_dict.keys():
            m_t_o = beta1 * m[ids] + (1 - beta1) * unique_dict[ids]
            v_t_o = beta2 * v[ids] + (1 - beta2) * unique_dict[ids] * unique_dict[ids]
            m_t[ids] = m_t_o
            v_t[ids] = v_t_o
            param_t_o = param[ids] - lr_t * m_t[ids] / (np.sqrt(v_t[ids]) + epsilon)
            param_t[ids] = param_t_o
        return param_t, m_t, v_t

    param = init_value
    # Gradient of mean(gathered embedding), scaled by mul_scalar.
    gradient = np.full(grad_shape, float(mul_scalar) / np.prod(grad_shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    unique_dict = unique_grads(sparse_ids, gradient)
    for i in range(train_iters):
        param, m, v = indexed_slices_update_numpy(
            param, unique_dict, i, m, v, learning_rate, beta1, beta2, epsilon
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_numpy_indexed_slices_adamw(
    device_type,
    model_shape,
    ids_shape,
    grad_shape,
    beta1,
    beta2,
    epsilon,
    learning_rate,
    train_iters,
    mul_scalar,
    weight_decay,
):
    """Train an embedding table via indexed-slices AdamW (Adam with decoupled
    weight decay) and compare against a NumPy reference that updates only the
    gathered rows.

    Args:
        device_type: "gpu" or "cpu".
        model_shape: Shape of the embedding table variable.
        ids_shape: Shape of the integer gather indices.
        grad_shape: Shape of the gathered embedding output.
        beta1, beta2, epsilon: Adam hyper-parameters.
        learning_rate: Constant learning rate.
        train_iters: Number of training steps.
        mul_scalar: Scalar the table is multiplied by before the gather.
        weight_decay: Decoupled weight-decay coefficient.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    # Route gradients of the "embeddings" variable through the indexed-slices
    # (sparse) optimizer path.
    func_config.indexed_slices_optimizer_conf(
        dict(include_op_names=dict(op_name=["embeddings"]))
    )

    @flow.global_function(type="train", function_config=func_config)
    def testIndexedSlicesAdamW(
        sparse_ids: flow.typing.Numpy.Placeholder(ids_shape, dtype=flow.int32),
    ) -> flow.typing.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            embedding_table = flow.get_variable(
                name="embeddings",
                shape=model_shape,
                initializer=flow.random_uniform_initializer(minval=0, maxval=100),
            )
            embedding = flow.gather(
                params=embedding_table * mul_scalar, indices=sparse_ids
            )
            loss = flow.math.reduce_mean(embedding)
            flow.optimizer.AdamW(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                do_bias_correction=True,
                weight_decay=weight_decay,
            ).minimize(loss)
            return embedding_table

    sparse_ids = np.random.randint(model_shape[0], size=ids_shape).astype(np.int32)
    init_value = None
    for i in range(train_iters + 1):
        x = testIndexedSlicesAdamW(sparse_ids)
        if i == 0:
            # First call returns the freshly initialized table.
            init_value = np.copy(x)

    def indexed_slices_update_numpy(
        param,
        unique_dict,
        iter,
        m,
        v,
        lr=0.001,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-7,
        weight_decay=0.9,
    ):
        # One sparse AdamW step: Adam update plus decoupled weight decay,
        # applied only to rows present in unique_dict.
        param_t = np.copy(param)
        m_t = np.copy(m)
        v_t = np.copy(v)
        # The bias-corrected learning rate depends only on loop-invariant
        # values, so compute it once instead of once per id.
        lr_t = lr * np.sqrt(1 - beta2 ** (iter + 1)) / (1 - beta1 ** (iter + 1))
        for ids in unique_dict.keys():
            m_t_o = beta1 * m[ids] + (1 - beta1) * unique_dict[ids]
            v_t_o = beta2 * v[ids] + (1 - beta2) * unique_dict[ids] * unique_dict[ids]
            m_t[ids] = m_t_o
            v_t[ids] = v_t_o
            param_t_o = param[ids] - lr_t * (
                m_t[ids] / (np.sqrt(v_t[ids]) + epsilon) + weight_decay * param[ids]
            )
            param_t[ids] = param_t_o
        return param_t, m_t, v_t

    param = init_value
    # Gradient of mean(gathered embedding), scaled by mul_scalar.
    gradient = np.full(grad_shape, float(mul_scalar) / np.prod(grad_shape))
    m = np.zeros(param.shape)
    v = np.zeros(param.shape)
    unique_dict = unique_grads(sparse_ids, gradient)
    for i in range(train_iters):
        param, m, v = indexed_slices_update_numpy(
            param,
            unique_dict,
            i,
            m,
            v,
            learning_rate,
            beta1,
            beta2,
            epsilon,
            weight_decay,
        )
    assert np.allclose(x.flatten(), param.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_flow_job_fused_sgd_model_update(
    device_type, x_shape, momentum, learning_rate, train_iters
):
    """Check that SGD with fused model-update ops matches unfused SGD.

    Builds two identical train jobs over separate variables ("x1"/"x2") —
    the second with enable_fuse_model_update_ops(True) — feeds both the same
    random masks, and asserts the trained variables agree.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    def flow_net(var_name, random_mask):
        # Shared network: variable * 3.0 * 2.0, with a float16 relu round-trip
        # on GPU, trained by momentum SGD on mean(x * random_mask).
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name=var_name,
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.ones_initializer(),
                trainable=True,
            )
            constant_val = flow.constant(3.0, dtype=flow.float32, shape=(1,))
            x = x * constant_val
            x = x * 2.0
            if device_type == "gpu":
                x = flow.cast(x, flow.float16)
                x = flow.math.relu(x)
                x = flow.cast(x, flow.float)
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                momentum=momentum,
            ).minimize(loss)
            return x
    def make_sgd_job():
        # Baseline job: fusion left at its default (off).
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float32)
        @flow.global_function(type="train", function_config=func_config)
        def testSGD(
            random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
        ) -> flow.typing.Numpy:
            return flow_net("x1", random_mask)
        return testSGD
    def make_fused_sgd_job():
        # Same network on a second variable, with fused model-update ops.
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float32)
        func_config.enable_fuse_model_update_ops(True)
        @flow.global_function(type="train", function_config=func_config)
        def testFusedSGD(
            random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
        ) -> flow.typing.Numpy:
            return flow_net("x2", random_mask)
        return testFusedSGD
    sgd_job = make_sgd_job()
    fused_sgd_job = make_fused_sgd_job()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    for i in range(train_iters + 1):
        var1 = sgd_job(random_masks_seq[i])
    for i in range(train_iters + 1):
        var2 = fused_sgd_job(random_masks_seq[i])
    assert np.allclose(var1.flatten(), var2.flatten(), rtol=1e-4, atol=1e-4,)
def compare_with_flow_job_fused_adam_model_update(
    device_type, x_shape, beta1, beta2, epsilon, learning_rate, train_iters
):
    """Check that Adam with fused model-update ops matches unfused Adam.

    Builds two identical train jobs over separate variables ("x1"/"x2") —
    the second with enable_fuse_model_update_ops(True) — feeds both the same
    random masks, and asserts the trained variables agree.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    def flow_net(var_name, random_mask):
        # Shared network: variable * 3.0 * 2.0, with a float16 relu round-trip
        # on GPU, trained by Adam on mean(x * random_mask).
        with flow.scope.placement(device_type, "0:0-0"):
            x = flow.get_variable(
                name=var_name,
                shape=x_shape,
                dtype=flow.float32,
                initializer=flow.ones_initializer(),
                trainable=True,
            )
            constant_val = flow.constant(3.0, dtype=flow.float32, shape=(1,))
            x = x * constant_val
            x = x * 2.0
            if device_type == "gpu":
                x = flow.cast(x, flow.float16)
                x = flow.math.relu(x)
                x = flow.cast(x, flow.float)
            loss = flow.math.reduce_mean(x * random_mask)
            flow.optimizer.Adam(
                flow.optimizer.PiecewiseConstantScheduler([], [learning_rate]),
                beta1=beta1,
                beta2=beta2,
                epsilon=epsilon,
                do_bias_correction=True,
            ).minimize(loss)
            return x
    def make_adam_job():
        # Baseline job: fusion left at its default (off).
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float32)
        @flow.global_function(type="train", function_config=func_config)
        def testAdam(
            random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
        ) -> flow.typing.Numpy:
            return flow_net("x1", random_mask)
        return testAdam
    def make_fused_adam_job():
        # Same network on a second variable, with fused model-update ops.
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float32)
        func_config.enable_fuse_model_update_ops(True)
        @flow.global_function(type="train", function_config=func_config)
        def testFusedAdam(
            random_mask: flow.typing.Numpy.Placeholder(x_shape, dtype=flow.float32)
        ) -> flow.typing.Numpy:
            return flow_net("x2", random_mask)
        return testFusedAdam
    adam_job = make_adam_job()
    fused_adam_job = make_fused_adam_job()
    # generate random number sequences
    random_masks_seq = []
    for i in range(train_iters + 1):
        random_masks_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    for i in range(train_iters + 1):
        var1 = adam_job(random_masks_seq[i])
    for i in range(train_iters + 1):
        var2 = fused_adam_job(random_masks_seq[i])
    assert np.allclose(var1.flatten(), var2.flatten(), rtol=1e-4, atol=1e-4,)
@flow.unittest.skip_unless_1n1d()
class TestOptimizers(flow.unittest.TestCase):
    """Parameter sweeps driving the module-level optimizer comparison helpers.

    Each test builds an OrderedDict of argument lists (insertion order matches
    the helper's positional signature) and runs the helper for every
    combination produced by GenArgList.
    """

    def test_rmsprop(test_case):
        """RMSProp vs TensorFlow, centered and uncentered."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["centered"] = [True, False]
        arg_dict["decay_rate"] = [0.9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow_rmsprop(*arg)
    def test_adam(test_case):
        """Adam vs TensorFlow."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow_adam(*arg)
    def test_lazy_adam(test_case):
        """LazyAdam vs NumPy reference."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_lazy_adam(*arg)
    def test_adamw(test_case):
        """AdamW vs NumPy reference."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["weight_decay"] = [0.9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_adamw(*arg)
    def test_lars(test_case):
        """LARS vs NumPy reference."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["momentum_beta"] = [0.9]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["lars_coefficient"] = [0.0001]
        arg_dict["learning_rate"] = [1]
        arg_dict["weight_decay"] = [0.9]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_lars(*arg)
    def test_sgd(test_case):
        """Momentum SGD vs TensorFlow."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["momentum"] = [0.9, 0.0]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_tensorflow_sgd(*arg)
    def test_indexed_slices_sgd(test_case):
        """Sparse (indexed-slices) SGD vs NumPy reference."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["model_shape"] = [(200, 2)]
        arg_dict["ids"] = [(10, 4)]
        arg_dict["grad_shape"] = [(10, 4, 2)]
        arg_dict["momentum_beta"] = [0, 0.9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        arg_dict["mul_scalar"] = [1, 2]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_indexed_slices_sgd(*arg)
    @unittest.skipIf(
        flow.unittest.env.eager_execution_enabled(),
        "indexed slices sgdw doesn't work in eager mode",
    )
    def test_indexed_slices_sgdw(test_case):
        """Sparse SGDW vs NumPy reference (lazy mode only)."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["model_shape"] = [(200, 2)]
        arg_dict["ids"] = [(10, 4)]
        arg_dict["grad_shape"] = [(10, 4, 2)]
        arg_dict["momentum_beta"] = [0, 0.9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        arg_dict["mul_scalar"] = [2]
        arg_dict["weight_decay"] = [0.5, 0.3]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_indexed_slices_sgdw(*arg)
    def test_indexed_slices_adam(test_case):
        """Sparse Adam vs NumPy reference."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["model_shape"] = [(200, 2)]
        arg_dict["ids"] = [(10, 4)]
        arg_dict["grad_shape"] = [(10, 4, 2)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        arg_dict["mul_scalar"] = [1, 2]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_indexed_slices_adam(*arg)
    @unittest.skipIf(
        flow.unittest.env.eager_execution_enabled(),
        "indexed slices adamw doesn't work in eager mode",
    )
    def test_indexed_slices_adamw(test_case):
        """Sparse AdamW vs NumPy reference (lazy mode only)."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["gpu", "cpu"]
        arg_dict["model_shape"] = [(200, 2)]
        arg_dict["ids"] = [(10, 4)]
        arg_dict["grad_shape"] = [(10, 4, 2)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        arg_dict["mul_scalar"] = [2]
        arg_dict["weight_decay"] = [0.5, 0.3]
        for arg in GenArgList(arg_dict):
            compare_with_numpy_indexed_slices_adamw(*arg)
    def test_fused_sgd(test_case):
        """Fused vs unfused SGD model update."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["momentum"] = [0.9, 0.0]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_flow_job_fused_sgd_model_update(*arg)
    def test_fused_adam(test_case):
        """Fused vs unfused Adam model update."""
        arg_dict = OrderedDict()
        arg_dict["device_type"] = ["cpu", "gpu"]
        arg_dict["x_shape"] = [(10,)]
        arg_dict["beta1"] = [0.9]
        arg_dict["beta2"] = [0.99]
        arg_dict["epsilon"] = [1e-9]
        arg_dict["learning_rate"] = [1]
        arg_dict["train_iters"] = [10]
        for arg in GenArgList(arg_dict):
            compare_with_flow_job_fused_adam_model_update(*arg)
if __name__ == "__main__":
    # Run the optimizer comparison suite when executed as a script.
    unittest.main()
| [
"oneflow.ones_initializer",
"oneflow.typing.Numpy.Placeholder",
"oneflow.gather",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.unittest.env.eager_execution_enabled",
"oneflow.math.relu",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.FunctionConfig",
"on... | [((788, 839), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (832, 839), True, 'import tensorflow as tf\n'), ((31375, 31407), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (31405, 31407), True, 'import oneflow as flow\n'), ((861, 912), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (901, 912), True, 'import tensorflow as tf\n'), ((1075, 1103), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1101, 1103), True, 'import oneflow as flow\n'), ((1122, 1143), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1141, 1143), True, 'import oneflow as flow\n'), ((1198, 1261), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1218, 1261), True, 'import oneflow as flow\n'), ((2388, 2411), 'tensorflow.Variable', 'tf.Variable', (['init_value'], {}), '(init_value)\n', (2399, 2411), True, 'import tensorflow as tf\n'), ((2422, 2542), 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': 'learning_rate', 'rho': 'decay_rate', 'momentum': '(0.0)', 'epsilon': '(0)', 'centered': 'centered'}), '(learning_rate=learning_rate, rho=decay_rate,\n momentum=0.0, epsilon=0, centered=centered)\n', (2449, 2542), True, 'import tensorflow as tf\n'), ((3166, 3194), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3192, 3194), True, 'import oneflow as flow\n'), ((3213, 3234), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3232, 3234), True, 'import oneflow as flow\n'), ((3289, 3352), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', 
function_config=func_config)\n", (3309, 3352), True, 'import oneflow as flow\n'), ((4501, 4524), 'tensorflow.Variable', 'tf.Variable', (['init_value'], {}), '(init_value)\n', (4512, 4524), True, 'import tensorflow as tf\n'), ((4535, 4653), 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate', 'beta_1': 'beta1', 'beta_2': 'beta2', 'epsilon': 'epsilon', 'amsgrad': '(False)'}), '(learning_rate=learning_rate, beta_1=beta1, beta_2=\n beta2, epsilon=epsilon, amsgrad=False)\n', (4559, 4653), True, 'import tensorflow as tf\n'), ((5262, 5290), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5288, 5290), True, 'import oneflow as flow\n'), ((5309, 5330), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5328, 5330), True, 'import oneflow as flow\n'), ((5385, 5448), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (5405, 5448), True, 'import oneflow as flow\n'), ((7253, 7274), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (7261, 7274), True, 'import numpy as np\n'), ((7283, 7304), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (7291, 7304), True, 'import numpy as np\n'), ((7848, 7876), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (7874, 7876), True, 'import oneflow as flow\n'), ((7895, 7916), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (7914, 7916), True, 'import oneflow as flow\n'), ((7971, 8034), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (7991, 8034), True, 'import oneflow as flow\n'), ((9513, 9534), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (9521, 9534), True, 'import numpy as np\n'), ((9543, 9564), 
'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (9551, 9564), True, 'import numpy as np\n'), ((10024, 10052), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (10050, 10052), True, 'import oneflow as flow\n'), ((10071, 10092), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10090, 10092), True, 'import oneflow as flow\n'), ((10147, 10210), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (10167, 10210), True, 'import oneflow as flow\n'), ((12339, 12360), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (12347, 12360), True, 'import numpy as np\n'), ((12889, 12917), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (12915, 12917), True, 'import oneflow as flow\n'), ((12936, 12957), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (12955, 12957), True, 'import oneflow as flow\n'), ((13012, 13075), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (13032, 13075), True, 'import oneflow as flow\n'), ((14124, 14147), 'tensorflow.Variable', 'tf.Variable', (['init_value'], {}), '(init_value)\n', (14135, 14147), True, 'import tensorflow as tf\n'), ((14158, 14249), 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'learning_rate', 'momentum': 'momentum', 'nesterov': '(False)'}), '(learning_rate=learning_rate, momentum=momentum,\n nesterov=False)\n', (14181, 14249), True, 'import tensorflow as tf\n'), ((14687, 14712), 'numpy.prod', 'np.prod', (['sparse_ids.shape'], {}), '(sparse_ids.shape)\n', (14694, 14712), True, 'import numpy as np\n'), ((15379, 15407), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (15405, 15407), True, 'import oneflow as 
flow\n'), ((15426, 15447), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (15445, 15447), True, 'import oneflow as flow\n'), ((15615, 15678), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (15635, 15678), True, 'import oneflow as flow\n'), ((17319, 17340), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (17327, 17340), True, 'import numpy as np\n'), ((17891, 17919), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (17917, 17919), True, 'import oneflow as flow\n'), ((17938, 17959), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (17957, 17959), True, 'import oneflow as flow\n'), ((18127, 18190), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (18147, 18190), True, 'import oneflow as flow\n'), ((19928, 19949), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (19936, 19949), True, 'import numpy as np\n'), ((20512, 20540), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (20538, 20540), True, 'import oneflow as flow\n'), ((20559, 20580), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (20578, 20580), True, 'import oneflow as flow\n'), ((20748, 20811), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (20768, 20811), True, 'import oneflow as flow\n'), ((22771, 22792), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (22779, 22792), True, 'import numpy as np\n'), ((22801, 22822), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (22809, 22822), True, 'import numpy as np\n'), ((23390, 23418), 'oneflow.clear_default_session', 
'flow.clear_default_session', ([], {}), '()\n', (23416, 23418), True, 'import oneflow as flow\n'), ((23437, 23458), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (23456, 23458), True, 'import oneflow as flow\n'), ((23626, 23689), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (23646, 23689), True, 'import oneflow as flow\n'), ((25845, 25866), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (25853, 25866), True, 'import numpy as np\n'), ((25875, 25896), 'numpy.zeros', 'np.zeros', (['param.shape'], {}), '(param.shape)\n', (25883, 25896), True, 'import numpy as np\n'), ((26485, 26513), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (26511, 26513), True, 'import oneflow as flow\n'), ((28962, 28990), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (28988, 28990), True, 'import oneflow as flow\n'), ((37431, 37446), 'unittest.main', 'unittest.main', ([], {}), '()\n', (37444, 37446), False, 'import unittest\n'), ((9062, 9072), 'numpy.copy', 'np.copy', (['m'], {}), '(m)\n', (9069, 9072), True, 'import numpy as np\n'), ((9087, 9097), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (9094, 9097), True, 'import numpy as np\n'), ((9272, 9286), 'numpy.copy', 'np.copy', (['param'], {}), '(param)\n', (9279, 9286), True, 'import numpy as np\n'), ((16859, 16873), 'numpy.copy', 'np.copy', (['param'], {}), '(param)\n', (16866, 16873), True, 'import numpy as np\n'), ((16895, 16912), 'numpy.copy', 'np.copy', (['momentum'], {}), '(momentum)\n', (16902, 16912), True, 'import numpy as np\n'), ((19435, 19449), 'numpy.copy', 'np.copy', (['param'], {}), '(param)\n', (19442, 19449), True, 'import numpy as np\n'), ((19471, 19488), 'numpy.copy', 'np.copy', (['momentum'], {}), '(momentum)\n', (19478, 19488), True, 'import numpy as np\n'), ((22105, 22119), 'numpy.copy', 
'np.copy', (['param'], {}), '(param)\n', (22112, 22119), True, 'import numpy as np\n'), ((22134, 22144), 'numpy.copy', 'np.copy', (['m'], {}), '(m)\n', (22141, 22144), True, 'import numpy as np\n'), ((22159, 22169), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (22166, 22169), True, 'import numpy as np\n'), ((25119, 25133), 'numpy.copy', 'np.copy', (['param'], {}), '(param)\n', (25126, 25133), True, 'import numpy as np\n'), ((25148, 25158), 'numpy.copy', 'np.copy', (['m'], {}), '(m)\n', (25155, 25158), True, 'import numpy as np\n'), ((25173, 25183), 'numpy.copy', 'np.copy', (['v'], {}), '(v)\n', (25180, 25183), True, 'import numpy as np\n'), ((27449, 27470), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (27468, 27470), True, 'import oneflow as flow\n'), ((27533, 27596), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (27553, 27596), True, 'import oneflow as flow\n'), ((27858, 27879), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (27877, 27879), True, 'import oneflow as flow\n'), ((27997, 28060), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (28017, 28060), True, 'import oneflow as flow\n'), ((30025, 30046), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (30044, 30046), True, 'import oneflow as flow\n'), ((30109, 30172), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (30129, 30172), True, 'import oneflow as flow\n'), ((30437, 30458), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (30456, 30458), True, 'import oneflow as flow\n'), ((30576, 30639), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 
'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (30596, 30639), True, 'import oneflow as flow\n'), ((31506, 31519), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31517, 31519), False, 'from collections import OrderedDict\n'), ((31789, 31809), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (31799, 31809), False, 'from test_util import GenArgList\n'), ((31911, 31924), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31922, 31924), False, 'from collections import OrderedDict\n'), ((32216, 32236), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (32226, 32236), False, 'from test_util import GenArgList\n'), ((32340, 32353), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32351, 32353), False, 'from collections import OrderedDict\n'), ((32645, 32665), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (32655, 32665), False, 'from test_util import GenArgList\n'), ((32765, 32778), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (32776, 32778), False, 'from collections import OrderedDict\n'), ((33111, 33131), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (33121, 33131), False, 'from test_util import GenArgList\n'), ((33226, 33239), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (33237, 33239), False, 'from collections import OrderedDict\n'), ((33593, 33613), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (33603, 33613), False, 'from test_util import GenArgList\n'), ((33706, 33719), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (33717, 33719), False, 'from collections import OrderedDict\n'), ((33947, 33967), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (33957, 33967), False, 'from test_util import GenArgList\n'), ((34079, 34092), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (34090, 34092), 
False, 'from collections import OrderedDict\n'), ((34452, 34472), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (34462, 34472), False, 'from test_util import GenArgList\n'), ((34734, 34747), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (34745, 34747), False, 'from collections import OrderedDict\n'), ((35150, 35170), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (35160, 35170), False, 'from test_util import GenArgList\n'), ((34561, 34604), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (34602, 34604), True, 'import oneflow as flow\n'), ((35294, 35307), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (35305, 35307), False, 'from collections import OrderedDict\n'), ((35728, 35748), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (35738, 35748), False, 'from test_util import GenArgList\n'), ((36013, 36026), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36024, 36026), False, 'from collections import OrderedDict\n'), ((36490, 36510), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (36500, 36510), False, 'from test_util import GenArgList\n'), ((35838, 35881), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (35879, 35881), True, 'import oneflow as flow\n'), ((36625, 36638), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (36636, 36638), False, 'from collections import OrderedDict\n'), ((36866, 36886), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (36876, 36886), False, 'from test_util import GenArgList\n'), ((37007, 37020), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (37018, 37020), False, 'from collections import OrderedDict\n'), ((37312, 37332), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (37322, 37332), False, 
'from test_util import GenArgList\n'), ((1304, 1362), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (1333, 1362), True, 'import oneflow as flow\n'), ((1404, 1446), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (1424, 1446), True, 'import oneflow as flow\n'), ((1724, 1762), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (1745, 1762), True, 'import oneflow as flow\n'), ((2366, 2376), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (2373, 2376), True, 'import numpy as np\n'), ((2633, 2650), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (2648, 2650), True, 'import tensorflow as tf\n'), ((2686, 2718), 'tensorflow.Variable', 'tf.Variable', (['random_masks_seq[i]'], {}), '(random_masks_seq[i])\n', (2697, 2718), True, 'import tensorflow as tf\n'), ((2738, 2771), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(var * random_mask)'], {}), '(var * random_mask)\n', (2752, 2771), True, 'import tensorflow as tf\n'), ((3392, 3450), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (3421, 3450), True, 'import oneflow as flow\n'), ((3492, 3534), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (3512, 3534), True, 'import oneflow as flow\n'), ((3812, 3850), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (3833, 3850), True, 'import oneflow as flow\n'), ((4479, 4489), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (4486, 4489), True, 'import numpy as np\n'), ((4743, 4760), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (4758, 4760), True, 'import tensorflow as tf\n'), ((4796, 4828), 'tensorflow.Variable', 
'tf.Variable', (['random_masks_seq[i]'], {}), '(random_masks_seq[i])\n', (4807, 4828), True, 'import tensorflow as tf\n'), ((4848, 4881), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(var * random_mask)'], {}), '(var * random_mask)\n', (4862, 4881), True, 'import tensorflow as tf\n'), ((5489, 5547), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (5518, 5547), True, 'import oneflow as flow\n'), ((5589, 5631), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (5609, 5631), True, 'import oneflow as flow\n'), ((5909, 5947), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (5930, 5947), True, 'import oneflow as flow\n'), ((6621, 6631), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (6628, 6631), True, 'import numpy as np\n'), ((7223, 7243), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (7230, 7243), True, 'import numpy as np\n'), ((8093, 8135), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (8113, 8135), True, 'import oneflow as flow\n'), ((8413, 8437), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['x'], {}), '(x)\n', (8434, 8437), True, 'import oneflow as flow\n'), ((8829, 8839), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (8836, 8839), True, 'import numpy as np\n'), ((9483, 9503), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (9490, 9503), True, 'import numpy as np\n'), ((10250, 10308), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (10279, 10308), True, 'import oneflow as flow\n'), ((10350, 10392), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (10370, 10392), 
True, 'import oneflow as flow\n'), ((10670, 10708), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (10691, 10708), True, 'import oneflow as flow\n'), ((11377, 11387), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (11384, 11387), True, 'import numpy as np\n'), ((11637, 11658), 'numpy.sum', 'np.sum', (['(param * param)'], {}), '(param * param)\n', (11643, 11658), True, 'import numpy as np\n'), ((11696, 11723), 'numpy.sum', 'np.sum', (['(gradient * gradient)'], {}), '(gradient * gradient)\n', (11702, 11723), True, 'import numpy as np\n'), ((12302, 12322), 'numpy.prod', 'np.prod', (['param.shape'], {}), '(param.shape)\n', (12309, 12322), True, 'import numpy as np\n'), ((13114, 13172), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (13143, 13172), True, 'import oneflow as flow\n'), ((13214, 13256), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (13234, 13256), True, 'import oneflow as flow\n'), ((13534, 13572), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (13555, 13572), True, 'import oneflow as flow\n'), ((14102, 14112), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (14109, 14112), True, 'import numpy as np\n'), ((14307, 14324), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (14322, 14324), True, 'import tensorflow as tf\n'), ((14360, 14392), 'tensorflow.Variable', 'tf.Variable', (['random_masks_seq[i]'], {}), '(random_masks_seq[i])\n', (14371, 14392), True, 'import tensorflow as tf\n'), ((14412, 14445), 'tensorflow.reduce_mean', 'tf.reduce_mean', (['(var * random_mask)'], {}), '(var * random_mask)\n', (14426, 14445), True, 'import tensorflow as tf\n'), ((15729, 15787), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['ids_shape'], {'dtype': 
'flow.int32'}), '(ids_shape, dtype=flow.int32)\n', (15758, 15787), True, 'import oneflow as flow\n'), ((15830, 15870), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (15850, 15870), True, 'import oneflow as flow\n'), ((16112, 16180), 'oneflow.gather', 'flow.gather', ([], {'params': '(embedding_table * mul_scalar)', 'indices': 'sparse_ids'}), '(params=embedding_table * mul_scalar, indices=sparse_ids)\n', (16123, 16180), True, 'import oneflow as flow\n'), ((16230, 16262), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['embedding'], {}), '(embedding)\n', (16251, 16262), True, 'import oneflow as flow\n'), ((16498, 16547), 'numpy.random.randint', 'np.random.randint', (['model_shape[0]'], {'size': 'ids_shape'}), '(model_shape[0], size=ids_shape)\n', (16515, 16547), True, 'import numpy as np\n'), ((16714, 16724), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (16721, 16724), True, 'import numpy as np\n'), ((17283, 17302), 'numpy.prod', 'np.prod', (['grad_shape'], {}), '(grad_shape)\n', (17290, 17302), True, 'import numpy as np\n'), ((18242, 18300), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['ids_shape'], {'dtype': 'flow.int32'}), '(ids_shape, dtype=flow.int32)\n', (18271, 18300), True, 'import oneflow as flow\n'), ((18343, 18383), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (18363, 18383), True, 'import oneflow as flow\n'), ((18625, 18693), 'oneflow.gather', 'flow.gather', ([], {'params': '(embedding_table * mul_scalar)', 'indices': 'sparse_ids'}), '(params=embedding_table * mul_scalar, indices=sparse_ids)\n', (18636, 18693), True, 'import oneflow as flow\n'), ((18743, 18775), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['embedding'], {}), '(embedding)\n', (18764, 18775), True, 'import oneflow as flow\n'), ((19055, 19104), 'numpy.random.randint', 'np.random.randint', (['model_shape[0]'], 
{'size': 'ids_shape'}), '(model_shape[0], size=ids_shape)\n', (19072, 19104), True, 'import numpy as np\n'), ((19272, 19282), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (19279, 19282), True, 'import numpy as np\n'), ((19892, 19911), 'numpy.prod', 'np.prod', (['grad_shape'], {}), '(grad_shape)\n', (19899, 19911), True, 'import numpy as np\n'), ((20863, 20921), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['ids_shape'], {'dtype': 'flow.int32'}), '(ids_shape, dtype=flow.int32)\n', (20892, 20921), True, 'import oneflow as flow\n'), ((20964, 21004), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (20984, 21004), True, 'import oneflow as flow\n'), ((21246, 21314), 'oneflow.gather', 'flow.gather', ([], {'params': '(embedding_table * mul_scalar)', 'indices': 'sparse_ids'}), '(params=embedding_table * mul_scalar, indices=sparse_ids)\n', (21257, 21314), True, 'import oneflow as flow\n'), ((21364, 21396), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['embedding'], {}), '(embedding)\n', (21385, 21396), True, 'import oneflow as flow\n'), ((21726, 21775), 'numpy.random.randint', 'np.random.randint', (['model_shape[0]'], {'size': 'ids_shape'}), '(model_shape[0], size=ids_shape)\n', (21743, 21775), True, 'import numpy as np\n'), ((21943, 21953), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (21950, 21953), True, 'import numpy as np\n'), ((22742, 22761), 'numpy.prod', 'np.prod', (['grad_shape'], {}), '(grad_shape)\n', (22749, 22761), True, 'import numpy as np\n'), ((23742, 23800), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['ids_shape'], {'dtype': 'flow.int32'}), '(ids_shape, dtype=flow.int32)\n', (23771, 23800), True, 'import oneflow as flow\n'), ((23843, 23883), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (23863, 23883), True, 'import oneflow as flow\n'), ((24125, 24193), 
'oneflow.gather', 'flow.gather', ([], {'params': '(embedding_table * mul_scalar)', 'indices': 'sparse_ids'}), '(params=embedding_table * mul_scalar, indices=sparse_ids)\n', (24136, 24193), True, 'import oneflow as flow\n'), ((24243, 24275), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['embedding'], {}), '(embedding)\n', (24264, 24275), True, 'import oneflow as flow\n'), ((24649, 24698), 'numpy.random.randint', 'np.random.randint', (['model_shape[0]'], {'size': 'ids_shape'}), '(model_shape[0], size=ids_shape)\n', (24666, 24698), True, 'import numpy as np\n'), ((24867, 24877), 'numpy.copy', 'np.copy', (['x'], {}), '(x)\n', (24874, 24877), True, 'import numpy as np\n'), ((25816, 25835), 'numpy.prod', 'np.prod', (['grad_shape'], {}), '(grad_shape)\n', (25823, 25835), True, 'import numpy as np\n'), ((26569, 26611), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (26589, 26611), True, 'import oneflow as flow\n'), ((26872, 26922), 'oneflow.constant', 'flow.constant', (['(3.0)'], {'dtype': 'flow.float32', 'shape': '(1,)'}), '(3.0, dtype=flow.float32, shape=(1,))\n', (26885, 26922), True, 'import oneflow as flow\n'), ((27166, 27204), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (27187, 27204), True, 'import oneflow as flow\n'), ((29046, 29088), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0-0"""'], {}), "(device_type, '0:0-0')\n", (29066, 29088), True, 'import oneflow as flow\n'), ((29349, 29399), 'oneflow.constant', 'flow.constant', (['(3.0)'], {'dtype': 'flow.float32', 'shape': '(1,)'}), '(3.0, dtype=flow.float32, shape=(1,))\n', (29362, 29399), True, 'import oneflow as flow\n'), ((29643, 29681), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['(x * random_mask)'], {}), '(x * random_mask)\n', (29664, 29681), True, 'import oneflow as flow\n'), ((6863, 6895), 'numpy.sqrt', 'np.sqrt', (['(1 - beta2 ** 
(iter + 1))'], {}), '(1 - beta2 ** (iter + 1))\n', (6870, 6895), True, 'import numpy as np\n'), ((8986, 9018), 'numpy.sqrt', 'np.sqrt', (['(1 - beta2 ** (iter + 1))'], {}), '(1 - beta2 ** (iter + 1))\n', (8993, 9018), True, 'import numpy as np\n'), ((27037, 27063), 'oneflow.cast', 'flow.cast', (['x', 'flow.float16'], {}), '(x, flow.float16)\n', (27046, 27063), True, 'import oneflow as flow\n'), ((27084, 27101), 'oneflow.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (27098, 27101), True, 'import oneflow as flow\n'), ((27122, 27146), 'oneflow.cast', 'flow.cast', (['x', 'flow.float'], {}), '(x, flow.float)\n', (27131, 27146), True, 'import oneflow as flow\n'), ((27643, 27701), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (27672, 27701), True, 'import oneflow as flow\n'), ((28112, 28170), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (28141, 28170), True, 'import oneflow as flow\n'), ((29514, 29540), 'oneflow.cast', 'flow.cast', (['x', 'flow.float16'], {}), '(x, flow.float16)\n', (29523, 29540), True, 'import oneflow as flow\n'), ((29561, 29578), 'oneflow.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (29575, 29578), True, 'import oneflow as flow\n'), ((29599, 29623), 'oneflow.cast', 'flow.cast', (['x', 'flow.float'], {}), '(x, flow.float)\n', (29608, 29623), True, 'import oneflow as flow\n'), ((30220, 30278), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (30249, 30278), True, 'import oneflow as flow\n'), ((30692, 30750), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow.float32'}), '(x_shape, dtype=flow.float32)\n', (30721, 30750), True, 'import oneflow as flow\n'), ((1604, 1657), 
'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (1635, 1657), True, 'import oneflow as flow\n'), ((2165, 2196), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (2182, 2196), True, 'import numpy as np\n'), ((3692, 3745), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (3723, 3745), True, 'import oneflow as flow\n'), ((4281, 4312), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4298, 4312), True, 'import numpy as np\n'), ((5789, 5842), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (5820, 5842), True, 'import oneflow as flow\n'), ((6422, 6453), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (6439, 6453), True, 'import numpy as np\n'), ((8293, 8346), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (8324, 8346), True, 'import oneflow as flow\n'), ((9330, 9342), 'numpy.sqrt', 'np.sqrt', (['v_t'], {}), '(v_t)\n', (9337, 9342), True, 'import numpy as np\n'), ((10550, 10603), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (10581, 10603), True, 'import oneflow as flow\n'), ((11179, 11210), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (11196, 11210), True, 'import numpy as np\n'), ((13414, 13467), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (13445, 13467), True, 'import oneflow as flow\n'), ((13905, 13936), 
'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (13922, 13936), True, 'import numpy as np\n'), ((16019, 16072), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (16050, 16072), True, 'import oneflow as flow\n'), ((18532, 18585), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (18563, 18585), True, 'import oneflow as flow\n'), ((21153, 21206), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (21184, 21206), True, 'import oneflow as flow\n'), ((22233, 22265), 'numpy.sqrt', 'np.sqrt', (['(1 - beta2 ** (iter + 1))'], {}), '(1 - beta2 ** (iter + 1))\n', (22240, 22265), True, 'import numpy as np\n'), ((24032, 24085), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(0)', 'maxval': '(100)'}), '(minval=0, maxval=100)\n', (24063, 24085), True, 'import oneflow as flow\n'), ((25247, 25279), 'numpy.sqrt', 'np.sqrt', (['(1 - beta2 ** (iter + 1))'], {}), '(1 - beta2 ** (iter + 1))\n', (25254, 25279), True, 'import numpy as np\n'), ((26774, 26797), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (26795, 26797), True, 'import oneflow as flow\n'), ((28485, 28516), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (28502, 28516), True, 'import numpy as np\n'), ((29251, 29274), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (29272, 29274), True, 'import oneflow as flow\n'), ((31070, 31101), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (31087, 31101), True, 'import numpy as np\n'), ((1815, 1877), 'oneflow.optimizer.PiecewiseConstantScheduler', 
'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (1856, 1877), True, 'import oneflow as flow\n'), ((3900, 3962), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (3941, 3962), True, 'import oneflow as flow\n'), ((5998, 6060), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (6039, 6060), True, 'import oneflow as flow\n'), ((8492, 8554), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (8533, 8554), True, 'import oneflow as flow\n'), ((10758, 10820), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (10799, 10820), True, 'import oneflow as flow\n'), ((13621, 13683), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (13662, 13683), True, 'import oneflow as flow\n'), ((16311, 16373), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (16352, 16373), True, 'import oneflow as flow\n'), ((18825, 18887), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (18866, 18887), True, 'import oneflow as flow\n'), ((21447, 21509), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (21488, 21509), True, 'import oneflow as flow\n'), ((22563, 22580), 'numpy.sqrt', 'np.sqrt', (['v_t[ids]'], {}), '(v_t[ids])\n', (22570, 
22580), True, 'import numpy as np\n'), ((24327, 24389), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (24368, 24389), True, 'import oneflow as flow\n'), ((27253, 27315), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (27294, 27315), True, 'import oneflow as flow\n'), ((29731, 29793), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (29772, 29793), True, 'import oneflow as flow\n'), ((7076, 7088), 'numpy.sqrt', 'np.sqrt', (['v_t'], {}), '(v_t)\n', (7083, 7088), True, 'import numpy as np\n'), ((25595, 25612), 'numpy.sqrt', 'np.sqrt', (['v_t[ids]'], {}), '(v_t[ids])\n', (25602, 25612), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import logging
import math
import operator
import time
from collections import Counter
import oneflow as flow
from libai.evaluation import flatten_results_dict
from libai.utils import distributed as dist
from libai.utils.checkpoint import Checkpointer
from libai.utils.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
from libai.utils.events import EventWriter
from libai.utils.timer import Timer
from .trainer import HookBase
"""
Implement some common hooks.
"""
# Module-level logger named after this module; handlers/levels are expected
# to be configured by the application (see the logging module docs).
logger = logging.getLogger(__name__)
class CallbackHook(HookBase):
    """
    A hook built from user-supplied callback functions.

    Each callback, when provided, is invoked with the trainer as its sole
    argument at the corresponding point of the training loop.
    """

    def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
        """
        Args:
            before_train/after_train/before_step/after_step (callable or None):
                each takes one argument, the trainer; ``None`` means no-op.
        """
        self._before_train = before_train
        self._after_train = after_train
        self._before_step = before_step
        self._after_step = after_step

    def before_train(self):
        if self._before_train:
            self._before_train(self.trainer)

    def after_train(self):
        if self._after_train:
            self._after_train(self.trainer)
        # The callbacks may be closures holding a reference to the trainer;
        # drop them here to avoid a circular reference keeping it alive.
        del self._before_train, self._after_train
        del self._before_step, self._after_step

    def before_step(self):
        if self._before_step:
            self._before_step(self.trainer)

    def after_step(self):
        if self._after_step:
            self._after_step(self.trainer)
class IterationTimer(HookBase):
    """
    Measure the duration of every iteration (each run_step call) and log a
    summary when training ends.

    The measured interval is the one between this hook's :meth:`before_step`
    and :meth:`after_step`. Since all hooks' ``before_step`` calls are assumed
    to take negligible time, placing this hook first in the hook list gives
    the most accurate timing.
    """

    def __init__(self, warmup_iter=3):
        """
        Args:
            warmup_iter (int): number of initial iterations excluded
                from timing.
        """
        self._warmup_iter = warmup_iter
        self._step_timer = Timer()

    def before_train(self):
        self._start_time = time.perf_counter()
        self._total_timer = Timer()
        self._total_timer.pause()

    def after_train(self):
        wall_time = time.perf_counter() - self._start_time
        step_time = self._total_timer.seconds()
        hook_time = wall_time - step_time
        num_iter = self.trainer.iter + 1 - self.trainer.start_iter - self._warmup_iter
        if num_iter > 0 and step_time > 0:
            # Speed is meaningful only after warmup
            # NOTE this format is parsed by grep in some scripts
            logger.info(
                "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
                    num_iter,
                    str(datetime.timedelta(seconds=int(step_time))),
                    step_time / num_iter,
                )
            )
        logger.info(
            "Total training time: {} ({} on hooks)".format(
                str(datetime.timedelta(seconds=int(wall_time))),
                str(datetime.timedelta(seconds=int(hook_time))),
            )
        )

    def before_step(self):
        self._step_timer.reset()
        self._total_timer.resume()

    def after_step(self):
        # +1 because this runs in after_step: the current iteration is done.
        iters_elapsed = self.trainer.iter - self.trainer.start_iter + 1
        if iters_elapsed < self._warmup_iter:
            # Still warming up: discard all timing collected so far.
            self._start_time = time.perf_counter()
            self._total_timer.reset()
            self._total_timer.pause()
        else:
            self.trainer.storage.put_scalars(time=self._step_timer.seconds())
class PeriodicWriter(HookBase):
    """
    Flush a collection of event writers on a fixed schedule.

    The writers run every ``period`` iterations and once more on the last
    iteration; they are closed when training finishes.
    """

    def __init__(self, writers, period=20):
        """
        Args:
            writers (list[EventWriter]): EventWriter objects to invoke.
            period (int): write interval, in iterations.
        """
        for writer in writers:
            assert isinstance(writer, EventWriter), writer
        self._writers = writers
        self._period = period

    def after_step(self):
        cur_iter = self.trainer.iter
        is_last = cur_iter == self.trainer.max_iter - 1
        if is_last or (cur_iter + 1) % self._period == 0:
            for writer in self._writers:
                writer.write()

    def after_train(self):
        for writer in self._writers:
            writer.close()
class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
    """
    Same as :class:`libai.utils.checkpoint.PeriodicCheckpointer`, but as a hook.
    Note that when used as a hook,
    it is unable to save additional data other than what's defined
    by the given `checkpointer`.
    It is executed every ``period`` iterations and after the last iteration.
    """

    def before_train(self):
        # The underlying _PeriodicCheckpointer reads self.max_iter to decide
        # when the final checkpoint is due; pick it up from the trainer here,
        # since it is only known once training starts.
        self.max_iter = self.trainer.max_iter

    def after_step(self):
        # Delegate to _PeriodicCheckpointer.step, which saves a checkpoint
        # when the current iteration matches the configured period.
        self.step(self.trainer.iter)
class BestCheckpointer(HookBase):
    """
    Checkpoints best weights based off given metric.
    This hook should be used in conjunction to and executed after the hook
    that produces the metric, e.g. `EvalHook`.
    """

    def __init__(
        self,
        eval_period: int,
        checkpointer: Checkpointer,
        val_metric: str,
        mode: str = "max",
        file_prefix: str = "model_best",
    ) -> None:
        """
        Args:
            eval_period (int): the period `EvalHook` is set to run.
            checkpointer: the checkpointer object used to save checkpoints.
            val_metric (str): validation metric to track for best checkpoint, e.g. "acc@1"
            mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
                maximized or minimized, e.g. for "acc@1" it should be "max"
            file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
        """
        self._period = eval_period
        self._val_metric = val_metric
        assert mode in [
            "max",
            "min",
        ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
        # operator.gt for "max" (new value must be strictly greater), operator.lt
        # for "min" (strictly smaller); ties keep the existing best.
        if mode == "max":
            self._compare = operator.gt
        else:
            self._compare = operator.lt
        self._checkpointer = checkpointer
        self._file_prefix = file_prefix
        # Best value seen so far and the iteration it was recorded at;
        # None until the first valid metric arrives.
        self.best_metric = None
        self.best_iter = None

    def _update_best(self, val, iteration) -> bool:
        """Record a new best metric; reject NaN/inf and report success."""
        if math.isnan(val) or math.isinf(val):
            return False
        self.best_metric = val
        self.best_iter = iteration
        return True

    def _best_checking(self):
        # latest() presumably maps metric name -> (value, iteration);
        # inferred from the unpacking below — TODO confirm against EventStorage.
        metric_tuple = self.trainer.storage.latest().get(self._val_metric)
        # flag is a local one-element tensor on every rank; rank 0 sets it to 1
        # when a new best is found, then it is broadcast so all ranks agree on
        # whether to save.
        flag = flow.zeros(1)
        if dist.is_main_process():
            if metric_tuple is None:
                logger.warning(
                    f"Given val metric {self._val_metric} does not seem to be computed/stored. "
                    "Will not be checkpointed based on that."
                )
            else:
                latest_metric, metric_iter = metric_tuple
                if self.best_metric is None:
                    # First metric ever seen: save only if it is finite.
                    if self._update_best(latest_metric, metric_iter):
                        flag = flag + 1
                        logger.info(
                            f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
                        )
                elif self._compare(latest_metric, self.best_metric):
                    # NOTE(review): unlike the first-save branch, the flag is set
                    # and the message logged *before* _update_best runs — an
                    # infinite metric would be rejected by _update_best yet the
                    # checkpoint would still be saved. Confirm if intentional.
                    flag = flag + 1
                    logger.info(
                        f"Saved best model as latest eval score for {self._val_metric} is "
                        f"{latest_metric:0.5f}, better than last best score "
                        f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
                    )
                    self._update_best(latest_metric, metric_iter)
                else:
                    logger.info(
                        f"Not saving as latest eval score for "
                        f"{self._val_metric} is {latest_metric:0.5f}, "
                        f"not better than best score {self.best_metric:0.5f} "
                        f"@ iteration {self.best_iter}."
                    )
        # Barrier so all ranks reach the broadcast together.
        dist.synchronize()
        # NOTE(review): to_global with sbp=broadcast should replicate rank 0's
        # flag value to every rank — confirm against OneFlow's global-tensor
        # semantics.
        flag = flag.to_global(
            sbp=flow.sbp.broadcast, placement=flow.env.all_device_placement("cpu")
        )
        if flag.to_local().item() == 1:
            # Every rank calls save; the checkpointer is expected to handle
            # which rank actually writes the file.
            self._checkpointer.save(f"{self._file_prefix}")

    def after_step(self):
        # same conditions as `EvalHook`
        next_iter = self.trainer.iter + 1
        if (
            self._period > 0
            and next_iter % self._period == 0
            and next_iter != self.trainer.max_iter
        ):
            self._best_checking()

    def after_train(self):
        # same conditions as `EvalHook`
        if self.trainer.iter + 1 >= self.trainer.max_iter:
            self._best_checking()
class EvalHook(HookBase):
    """
    Run an evaluation function periodically, and at the end of training.
    It is executed every ``eval_period`` iterations and after the last iteration.
    """

    def __init__(self, eval_period, eval_function):
        """
        Args:
            eval_period (int): the period to run `eval_function`.
            eval_function (callable): a function which takes no arguments, and
                returns a nested dict of evaluation metrics.

        Note:
            This hook must be enabled in all or none workers.
            If you would like only certain workers to perform evaluation,
            give other workers a no-op function (`eval_function=lambda: None`).
        """
        self._period = eval_period
        self._func = eval_function

    def _do_eval(self):
        """Run the eval function and log its flattened metrics to storage.

        Raises:
            ValueError: if any flattened metric cannot be converted to float.
        """
        results = self._func()
        if results:
            assert isinstance(
                results, dict
            ), "Eval function must return a dict. Got {} instead.".format(results)
            flattened_results = flatten_results_dict(results)
            for k, v in flattened_results.items():
                try:
                    # Store the converted value back so `put_scalars` always
                    # receives plain floats (previously the conversion result
                    # was validated but discarded).
                    flattened_results[k] = float(v)
                except Exception as e:
                    raise ValueError(
                        "[EvalHook] eval_function should return a nested dict of float. "
                        "Got '{}: {}' instead.".format(k, v)
                    ) from e
            self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
        # Evaluation may take different time among workers.
        # A barrier make them start the next iteration together.
        dist.synchronize()

    def after_step(self):
        next_iter = self.trainer.iter + 1
        if self._period > 0 and next_iter % self._period == 0:
            # do the last eval in after_train
            if next_iter != self.trainer.max_iter:
                self._do_eval()

    def after_train(self):
        # This condition is to prevent the eval from running after a failed training
        if self.trainer.iter + 1 >= self.trainer.max_iter:
            self._do_eval()
        # func is likely a closure that holds reference to the trainer
        # therefore we clean it to avoid circular reference in the end
        del self._func
class LRScheduler(HookBase):
    """
    A hook that steps a builtin LR scheduler after every iteration and logs
    the learning rate of the most representative parameter group.
    """

    def __init__(self, optimizer=None, scheduler=None):
        """
        Args:
            optimizer (flow.optim.Optimizer):
            scheduler (flow.optim.LRScheduler):
                if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
                in the optimizer.

            If any argument is not given, will try to obtain it from the trainer.
        """
        self._optimizer = optimizer
        self._scheduler = scheduler

    def before_train(self):
        # Fall back to the trainer's optimizer when none was supplied.
        self._optimizer = self._optimizer or self.trainer.optimizer
        self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)

    @staticmethod
    def get_best_param_group_id(optimizer):
        """Pick the index of the param group whose LR best summarizes training."""
        # Heuristic: summarize the param group holding the most parameters.
        param_groups = optimizer.state_dict()["param_groups"]
        largest_group = max(len(g["params"]) for g in param_groups)
        if largest_group == 1:
            # Every group holds a single parameter; summarize the most
            # common initial LR instead.
            lr_count = Counter(g["_options"]["lr"] for g in param_groups)
            most_common_lr = lr_count.most_common()[0][0]
            for idx, group in enumerate(param_groups):
                if group["_options"]["lr"] == most_common_lr:
                    return idx
        else:
            for idx, group in enumerate(param_groups):
                if len(group["params"]) == largest_group:
                    return idx

    def after_step(self):
        current_lr = self.scheduler.get_last_lr()[self._best_param_group_id]
        self.trainer.storage.put_scalar("lr", current_lr, smoothing_hint=False)
        self.scheduler.step()

    @property
    def scheduler(self):
        # Prefer the explicitly supplied scheduler, else the trainer's.
        return self._scheduler or self.trainer.lr_scheduler

    def state_dict(self):
        if isinstance(self.scheduler, flow.optim.lr_scheduler._LRScheduler):
            return self.scheduler.state_dict()
        return {}

    def load_state_dict(self, state_dict):
        if isinstance(self.scheduler, flow.optim.lr_scheduler._LRScheduler):
            logger.info("Loading scheduler from state_dict ...")
            self.scheduler.load_state_dict(state_dict)
| [
"oneflow.zeros",
"oneflow.env.all_device_placement"
] | [((1129, 1156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1146, 1156), False, 'import logging\n'), ((3042, 3049), 'libai.utils.timer.Timer', 'Timer', ([], {}), '()\n', (3047, 3049), False, 'from libai.utils.timer import Timer\n'), ((3106, 3125), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3123, 3125), False, 'import time\n'), ((3154, 3161), 'libai.utils.timer.Timer', 'Timer', ([], {}), '()\n', (3159, 3161), False, 'from libai.utils.timer import Timer\n'), ((7836, 7849), 'oneflow.zeros', 'flow.zeros', (['(1)'], {}), '(1)\n', (7846, 7849), True, 'import oneflow as flow\n'), ((7861, 7883), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (7881, 7883), True, 'from libai.utils import distributed as dist\n'), ((9362, 9380), 'libai.utils.distributed.synchronize', 'dist.synchronize', ([], {}), '()\n', (9378, 9380), True, 'from libai.utils import distributed as dist\n'), ((11769, 11787), 'libai.utils.distributed.synchronize', 'dist.synchronize', ([], {}), '()\n', (11785, 11787), True, 'from libai.utils import distributed as dist\n'), ((3245, 3264), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3262, 3264), False, 'import time\n'), ((4630, 4649), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (4647, 4649), False, 'import time\n'), ((7568, 7583), 'math.isnan', 'math.isnan', (['val'], {}), '(val)\n', (7578, 7583), False, 'import math\n'), ((7587, 7602), 'math.isinf', 'math.isinf', (['val'], {}), '(val)\n', (7597, 7602), False, 'import math\n'), ((11110, 11139), 'libai.evaluation.flatten_results_dict', 'flatten_results_dict', (['results'], {}), '(results)\n', (11130, 11139), False, 'from libai.evaluation import flatten_results_dict\n'), ((9458, 9494), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (9487, 9494), True, 'import oneflow as flow\n')] |
"""
Modified from https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py
"""
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from flowvision.layers import trunc_normal_, DropPath
from .registry import ModelCreator
from .utils import load_state_dict_from_url
# Download URLs of the ImageNet-pretrained ConvNeXt weights (hosted on Aliyun
# OSS), keyed by the architecture names used by the factory functions below.
# "22k" = pretrained on ImageNet-22k; "22k_to_1k" = finetuned on ImageNet-1k;
# "iso" = isotropic variants (Section 3.3 of the ConvNeXt paper).
model_urls = {
    "convnext_tiny_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_tiny_1k_224_ema.zip",
    "convnext_small_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_small_224.zip",
    "convnext_base_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_base_224.zip",
    "convnext_base_384": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_base_384.zip",
    "convnext_large_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_large_224.zip",
    "convnext_large_384": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_large_384.zip",
    "convnext_base_224_22k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_base_224_22k.zip",
    "convnext_base_224_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_base_224_22k_to_1k.zip",
    "convnext_base_384_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_base_384_22k_to_1k.zip",
    "convnext_large_224_22k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_large_224_22k.zip",
    "convnext_large_224_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_large_224_22k_to_1k.zip",
    "convnext_large_384_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_large_384_22k_to_1k.zip",
    "convnext_xlarge_224_22k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_xlarge_224_22k.zip",
    "convnext_xlarge_224_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_xlarge_224_22k_to_1k.zip",
    "convnext_xlarge_384_22k_to_1k": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_xlarge_384_22k_to_1k.zip",
    "convnext_iso_small_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_iso_small_224.zip",
    "convnext_iso_base_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_iso_base_224.zip",
    "convnext_iso_large_224": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/classification/ConvNeXt/convnext_iso_large_224.zip",
}
class Block(nn.Module):
    r"""ConvNeXt Block.

    Two equivalent layouts exist:
    (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv, all in (N, C, H, W)
    (2) DwConv -> permute to (N, H, W, C) -> LayerNorm (channels_last) -> Linear -> GELU -> Linear -> permute back
    Layout (2) is used here since it is slightly faster in practice.

    Args:
        dim (int): Number of input channels.
        drop_path (float): Stochastic depth rate. Default: 0.0
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
    """

    def __init__(self, dim, drop_path=0.0, layer_scale_init_value=1e-6):
        super().__init__()
        # Depthwise 7x7 convolution (one filter per channel).
        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)
        self.norm = LayerNorm(dim, eps=1e-6)
        # Pointwise (1x1) convs expressed as Linear layers over the channel axis.
        self.pwconv1 = nn.Linear(dim, 4 * dim)
        self.act = nn.GELU()
        self.pwconv2 = nn.Linear(4 * dim, dim)
        if layer_scale_init_value > 0:
            # Learnable per-channel residual scaling ("Layer Scale").
            self.gamma = nn.Parameter(
                layer_scale_init_value * flow.ones(dim), requires_grad=True
            )
        else:
            self.gamma = None
        self.drop_path = nn.Identity() if drop_path <= 0.0 else DropPath(drop_path)

    def forward(self, x):
        shortcut = x
        x = self.dwconv(x)
        x = x.permute(0, 2, 3, 1)  # (N, C, H, W) -> (N, H, W, C)
        x = self.norm(x)
        x = self.pwconv2(self.act(self.pwconv1(x)))
        if self.gamma is not None:
            x = self.gamma * x
        x = x.permute(0, 3, 1, 2)  # (N, H, W, C) -> (N, C, H, W)
        return shortcut + self.drop_path(x)
class ConvNeXt(nn.Module):
    r"""ConvNeXt.

    The OneFlow implementation of `A ConvNet for the 2020s` -
    https://arxiv.org/pdf/2201.03545.pdf

    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3]
        dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768]
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6.
        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
    """

    def __init__(
        self,
        in_chans=3,
        num_classes=1000,
        depths=[3, 3, 9, 3],
        dims=[96, 192, 384, 768],
        drop_path_rate=0.0,
        layer_scale_init_value=1e-6,
        head_init_scale=1.0,
    ):
        super().__init__()
        # Stem plus 3 intermediate downsampling layers (4 in total).
        self.downsample_layers = nn.ModuleList()
        self.downsample_layers.append(
            nn.Sequential(
                nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),
                LayerNorm(dims[0], eps=1e-6, data_format="channels_first"),
            )
        )
        for i in range(3):
            self.downsample_layers.append(
                nn.Sequential(
                    LayerNorm(dims[i], eps=1e-6, data_format="channels_first"),
                    nn.Conv2d(dims[i], dims[i + 1], kernel_size=2, stride=2),
                )
            )
        # 4 feature-resolution stages, each a stack of residual blocks.
        self.stages = nn.ModuleList()
        # Stochastic-depth rates increase linearly across all blocks.
        dp_rates = [r.item() for r in flow.linspace(0, drop_path_rate, sum(depths))]
        cur = 0
        for i in range(4):
            blocks = [
                Block(
                    dim=dims[i],
                    drop_path=dp_rates[cur + j],
                    layer_scale_init_value=layer_scale_init_value,
                )
                for j in range(depths[i])
            ]
            self.stages.append(nn.Sequential(*blocks))
            cur += depths[i]
        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)
        self.head = nn.Linear(dims[-1], num_classes)
        self.apply(self._init_weights)
        self.head.weight.data.mul_(head_init_scale)
        self.head.bias.data.mul_(head_init_scale)

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=0.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        for downsample, stage in zip(self.downsample_layers, self.stages):
            x = stage(downsample(x))
        # Global average pooling: (N, C, H, W) -> (N, C)
        return self.norm(x.mean([-2, -1]))

    def forward(self, x):
        return self.head(self.forward_features(x))
class ConvNeXtIsotropic(nn.Module):
    r"""Isotropic ConvNeXt (Section 3.3 of the paper).

    An implementation of `A ConvNet for the 2020s` -
    https://arxiv.org/pdf/2201.03545.pdf

    Args:
        in_chans (int): Number of input image channels. Default: 3
        num_classes (int): Number of classes for classification head. Default: 1000
        depth (int): Number of blocks. Default: 18
        dim (int): Feature dimension. Default: 384
        drop_path_rate (float): Stochastic depth rate. Default: 0.
        layer_scale_init_value (float): Init value for Layer Scale. Default: 0.
        head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1.
    """

    def __init__(
        self,
        in_chans=3,
        num_classes=1000,
        depth=18,
        dim=384,
        drop_path_rate=0.0,
        layer_scale_init_value=0,
        head_init_scale=1.0,
    ):
        super().__init__()
        # Patchify stem: non-overlapping 16x16 convolution.
        self.stem = nn.Conv2d(in_chans, dim, kernel_size=16, stride=16)
        # Stochastic-depth rates increase linearly across the blocks.
        dp_rates = [r.item() for r in flow.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.Sequential(
            *(
                Block(
                    dim=dim,
                    drop_path=dp_rates[i],
                    layer_scale_init_value=layer_scale_init_value,
                )
                for i in range(depth)
            )
        )
        self.norm = nn.LayerNorm(dim, eps=1e-6)  # final norm layer
        self.head = nn.Linear(dim, num_classes)
        self.apply(self._init_weights)
        self.head.weight.data.mul_(head_init_scale)
        self.head.bias.data.mul_(head_init_scale)

    def _init_weights(self, m):
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            trunc_normal_(m.weight, std=0.02)
            nn.init.constant_(m.bias, 0)

    def forward_features(self, x):
        x = self.blocks(self.stem(x))
        # Global average pooling: (N, C, H, W) -> (N, C)
        return self.norm(x.mean([-2, -1]))

    def forward(self, x):
        return self.head(self.forward_features(x))
class LayerNorm(nn.Module):
    r"""LayerNorm supporting two data formats: channels_last (default) or channels_first.

    channels_last expects inputs of shape (batch_size, height, width, channels),
    channels_first expects inputs of shape (batch_size, channels, height, width).
    """

    def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"):
        super().__init__()
        if data_format not in ("channels_last", "channels_first"):
            raise NotImplementedError
        self.weight = nn.Parameter(flow.ones(normalized_shape))
        self.bias = nn.Parameter(flow.zeros(normalized_shape))
        self.eps = eps
        self.data_format = data_format
        self.normalized_shape = (normalized_shape,)

    def forward(self, x):
        if self.data_format == "channels_last":
            # TODO: use F.layer_norm
            mean = x.mean(-1, keepdim=True)
            var = (x - mean).pow(2).mean(-1, keepdim=True)
            x_hat = (x - mean) / flow.sqrt(var + self.eps)
            return self.weight[None, None, :] * x_hat + self.bias[None, None, :]
        elif self.data_format == "channels_first":
            # Manual normalization over the channel axis (dim 1).
            mean = x.mean(1, keepdim=True)
            var = (x - mean).pow(2).mean(1, keepdim=True)
            x_hat = (x - mean) / flow.sqrt(var + self.eps)
            return self.weight[:, None, None] * x_hat + self.bias[:, None, None]
def _create_convnext(arch, pretrained=False, progress=True, **model_kwargs):
    """Instantiate a :class:`ConvNeXt`, optionally loading pretrained weights.

    Args:
        arch (str): key into ``model_urls`` identifying the weight file.
        pretrained (bool): download and load the pretrained state dict.
        progress (bool): show a download progress bar.
        **model_kwargs: forwarded to the :class:`ConvNeXt` constructor.
    """
    model = ConvNeXt(**model_kwargs)
    if pretrained:
        model.load_state_dict(
            load_state_dict_from_url(model_urls[arch], progress=progress)
        )
    return model
def _create_convnext_isotropic(arch, pretrained=False, progress=True, **model_kwargs):
    """Instantiate a :class:`ConvNeXtIsotropic`, optionally loading pretrained weights.

    Args:
        arch (str): key into ``model_urls`` identifying the weight file.
        pretrained (bool): download and load the pretrained state dict.
        progress (bool): show a download progress bar.
        **model_kwargs: forwarded to the :class:`ConvNeXtIsotropic` constructor.
    """
    model = ConvNeXtIsotropic(**model_kwargs)
    if pretrained:
        model.load_state_dict(
            load_state_dict_from_url(model_urls[arch], progress=progress)
        )
    return model
@ModelCreator.register_model
def convnext_tiny_224(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Tiny trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_tiny_224 = flowvision.models.convnext_tiny_224(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], **kwargs)
    return _create_convnext(
        "convnext_tiny_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_small_224(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Small trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_small_224 = flowvision.models.convnext_small_224(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[96, 192, 384, 768], **kwargs)
    return _create_convnext(
        "convnext_small_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_base_224(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Base trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_base_224 = flowvision.models.convnext_base_224(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    return _create_convnext(
        "convnext_base_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_base_384(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Base trained on ImageNet-1k (384x384 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_base_384 = flowvision.models.convnext_base_384(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    return _create_convnext(
        "convnext_base_384", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_large_224(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Large trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_large_224 = flowvision.models.convnext_large_224(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    return _create_convnext(
        "convnext_large_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_large_384(pretrained=False, progress=True, **kwargs):
    """
    Constructs the ConvNext-Large model trained on ImageNet2012.

    .. note::
        ConvNext-Large model from `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>` _.
        The required input size of the model is 384x384.

    Args:
        pretrained (bool): Whether to download the pre-trained model on ImageNet. Default: ``False``
        progress (bool): If True, displays a progress bar of the download to stderr. Default: ``True``

    For example:

    .. code-block:: python

        >>> import flowvision
        >>> convnext_large_384 = flowvision.models.convnext_large_384(pretrained=False, progress=True)

    """
    model_kwargs = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    # Bug fix: the arch key was "convnext_large_224", which made
    # `pretrained=True` download the 224-resolution weights for this
    # 384-resolution model.
    return _create_convnext(
        "convnext_large_384", pretrained=pretrained, progress=progress, **model_kwargs
    )
@ModelCreator.register_model
def convnext_base_224_22k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Base pretrained on ImageNet-22k (224x224 input, 21841 classes).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_base_224_22k = flowvision.models.convnext_base_224_22k(pretrained=False, progress=True)
    """
    config = dict(
        depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], num_classes=21841, **kwargs
    )
    return _create_convnext(
        "convnext_base_224_22k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_base_224_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Base pretrained on ImageNet-22k, finetuned on ImageNet-1k (224x224).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_base_224_22k_to_1k = flowvision.models.convnext_base_224_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    return _create_convnext(
        "convnext_base_224_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_base_384_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Base pretrained on ImageNet-22k, finetuned on ImageNet-1k (384x384).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_base_384_22k_to_1k = flowvision.models.convnext_base_384_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[128, 256, 512, 1024], **kwargs)
    return _create_convnext(
        "convnext_base_384_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_large_224_22k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Large pretrained on ImageNet-22k (224x224 input, 21841 classes).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_large_224_22k = flowvision.models.convnext_large_224_22k(pretrained=False, progress=True)
    """
    config = dict(
        depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], num_classes=21841, **kwargs
    )
    return _create_convnext(
        "convnext_large_224_22k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_large_224_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Large pretrained on ImageNet-22k, finetuned on ImageNet-1k (224x224).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_large_224_22k_to_1k = flowvision.models.convnext_large_224_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    return _create_convnext(
        "convnext_large_224_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_large_384_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-Large pretrained on ImageNet-22k, finetuned on ImageNet-1k (384x384).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_large_384_22k_to_1k = flowvision.models.convnext_large_384_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[192, 384, 768, 1536], **kwargs)
    return _create_convnext(
        "convnext_large_384_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_xlarge_224_22k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-XLarge pretrained on ImageNet-22k (224x224 input, 21841 classes).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_xlarge_224_22k = flowvision.models.convnext_xlarge_224_22k(pretrained=False, progress=True)
    """
    config = dict(
        depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], num_classes=21841, **kwargs
    )
    return _create_convnext(
        "convnext_xlarge_224_22k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_xlarge_224_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-XLarge pretrained on ImageNet-22k, finetuned on ImageNet-1k (224x224).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_xlarge_224_22k_to_1k = flowvision.models.convnext_xlarge_224_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
    return _create_convnext(
        "convnext_xlarge_224_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_xlarge_384_22k_to_1k(pretrained=False, progress=True, **kwargs):
    """Build ConvNeXt-XLarge pretrained on ImageNet-22k, finetuned on ImageNet-1k (384x384).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_xlarge_384_22k_to_1k = flowvision.models.convnext_xlarge_384_22k_to_1k(pretrained=False, progress=True)
    """
    config = dict(depths=[3, 3, 27, 3], dims=[256, 512, 1024, 2048], **kwargs)
    return _create_convnext(
        "convnext_xlarge_384_22k_to_1k", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_iso_small_224(pretrained=False, progress=True, **kwargs):
    """Build isotropic ConvNeXt-Small trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_iso_small_224 = flowvision.models.convnext_iso_small_224(pretrained=False, progress=True)
    """
    config = dict(depth=18, dim=384, **kwargs)
    return _create_convnext_isotropic(
        "convnext_iso_small_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_iso_base_224(pretrained=False, progress=True, **kwargs):
    """Build isotropic ConvNeXt-Base trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_iso_base_224 = flowvision.models.convnext_iso_base_224(pretrained=False, progress=True)
    """
    config = dict(depth=18, dim=768, **kwargs)
    return _create_convnext_isotropic(
        "convnext_iso_base_224", pretrained=pretrained, progress=progress, **config
    )
@ModelCreator.register_model
def convnext_iso_large_224(pretrained=False, progress=True, **kwargs):
    """Build isotropic ConvNeXt-Large trained on ImageNet-1k (224x224 input).

    From `"A ConvNet for the 2020s" <https://arxiv.org/abs/2201.03545>`_.

    Args:
        pretrained (bool): download ImageNet pre-trained weights. Default: ``False``
        progress (bool): display a download progress bar on stderr. Default: ``True``

    Example:
        >>> import flowvision
        >>> convnext_iso_large_224 = flowvision.models.convnext_iso_large_224(pretrained=False, progress=True)
    """
    config = dict(depth=36, dim=1024, layer_scale_init_value=1e-6, **kwargs)
    return _create_convnext_isotropic(
        "convnext_iso_large_224", pretrained=pretrained, progress=progress, **config
    )
| [
"oneflow.nn.ModuleList",
"oneflow.nn.LayerNorm",
"oneflow.sqrt",
"oneflow.nn.init.constant_",
"oneflow.zeros",
"oneflow.nn.GELU",
"oneflow.ones",
"oneflow.nn.Identity",
"oneflow.linspace",
"oneflow.nn.Conv2d",
"oneflow.nn.Linear"
] | [((3857, 3914), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['dim', 'dim'], {'kernel_size': '(7)', 'padding': '(3)', 'groups': 'dim'}), '(dim, dim, kernel_size=7, padding=3, groups=dim)\n', (3866, 3914), True, 'import oneflow.nn as nn\n'), ((4023, 4046), 'oneflow.nn.Linear', 'nn.Linear', (['dim', '(4 * dim)'], {}), '(dim, 4 * dim)\n', (4032, 4046), True, 'import oneflow.nn as nn\n'), ((4143, 4152), 'oneflow.nn.GELU', 'nn.GELU', ([], {}), '()\n', (4150, 4152), True, 'import oneflow.nn as nn\n'), ((4176, 4199), 'oneflow.nn.Linear', 'nn.Linear', (['(4 * dim)', 'dim'], {}), '(4 * dim, dim)\n', (4185, 4199), True, 'import oneflow.nn as nn\n'), ((5944, 5959), 'oneflow.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (5957, 5959), True, 'import oneflow.nn as nn\n'), ((6517, 6532), 'oneflow.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (6530, 6532), True, 'import oneflow.nn as nn\n'), ((7177, 7210), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['dims[-1]'], {'eps': '(1e-06)'}), '(dims[-1], eps=1e-06)\n', (7189, 7210), True, 'import oneflow.nn as nn\n'), ((7230, 7262), 'oneflow.nn.Linear', 'nn.Linear', (['dims[-1]', 'num_classes'], {}), '(dims[-1], num_classes)\n', (7239, 7262), True, 'import oneflow.nn as nn\n'), ((8933, 8984), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_chans', 'dim'], {'kernel_size': '(16)', 'stride': '(16)'}), '(in_chans, dim, kernel_size=16, stride=16)\n', (8942, 8984), True, 'import oneflow.nn as nn\n'), ((9379, 9407), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['dim'], {'eps': '(1e-06)'}), '(dim, eps=1e-06)\n', (9391, 9407), True, 'import oneflow.nn as nn\n'), ((9447, 9474), 'oneflow.nn.Linear', 'nn.Linear', (['dim', 'num_classes'], {}), '(dim, num_classes)\n', (9456, 9474), True, 'import oneflow.nn as nn\n'), ((4410, 4429), 'flowvision.layers.DropPath', 'DropPath', (['drop_path'], {}), '(drop_path)\n', (4418, 4429), False, 'from flowvision.layers import trunc_normal_, DropPath\n'), ((4454, 4467), 'oneflow.nn.Identity', 'nn.Identity', ([], {}), '()\n', 
(4465, 4467), True, 'import oneflow.nn as nn\n'), ((6002, 6055), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_chans', 'dims[0]'], {'kernel_size': '(4)', 'stride': '(4)'}), '(in_chans, dims[0], kernel_size=4, stride=4)\n', (6011, 6055), True, 'import oneflow.nn as nn\n'), ((7500, 7533), 'flowvision.layers.trunc_normal_', 'trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (7513, 7533), False, 'from flowvision.layers import trunc_normal_, DropPath\n'), ((7546, 7574), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (7563, 7574), True, 'import oneflow.nn as nn\n'), ((9712, 9745), 'flowvision.layers.trunc_normal_', 'trunc_normal_', (['m.weight'], {'std': '(0.02)'}), '(m.weight, std=0.02)\n', (9725, 9745), False, 'from flowvision.layers import trunc_normal_, DropPath\n'), ((9758, 9786), 'oneflow.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (9775, 9786), True, 'import oneflow.nn as nn\n'), ((10611, 10638), 'oneflow.ones', 'flow.ones', (['normalized_shape'], {}), '(normalized_shape)\n', (10620, 10638), True, 'import oneflow as flow\n'), ((10673, 10701), 'oneflow.zeros', 'flow.zeros', (['normalized_shape'], {}), '(normalized_shape)\n', (10683, 10701), True, 'import oneflow as flow\n'), ((6348, 6404), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['dims[i]', 'dims[i + 1]'], {'kernel_size': '(2)', 'stride': '(2)'}), '(dims[i], dims[i + 1], kernel_size=2, stride=2)\n', (6357, 6404), True, 'import oneflow.nn as nn\n'), ((9023, 9062), 'oneflow.linspace', 'flow.linspace', (['(0)', 'drop_path_rate', 'depth'], {}), '(0, drop_path_rate, depth)\n', (9036, 9062), True, 'import oneflow as flow\n'), ((11160, 11183), 'oneflow.sqrt', 'flow.sqrt', (['(s + self.eps)'], {}), '(s + self.eps)\n', (11169, 11183), True, 'import oneflow as flow\n'), ((4273, 4287), 'oneflow.ones', 'flow.ones', (['dim'], {}), '(dim)\n', (4282, 4287), True, 'import oneflow as flow\n'), ((11450, 11473), 'oneflow.sqrt', 
'flow.sqrt', (['(s + self.eps)'], {}), '(s + self.eps)\n', (11459, 11473), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from automated_test_util import *
from scipy import special
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestReLUModule(flow.unittest.TestCase):
    """ReLU tests driven by the `autotest` harness from automated_test_util."""
    @autotest()
    def test_relu_module_with_random_data(test_case):
        """ReLU forward on a random tensor/device (backward recorded by autotest)."""
        m = torch.nn.ReLU()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    @autotest(auto_backward=False)
    def test_relu_module_with_0shape_data(test_case):
        """ReLU forward on a tensor with a zero-sized dim; backward disabled."""
        m = torch.nn.ReLU()
        m.train(random())
        device = random_device()
        m.to(device)
        # 4-D shape (2, 3, 0, 3): exercises the empty-tensor path.
        x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestReLU6Module(flow.unittest.TestCase):
    """ReLU6 tests driven by the `autotest` harness from automated_test_util."""
    @autotest()
    def test_relu6_module_with_random_data(test_case):
        """ReLU6 forward on a random tensor/device."""
        m = torch.nn.ReLU6()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    @autotest(auto_backward=False)
    def test_relu6_module_with_0shape_data(test_case):
        """ReLU6 forward on a tensor with a zero-sized dim; backward disabled."""
        m = torch.nn.ReLU6()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestTanh(flow.unittest.TestCase):
    """Tanh tests: both the nn.Tanh module and the functional torch.tanh form."""
    @autotest()
    def test_tanh_module_with_random_data(test_case):
        """nn.Tanh forward on a random tensor/device."""
        m = torch.nn.Tanh()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    @autotest(auto_backward=False)
    def test_tanh_module_with_0shapedata(test_case):
        """nn.Tanh forward on a tensor with a zero-sized dim; backward disabled."""
        m = torch.nn.Tanh()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
        y = m(x)
        return y
    @autotest()
    def test_flow_tanh_with_random_data(test_case):
        """Functional tanh on a random tensor/device."""
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = torch.tanh(x)
        return y
    @autotest(auto_backward=False)
    def test_flow_tanh_with_0shape_data(test_case):
        """Functional tanh on a tensor with a zero-sized dim; backward disabled."""
        device = random_device()
        x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
        y = torch.tanh(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestELUModule(flow.unittest.TestCase):
    """ELU tests with a randomized (or defaulted) alpha parameter."""
    @autotest()
    def test_elu_module_with_random_data(test_case):
        """ELU forward; `random() | nothing()` either draws alpha or keeps the default."""
        m = torch.nn.ELU(alpha=random() | nothing())
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    @autotest(auto_backward=False)
    def test_elu_module_with_0shape_data(test_case):
        """ELU forward on a tensor with a zero-sized dim; backward disabled."""
        m = torch.nn.ELU(alpha=random() | nothing())
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor(4, 2, 3, 0, 3).to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestGelu(flow.unittest.TestCase):
    """GELU tests driven by the `autotest` harness."""
    @autotest()
    def test_gelu_module_with_random_data(test_case):
        """nn.GELU forward on a random tensor/device."""
        m = torch.nn.GELU()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
def numpy_softmax(x, axis):
    """Numerically stable reference softmax of ndarray ``x`` along ``axis``."""
    # Shifting by the per-slice max keeps exp() from overflowing.
    exp_shifted = np.exp(x - x.max(axis=axis, keepdims=True))
    return exp_shifted / exp_shifted.sum(axis=axis, keepdims=True)
def numpy_logsoftmax(x, dim):
    """Reference log-softmax along ``dim``: log of the max-shifted softmax."""
    shifted_exp = np.exp(x - np.max(x, axis=dim, keepdims=True))
    return np.log(shifted_exp / shifted_exp.sum(axis=dim, keepdims=True))
def numpy_softplus(x, beta, threshold):
    """Reference softplus: log(1 + exp(beta*x)) / beta, linear above threshold.

    Where ``x * beta > threshold`` the input is passed through unchanged,
    mirroring the threshold semantics of nn.Softplus.
    """
    smooth = 1.0 / beta * np.log(1.0 + np.exp(beta * x))
    return np.where(x * beta > threshold, x, smooth)
def numpy_mish_grad(x):
    """Analytic gradient of mish(x) = x * tanh(softplus(x)), written via g = 1 + e^x."""
    g = 1 + np.exp(x)
    g_sq = g * g
    # First term is tanh(softplus(x)); second is x * d/dx tanh(softplus(x)).
    return (g_sq - 1) / (g_sq + 1) + x * (4 * g * (g - 1)) / ((g_sq + 1) * (g_sq + 1))
@flow.unittest.skip_unless_1n1d()
class TestSigmoid(flow.unittest.TestCase):
    """Sigmoid tests: module, functional, and tensor-method forms."""
    @autotest()
    def test_sigmoid_module_with_random_data(test_case):
        """nn.Sigmoid forward on a random tensor/device."""
        m = torch.nn.Sigmoid()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    @autotest()
    def test_sigmoid_flow_with_random_data(test_case):
        """Functional sigmoid on a random tensor/device."""
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = torch.sigmoid(x)
        return y
    @autotest()
    def test_sigmoid_tensor_with_random_data(test_case):
        """Tensor.sigmoid() method on a random tensor/device."""
        device = random_device()
        x = random_pytorch_tensor().to(device)
        y = x.sigmoid()
        return y
def _test_softmax(test_case, device):
    """Check nn.Softmax(dim=0) against the numpy reference on a random 4-D tensor."""
    dim = 0
    softmax = flow.nn.Softmax(dim=dim)
    np_in = np.random.randn(2, 3, 4, 5)
    of_out = softmax(flow.Tensor(np_in, device=flow.device(device)))
    expected = numpy_softmax(np_in, dim)
    test_case.assertTrue(np.allclose(of_out.numpy(), expected, 1e-05, 1e-05))
def _test_softmax_dim_1(test_case, device):
    """Check nn.Softmax(dim=1) against the numpy reference on a random 4-D tensor."""
    dim = 1
    softmax = flow.nn.Softmax(dim=dim)
    np_in = np.random.randn(9, 7, 8, 16)
    of_out = softmax(flow.Tensor(np_in, device=flow.device(device)))
    expected = numpy_softmax(np_in, dim)
    test_case.assertTrue(np.allclose(of_out.numpy(), expected, 1e-05, 1e-05))
def _test_softmax_dim_2(test_case, device):
    """Check nn.Softmax(dim=2) against the numpy reference on a random 4-D tensor."""
    dim = 2
    softmax = flow.nn.Softmax(dim=dim)
    np_in = np.random.randn(2, 5, 6, 3)
    of_out = softmax(flow.Tensor(np_in, device=flow.device(device)))
    expected = numpy_softmax(np_in, dim)
    test_case.assertTrue(np.allclose(of_out.numpy(), expected, 1e-05, 1e-05))
def _test_softmax_dim_3(test_case, device):
    """Check nn.Softmax on the last dim, both as dim=3 and as dim=-1.

    Bug fix: the original built ``m2 = flow.nn.Softmax(dim=axis)`` (instead of
    ``axis2``) and then evaluated ``m(x)`` with the first module and
    ``numpy_softmax(arr, axis)`` with the first axis, so the negative-axis
    path was never actually exercised.
    """
    axis = 3
    m = flow.nn.Softmax(dim=axis)
    arr = np.random.randn(1, 3, 4, 7)
    x = flow.Tensor(arr, device=flow.device(device))
    y = m(x)
    output = numpy_softmax(arr, axis)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-05, 1e-05))
    # dim=-1 must address the same (last) axis as dim=3 on a 4-D tensor.
    axis2 = -1
    m2 = flow.nn.Softmax(dim=axis2)
    y2 = m2(x)
    output2 = numpy_softmax(arr, axis2)
    test_case.assertTrue(np.allclose(y2.numpy(), output2, 1e-05, 1e-05))
def _test_softmax_backward_normal(test_case, device):
    """Backward of sum(softmax(x, dim=0)): the expected input gradient is zero,
    because each dim-0 slice of the output sums to a constant 1."""
    expected_grad = np.zeros((2, 3, 4, 5))
    softmax = flow.nn.Softmax(dim=0)
    x = flow.Tensor(
        np.random.randn(2, 3, 4, 5),
        requires_grad=True,
        device=flow.device(device),
        dtype=flow.float64,
    )
    softmax(x).sum().backward()
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-05, 1e-05))
def _test_softmax_backward_1_dim(test_case, device):
    """Backward through softmax(a * b)[0] w.r.t. a 1-D leaf tensor ``a``.

    The expected gradient is a precomputed constant for the fixed inputs.
    """
    dev = flow.device(device)
    a = flow.tensor([1, 2], dtype=flow.float64, device=dev, requires_grad=True)
    b = flow.tensor([3, 4], dtype=flow.float64, device=dev, requires_grad=True)
    prob = flow.nn.Softmax(dim=None)(a * b)
    prob[0].backward()
    expected = np.array([0.01994417, -0.0265922267])
    test_case.assertTrue(np.allclose(a.grad.numpy(), expected, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestSoftmax(flow.unittest.TestCase):
    def test_softmax(test_case):
        """Run every softmax sub-test on both cpu and cuda."""
        arg_dict = OrderedDict()
        arg_dict["fun"] = [
            _test_softmax,
            _test_softmax_dim_1,
            _test_softmax_dim_2,
            _test_softmax_dim_3,
            _test_softmax_backward_normal,
            _test_softmax_backward_1_dim,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for fun, device in GenArgList(arg_dict):
            fun(test_case, device)
@flow.unittest.skip_unless_1n1d()
class TestHardsigmoidModule(flow.unittest.TestCase):
    """Hardsigmoid tests driven by the `autotest` harness."""
    @autotest()
    def test_hardsigmoid_module_with_random_data(test_case):
        """nn.Hardsigmoid forward on a random tensor/device."""
        m = torch.nn.Hardsigmoid()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
def _test_logsoftmax(test_case, device):
    """LogSoftmax(dim=1) forward against the numpy reference on a 2-D input."""
    dim = 1
    module = flow.nn.LogSoftmax(dim)
    np_in = np.random.randn(4, 7)
    of_out = module(flow.Tensor(np_in, device=flow.device(device)))
    test_case.assertTrue(
        np.allclose(of_out.numpy(), numpy_logsoftmax(np_in, dim), 1e-05, 1e-05)
    )
def _test_logsoftmax_dim_2(test_case, device):
    """LogSoftmax(dim=2) forward against the numpy reference on a 3-D input."""
    dim = 2
    module = flow.nn.LogSoftmax(dim)
    np_in = np.random.randn(3, 4, 5)
    of_out = module(flow.Tensor(np_in, device=flow.device(device)))
    test_case.assertTrue(
        np.allclose(of_out.numpy(), numpy_logsoftmax(np_in, dim), 1e-05, 1e-05)
    )
def _test_logsoftmax_dim_3(test_case, device):
    """LogSoftmax(dim=3) forward against the numpy reference on a 4-D input."""
    dim = 3
    module = flow.nn.LogSoftmax(dim)
    np_in = np.random.randn(8, 9, 7, 3)
    of_out = module(flow.Tensor(np_in, device=flow.device(device)))
    test_case.assertTrue(
        np.allclose(of_out.numpy(), numpy_logsoftmax(np_in, dim), 1e-05, 1e-05)
    )
def _test_logsoftmax_backward(test_case, device):
    """Forward + backward of LogSoftmax(dim=0) on a fixed (2, 3, 4, 5) input.

    Both the input and the expected gradient are hard-coded fixtures that
    belong together — x_grad was presumably precomputed from the analytic
    log-softmax Jacobian for exactly this input_arr; if either array is
    edited, the other must be regenerated to match.
    """
    axis = 0
    m = flow.nn.LogSoftmax(axis)
    input_arr = np.array(
        [
            [
                [
                    [2.0, 1.0, 9.0, 3.0, 4.0],
                    [1.0, 6.0, 7.0, 1.0, 4.0],
                    [4.0, 7.0, 5.0, 8.0, 1.0],
                    [9.0, 5.0, 7.0, 8.0, 5.0],
                ],
                [
                    [1.0, 1.0, 5.0, 3.0, 5.0],
                    [3.0, 6.0, 3.0, 7.0, 8.0],
                    [8.0, 8.0, 1.0, 2.0, 6.0],
                    [3.0, 5.0, 6.0, 1.0, 1.0],
                ],
                [
                    [8.0, 3.0, 6.0, 3.0, 7.0],
                    [8.0, 5.0, 1.0, 2.0, 7.0],
                    [3.0, 9.0, 4.0, 6.0, 5.0],
                    [5.0, 1.0, 2.0, 3.0, 6.0],
                ],
            ],
            [
                [
                    [3.0, 5.0, 3.0, 1.0, 7.0],
                    [5.0, 2.0, 6.0, 3.0, 5.0],
                    [5.0, 1.0, 8.0, 6.0, 9.0],
                    [9.0, 8.0, 4.0, 5.0, 1.0],
                ],
                [
                    [7.0, 5.0, 7.0, 1.0, 6.0],
                    [3.0, 3.0, 6.0, 6.0, 7.0],
                    [9.0, 4.0, 1.0, 5.0, 7.0],
                    [7.0, 6.0, 9.0, 8.0, 6.0],
                ],
                [
                    [6.0, 7.0, 5.0, 3.0, 9.0],
                    [4.0, 1.0, 2.0, 3.0, 2.0],
                    [4.0, 3.0, 8.0, 7.0, 8.0],
                    [1.0, 3.0, 8.0, 6.0, 2.0],
                ],
            ],
        ]
    )
    # float64 keeps the comparison against the precomputed gradient tight.
    x = flow.Tensor(
        input_arr, requires_grad=True, device=flow.device(device), dtype=flow.float64
    )
    x_grad = np.array(
        [
            [
                [
                    [0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825],
                    [0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716],
                    [0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.9993293],
                    [0.0, 0.90514825, -0.90514825, -0.90514825, -0.96402758],
                ],
                [
                    [0.99505475, 0.96402758, 0.76159416, -0.76159416, 0.46211716],
                    [0.0, -0.90514825, 0.90514825, -0.46211716, -0.46211716],
                    [0.46211716, -0.96402758, 0.0, 0.90514825, 0.46211716],
                    [0.96402758, 0.46211716, 0.90514825, 0.9981779, 0.9866143],
                ],
                [
                    [-0.76159416, 0.96402758, -0.46211716, 0.0, 0.76159416],
                    [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.9866143],
                    [0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825],
                    [-0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758],
                ],
            ],
            [
                [
                    [-0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825],
                    [-0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716],
                    [-0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.9993293],
                    [0.0, -0.90514825, 0.90514825, 0.90514825, 0.96402758],
                ],
                [
                    [-0.99505475, -0.96402758, -0.76159416, 0.76159416, -0.46211716],
                    [0.0, 0.90514825, -0.90514825, 0.46211716, 0.46211716],
                    [-0.46211716, 0.96402758, 0.0, -0.90514825, -0.46211716],
                    [-0.96402758, -0.46211716, -0.90514825, -0.9981779, -0.9866143],
                ],
                [
                    [0.76159416, -0.96402758, 0.46211716, 0.0, -0.76159416],
                    [0.96402758, 0.96402758, -0.46211716, -0.46211716, 0.9866143],
                    [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -0.90514825],
                    [0.96402758, -0.76159416, -0.99505475, -0.90514825, 0.96402758],
                ],
            ],
        ]
    )
    y = m(x).sum()
    y.backward()
    test_case.assertTrue(np.allclose(x.grad.numpy(), x_grad, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestLogSoftmax(flow.unittest.TestCase):
    def test_log_softmax(test_case):
        """Run every log-softmax sub-test on both cpu and cuda."""
        arg_dict = OrderedDict()
        arg_dict["fun"] = [
            _test_logsoftmax,
            _test_logsoftmax_dim_2,
            _test_logsoftmax_dim_3,
            _test_logsoftmax_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for fun, device in GenArgList(arg_dict):
            fun(test_case, device)
@flow.unittest.skip_unless_1n1d()
class TestLogSigmoidModule(flow.unittest.TestCase):
    """LogSigmoid tests driven by the `autotest` harness."""
    @autotest()
    def test_logsigmoid_module_with_random_data(test_case):
        """nn.LogSigmoid forward on a random tensor/device."""
        m = torch.nn.LogSigmoid()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
def _test_softplus(test_case, device):
    """Default Softplus forward against numpy_softplus(beta=1, threshold=20)."""
    module = flow.nn.Softplus()
    np_in = np.random.randn(2, 3, 4, 5)
    expected = numpy_softplus(np_in, 1.0, 20)
    actual = module(flow.Tensor(np_in, device=flow.device(device)))
    test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-05, 1e-05))
def _test_softplus_beta(test_case, device):
    """Softplus with a non-default beta against the numpy reference."""
    module = flow.nn.Softplus(beta=1.11)
    np_in = np.random.randn(2, 3, 4, 5)
    expected = numpy_softplus(np_in, 1.11, 20)
    actual = module(flow.Tensor(np_in, device=flow.device(device)))
    test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-05, 1e-05))
def _test_softplus_threshold(test_case, device):
    """Softplus with both beta and threshold set against the numpy reference.

    Cleanup: the original computed ``np_out`` twice — an inline ``np.where``
    expression whose result was immediately overwritten by the
    ``numpy_softplus(...)`` call. The dead first computation is removed.
    """
    m = flow.nn.Softplus(beta=1.11, threshold=1.55)
    arr = np.random.randn(2, 3, 4, 5)
    np_out = numpy_softplus(arr, 1.11, 1.55)
    x = flow.Tensor(arr, device=flow.device(device))
    of_out = m(x)
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
def _test_softplus_backward(test_case, device):
    """Softplus backward on fixed values; entries past the threshold (20) get grad 1."""
    module = flow.nn.Softplus()
    values = np.array([1.0, 2.0, 21.0, 20.0, 4.0])
    x = flow.Tensor(values, device=flow.device(device), requires_grad=True)
    module(x).sum().backward()
    # Precomputed expected gradient for the fixed input above.
    expected_grad = [0.7310585786300049, 0.8807970779778824, 1.0, 1.0, 0.9820137900379085]
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestSoftplusModule(flow.unittest.TestCase):
    """Softplus tests: numpy-reference sub-tests plus a skipped autotest case."""
    def test_softplus(test_case):
        """Run every softplus sub-test; note only "cpu" is listed here (unlike
        the other suites) — presumably deliberate, verify before extending."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_softplus,
            _test_softplus_beta,
            _test_softplus_threshold,
            _test_softplus_backward,
        ]
        arg_dict["device"] = ["cpu"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
    @unittest.skip("pytorch softplus backward has bug")
    @autotest()
    def test_softplus_module_with_random_data(test_case):
        """Autotest comparison against torch.nn.Softplus; skipped, see decorator."""
        m = torch.nn.Softplus(beta=random() | nothing(), threshold=random() | nothing())
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestHardswishModule(flow.unittest.TestCase):
    """Hardswish tests driven by the `autotest` harness."""
    @autotest()
    def test_hardswish_module_with_random_data(test_case):
        """nn.Hardswish forward on a random tensor/device."""
        m = torch.nn.Hardswish()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
def _np_hardtanh_grad(x):
return np.where(x <= -2.0, 0.0, np.where(x >= 2.3, 0.0, 1.0))
def _test_hardtanh_impl(test_case, shape, device):
    """Hardtanh forward with default bounds, then forward + backward with custom bounds."""
    dev = flow.device(device)
    default_mod = flow.nn.Hardtanh()
    data = np.random.randn(*shape)
    expected = np.maximum(-1, np.minimum(1, data))
    out = default_mod(flow.Tensor(data, device=dev))
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-05, 1e-05))
    custom_mod = flow.nn.Hardtanh(min_val=-2.0, max_val=2.3)
    data = np.random.randn(*shape)
    expected = np.maximum(-2.0, np.minimum(2.3, data))
    x = flow.Tensor(data, device=dev, requires_grad=True)
    out = custom_mod(x)
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-05, 1e-05))
    out.sum().backward()
    # Grad reference is fed the clamped output, matching the original test.
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), _np_hardtanh_grad(expected), 1e-05, 1e-05)
    )
@flow.unittest.skip_unless_1n1d()
class TestHardtanhModule(flow.unittest.TestCase):
    def test_hardtanh(test_case):
        """Exercise hardtanh over several shapes on both cpu and cuda."""
        arg_dict = OrderedDict()
        arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 4, 5, 6)]
        arg_dict["device"] = ["cpu", "cuda"]
        for shape, device in GenArgList(arg_dict):
            _test_hardtanh_impl(test_case, shape, device)
@flow.unittest.skip_unless_1n1d()
class TestLeakyReLUModule(flow.unittest.TestCase):
    """LeakyReLU tests with a randomized (or defaulted) negative slope."""
    @autotest()
    def test_leakyrelu_module_with_random_data(test_case):
        """nn.LeakyReLU forward; `random() | nothing()` may keep the default slope."""
        m = torch.nn.LeakyReLU(negative_slope=random() | nothing())
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestMishModule(flow.unittest.TestCase):
    """Mish tests driven by the `autotest` harness (n=5 repetitions)."""
    @autotest(n=5)
    def test_mish_module_with_random_data(test_case):
        """nn.Mish forward on a random tensor/device."""
        m = torch.nn.Mish()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestSiluModule(flow.unittest.TestCase):
    """SiLU tests driven by the `autotest` harness (n=5 repetitions)."""
    @autotest(n=5)
    def test_silu_module_with_random_data(test_case):
        """nn.SiLU forward on a random tensor/device."""
        m = torch.nn.SiLU()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
@flow.unittest.skip_unless_1n1d()
class TestSeluModule(flow.unittest.TestCase):
    """SELU tests driven by the `autotest` harness (n=5 repetitions)."""
    @autotest(n=5)
    def test_selu_module_with_random_data(test_case):
        """nn.SELU forward on a random tensor/device."""
        m = torch.nn.SELU()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
def _np_softsign(x):
return x / (1.0 + np.abs(x))
def _np_softsign_grad(x):
return 1.0 / (np.square(1.0 + np.abs(x)))
def _test_softsign_impl(test_case, shape, device):
    """Softsign forward and backward against the numpy reference helpers."""
    module = flow.nn.Softsign()
    np_in = np.random.randn(*shape)
    expected = _np_softsign(np_in)
    x = flow.Tensor(
        np_in, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    out = module(x)
    test_case.assertTrue(np.allclose(out.numpy(), expected, 1e-3, 1e-3))
    out.sum().backward()
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), _np_softsign_grad(np_in), 1e-3, 1e-3)
    )
@unittest.skip("still have error in ci test")
class TestSoftsignModule(flow.unittest.TestCase):
    """Softsign tests; the whole class is skipped in CI (see decorator)."""
    @autotest(n=5)
    def test_softsign_module_with_random_data(test_case):
        """nn.Softsign forward on a random tensor/device via autotest."""
        m = torch.nn.Softsign()
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor().to(device)
        y = m(x)
        return y
    def test_softsign(test_case):
        """Run the numpy-reference softsign sub-test over shapes and devices."""
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_softsign_impl]
        arg_dict["shape"] = [(3, 3), (2, 3, 3)]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
"oneflow.nn.Softplus",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.LogSoftmax",
"oneflow.device",
"oneflow.nn.Softsign",
"oneflow.nn.Softmax",
"oneflow.nn.Hardtanh"
] | [((807, 839), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (837, 839), True, 'import oneflow as flow\n'), ((1440, 1472), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1470, 1472), True, 'import oneflow as flow\n'), ((2078, 2110), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2108, 2110), True, 'import oneflow as flow\n'), ((3120, 3152), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3150, 3152), True, 'import oneflow as flow\n'), ((3800, 3832), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3830, 3832), True, 'import oneflow as flow\n'), ((4740, 4772), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4770, 4772), True, 'import oneflow as flow\n'), ((7762, 7794), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7792, 7794), True, 'import oneflow as flow\n'), ((8282, 8314), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (8312, 8314), True, 'import oneflow as flow\n'), ((13679, 13711), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (13709, 13711), True, 'import oneflow as flow\n'), ((14136, 14168), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (14166, 14168), True, 'import oneflow as flow\n'), ((15966, 15998), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (15996, 15998), True, 'import oneflow as flow\n'), ((16797, 16829), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (16827, 16829), True, 'import oneflow as flow\n'), ((18018, 18050), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (18048, 18050), True, 'import oneflow as 
flow\n'), ((18368, 18400), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (18398, 18400), True, 'import oneflow as flow\n'), ((18759, 18791), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (18789, 18791), True, 'import oneflow as flow\n'), ((19103, 19135), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (19133, 19135), True, 'import oneflow as flow\n'), ((19447, 19479), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (19477, 19479), True, 'import oneflow as flow\n'), ((20459, 20503), 'unittest.skip', 'unittest.skip', (['"""still have error in ci test"""'], {}), "('still have error in ci test')\n", (20472, 20503), False, 'import unittest\n'), ((4214, 4223), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4220, 4223), True, 'import numpy as np\n'), ((5535, 5560), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (5550, 5560), True, 'import oneflow as flow\n'), ((5571, 5598), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (5586, 5598), True, 'import numpy as np\n'), ((5841, 5866), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (5856, 5866), True, 'import oneflow as flow\n'), ((5877, 5905), 'numpy.random.randn', 'np.random.randn', (['(9)', '(7)', '(8)', '(16)'], {}), '(9, 7, 8, 16)\n', (5892, 5905), True, 'import numpy as np\n'), ((6148, 6173), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (6163, 6173), True, 'import oneflow as flow\n'), ((6184, 6211), 'numpy.random.randn', 'np.random.randn', (['(2)', '(5)', '(6)', '(3)'], {}), '(2, 5, 6, 3)\n', (6199, 6211), True, 'import numpy as np\n'), ((6454, 6479), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (6469, 6479), True, 'import oneflow as flow\n'), ((6490, 6517), 
'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(4)', '(7)'], {}), '(1, 3, 4, 7)\n', (6505, 6517), True, 'import numpy as np\n'), ((6717, 6742), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (6732, 6742), True, 'import oneflow as flow\n'), ((6938, 6960), 'numpy.zeros', 'np.zeros', (['(2, 3, 4, 5)'], {}), '((2, 3, 4, 5))\n', (6946, 6960), True, 'import numpy as np\n'), ((6982, 7007), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'axis'}), '(dim=axis)\n', (6997, 7007), True, 'import oneflow as flow\n'), ((7573, 7598), 'oneflow.nn.Softmax', 'flow.nn.Softmax', ([], {'dim': 'None'}), '(dim=None)\n', (7588, 7598), True, 'import oneflow as flow\n'), ((7645, 7682), 'numpy.array', 'np.array', (['[0.01994417, -0.0265922267]'], {}), '([0.01994417, -0.0265922267])\n', (7653, 7682), True, 'import numpy as np\n'), ((8704, 8727), 'oneflow.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (8722, 8727), True, 'import oneflow as flow\n'), ((8744, 8765), 'numpy.random.randn', 'np.random.randn', (['(4)', '(7)'], {}), '(4, 7)\n', (8759, 8765), True, 'import numpy as np\n'), ((9024, 9047), 'oneflow.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (9042, 9047), True, 'import oneflow as flow\n'), ((9064, 9088), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (9079, 9088), True, 'import numpy as np\n'), ((9347, 9370), 'oneflow.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['dim'], {}), '(dim)\n', (9365, 9370), True, 'import oneflow as flow\n'), ((9387, 9414), 'numpy.random.randn', 'np.random.randn', (['(8)', '(9)', '(7)', '(3)'], {}), '(8, 9, 7, 3)\n', (9402, 9414), True, 'import numpy as np\n'), ((9677, 9701), 'oneflow.nn.LogSoftmax', 'flow.nn.LogSoftmax', (['axis'], {}), '(axis)\n', (9695, 9701), True, 'import oneflow as flow\n'), ((9718, 10431), 'numpy.array', 'np.array', (['[[[[2.0, 1.0, 9.0, 3.0, 4.0], [1.0, 6.0, 7.0, 1.0, 4.0], [4.0, 7.0, 5.0, \n 8.0, 1.0], [9.0, 5.0, 7.0, 
8.0, 5.0]], [[1.0, 1.0, 5.0, 3.0, 5.0], [3.0,\n 6.0, 3.0, 7.0, 8.0], [8.0, 8.0, 1.0, 2.0, 6.0], [3.0, 5.0, 6.0, 1.0, \n 1.0]], [[8.0, 3.0, 6.0, 3.0, 7.0], [8.0, 5.0, 1.0, 2.0, 7.0], [3.0, 9.0,\n 4.0, 6.0, 5.0], [5.0, 1.0, 2.0, 3.0, 6.0]]], [[[3.0, 5.0, 3.0, 1.0, 7.0\n ], [5.0, 2.0, 6.0, 3.0, 5.0], [5.0, 1.0, 8.0, 6.0, 9.0], [9.0, 8.0, 4.0,\n 5.0, 1.0]], [[7.0, 5.0, 7.0, 1.0, 6.0], [3.0, 3.0, 6.0, 6.0, 7.0], [9.0,\n 4.0, 1.0, 5.0, 7.0], [7.0, 6.0, 9.0, 8.0, 6.0]], [[6.0, 7.0, 5.0, 3.0, \n 9.0], [4.0, 1.0, 2.0, 3.0, 2.0], [4.0, 3.0, 8.0, 7.0, 8.0], [1.0, 3.0, \n 8.0, 6.0, 2.0]]]]'], {}), '([[[[2.0, 1.0, 9.0, 3.0, 4.0], [1.0, 6.0, 7.0, 1.0, 4.0], [4.0, 7.0,\n 5.0, 8.0, 1.0], [9.0, 5.0, 7.0, 8.0, 5.0]], [[1.0, 1.0, 5.0, 3.0, 5.0],\n [3.0, 6.0, 3.0, 7.0, 8.0], [8.0, 8.0, 1.0, 2.0, 6.0], [3.0, 5.0, 6.0, \n 1.0, 1.0]], [[8.0, 3.0, 6.0, 3.0, 7.0], [8.0, 5.0, 1.0, 2.0, 7.0], [3.0,\n 9.0, 4.0, 6.0, 5.0], [5.0, 1.0, 2.0, 3.0, 6.0]]], [[[3.0, 5.0, 3.0, 1.0,\n 7.0], [5.0, 2.0, 6.0, 3.0, 5.0], [5.0, 1.0, 8.0, 6.0, 9.0], [9.0, 8.0, \n 4.0, 5.0, 1.0]], [[7.0, 5.0, 7.0, 1.0, 6.0], [3.0, 3.0, 6.0, 6.0, 7.0],\n [9.0, 4.0, 1.0, 5.0, 7.0], [7.0, 6.0, 9.0, 8.0, 6.0]], [[6.0, 7.0, 5.0,\n 3.0, 9.0], [4.0, 1.0, 2.0, 3.0, 2.0], [4.0, 3.0, 8.0, 7.0, 8.0], [1.0, \n 3.0, 8.0, 6.0, 2.0]]]])\n', (9726, 10431), True, 'import numpy as np\n'), ((11288, 12901), 'numpy.array', 'np.array', (['[[[[0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825], [\n 0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716], [\n 0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.9993293], [0.0, \n 0.90514825, -0.90514825, -0.90514825, -0.96402758]], [[0.99505475, \n 0.96402758, 0.76159416, -0.76159416, 0.46211716], [0.0, -0.90514825, \n 0.90514825, -0.46211716, -0.46211716], [0.46211716, -0.96402758, 0.0, \n 0.90514825, 0.46211716], [0.96402758, 0.46211716, 0.90514825, 0.9981779,\n 0.9866143]], [[-0.76159416, 0.96402758, -0.46211716, 0.0, 0.76159416],\n [-0.96402758, -0.96402758, 0.46211716, 
0.46211716, -0.9866143], [\n 0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825], [-\n 0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758]]], [[[-\n 0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825], [-\n 0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716], [-\n 0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.9993293], [0.0, -\n 0.90514825, 0.90514825, 0.90514825, 0.96402758]], [[-0.99505475, -\n 0.96402758, -0.76159416, 0.76159416, -0.46211716], [0.0, 0.90514825, -\n 0.90514825, 0.46211716, 0.46211716], [-0.46211716, 0.96402758, 0.0, -\n 0.90514825, -0.46211716], [-0.96402758, -0.46211716, -0.90514825, -\n 0.9981779, -0.9866143]], [[0.76159416, -0.96402758, 0.46211716, 0.0, -\n 0.76159416], [0.96402758, 0.96402758, -0.46211716, -0.46211716, \n 0.9866143], [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -\n 0.90514825], [0.96402758, -0.76159416, -0.99505475, -0.90514825, \n 0.96402758]]]]'], {}), '([[[[0.46211716, 0.96402758, -0.99505475, -0.76159416, 0.90514825],\n [0.96402758, -0.96402758, -0.46211716, 0.76159416, 0.46211716], [\n 0.46211716, -0.99505475, 0.90514825, -0.76159416, 0.9993293], [0.0, \n 0.90514825, -0.90514825, -0.90514825, -0.96402758]], [[0.99505475, \n 0.96402758, 0.76159416, -0.76159416, 0.46211716], [0.0, -0.90514825, \n 0.90514825, -0.46211716, -0.46211716], [0.46211716, -0.96402758, 0.0, \n 0.90514825, 0.46211716], [0.96402758, 0.46211716, 0.90514825, 0.9981779,\n 0.9866143]], [[-0.76159416, 0.96402758, -0.46211716, 0.0, 0.76159416],\n [-0.96402758, -0.96402758, 0.46211716, 0.46211716, -0.9866143], [\n 0.46211716, -0.99505475, 0.96402758, 0.46211716, 0.90514825], [-\n 0.96402758, 0.76159416, 0.99505475, 0.90514825, -0.96402758]]], [[[-\n 0.46211716, -0.96402758, 0.99505475, 0.76159416, -0.90514825], [-\n 0.96402758, 0.96402758, 0.46211716, -0.76159416, -0.46211716], [-\n 0.46211716, 0.99505475, -0.90514825, 0.76159416, -0.9993293], [0.0, -\n 0.90514825, 0.90514825, 0.90514825, 0.96402758]], 
[[-0.99505475, -\n 0.96402758, -0.76159416, 0.76159416, -0.46211716], [0.0, 0.90514825, -\n 0.90514825, 0.46211716, 0.46211716], [-0.46211716, 0.96402758, 0.0, -\n 0.90514825, -0.46211716], [-0.96402758, -0.46211716, -0.90514825, -\n 0.9981779, -0.9866143]], [[0.76159416, -0.96402758, 0.46211716, 0.0, -\n 0.76159416], [0.96402758, 0.96402758, -0.46211716, -0.46211716, \n 0.9866143], [-0.46211716, 0.99505475, -0.96402758, -0.46211716, -\n 0.90514825], [0.96402758, -0.76159416, -0.99505475, -0.90514825, \n 0.96402758]]]])\n', (11296, 12901), True, 'import numpy as np\n'), ((14541, 14559), 'oneflow.nn.Softplus', 'flow.nn.Softplus', ([], {}), '()\n', (14557, 14559), True, 'import oneflow as flow\n'), ((14570, 14597), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (14585, 14597), True, 'import numpy as np\n'), ((14841, 14868), 'oneflow.nn.Softplus', 'flow.nn.Softplus', ([], {'beta': '(1.11)'}), '(beta=1.11)\n', (14857, 14868), True, 'import oneflow as flow\n'), ((14879, 14906), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (14894, 14906), True, 'import numpy as np\n'), ((15156, 15199), 'oneflow.nn.Softplus', 'flow.nn.Softplus', ([], {'beta': '(1.11)', 'threshold': '(1.55)'}), '(beta=1.11, threshold=1.55)\n', (15172, 15199), True, 'import oneflow as flow\n'), ((15210, 15237), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (15225, 15237), True, 'import numpy as np\n'), ((15595, 15613), 'oneflow.nn.Softplus', 'flow.nn.Softplus', ([], {}), '()\n', (15611, 15613), True, 'import oneflow as flow\n'), ((15624, 15661), 'numpy.array', 'np.array', (['[1.0, 2.0, 21.0, 20.0, 4.0]'], {}), '([1.0, 2.0, 21.0, 20.0, 4.0])\n', (15632, 15661), True, 'import numpy as np\n'), ((16419, 16469), 'unittest.skip', 'unittest.skip', (['"""pytorch softplus backward has bug"""'], {}), "('pytorch softplus backward has bug')\n", (16432, 16469), False, 'import 
unittest\n'), ((17305, 17323), 'oneflow.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {}), '()\n', (17321, 17323), True, 'import oneflow as flow\n'), ((17334, 17357), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (17349, 17357), True, 'import numpy as np\n'), ((17561, 17604), 'oneflow.nn.Hardtanh', 'flow.nn.Hardtanh', ([], {'min_val': '(-2.0)', 'max_val': '(2.3)'}), '(min_val=-2.0, max_val=2.3)\n', (17577, 17604), True, 'import oneflow as flow\n'), ((17615, 17638), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (17630, 17638), True, 'import numpy as np\n'), ((19979, 19997), 'oneflow.nn.Softsign', 'flow.nn.Softsign', ([], {}), '()\n', (19995, 19997), True, 'import oneflow as flow\n'), ((20013, 20036), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (20028, 20036), True, 'import numpy as np\n'), ((21153, 21168), 'unittest.main', 'unittest.main', ([], {}), '()\n', (21166, 21168), False, 'import unittest\n'), ((4602, 4611), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (4608, 4611), True, 'import numpy as np\n'), ((7037, 7064), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (7052, 7064), True, 'import numpy as np\n'), ((7890, 7903), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7901, 7903), False, 'from collections import OrderedDict\n'), ((8217, 8237), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8227, 8237), False, 'from test_util import GenArgList\n'), ((13814, 13827), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13825, 13827), False, 'from collections import OrderedDict\n'), ((14071, 14091), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (14081, 14091), False, 'from test_util import GenArgList\n'), ((16102, 16115), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16113, 16115), False, 'from collections import OrderedDict\n'), ((16351, 16371), 
'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (16361, 16371), False, 'from test_util import GenArgList\n'), ((17214, 17242), 'numpy.where', 'np.where', (['(x >= 2.3)', '(0.0)', '(1.0)'], {}), '(x >= 2.3, 0.0, 1.0)\n', (17222, 17242), True, 'import numpy as np\n'), ((17386, 17404), 'numpy.minimum', 'np.minimum', (['(1)', 'arr'], {}), '(1, arr)\n', (17396, 17404), True, 'import numpy as np\n'), ((17669, 17689), 'numpy.minimum', 'np.minimum', (['(2.3)', 'arr'], {}), '(2.3, arr)\n', (17679, 17689), True, 'import numpy as np\n'), ((18154, 18167), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18165, 18167), False, 'from collections import OrderedDict\n'), ((18294, 18314), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18304, 18314), False, 'from test_util import GenArgList\n'), ((20878, 20891), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20889, 20891), False, 'from collections import OrderedDict\n'), ((21058, 21078), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21068, 21078), False, 'from test_util import GenArgList\n'), ((4324, 4358), 'numpy.max', 'np.max', (['x'], {'axis': 'dim', 'keepdims': '(True)'}), '(x, axis=dim, keepdims=True)\n', (4330, 4358), True, 'import numpy as np\n'), ((5631, 5650), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5642, 5650), True, 'import oneflow as flow\n'), ((5938, 5957), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5949, 5957), True, 'import oneflow as flow\n'), ((6244, 6263), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (6255, 6263), True, 'import oneflow as flow\n'), ((6550, 6569), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (6561, 6569), True, 'import oneflow as flow\n'), ((7109, 7128), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (7120, 7128), True, 'import oneflow as flow\n'), ((7395, 7414), 'oneflow.device', 
'flow.device', (['device'], {}), '(device)\n', (7406, 7414), True, 'import oneflow as flow\n'), ((7505, 7524), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (7516, 7524), True, 'import oneflow as flow\n'), ((8804, 8823), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (8815, 8823), True, 'import oneflow as flow\n'), ((9127, 9146), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9138, 9146), True, 'import oneflow as flow\n'), ((9453, 9472), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9464, 9472), True, 'import oneflow as flow\n'), ((11229, 11248), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (11240, 11248), True, 'import oneflow as flow\n'), ((14672, 14691), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (14683, 14691), True, 'import oneflow as flow\n'), ((14982, 15001), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (14993, 15001), True, 'import oneflow as flow\n'), ((15422, 15441), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (15433, 15441), True, 'import oneflow as flow\n'), ((15694, 15713), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (15705, 15713), True, 'import oneflow as flow\n'), ((17438, 17457), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (17449, 17457), True, 'import oneflow as flow\n'), ((17723, 17742), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (17734, 17742), True, 'import oneflow as flow\n'), ((19833, 19842), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (19839, 19842), True, 'import numpy as np\n'), ((20146, 20165), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (20157, 20165), True, 'import oneflow as flow\n'), ((19906, 19915), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (19912, 19915), True, 'import numpy as np\n'), ((4540, 4556), 'numpy.exp', 'np.exp', (['(beta * x)'], {}), '(beta * x)\n', (4546, 4556), True, 'import 
numpy as np\n'), ((15319, 15337), 'numpy.exp', 'np.exp', (['(1.11 * arr)'], {}), '(1.11 * arr)\n', (15325, 15337), True, 'import numpy as np\n')] |
import oneflow as flow
import networks
import itertools
from image import ImagePool, ndarray2image
import numpy as np
import cv2
class CycleGANModel:
    """CycleGAN model: two generators (A->B, B->A), two discriminators,
    adversarial + cycle-consistency + identity losses.

    Naming follows the original CycleGAN convention:
      netG_A maps domain A -> B, netG_B maps B -> A;
      netD_A discriminates domain B images, netD_B discriminates domain A images.
    """

    def __init__(self, opt):
        # opt carries hyper-parameters: n_blocks, pool_size, lr, beta1/beta2,
        # lambda_A/lambda_B/lambda_identity (read in backward_G).
        self.opt = opt
        self.device = "cuda"  # NOTE(review): device is hard-coded; CPU runs would need a change here
        self.netG_A = networks.ResnetGenerator(n_blocks=opt.n_blocks).to(self.device)
        self.netG_B = networks.ResnetGenerator(n_blocks=opt.n_blocks).to(self.device)
        self.netD_A = networks.NLayerDiscriminator().to(self.device)
        self.netD_B = networks.NLayerDiscriminator().to(self.device)
        # Image pools buffer previously generated fakes for discriminator updates.
        self.fake_A_pool = ImagePool(opt.pool_size)
        self.fake_B_pool = ImagePool(opt.pool_size)
        self.criterionGAN = networks.GANLoss("lsgan").to(self.device)
        self.criterionCycle = flow.nn.L1Loss()
        self.criterionIdt = flow.nn.L1Loss()
        # One optimizer for both generators, one for both discriminators.
        self.optimizer_G = flow.optim.Adam(
            itertools.chain(self.netG_A.parameters(), self.netG_B.parameters()),
            lr=opt.lr,
            betas=(opt.beta1, opt.beta2),
        )
        self.optimizer_D = flow.optim.Adam(
            itertools.chain(self.netD_A.parameters(), self.netD_B.parameters()),
            lr=opt.lr,
            betas=(opt.beta1, opt.beta2),
        )
        self.optimizers = [self.optimizer_G, self.optimizer_D]
        # Cosine annealing over 100 steps down to lr * alpha (0.0) for each optimizer.
        self.schedulers = [
            flow.optim.lr_scheduler.CosineAnnealingLR(optimizer, steps=100, alpha=0.0)
            for optimizer in self.optimizers
        ]

    def update_learning_rate(self):
        """Update learning rates for all the networks; called at the end of every epoch"""
        old_lr = self.optimizers[0].param_groups[0]["lr"]
        for scheduler in self.schedulers:
            scheduler.step()
        lr = self.optimizers[0].param_groups[0]["lr"]
        print("learning rate %.7f -> %.7f" % (old_lr, lr))

    def set_requires_grad(self, nets, requires_grad=False):
        """Set requires_grad=False for all the networks to avoid unnecessary computations
        Parameters:
            nets (network list)   -- a list of networks
            requires_grad (bool)  -- whether the networks require gradients or not
        """
        if not isinstance(nets, list):
            nets = [nets]
        for net in nets:
            if net is not None:
                for param in net.parameters():
                    param.requires_grad = requires_grad

    def set_input(self, image_A, image_B):
        """Unpack input data from the dataloader and perform necessary pre-processing steps.
        Parameters:
            image_A -- ndarray image from domain A
            image_B -- ndarray image from domain B
        """
        self.real_A = flow.Tensor(image_A).to(self.device)
        self.real_B = flow.Tensor(image_B).to(self.device)

    def forward(self):
        """Run forward pass; called by both functions <optimize_parameters> and <test>."""
        self.fake_B = self.netG_A(self.real_A)  # G_A(A)
        self.rec_A = self.netG_B(self.fake_B)  # G_B(G_A(A))
        self.fake_A = self.netG_B(self.real_B)  # G_B(B)
        self.rec_B = self.netG_A(self.fake_A)  # G_A(G_B(B))

    def backward_D_basic(self, netD, real, fake):
        """Calculate GAN loss for the discriminator
        Parameters:
            netD (network)      -- the discriminator D
            real (tensor array) -- real images
            fake (tensor array) -- images generated by a generator
        Return the discriminator loss.
        We also call loss_D.backward() to calculate the gradients.
        """
        # Real
        pred_real = netD(real)
        loss_D_real = self.criterionGAN(pred_real, True)
        # Fake — detach so gradients do not flow back into the generator.
        pred_fake = netD(fake.detach())
        loss_D_fake = self.criterionGAN(pred_fake, False)
        # Combined loss and calculate gradients (0.5 slows D relative to G).
        loss_D = (loss_D_real + loss_D_fake) * 0.5
        loss_D.backward()
        return loss_D

    def backward_D_A(self):
        """Calculate GAN loss for discriminator D_A"""
        # Query the history pool instead of always using the latest fake.
        fake_B = self.fake_B_pool.query(self.fake_B)
        self.loss_D_A = self.backward_D_basic(self.netD_A, self.real_B, fake_B)

    def backward_D_B(self):
        """Calculate GAN loss for discriminator D_B"""
        fake_A = self.fake_A_pool.query(self.fake_A)
        self.loss_D_B = self.backward_D_basic(self.netD_B, self.real_A, fake_A)

    def backward_G(self):
        """Calculate the loss for generators G_A and G_B"""
        lambda_idt = self.opt.lambda_identity
        lambda_A = self.opt.lambda_A
        lambda_B = self.opt.lambda_B
        # Identity loss (only when lambda_identity > 0)
        if lambda_idt > 0:
            # G_A should be identity if real_B is fed: ||G_A(B) - B||
            self.idt_A = self.netG_A(self.real_B)
            self.loss_idt_A = (
                self.criterionIdt(self.idt_A, self.real_B) * lambda_B * lambda_idt
            )
            # G_B should be identity if real_A is fed: ||G_B(A) - A||
            self.idt_B = self.netG_B(self.real_A)
            self.loss_idt_B = (
                self.criterionIdt(self.idt_B, self.real_A) * lambda_A * lambda_idt
            )
        else:
            self.loss_idt_A = 0
            self.loss_idt_B = 0
        # GAN loss D_A(G_A(A))
        self.loss_G_A = self.criterionGAN(self.netD_A(self.fake_B), True)
        # GAN loss D_B(G_B(B))
        self.loss_G_B = self.criterionGAN(self.netD_B(self.fake_A), True)
        # Forward cycle loss || G_B(G_A(A)) - A||
        self.loss_cycle_A = self.criterionCycle(self.rec_A, self.real_A) * lambda_A
        # Backward cycle loss || G_A(G_B(B)) - B||
        self.loss_cycle_B = self.criterionCycle(self.rec_B, self.real_B) * lambda_B
        # combined loss and calculate gradients
        self.loss_G = (
            self.loss_G_A
            + self.loss_G_B
            + self.loss_cycle_A
            + self.loss_cycle_B
            + self.loss_idt_A
            + self.loss_idt_B
        )
        self.loss_G.backward()

    def optimize_parameters(self):
        """Calculate losses, gradients, and update network weights; called in every training iteration"""
        # forward
        self.forward()  # compute fake images and reconstruction images.
        # G_A and G_B
        self.optimizer_G.zero_grad()  # set G_A and G_B's gradients to zero
        self.backward_G()  # calculate gradients for G_A and G_B
        self.optimizer_G.step()  # update G_A and G_B's weights
        # D_A and D_B
        self.optimizer_D.zero_grad()  # set D_A and D_B's gradients to zero
        self.backward_D_A()  # calculate gradients for D_A
        self.backward_D_B()  # calculate gradients for D_B
        self.optimizer_D.step()  # update D_A and D_B's weights

    def save_result(self, path):
        """Write a 2x3 grid image (real | fake | reconstruction, per domain) to `path`."""
        imageA = ndarray2image(self.real_A.numpy())
        imageB = ndarray2image(self.real_B.numpy())
        image_fake_B = ndarray2image(self.fake_B.numpy())
        image_rec_A = ndarray2image(self.rec_A.numpy())
        image_fake_A = ndarray2image(self.fake_A.numpy())
        image_rec_B = ndarray2image(self.rec_B.numpy())
        # Row 1: A -> fake B -> reconstructed A; row 2: B -> fake A -> reconstructed B.
        result1 = np.concatenate((imageA, image_fake_B, image_rec_A), axis=1)
        result2 = np.concatenate((imageB, image_fake_A, image_rec_B), axis=1)
        result = np.concatenate((result1, result2), axis=0)
        cv2.imwrite(path, result)

    def log_loss(self, epoch, iter):
        """Print the summed discriminator loss and the generator loss for this iteration."""
        print(
            "epoch: %d, iter: %d, d_loss: %f, g_loss: %f"
            % (
                epoch,
                iter,
                self.loss_D_A.numpy()[0] + self.loss_D_B.numpy()[0],
                self.loss_G.numpy()[0],
            )
        )
| [
"oneflow.Tensor",
"oneflow.nn.L1Loss",
"oneflow.optim.lr_scheduler.CosineAnnealingLR"
] | [((572, 596), 'image.ImagePool', 'ImagePool', (['opt.pool_size'], {}), '(opt.pool_size)\n', (581, 596), False, 'from image import ImagePool, ndarray2image\n'), ((624, 648), 'image.ImagePool', 'ImagePool', (['opt.pool_size'], {}), '(opt.pool_size)\n', (633, 648), False, 'from image import ImagePool, ndarray2image\n'), ((750, 766), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', ([], {}), '()\n', (764, 766), True, 'import oneflow as flow\n'), ((795, 811), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', ([], {}), '()\n', (809, 811), True, 'import oneflow as flow\n'), ((7098, 7157), 'numpy.concatenate', 'np.concatenate', (['(imageA, image_fake_B, image_rec_A)'], {'axis': '(1)'}), '((imageA, image_fake_B, image_rec_A), axis=1)\n', (7112, 7157), True, 'import numpy as np\n'), ((7176, 7235), 'numpy.concatenate', 'np.concatenate', (['(imageB, image_fake_A, image_rec_B)'], {'axis': '(1)'}), '((imageB, image_fake_A, image_rec_B), axis=1)\n', (7190, 7235), True, 'import numpy as np\n'), ((7253, 7295), 'numpy.concatenate', 'np.concatenate', (['(result1, result2)'], {'axis': '(0)'}), '((result1, result2), axis=0)\n', (7267, 7295), True, 'import numpy as np\n'), ((7304, 7329), 'cv2.imwrite', 'cv2.imwrite', (['path', 'result'], {}), '(path, result)\n', (7315, 7329), False, 'import cv2\n'), ((1316, 1390), 'oneflow.optim.lr_scheduler.CosineAnnealingLR', 'flow.optim.lr_scheduler.CosineAnnealingLR', (['optimizer'], {'steps': '(100)', 'alpha': '(0.0)'}), '(optimizer, steps=100, alpha=0.0)\n', (1357, 1390), True, 'import oneflow as flow\n'), ((255, 302), 'networks.ResnetGenerator', 'networks.ResnetGenerator', ([], {'n_blocks': 'opt.n_blocks'}), '(n_blocks=opt.n_blocks)\n', (279, 302), False, 'import networks\n'), ((341, 388), 'networks.ResnetGenerator', 'networks.ResnetGenerator', ([], {'n_blocks': 'opt.n_blocks'}), '(n_blocks=opt.n_blocks)\n', (365, 388), False, 'import networks\n'), ((428, 458), 'networks.NLayerDiscriminator', 'networks.NLayerDiscriminator', ([], {}), '()\n', (456, 458), False, 
'import networks\n'), ((497, 527), 'networks.NLayerDiscriminator', 'networks.NLayerDiscriminator', ([], {}), '()\n', (525, 527), False, 'import networks\n'), ((678, 703), 'networks.GANLoss', 'networks.GANLoss', (['"""lsgan"""'], {}), "('lsgan')\n", (694, 703), False, 'import networks\n'), ((2710, 2730), 'oneflow.Tensor', 'flow.Tensor', (['image_A'], {}), '(image_A)\n', (2721, 2730), True, 'import oneflow as flow\n'), ((2769, 2789), 'oneflow.Tensor', 'flow.Tensor', (['image_B'], {}), '(image_B)\n', (2780, 2789), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
import oneflow as flow
import numpy as np
import oneflow.typing as oft
import test_global_storage
from test_util import GenArgDict
def _test_logical_slice(
    test_case, var_shape, slice_tuples, split_axis, device_tag, flow_dtype, device_num
):
    """Slice a split-distributed variable with logical_slice and compare the
    result against plain numpy slicing of the same variable."""
    flow.clear_default_session()
    if device_tag == "gpu":
        flow.config.gpu_device_num(device_num)

    @flow.global_function()
    def slice_fn():
        # Place the variable across all requested devices, split on `split_axis`.
        with flow.scope.placement(device_tag, "0:0-{}".format(device_num - 1)):
            var = flow.get_variable(
                name="var",
                shape=var_shape,
                dtype=flow_dtype,
                initializer=flow.random_uniform_initializer(-10, 10, dtype=flow_dtype),
                distribute=flow.distribute.split(split_axis),
            )
            # Capture the full variable so the numpy baseline sees identical data.
            flow.watch(var, test_global_storage.Setter("var"))
            return flow.experimental.logical_slice(var, slice_tuples)

    result = slice_fn().get().numpy()
    baseline = test_global_storage.Get("var")
    slicers = tuple(slice(start, stop, step) for (start, stop, step) in slice_tuples)
    test_case.assertTrue(np.array_equal(result, baseline[slicers]))
class TestLogicalSlice(flow.unittest.TestCase):
    """Drive _test_logical_slice over split axes, devices, dtypes, device counts."""

    @flow.unittest.skip_unless_1n2d()
    def test_logical_slice_4dim_2d(test_case):
        var_shape = (30, 40, 20, 15)
        slice_tuples = [(10, 20, 3), (1, 30, 4), (3, 16, 2), (5, 11, 1)]
        arg_dict = OrderedDict()
        arg_dict["split_axis"] = [0, 1, 2, 3]
        arg_dict["device_tag"] = ["cpu", "gpu"]
        arg_dict["flow_dtype"] = [flow.float, flow.int8]
        arg_dict["device_num"] = [2]
        for kwargs in GenArgDict(arg_dict):
            _test_logical_slice(test_case, var_shape, slice_tuples, **kwargs)

    @flow.unittest.skip_unless_1n4d()
    def test_logical_slice_negative_start_stop_4dim_4d(test_case):
        # Exercise None / negative start and stop indices on 4 devices.
        var_shape = (30, 40, 20, 15)
        slice_tuples = [(10, None, 3), (1, -10, 4), (-15, -5, 2), (5, 11, 1)]
        arg_dict = OrderedDict()
        arg_dict["split_axis"] = [0, 1, 2, 3]
        arg_dict["device_tag"] = ["cpu", "gpu"]
        arg_dict["flow_dtype"] = [flow.float]
        arg_dict["device_num"] = [4]
        for kwargs in GenArgDict(arg_dict):
            _test_logical_slice(test_case, var_shape, slice_tuples, **kwargs)

    @flow.unittest.skip_unless_1n4d()
    def test_logical_slice_2dim_3d(test_case):
        # 2-D variable on 3 devices.
        var_shape = (30, 40)
        slice_tuples = [(10, 20, 3), (1, 30, 4)]
        arg_dict = OrderedDict()
        arg_dict["split_axis"] = [0, 1]
        arg_dict["device_tag"] = ["cpu", "gpu"]
        arg_dict["flow_dtype"] = [flow.float]
        arg_dict["device_num"] = [3]
        for kwargs in GenArgDict(arg_dict):
            _test_logical_slice(test_case, var_shape, slice_tuples, **kwargs)
| [
"oneflow.global_function",
"oneflow.distribute.split",
"oneflow.unittest.skip_unless_1n4d",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.clear_default_session",
"oneflow.config.gpu_device_num",
"oneflow.experimental.logical_slice",
"oneflow.random_uniform_initializer"
] | [((879, 907), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (905, 907), True, 'import oneflow as flow\n'), ((989, 1011), 'oneflow.global_function', 'flow.global_function', ([], {}), '()\n', (1009, 1011), True, 'import oneflow as flow\n'), ((1616, 1646), 'test_global_storage.Get', 'test_global_storage.Get', (['"""var"""'], {}), "('var')\n", (1639, 1646), False, 'import test_global_storage\n'), ((1876, 1908), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (1906, 1908), True, 'import oneflow as flow\n'), ((2411, 2443), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (2441, 2443), True, 'import oneflow as flow\n'), ((2960, 2992), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (2990, 2992), True, 'import oneflow as flow\n'), ((944, 982), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_num'], {}), '(device_num)\n', (970, 982), True, 'import oneflow as flow\n'), ((2085, 2098), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2096, 2098), False, 'from collections import OrderedDict\n'), ((2308, 2328), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2318, 2328), False, 'from test_util import GenArgDict\n'), ((2645, 2658), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2656, 2658), False, 'from collections import OrderedDict\n'), ((2857, 2877), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2867, 2877), False, 'from test_util import GenArgDict\n'), ((3137, 3150), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3148, 3150), False, 'from collections import OrderedDict\n'), ((3349, 3369), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (3359, 3369), False, 'from test_util import GenArgDict\n'), ((1489, 1539), 'oneflow.experimental.logical_slice', 
'flow.experimental.logical_slice', (['var', 'slice_tuples'], {}), '(var, slice_tuples)\n', (1520, 1539), True, 'import oneflow as flow\n'), ((1436, 1469), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""var"""'], {}), "('var')\n", (1462, 1469), False, 'import test_global_storage\n'), ((1272, 1330), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(-10)', '(10)'], {'dtype': 'flow_dtype'}), '(-10, 10, dtype=flow_dtype)\n', (1303, 1330), True, 'import oneflow as flow\n'), ((1359, 1392), 'oneflow.distribute.split', 'flow.distribute.split', (['split_axis'], {}), '(split_axis)\n', (1380, 1392), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow.experimental as flow
import unittest
import numpy as np
from collections import OrderedDict
from test_util import GenArgList
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestVariance(flow.unittest.TestCase):
    """Check Tensor.var and flow.var against numpy.var."""

    def test_variance(test_case):
        # Tensor-method form with keepdim=True.
        data = np.random.randn(2, 3, 4, 5)
        expected = np.var(data, 1, keepdims=True)
        actual = flow.Tensor(data).var(1, True)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))

    def test_variance_v2(test_case):
        # Functional form with keepdim=False.
        data = np.random.randn(4, 1, 3, 2)
        expected = np.var(data, 2, keepdims=False)
        actual = flow.var(flow.Tensor(data), 2, False)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSin(flow.unittest.TestCase):
    """Check flow.sin and Tensor.sin against numpy.sin."""

    def test_sin(test_case):
        # Module-level functional form: flow.sin(tensor).
        x = flow.Tensor(np.random.randn(2, 6, 5, 3))
        expected = np.sin(x.numpy())
        actual = flow.sin(x)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))

    def test_sin_tensor_function(test_case):
        # Tensor-method form: tensor.sin().
        x = flow.Tensor(np.random.randn(8, 11, 9, 7))
        expected = np.sin(x.numpy())
        actual = x.sin()
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestCos(flow.unittest.TestCase):
    """Check flow.cos and Tensor.cos against numpy.cos."""

    def test_cos(test_case):
        # Both the functional and the method form must agree with numpy.
        x = flow.Tensor(np.random.randn(1, 3, 6), dtype=flow.float32)
        expected = np.cos(x.numpy())
        actual = flow.cos(x)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
        test_case.assertTrue(np.allclose(x.cos().numpy(), expected, 1e-5, 1e-5))

    def test_cos_tensor_function(test_case):
        data = np.random.randn(4, 5, 6, 7)
        x = flow.Tensor(data, dtype=flow.float32)
        expected = np.cos(data)
        actual = x.cos()
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLog(flow.unittest.TestCase):
    """Check flow.log and Tensor.log against numpy.log, including NaN behavior."""

    def test_log(test_case):
        # Random input may be negative, so NaNs are expected: compare with equal_nan.
        x = flow.Tensor(np.random.randn(2, 3, 4, 5), dtype=flow.float32)
        expected = np.log(x.numpy())
        actual = flow.log(x)
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )
        test_case.assertTrue(np.allclose(x.log().numpy(), expected, equal_nan=True))

    def test_log_nan_value(test_case):
        # log of strictly negative inputs must be NaN elementwise.
        arr = np.array([-0.7168, -0.5471, -0.8933, -1.4428, -0.1190])
        x = flow.Tensor(arr, dtype=flow.float32)
        expected = np.full((5,), np.nan)
        actual = flow.log(x)
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestStd(flow.unittest.TestCase):
    """Check flow.std and Tensor.std against numpy.std."""

    def test_std(test_case):
        # Functional form with a single reduction axis.
        data = np.random.randn(2, 3, 4, 5)
        expected = np.std(data, axis=2)
        actual = flow.std(flow.Tensor(data), dim=2)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))

    def test_std_tensor_function(test_case):
        # Tensor-method form with an explicit keepdim=False.
        data = np.random.randn(9, 8, 7, 6)
        expected = np.std(data, axis=1)
        actual = flow.Tensor(data).std(dim=1, keepdim=False)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))

    def test_std_negative_dim(test_case):
        # Multiple negative reduction axes.
        data = np.random.randn(4, 2, 3, 5)
        expected = np.std(data, axis=(-2, -1, -3))
        actual = flow.Tensor(data).std(dim=(-2, -1, -3), keepdim=False)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSqrt(flow.unittest.TestCase):
    """Check flow.sqrt and Tensor.sqrt against numpy.sqrt."""

    def test_sqrt(test_case):
        # Negative inputs yield NaN, so compare with equal_nan=True.
        data = np.random.randn(3, 2, 5, 7)
        expected = np.sqrt(data)
        actual = flow.sqrt(input=flow.Tensor(data))
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )

    def test_sqrt_tensor_function(test_case):
        # Tensor-method form.
        data = np.random.randn(1, 6, 3, 8)
        expected = np.sqrt(data)
        actual = flow.Tensor(data).sqrt()
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestRsqrt(flow.unittest.TestCase):
    """Check flow.rsqrt and Tensor.rsqrt against 1/numpy.sqrt."""

    def test_rsqrt(test_case):
        # Tensor-method form; NaNs from negative inputs compared with equal_nan.
        data = np.random.randn(2, 3, 4, 5)
        expected = 1 / np.sqrt(data)
        actual = flow.Tensor(data).rsqrt()
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )

    def test_rsqrt_tensor_function(test_case):
        # Functional form with an explicit keyword argument.
        data = np.random.randn(3, 2, 5, 7)
        expected = 1 / np.sqrt(data)
        actual = flow.rsqrt(input=flow.Tensor(data))
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestSquare(flow.unittest.TestCase):
    """Check flow.square and Tensor.square against numpy.square."""

    def test_square(test_case):
        # Functional form.
        data = np.random.randn(9, 4, 5, 6)
        expected = np.square(data)
        actual = flow.square(flow.Tensor(data))
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )

    def test_square_tensor_function(test_case):
        # Tensor-method form.
        data = np.random.randn(2, 7, 7, 3)
        expected = np.square(data)
        actual = flow.Tensor(data).square()
        test_case.assertTrue(
            np.allclose(actual.numpy(), expected, 1e-5, 1e-5, equal_nan=True)
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestPow(flow.unittest.TestCase):
    """Check flow.pow and Tensor.pow with a scalar exponent against numpy.power."""

    def test_pow(test_case):
        # Functional form with a float exponent.
        base = flow.Tensor(np.array([1, 2, 3, 4, 5, 6]), dtype=flow.float32)
        expected = np.power(base.numpy(), 2.1)
        actual = flow.pow(base, 2.1)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))

    def test_pow_tensor_function(test_case):
        # Tensor-method form.
        base = flow.Tensor(np.array([1, 2, 3, 4, 5, 6]), dtype=flow.float32)
        expected = np.power(base.numpy(), 2.1)
        actual = base.pow(2.1)
        test_case.assertTrue(np.allclose(actual.numpy(), expected, 1e-5, 1e-5))
if __name__ == "__main__":
    # Discover and run every TestCase defined in this module.
    unittest.main()
| [
"oneflow.experimental.rsqrt",
"oneflow.experimental.pow",
"oneflow.experimental.sin",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.cos",
"oneflow.experimental.std",
"oneflow.experimental.log",
"oneflow.experimental.Tensor",
"oneflow.experimental.square",
"onef... | [((7824, 7839), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7837, 7839), False, 'import unittest\n'), ((946, 973), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (961, 973), True, 'import numpy as np\n'), ((1044, 1079), 'numpy.var', 'np.var', (['input_arr', '(1)'], {'keepdims': '(True)'}), '(input_arr, 1, keepdims=True)\n', (1050, 1079), True, 'import numpy as np\n'), ((1216, 1243), 'numpy.random.randn', 'np.random.randn', (['(4)', '(1)', '(3)', '(2)'], {}), '(4, 1, 3, 2)\n', (1231, 1243), True, 'import numpy as np\n'), ((1321, 1357), 'numpy.var', 'np.var', (['input_arr', '(2)'], {'keepdims': '(False)'}), '(input_arr, 2, keepdims=False)\n', (1327, 1357), True, 'import numpy as np\n'), ((759, 802), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (800, 802), True, 'import oneflow.experimental as flow\n'), ((1695, 1710), 'oneflow.experimental.sin', 'flow.sin', (['input'], {}), '(input)\n', (1703, 1710), True, 'import oneflow.experimental as flow\n'), ((1464, 1507), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (1505, 1507), True, 'import oneflow.experimental as flow\n'), ((2354, 2369), 'oneflow.experimental.cos', 'flow.cos', (['input'], {}), '(input)\n', (2362, 2369), True, 'import oneflow.experimental as flow\n'), ((2630, 2657), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)', '(6)', '(7)'], {}), '(4, 5, 6, 7)\n', (2645, 2657), True, 'import numpy as np\n'), ((2674, 2710), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {'dtype': 'flow.float32'}), '(arr, dtype=flow.float32)\n', (2685, 2710), True, 'import oneflow.experimental as flow\n'), ((2728, 2739), 'numpy.cos', 'np.cos', (['arr'], {}), '(arr)\n', (2734, 2739), True, 'import numpy as np\n'), ((2106, 2149), 'oneflow.experimental.unittest.env.eager_execution_enabled', 
'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2147, 2149), True, 'import oneflow.experimental as flow\n'), ((3126, 3141), 'oneflow.experimental.log', 'flow.log', (['input'], {}), '(input)\n', (3134, 3141), True, 'import oneflow.experimental as flow\n'), ((3438, 3492), 'numpy.array', 'np.array', (['[-0.7168, -0.5471, -0.8933, -1.4428, -0.119]'], {}), '([-0.7168, -0.5471, -0.8933, -1.4428, -0.119])\n', (3446, 3492), True, 'import numpy as np\n'), ((3510, 3546), 'oneflow.experimental.Tensor', 'flow.Tensor', (['arr'], {'dtype': 'flow.float32'}), '(arr, dtype=flow.float32)\n', (3521, 3546), True, 'import oneflow.experimental as flow\n'), ((3564, 3585), 'numpy.full', 'np.full', (['(5,)', 'np.nan'], {}), '((5,), np.nan)\n', (3571, 3585), True, 'import numpy as np\n'), ((3603, 3618), 'oneflow.experimental.log', 'flow.log', (['input'], {}), '(input)\n', (3611, 3618), True, 'import oneflow.experimental as flow\n'), ((2875, 2918), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (2916, 2918), True, 'import oneflow.experimental as flow\n'), ((3937, 3964), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (3952, 3964), True, 'import numpy as np\n'), ((3981, 4000), 'oneflow.experimental.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', (3992, 4000), True, 'import oneflow.experimental as flow\n'), ((4018, 4040), 'oneflow.experimental.std', 'flow.std', (['input'], {'dim': '(2)'}), '(input, dim=2)\n', (4026, 4040), True, 'import oneflow.experimental as flow\n'), ((4058, 4080), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(2)'}), '(np_arr, axis=2)\n', (4064, 4080), True, 'import numpy as np\n'), ((4223, 4250), 'numpy.random.randn', 'np.random.randn', (['(9)', '(8)', '(7)', '(6)'], {}), '(9, 8, 7, 6)\n', (4238, 4250), True, 'import numpy as np\n'), ((4267, 4286), 'oneflow.experimental.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', 
(4278, 4286), True, 'import oneflow.experimental as flow\n'), ((4353, 4375), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(1)'}), '(np_arr, axis=1)\n', (4359, 4375), True, 'import numpy as np\n'), ((4514, 4541), 'numpy.random.randn', 'np.random.randn', (['(4)', '(2)', '(3)', '(5)'], {}), '(4, 2, 3, 5)\n', (4529, 4541), True, 'import numpy as np\n'), ((4558, 4577), 'oneflow.experimental.Tensor', 'flow.Tensor', (['np_arr'], {}), '(np_arr)\n', (4569, 4577), True, 'import oneflow.experimental as flow\n'), ((4655, 4688), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(-2, -1, -3)'}), '(np_arr, axis=(-2, -1, -3))\n', (4661, 4688), True, 'import numpy as np\n'), ((3763, 3806), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3804, 3806), True, 'import oneflow.experimental as flow\n'), ((4974, 5001), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)', '(5)', '(7)'], {}), '(3, 2, 5, 7)\n', (4989, 5001), True, 'import numpy as np\n'), ((5019, 5037), 'numpy.sqrt', 'np.sqrt', (['input_arr'], {}), '(input_arr)\n', (5026, 5037), True, 'import numpy as np\n'), ((5050, 5072), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (5061, 5072), True, 'import oneflow.experimental as flow\n'), ((5090, 5108), 'oneflow.experimental.sqrt', 'flow.sqrt', ([], {'input': 'x'}), '(input=x)\n', (5099, 5108), True, 'import oneflow.experimental as flow\n'), ((5292, 5319), 'numpy.random.randn', 'np.random.randn', (['(1)', '(6)', '(3)', '(8)'], {}), '(1, 6, 3, 8)\n', (5307, 5319), True, 'import numpy as np\n'), ((5337, 5355), 'numpy.sqrt', 'np.sqrt', (['input_arr'], {}), '(input_arr)\n', (5344, 5355), True, 'import numpy as np\n'), ((5368, 5390), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (5379, 5390), True, 'import oneflow.experimental as flow\n'), ((4795, 4838), 'oneflow.experimental.unittest.env.eager_execution_enabled', 
'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (4836, 4838), True, 'import oneflow.experimental as flow\n'), ((5742, 5769), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (5757, 5769), True, 'import numpy as np\n'), ((5822, 5844), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (5833, 5844), True, 'import oneflow.experimental as flow\n'), ((6056, 6083), 'numpy.random.randn', 'np.random.randn', (['(3)', '(2)', '(5)', '(7)'], {}), '(3, 2, 5, 7)\n', (6071, 6083), True, 'import numpy as np\n'), ((6136, 6158), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (6147, 6158), True, 'import oneflow.experimental as flow\n'), ((6176, 6195), 'oneflow.experimental.rsqrt', 'flow.rsqrt', ([], {'input': 'x'}), '(input=x)\n', (6186, 6195), True, 'import oneflow.experimental as flow\n'), ((5561, 5604), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (5602, 5604), True, 'import oneflow.experimental as flow\n'), ((6523, 6550), 'numpy.random.randn', 'np.random.randn', (['(9)', '(4)', '(5)', '(6)'], {}), '(9, 4, 5, 6)\n', (6538, 6550), True, 'import numpy as np\n'), ((6568, 6588), 'numpy.square', 'np.square', (['input_arr'], {}), '(input_arr)\n', (6577, 6588), True, 'import numpy as np\n'), ((6601, 6623), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (6612, 6623), True, 'import oneflow.experimental as flow\n'), ((6641, 6655), 'oneflow.experimental.square', 'flow.square', (['x'], {}), '(x)\n', (6652, 6655), True, 'import oneflow.experimental as flow\n'), ((6841, 6868), 'numpy.random.randn', 'np.random.randn', (['(2)', '(7)', '(7)', '(3)'], {}), '(2, 7, 7, 3)\n', (6856, 6868), True, 'import numpy as np\n'), ((6886, 6906), 'numpy.square', 'np.square', (['input_arr'], {}), '(input_arr)\n', (6895, 6906), True, 'import numpy as np\n'), ((6919, 
6941), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (6930, 6941), True, 'import oneflow.experimental as flow\n'), ((6340, 6383), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (6381, 6383), True, 'import oneflow.experimental as flow\n'), ((7366, 7386), 'oneflow.experimental.pow', 'flow.pow', (['input', '(2.1)'], {}), '(input, 2.1)\n', (7374, 7386), True, 'import oneflow.experimental as flow\n'), ((7114, 7157), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (7155, 7157), True, 'import oneflow.experimental as flow\n'), ((1270, 1292), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (1281, 1292), True, 'import oneflow.experimental as flow\n'), ((1649, 1676), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1664, 1676), True, 'import numpy as np\n'), ((1902, 1930), 'numpy.random.randn', 'np.random.randn', (['(8)', '(11)', '(9)', '(7)'], {}), '(8, 11, 9, 7)\n', (1917, 1930), True, 'import numpy as np\n'), ((2291, 2315), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(6)'], {}), '(1, 3, 6)\n', (2306, 2315), True, 'import numpy as np\n'), ((3060, 3087), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (3075, 3087), True, 'import numpy as np\n'), ((5791, 5809), 'numpy.sqrt', 'np.sqrt', (['input_arr'], {}), '(input_arr)\n', (5798, 5809), True, 'import numpy as np\n'), ((6105, 6123), 'numpy.sqrt', 'np.sqrt', (['input_arr'], {}), '(input_arr)\n', (6112, 6123), True, 'import numpy as np\n'), ((7299, 7327), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (7307, 7327), True, 'import numpy as np\n'), ((7585, 7613), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (7593, 7613), True, 
'import numpy as np\n'), ((991, 1013), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (1002, 1013), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _test_split_to_split_enable_all_to_all(test_case, src_axis, dst_axis):
    """Boxing S(src_axis) -> S(dst_axis) across 2 GPUs must be a pure relayout.

    With NCCL running on the compute stream and dst-parallel group boxing
    disabled, re-splitting a tensor along a different axis must not change
    any element value.
    """
    flow.clear_default_session()
    flow.config.gpu_device_num(2)
    flow.config.nccl_use_compute_stream(True)
    flow.config.disable_group_boxing_by_dst_parallel(True)
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=cfg)
    def split_to_split_job(x: oft.Numpy.Placeholder((32, 16, 64, 48))):
        with flow.scope.placement("gpu", "0:0-1"):
            # Pin the source SBP, then request a different split axis on the
            # consumer side to force an all-to-all boxing between the two ops.
            as_src = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
            relaid = flow.identity(as_src.with_distribute(flow.distribute.split(dst_axis)))
        return relaid

    input_np = np.random.rand(32, 16, 64, 48).astype(np.float32)
    output_np = split_to_split_job(input_np).get().numpy()
    # A pure SBP change must be value-preserving, bit for bit.
    test_case.assertTrue(np.array_equal(input_np, output_np))
def _test_split_to_broadcast(test_case, src_axis):
    """Boxing S(src_axis) -> B across 2 GPUs must reproduce the input exactly."""
    flow.clear_default_session()
    flow.config.gpu_device_num(2)
    flow.config.nccl_use_compute_stream(True)
    flow.config.disable_group_boxing_by_dst_parallel(True)
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=cfg)
    def split_to_broadcast_job(x: oft.Numpy.Placeholder((96, 96))):
        with flow.scope.placement("gpu", "0:0-1"):
            # Source is split along src_axis; the consumer asks for a full
            # broadcast copy on every device (an all-gather under the hood).
            sliced = flow.identity(x.with_distribute(flow.distribute.split(src_axis)))
            gathered = flow.identity(sliced.with_distribute(flow.distribute.broadcast()))
        return gathered

    input_np = np.random.rand(96, 96).astype(np.float32)
    output_np = split_to_broadcast_job(input_np).get().numpy()
    # Gathering the split pieces must reassemble the original tensor exactly.
    test_case.assertTrue(np.array_equal(input_np, output_np))
def _test_partial_sum_to_split(test_case, dst_axis):
    """Boxing P (partial-sum) -> S(dst_axis) must equal the full reduce-sum."""
    flow.clear_default_session()
    flow.config.gpu_device_num(2)
    flow.config.nccl_use_compute_stream(True)
    flow.config.disable_group_boxing_by_dst_parallel(True)
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=cfg)
    def partial_sum_to_split_job(x: oft.Numpy.Placeholder((96, 96, 96))):
        with flow.scope.placement("gpu", "0:0-1"):
            # reduce_sum over the split axis yields a partial-sum (P) tensor;
            # requesting S(dst_axis) downstream forces a reduce-scatter boxing.
            sliced = flow.identity(x.with_distribute(flow.distribute.split(0)))
            partial = flow.math.reduce_sum(sliced, axis=0)
            resplit = flow.identity(partial.with_distribute(flow.distribute.split(dst_axis)))
        return resplit

    # Tiny magnitudes keep the float32 sum well inside allclose tolerance.
    input_np = np.random.uniform(-1e-05, 1e-05, (96, 96, 96)).astype(np.float32)
    output_np = partial_sum_to_split_job(input_np).get().numpy()
    test_case.assertTrue(np.allclose(np.sum(input_np, axis=0), output_np))
def _test_partial_sum_to_broadcast(test_case):
    """Boxing P (partial-sum) -> B must equal the full reduce-sum everywhere."""
    flow.clear_default_session()
    flow.config.gpu_device_num(2)
    flow.config.nccl_use_compute_stream(True)
    flow.config.disable_group_boxing_by_dst_parallel(True)
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=cfg)
    def partial_sum_to_broadcast_job(x: oft.Numpy.Placeholder((96, 96, 96))):
        with flow.scope.placement("gpu", "0:0-1"):
            # reduce_sum over the split axis leaves a partial-sum (P) tensor;
            # requesting broadcast downstream forces an all-reduce boxing.
            sliced = flow.identity(x.with_distribute(flow.distribute.split(0)))
            partial = flow.math.reduce_sum(sliced, axis=0)
            reduced = flow.identity(partial.with_distribute(flow.distribute.broadcast()))
        return reduced

    # Tiny magnitudes keep the float32 sum well inside allclose tolerance.
    input_np = np.random.uniform(-1e-05, 1e-05, (96, 96, 96)).astype(np.float32)
    output_np = partial_sum_to_broadcast_job(input_np).get().numpy()
    test_case.assertTrue(np.allclose(np.sum(input_np, axis=0), output_np))
@flow.unittest.skip_unless_1n2d()
class TestNcclUseComputeStream(flow.unittest.TestCase):
    """Drive every SBP-transition helper over its parameter grid (needs 1n2d)."""

    def test_split_to_split_all_to_all(test_case):
        axes = [0, 1, 2, 3]
        params = OrderedDict(src_axis=axes, dst_axis=axes)
        for src_axis, dst_axis in GenArgList(params):
            # Identical axes would be a no-op, not an all-to-all; skip them.
            if src_axis != dst_axis:
                _test_split_to_split_enable_all_to_all(test_case, src_axis, dst_axis)

    def test_split_to_broadcast(test_case):
        for (src_axis,) in GenArgList(OrderedDict(src_axis=[0, 1])):
            _test_split_to_broadcast(test_case, src_axis)

    def test_partial_sum_to_split(test_case):
        for (dst_axis,) in GenArgList(OrderedDict(dst_axis=[0, 1])):
            _test_partial_sum_to_split(test_case, dst_axis)

    def test_partial_sum_to_broadcast(test_case):
        _test_partial_sum_to_broadcast(test_case)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.unittest.skip_unless_1n2d",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_clie... | [((4623, 4655), 'oneflow.compatible.single_client.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (4653, 4655), True, 'from oneflow.compatible import single_client as flow\n'), ((939, 967), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (965, 967), True, 'from oneflow.compatible import single_client as flow\n'), ((972, 1001), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (998, 1001), True, 'from oneflow.compatible import single_client as flow\n'), ((1006, 1047), 'oneflow.compatible.single_client.config.nccl_use_compute_stream', 'flow.config.nccl_use_compute_stream', (['(True)'], {}), '(True)\n', (1041, 1047), True, 'from oneflow.compatible import single_client as flow\n'), ((1052, 1106), 'oneflow.compatible.single_client.config.disable_group_boxing_by_dst_parallel', 'flow.config.disable_group_boxing_by_dst_parallel', (['(True)'], {}), '(True)\n', (1100, 1106), True, 'from oneflow.compatible import single_client as flow\n'), ((1125, 1146), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1144, 1146), True, 'from oneflow.compatible import single_client as flow\n'), ((1266, 1315), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1286, 1315), True, 'from oneflow.compatible import single_client as flow\n'), ((1835, 1863), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1861, 1863), True, 'from oneflow.compatible import single_client as flow\n'), ((1868, 1897), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (1894, 1897), True, 'from oneflow.compatible import single_client as flow\n'), ((1902, 1943), 
'oneflow.compatible.single_client.config.nccl_use_compute_stream', 'flow.config.nccl_use_compute_stream', (['(True)'], {}), '(True)\n', (1937, 1943), True, 'from oneflow.compatible import single_client as flow\n'), ((1948, 2002), 'oneflow.compatible.single_client.config.disable_group_boxing_by_dst_parallel', 'flow.config.disable_group_boxing_by_dst_parallel', (['(True)'], {}), '(True)\n', (1996, 2002), True, 'from oneflow.compatible import single_client as flow\n'), ((2021, 2042), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2040, 2042), True, 'from oneflow.compatible import single_client as flow\n'), ((2162, 2211), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2182, 2211), True, 'from oneflow.compatible import single_client as flow\n'), ((2721, 2749), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2747, 2749), True, 'from oneflow.compatible import single_client as flow\n'), ((2754, 2783), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (2780, 2783), True, 'from oneflow.compatible import single_client as flow\n'), ((2788, 2829), 'oneflow.compatible.single_client.config.nccl_use_compute_stream', 'flow.config.nccl_use_compute_stream', (['(True)'], {}), '(True)\n', (2823, 2829), True, 'from oneflow.compatible import single_client as flow\n'), ((2834, 2888), 'oneflow.compatible.single_client.config.disable_group_boxing_by_dst_parallel', 'flow.config.disable_group_boxing_by_dst_parallel', (['(True)'], {}), '(True)\n', (2882, 2888), True, 'from oneflow.compatible import single_client as flow\n'), ((2907, 2928), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2926, 2928), True, 'from oneflow.compatible import single_client as flow\n'), ((3048, 3097), 
'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3068, 3097), True, 'from oneflow.compatible import single_client as flow\n'), ((3695, 3723), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3721, 3723), True, 'from oneflow.compatible import single_client as flow\n'), ((3728, 3757), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (3754, 3757), True, 'from oneflow.compatible import single_client as flow\n'), ((3762, 3803), 'oneflow.compatible.single_client.config.nccl_use_compute_stream', 'flow.config.nccl_use_compute_stream', (['(True)'], {}), '(True)\n', (3797, 3803), True, 'from oneflow.compatible import single_client as flow\n'), ((3808, 3862), 'oneflow.compatible.single_client.config.disable_group_boxing_by_dst_parallel', 'flow.config.disable_group_boxing_by_dst_parallel', (['(True)'], {}), '(True)\n', (3856, 3862), True, 'from oneflow.compatible import single_client as flow\n'), ((3881, 3902), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3900, 3902), True, 'from oneflow.compatible import single_client as flow\n'), ((4022, 4071), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4042, 4071), True, 'from oneflow.compatible import single_client as flow\n'), ((5654, 5669), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5667, 5669), False, 'import unittest\n'), ((1230, 1258), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1256, 1258), True, 'from oneflow.compatible import single_client as flow\n'), ((1756, 1776), 'numpy.array_equal', 'np.array_equal', (['x', 'y'], {}), '(x, y)\n', (1770, 1776), True, 'import numpy as np\n'), 
((2126, 2154), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2152, 2154), True, 'from oneflow.compatible import single_client as flow\n'), ((2640, 2660), 'numpy.array_equal', 'np.array_equal', (['x', 'y'], {}), '(x, y)\n', (2654, 2660), True, 'import numpy as np\n'), ((3012, 3040), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3038, 3040), True, 'from oneflow.compatible import single_client as flow\n'), ((3986, 4014), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (4012, 4014), True, 'from oneflow.compatible import single_client as flow\n'), ((4782, 4795), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4793, 4795), False, 'from collections import OrderedDict\n'), ((4903, 4923), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4913, 4923), False, 'from test_util import GenArgList\n'), ((5158, 5171), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5169, 5171), False, 'from collections import OrderedDict\n'), ((5229, 5249), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5239, 5249), False, 'from test_util import GenArgList\n'), ((5371, 5384), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5382, 5384), False, 'from collections import OrderedDict\n'), ((5442, 5462), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5452, 5462), False, 'from test_util import GenArgList\n'), ((1346, 1385), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(32, 16, 64, 48)'], {}), '((32, 16, 64, 48))\n', (1367, 1385), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1401, 1437), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-1"""'], {}), "('gpu', '0:0-1')\n", (1421, 1437), True, 
'from oneflow.compatible import single_client as flow\n'), ((1637, 1667), 'numpy.random.rand', 'np.random.rand', (['(32)', '(16)', '(64)', '(48)'], {}), '(32, 16, 64, 48)\n', (1651, 1667), True, 'import numpy as np\n'), ((2246, 2277), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(96, 96)'], {}), '((96, 96))\n', (2267, 2277), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2293, 2329), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-1"""'], {}), "('gpu', '0:0-1')\n", (2313, 2329), True, 'from oneflow.compatible import single_client as flow\n'), ((2525, 2547), 'numpy.random.rand', 'np.random.rand', (['(96)', '(96)'], {}), '(96, 96)\n', (2539, 2547), True, 'import numpy as np\n'), ((3134, 3169), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(96, 96, 96)'], {}), '((96, 96, 96))\n', (3155, 3169), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3185, 3221), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-1"""'], {}), "('gpu', '0:0-1')\n", (3205, 3221), True, 'from oneflow.compatible import single_client as flow\n'), ((3318, 3351), 'oneflow.compatible.single_client.math.reduce_sum', 'flow.math.reduce_sum', (['src'], {'axis': '(0)'}), '(src, axis=0)\n', (3338, 3351), True, 'from oneflow.compatible import single_client as flow\n'), ((3466, 3512), 'numpy.random.uniform', 'np.random.uniform', (['(-1e-05)', '(1e-05)', '(96, 96, 96)'], {}), '(-1e-05, 1e-05, (96, 96, 96))\n', (3483, 3512), True, 'import numpy as np\n'), ((3619, 3636), 'numpy.sum', 'np.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (3625, 3636), True, 'import numpy as np\n'), ((4112, 4147), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(96, 96, 96)'], {}), '((96, 96, 96))\n', (4133, 4147), True, 'from oneflow.compatible.single_client 
import typing as oft\n'), ((4163, 4199), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-1"""'], {}), "('gpu', '0:0-1')\n", (4183, 4199), True, 'from oneflow.compatible import single_client as flow\n'), ((4296, 4329), 'oneflow.compatible.single_client.math.reduce_sum', 'flow.math.reduce_sum', (['src'], {'axis': '(0)'}), '(src, axis=0)\n', (4316, 4329), True, 'from oneflow.compatible import single_client as flow\n'), ((4440, 4486), 'numpy.random.uniform', 'np.random.uniform', (['(-1e-05)', '(1e-05)', '(96, 96, 96)'], {}), '(-1e-05, 1e-05, (96, 96, 96))\n', (4457, 4486), True, 'import numpy as np\n'), ((4597, 4614), 'numpy.sum', 'np.sum', (['x'], {'axis': '(0)'}), '(x, axis=0)\n', (4603, 4614), True, 'import numpy as np\n'), ((1489, 1520), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['src_axis'], {}), '(src_axis)\n', (1510, 1520), True, 'from oneflow.compatible import single_client as flow\n'), ((1575, 1606), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['dst_axis'], {}), '(dst_axis)\n', (1596, 1606), True, 'from oneflow.compatible import single_client as flow\n'), ((2381, 2412), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['src_axis'], {}), '(src_axis)\n', (2402, 2412), True, 'from oneflow.compatible import single_client as flow\n'), ((2467, 2494), 'oneflow.compatible.single_client.distribute.broadcast', 'flow.distribute.broadcast', ([], {}), '()\n', (2492, 2494), True, 'from oneflow.compatible import single_client as flow\n'), ((3273, 3297), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['(0)'], {}), '(0)\n', (3294, 3297), True, 'from oneflow.compatible import single_client as flow\n'), ((3404, 3435), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['dst_axis'], {}), '(dst_axis)\n', (3425, 3435), True, 'from oneflow.compatible import single_client as 
flow\n'), ((4251, 4275), 'oneflow.compatible.single_client.distribute.split', 'flow.distribute.split', (['(0)'], {}), '(0)\n', (4272, 4275), True, 'from oneflow.compatible import single_client as flow\n'), ((4382, 4409), 'oneflow.compatible.single_client.distribute.broadcast', 'flow.distribute.broadcast', ([], {}), '()\n', (4407, 4409), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
import oneflow.unittest
def _test_groupnorm(test_case, device):
    """Check GroupNorm(num_groups=1, num_channels=2) forward on a fixed 4D input.

    Compares oneflow's output against precomputed reference values within
    1e-3 tolerance on the given device ("cpu" or "cuda").
    """
    # Fixed (2, 2, 3, 4) input fixture: (batch, channel, height, width).
    input_arr = np.array(
        [
            [
                [
                    [-0.8791, 0.2553, 0.7403, -0.2859],
                    [0.8006, -1.7701, -0.9617, 0.1705],
                    [0.2842, 1.7825, 0.3365, -0.8525],
                ],
                [
                    [0.7332, -0.0737, 0.7245, -0.6551],
                    [1.4461, -0.1827, 0.9737, -2.1571],
                    [0.4657, 0.7244, 0.3378, 0.1775],
                ],
            ],
            [
                [
                    [1.8896, 1.8686, 0.1896, 0.9817],
                    [-0.0671, 1.5569, 1.1449, 0.0086],
                    [-0.9468, -0.0124, 1.3227, -0.6567],
                ],
                [
                    [-0.8472, 1.3012, -1.1065, 0.9348],
                    [1.0346, 1.5703, 0.2419, -0.7048],
                    [0.6957, -0.4523, -0.8819, 1.0164],
                ],
            ],
        ],
        dtype=np.float32,
    )
    # Precomputed expected output (presumably generated with a reference
    # GroupNorm implementation — fixed fixture, do not regenerate casually).
    output = np.array(
        [
            [
                [
                    [-1.0548115, 0.18125379, 0.7097197, -0.4084487],
                    [0.77542377, -2.0256634, -1.1448141, 0.08885399],
                    [0.21274385, 1.845322, 0.26973096, -1.0258276],
                ],
                [
                    [0.7019834, -0.17723128, 0.6925037, -0.81073654],
                    [1.4787737, -0.2959999, 0.96403706, -2.4473464],
                    [0.4105099, 0.69239473, 0.2711475, 0.09648134],
                ],
            ],
            [
                [
                    [1.5438884, 1.5218256, -0.24213786, 0.5900453],
                    [-0.5118278, 1.1943525, 0.76150376, -0.43229714],
                    [-1.4360437, -0.4543598, 0.94830114, -1.1312639],
                ],
                [
                    [-1.3314037, 0.9257132, -1.6038253, 0.54077196],
                    [0.6456222, 1.2084305, -0.18719131, -1.1817979],
                    [0.28957263, -0.91652036, -1.3678597, 0.6265012],
                ],
            ],
        ],
        dtype=np.float32,
    )
    x = flow.tensor(input_arr, dtype=flow.float32, device=flow.device(device))
    # Single group over both channels => normalization over the whole sample.
    m = flow.nn.GroupNorm(num_groups=1, num_channels=2).to(device=flow.device(device))
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-03, 1e-03))
def _test_groupnorm_3d(test_case, device):
    """Check GroupNorm(num_groups=2, num_channels=2, affine=False) on a 5D input.

    Uses a fixed (2, 2, 2, 3, 4) volumetric fixture and compares against
    precomputed reference values within 1e-3 tolerance on the given device.
    """
    # Fixed (2, 2, 2, 3, 4) input fixture: (batch, channel, depth, height, width).
    input_arr = np.array(
        [
            [
                [
                    [
                        [1.04569761, 0.22863248, 1.42439335, 1.62249689],
                        [-0.80578825, -0.27276461, 1.04556507, 0.56864134],
                        [-1.24085419, -1.23960097, 0.33451416, -1.84820402],
                    ],
                    [
                        [-1.511261, 1.06157517, -0.26715858, -1.32888141],
                        [1.17976881, -0.07931171, 0.33910684, -1.93458573],
                        [-1.72659647, 0.79049652, 0.39102785, -1.16264882],
                    ],
                ],
                [
                    [
                        [0.30067973, -1.2912226, -0.61508225, 0.56454001],
                        [0.87074187, -1.69257376, 0.36119148, -0.31014289],
                        [0.20776964, 1.26195488, -1.37122193, -0.17945234],
                    ],
                    [
                        [-0.31112407, -0.80682631, 0.8233194, 0.6384975],
                        [0.57617527, 0.45505028, 1.68286151, -1.09590744],
                        [-1.18127546, -1.07529277, 0.52779943, 1.21755926],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.12832351, 1.05625455, -0.23253249, -0.64747611],
                        [-0.00738123, -1.41390089, -1.92664144, -0.21427625],
                        [-0.94631219, -0.86493989, 0.21026905, 0.24989732],
                    ],
                    [
                        [1.3859182, 1.72002107, 0.50091892, 1.04198896],
                        [0.71694594, 1.66417023, -1.63030052, 0.77182641],
                        [0.71545083, 1.96458366, -1.99031931, 1.3196714],
                    ],
                ],
                [
                    [
                        [1.80091702, 0.02834973, 0.82259214, -1.05597501],
                        [-0.58212207, 0.44205949, -0.14740003, -0.994508],
                        [1.14678114, -0.39196097, 1.2554798, -0.41829324],
                    ],
                    [
                        [-1.0153903, -0.25755713, -1.81756333, -1.06781159],
                        [1.79680841, -1.9107133, -0.64325796, -1.94640775],
                        [1.30671156, 1.20445339, -1.26262901, -0.79494188],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    # Precomputed expected output (presumably generated with a reference
    # GroupNorm implementation — fixed fixture, do not regenerate casually).
    output = np.array(
        [
            [
                [
                    [
                        [1.0670303, 0.3324034, 1.4075173, 1.5856332],
                        [-0.5976489, -0.11840499, 1.0669112, 0.6381069],
                        [-0.9888186, -0.9876919, 0.42760208, -1.5348896],
                    ],
                    [
                        [-1.2319425, 1.0813059, -0.11336456, -1.0679643],
                        [1.1875744, 0.05552938, 0.43173137, -1.6125557],
                        [-1.4255517, 0.8375778, 0.4784138, -0.9185038],
                    ],
                ],
                [
                    [
                        [0.3447361, -1.3750811, -0.6446106, 0.62979853],
                        [0.9606047, -1.8086823, 0.41011015, -0.3151683],
                        [0.24436034, 1.3832531, -1.4615086, -0.17397629],
                    ],
                    [
                        [-0.31622827, -0.8517619, 0.9093717, 0.7096987],
                        [0.6423687, 0.51151085, 1.8379811, -1.1640717],
                        [-1.2562994, -1.1418006, 0.59010565, 1.3352901],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.23265934, 0.8016156, -0.32364592, -0.6859402],
                        [-0.12706259, -1.3551185, -1.802801, -0.30770612],
                        [-0.946859, -0.8758114, 0.06297152, 0.09757163],
                    ],
                    [
                        [1.0894505, 1.3811613, 0.3167428, 0.78916013],
                        [0.50535965, 1.3323971, -1.5440607, 0.55327666],
                        [0.50405425, 1.5946931, -1.8583992, 1.0316093],
                    ],
                ],
                [
                    [
                        [1.7506906, 0.19012147, 0.8893728, -0.7645185],
                        [-0.3473382, 0.5543517, 0.03539129, -0.71040297],
                        [1.174789, -0.17992027, 1.2704874, -0.20310321],
                    ],
                    [
                        [-0.7287877, -0.06159106, -1.4350212, -0.7749395],
                        [1.7470733, -1.5170306, -0.40116227, -1.548456],
                        [1.3155918, 1.2255636, -0.9464568, -0.53470486],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    x = flow.tensor(input_arr, dtype=flow.float32, device=flow.device(device))
    # Two groups over two channels => one channel per group; affine disabled,
    # so the output is the pure normalized tensor.
    m = flow.nn.GroupNorm(num_groups=2, num_channels=2, affine=False).to(
        device=flow.device(device)
    )
    y = m(x)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-03, 1e-03))
def _test_groupnorm_backward(test_case, device):
    """Check GroupNorm backward: grad of sum(GroupNorm(x)) w.r.t. x is ~zero.

    Summing the normalized output makes the gradient vanish analytically
    (normalization is invariant to a uniform shift of its input), so the
    input gradient must be numerically zero within 1e-3.
    """
    # Same (2, 2, 3, 4) fixture as the forward test.
    input_arr = np.array(
        [
            [
                [
                    [-0.8791, 0.2553, 0.7403, -0.2859],
                    [0.8006, -1.7701, -0.9617, 0.1705],
                    [0.2842, 1.7825, 0.3365, -0.8525],
                ],
                [
                    [0.7332, -0.0737, 0.7245, -0.6551],
                    [1.4461, -0.1827, 0.9737, -2.1571],
                    [0.4657, 0.7244, 0.3378, 0.1775],
                ],
            ],
            [
                [
                    [1.8896, 1.8686, 0.1896, 0.9817],
                    [-0.0671, 1.5569, 1.1449, 0.0086],
                    [-0.9468, -0.0124, 1.3227, -0.6567],
                ],
                [
                    [-0.8472, 1.3012, -1.1065, 0.9348],
                    [1.0346, 1.5703, 0.2419, -0.7048],
                    [0.6957, -0.4523, -0.8819, 1.0164],
                ],
            ],
        ],
        dtype=np.float32,
    )
    x = flow.tensor(
        input_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    m = flow.nn.GroupNorm(num_groups=1, num_channels=2).to(device=flow.device(device))
    y = m(x)
    z = y.sum()
    z.backward()
    # Expected input gradient is (numerically) all zeros.
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), np.zeros(shape=input_arr.shape), 1e-03, 1e-03)
    )
def _test_groupnorm_backward_3d(test_case, device):
    """Check GroupNorm backward on 5D input: grad of sum(GroupNorm(x)) is ~zero.

    Same analytic property as the 4D backward test — summing the normalized
    output yields a vanishing input gradient — exercised with two groups and
    affine=False on a volumetric (2, 2, 2, 3, 4) fixture.
    """
    # Same 5D fixture as the forward 3d test.
    input_arr = np.array(
        [
            [
                [
                    [
                        [1.04569761, 0.22863248, 1.42439335, 1.62249689],
                        [-0.80578825, -0.27276461, 1.04556507, 0.56864134],
                        [-1.24085419, -1.23960097, 0.33451416, -1.84820402],
                    ],
                    [
                        [-1.511261, 1.06157517, -0.26715858, -1.32888141],
                        [1.17976881, -0.07931171, 0.33910684, -1.93458573],
                        [-1.72659647, 0.79049652, 0.39102785, -1.16264882],
                    ],
                ],
                [
                    [
                        [0.30067973, -1.2912226, -0.61508225, 0.56454001],
                        [0.87074187, -1.69257376, 0.36119148, -0.31014289],
                        [0.20776964, 1.26195488, -1.37122193, -0.17945234],
                    ],
                    [
                        [-0.31112407, -0.80682631, 0.8233194, 0.6384975],
                        [0.57617527, 0.45505028, 1.68286151, -1.09590744],
                        [-1.18127546, -1.07529277, 0.52779943, 1.21755926],
                    ],
                ],
            ],
            [
                [
                    [
                        [-0.12832351, 1.05625455, -0.23253249, -0.64747611],
                        [-0.00738123, -1.41390089, -1.92664144, -0.21427625],
                        [-0.94631219, -0.86493989, 0.21026905, 0.24989732],
                    ],
                    [
                        [1.3859182, 1.72002107, 0.50091892, 1.04198896],
                        [0.71694594, 1.66417023, -1.63030052, 0.77182641],
                        [0.71545083, 1.96458366, -1.99031931, 1.3196714],
                    ],
                ],
                [
                    [
                        [1.80091702, 0.02834973, 0.82259214, -1.05597501],
                        [-0.58212207, 0.44205949, -0.14740003, -0.994508],
                        [1.14678114, -0.39196097, 1.2554798, -0.41829324],
                    ],
                    [
                        [-1.0153903, -0.25755713, -1.81756333, -1.06781159],
                        [1.79680841, -1.9107133, -0.64325796, -1.94640775],
                        [1.30671156, 1.20445339, -1.26262901, -0.79494188],
                    ],
                ],
            ],
        ],
        dtype=np.float32,
    )
    x = flow.tensor(
        input_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    m = flow.nn.GroupNorm(num_groups=2, num_channels=2, affine=False).to(
        device=flow.device(device)
    )
    y = m(x)
    z = y.sum()
    z.backward()
    # Expected input gradient is (numerically) all zeros.
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), np.zeros(shape=input_arr.shape), 1e-03, 1e-03)
    )
@flow.unittest.skip_unless_1n1d()
class TestGroupNorm(flow.unittest.TestCase):
    def test_groupnorm(test_case):
        # Run every fixed-fixture GroupNorm check on both devices.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_groupnorm,
            _test_groupnorm_3d,
            _test_groupnorm_backward,
            _test_groupnorm_backward_3d,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            # arg[0] is the test function, arg[1:] its (device,) arguments.
            arg[0](test_case, *arg[1:])
    # autotest compares oneflow against PyTorch on randomized inputs/configs;
    # the random()/nothing() calls are the automated_test_util DSL, not
    # Python's random module.
    @autotest(rtol=1e-03, atol=1e-03, check_graph=True)
    def test_group_norm_with_random_data(test_case):
        channels = random(5, 20)
        m = torch.nn.GroupNorm(
            num_groups=random(1, 5),
            num_channels=channels,
            eps=random(0, 1) | nothing(),
            affine=random(),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor(ndim=4, dim1=channels).to(device)
        y = m(x)
        return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.nn.GroupNorm",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device"
] | [((12595, 12627), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (12625, 12627), True, 'import oneflow as flow\n'), ((855, 1349), 'numpy.array', 'np.array', (['[[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, 0.1705],\n [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245, -0.6551],\n [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, 0.1775]]],\n [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449, 0.0086],\n [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -1.1065, \n 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -0.8819, \n 1.0164]]]]'], {'dtype': 'np.float32'}), '([[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, \n 0.1705], [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245,\n -0.6551], [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, \n 0.1775]]], [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449,\n 0.0086], [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -\n 1.1065, 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -\n 0.8819, 1.0164]]]], dtype=np.float32)\n', (863, 1349), True, 'import numpy as np\n'), ((1803, 2476), 'numpy.array', 'np.array', (['[[[[-1.0548115, 0.18125379, 0.7097197, -0.4084487], [0.77542377, -2.0256634,\n -1.1448141, 0.08885399], [0.21274385, 1.845322, 0.26973096, -1.0258276]\n ], [[0.7019834, -0.17723128, 0.6925037, -0.81073654], [1.4787737, -\n 0.2959999, 0.96403706, -2.4473464], [0.4105099, 0.69239473, 0.2711475, \n 0.09648134]]], [[[1.5438884, 1.5218256, -0.24213786, 0.5900453], [-\n 0.5118278, 1.1943525, 0.76150376, -0.43229714], [-1.4360437, -0.4543598,\n 0.94830114, -1.1312639]], [[-1.3314037, 0.9257132, -1.6038253, \n 0.54077196], [0.6456222, 1.2084305, -0.18719131, -1.1817979], [\n 0.28957263, -0.91652036, -1.3678597, 0.6265012]]]]'], {'dtype': 'np.float32'}), '([[[[-1.0548115, 0.18125379, 0.7097197, -0.4084487], [0.77542377, -\n 2.0256634, 
-1.1448141, 0.08885399], [0.21274385, 1.845322, 0.26973096, \n -1.0258276]], [[0.7019834, -0.17723128, 0.6925037, -0.81073654], [\n 1.4787737, -0.2959999, 0.96403706, -2.4473464], [0.4105099, 0.69239473,\n 0.2711475, 0.09648134]]], [[[1.5438884, 1.5218256, -0.24213786, \n 0.5900453], [-0.5118278, 1.1943525, 0.76150376, -0.43229714], [-\n 1.4360437, -0.4543598, 0.94830114, -1.1312639]], [[-1.3314037, \n 0.9257132, -1.6038253, 0.54077196], [0.6456222, 1.2084305, -0.18719131,\n -1.1817979], [0.28957263, -0.91652036, -1.3678597, 0.6265012]]]], dtype\n =np.float32)\n', (1811, 2476), True, 'import numpy as np\n'), ((3213, 4598), 'numpy.array', 'np.array', (['[[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825, -\n 0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]]'], {'dtype': 
'np.float32'}), '([[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825,\n -0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]],\n dtype=np.float32)\n', (3221, 4598), True, 'import numpy as np\n'), ((5668, 6980), 'numpy.array', 'np.array', (['[[[[[1.0670303, 0.3324034, 1.4075173, 1.5856332], [-0.5976489, -0.11840499,\n 1.0669112, 0.6381069], [-0.9888186, -0.9876919, 0.42760208, -1.5348896]\n ], [[-1.2319425, 1.0813059, -0.11336456, -1.0679643], [1.1875744, \n 0.05552938, 0.43173137, -1.6125557], [-1.4255517, 0.8375778, 0.4784138,\n -0.9185038]]], [[[0.3447361, -1.3750811, -0.6446106, 0.62979853], [\n 0.9606047, -1.8086823, 0.41011015, -0.3151683], [0.24436034, 1.3832531,\n -1.4615086, -0.17397629]], [[-0.31622827, -0.8517619, 0.9093717, \n 0.7096987], [0.6423687, 0.51151085, 
1.8379811, -1.1640717], [-1.2562994,\n -1.1418006, 0.59010565, 1.3352901]]]], [[[[-0.23265934, 0.8016156, -\n 0.32364592, -0.6859402], [-0.12706259, -1.3551185, -1.802801, -\n 0.30770612], [-0.946859, -0.8758114, 0.06297152, 0.09757163]], [[\n 1.0894505, 1.3811613, 0.3167428, 0.78916013], [0.50535965, 1.3323971, -\n 1.5440607, 0.55327666], [0.50405425, 1.5946931, -1.8583992, 1.0316093]]\n ], [[[1.7506906, 0.19012147, 0.8893728, -0.7645185], [-0.3473382, \n 0.5543517, 0.03539129, -0.71040297], [1.174789, -0.17992027, 1.2704874,\n -0.20310321]], [[-0.7287877, -0.06159106, -1.4350212, -0.7749395], [\n 1.7470733, -1.5170306, -0.40116227, -1.548456], [1.3155918, 1.2255636, \n -0.9464568, -0.53470486]]]]]'], {'dtype': 'np.float32'}), '([[[[[1.0670303, 0.3324034, 1.4075173, 1.5856332], [-0.5976489, -\n 0.11840499, 1.0669112, 0.6381069], [-0.9888186, -0.9876919, 0.42760208,\n -1.5348896]], [[-1.2319425, 1.0813059, -0.11336456, -1.0679643], [\n 1.1875744, 0.05552938, 0.43173137, -1.6125557], [-1.4255517, 0.8375778,\n 0.4784138, -0.9185038]]], [[[0.3447361, -1.3750811, -0.6446106, \n 0.62979853], [0.9606047, -1.8086823, 0.41011015, -0.3151683], [\n 0.24436034, 1.3832531, -1.4615086, -0.17397629]], [[-0.31622827, -\n 0.8517619, 0.9093717, 0.7096987], [0.6423687, 0.51151085, 1.8379811, -\n 1.1640717], [-1.2562994, -1.1418006, 0.59010565, 1.3352901]]]], [[[[-\n 0.23265934, 0.8016156, -0.32364592, -0.6859402], [-0.12706259, -\n 1.3551185, -1.802801, -0.30770612], [-0.946859, -0.8758114, 0.06297152,\n 0.09757163]], [[1.0894505, 1.3811613, 0.3167428, 0.78916013], [\n 0.50535965, 1.3323971, -1.5440607, 0.55327666], [0.50405425, 1.5946931,\n -1.8583992, 1.0316093]]], [[[1.7506906, 0.19012147, 0.8893728, -\n 0.7645185], [-0.3473382, 0.5543517, 0.03539129, -0.71040297], [1.174789,\n -0.17992027, 1.2704874, -0.20310321]], [[-0.7287877, -0.06159106, -\n 1.4350212, -0.7749395], [1.7470733, -1.5170306, -0.40116227, -1.548456],\n [1.3155918, 1.2255636, -0.9464568, -0.53470486]]]]], 
dtype=np.float32)\n', (5676, 6980), True, 'import numpy as np\n'), ((8395, 8889), 'numpy.array', 'np.array', (['[[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, 0.1705],\n [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245, -0.6551],\n [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, 0.1775]]],\n [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449, 0.0086],\n [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -1.1065, \n 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -0.8819, \n 1.0164]]]]'], {'dtype': 'np.float32'}), '([[[[-0.8791, 0.2553, 0.7403, -0.2859], [0.8006, -1.7701, -0.9617, \n 0.1705], [0.2842, 1.7825, 0.3365, -0.8525]], [[0.7332, -0.0737, 0.7245,\n -0.6551], [1.4461, -0.1827, 0.9737, -2.1571], [0.4657, 0.7244, 0.3378, \n 0.1775]]], [[[1.8896, 1.8686, 0.1896, 0.9817], [-0.0671, 1.5569, 1.1449,\n 0.0086], [-0.9468, -0.0124, 1.3227, -0.6567]], [[-0.8472, 1.3012, -\n 1.1065, 0.9348], [1.0346, 1.5703, 0.2419, -0.7048], [0.6957, -0.4523, -\n 0.8819, 1.0164]]]], dtype=np.float32)\n', (8403, 8889), True, 'import numpy as np\n'), ((9761, 11146), 'numpy.array', 'np.array', (['[[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825, -\n 0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 
1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]]'], {'dtype': 'np.float32'}), '([[[[[1.04569761, 0.22863248, 1.42439335, 1.62249689], [-0.80578825,\n -0.27276461, 1.04556507, 0.56864134], [-1.24085419, -1.23960097, \n 0.33451416, -1.84820402]], [[-1.511261, 1.06157517, -0.26715858, -\n 1.32888141], [1.17976881, -0.07931171, 0.33910684, -1.93458573], [-\n 1.72659647, 0.79049652, 0.39102785, -1.16264882]]], [[[0.30067973, -\n 1.2912226, -0.61508225, 0.56454001], [0.87074187, -1.69257376, \n 0.36119148, -0.31014289], [0.20776964, 1.26195488, -1.37122193, -\n 0.17945234]], [[-0.31112407, -0.80682631, 0.8233194, 0.6384975], [\n 0.57617527, 0.45505028, 1.68286151, -1.09590744], [-1.18127546, -\n 1.07529277, 0.52779943, 1.21755926]]]], [[[[-0.12832351, 1.05625455, -\n 0.23253249, -0.64747611], [-0.00738123, -1.41390089, -1.92664144, -\n 0.21427625], [-0.94631219, -0.86493989, 0.21026905, 0.24989732]], [[\n 1.3859182, 1.72002107, 0.50091892, 1.04198896], [0.71694594, 1.66417023,\n -1.63030052, 0.77182641], [0.71545083, 1.96458366, -1.99031931, \n 1.3196714]]], [[[1.80091702, 0.02834973, 0.82259214, -1.05597501], [-\n 0.58212207, 0.44205949, -0.14740003, -0.994508], [1.14678114, -\n 0.39196097, 1.2554798, -0.41829324]], [[-1.0153903, -0.25755713, -\n 1.81756333, -1.06781159], [1.79680841, -1.9107133, -0.64325796, -\n 1.94640775], [1.30671156, 1.20445339, -1.26262901, -0.79494188]]]]],\n dtype=np.float32)\n', (9769, 11146), True, 'import numpy as np\n'), ((13593, 13608), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13606, 13608), False, 
'import unittest\n'), ((12727, 12740), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12738, 12740), False, 'from collections import OrderedDict\n'), ((12988, 13008), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (12998, 13008), False, 'from test_util import GenArgList\n'), ((2960, 2979), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2971, 2979), True, 'import oneflow as flow\n'), ((2989, 3036), 'oneflow.nn.GroupNorm', 'flow.nn.GroupNorm', ([], {'num_groups': '(1)', 'num_channels': '(2)'}), '(num_groups=1, num_channels=2)\n', (3006, 3036), True, 'import oneflow as flow\n'), ((3047, 3066), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (3058, 3066), True, 'import oneflow as flow\n'), ((8108, 8127), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (8119, 8127), True, 'import oneflow as flow\n'), ((8137, 8198), 'oneflow.nn.GroupNorm', 'flow.nn.GroupNorm', ([], {'num_groups': '(2)', 'num_channels': '(2)', 'affine': '(False)'}), '(num_groups=2, num_channels=2, affine=False)\n', (8154, 8198), True, 'import oneflow as flow\n'), ((8218, 8237), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (8229, 8237), True, 'import oneflow as flow\n'), ((9397, 9416), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9408, 9416), True, 'import oneflow as flow\n'), ((9451, 9498), 'oneflow.nn.GroupNorm', 'flow.nn.GroupNorm', ([], {'num_groups': '(1)', 'num_channels': '(2)'}), '(num_groups=1, num_channels=2)\n', (9468, 9498), True, 'import oneflow as flow\n'), ((9509, 9528), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (9520, 9528), True, 'import oneflow as flow\n'), ((9638, 9669), 'numpy.zeros', 'np.zeros', ([], {'shape': 'input_arr.shape'}), '(shape=input_arr.shape)\n', (9646, 9669), True, 'import numpy as np\n'), ((12270, 12289), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (12281, 12289), True, 'import oneflow as 
flow\n'), ((12324, 12385), 'oneflow.nn.GroupNorm', 'flow.nn.GroupNorm', ([], {'num_groups': '(2)', 'num_channels': '(2)', 'affine': '(False)'}), '(num_groups=2, num_channels=2, affine=False)\n', (12341, 12385), True, 'import oneflow as flow\n'), ((12405, 12424), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (12416, 12424), True, 'import oneflow as flow\n'), ((12539, 12570), 'numpy.zeros', 'np.zeros', ([], {'shape': 'input_arr.shape'}), '(shape=input_arr.shape)\n', (12547, 12570), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n2d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestStatefulKernelWithInpersistentState(flow.unittest.TestCase):
def test_stateful_kernel_with_inpersistent_state(test_case):
x = flow.arange(4).reshape(2, 2)
x = x.to_consistent(flow.env.all_device_placement("cuda"), flow.sbp.split(0))
y = flow._C.logical_slice(x, [0, 0], [3, 1], [1, 1])
y_np = np.array([[0], [2], [0]])
test_case.assertTrue(
np.array_equal(
y.to_consistent(sbp=flow.sbp.broadcast).to_local().numpy(), y_np
)
)
x = x.to_consistent(sbp=flow.sbp.split(1))
y = flow._C.logical_slice(x, [0, 0], [3, 1], [1, 1])
test_case.assertTrue(
np.array_equal(
y.to_consistent(sbp=flow.sbp.broadcast).to_local().numpy(), y_np
)
)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.arange",
"oneflow._C.logical_slice",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.sbp.split",
"oneflow.env.all_device_placement"
] | [((687, 719), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (717, 719), True, 'import oneflow as flow\n'), ((737, 771), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (746, 771), False, 'import os\n'), ((1632, 1647), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1645, 1647), False, 'import unittest\n'), ((1071, 1119), 'oneflow._C.logical_slice', 'flow._C.logical_slice', (['x', '[0, 0]', '[3, 1]', '[1, 1]'], {}), '(x, [0, 0], [3, 1], [1, 1])\n', (1092, 1119), True, 'import oneflow as flow\n'), ((1135, 1160), 'numpy.array', 'np.array', (['[[0], [2], [0]]'], {}), '([[0], [2], [0]])\n', (1143, 1160), True, 'import numpy as np\n'), ((1387, 1435), 'oneflow._C.logical_slice', 'flow._C.logical_slice', (['x', '[0, 0]', '[3, 1]', '[1, 1]'], {}), '(x, [0, 0], [3, 1], [1, 1])\n', (1408, 1435), True, 'import oneflow as flow\n'), ((1001, 1038), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (1030, 1038), True, 'import oneflow as flow\n'), ((1040, 1057), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1054, 1057), True, 'import oneflow as flow\n'), ((944, 958), 'oneflow.arange', 'flow.arange', (['(4)'], {}), '(4)\n', (955, 958), True, 'import oneflow as flow\n'), ((1356, 1373), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (1370, 1373), True, 'import oneflow as flow\n')] |
import argparse
import time
import os
import numpy as np
from tsn.models.TSN import TSN
from tsn.utils.checkpoint import load_checkpoint
import oneflow as flow
from tsn.datasets.transform import *
from tsn.datasets.dataset import TSNDataSet
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
def parse_args():
parser = argparse.ArgumentParser(description="Test an action recognizer")
parser.add_argument("--test_mode", default=False, help="train or test mode")
parser.add_argument("-load_checkpoint", default="", help="checkpoint file")
parser.add_argument(
"--data_dir", default="./data", help="data file path",
)
parser.add_argument(
"--pretrained",
default="./resnet50_imagenet_pretrain_model",
help="test videos prefix path",
)
parser.add_argument(
"--input_mean", default=[0.485, 0.456, 0.406], help="data normalization value"
)
parser.add_argument(
"--input_std", default=[0.229, 0.224, 0.225], help="data normalization value"
)
parser.add_argument("--epochs", default=100, help="max epochs for training")
parser.add_argument("--lr_steps", default=[40, 80], help="lr step")
parser.add_argument("--batch_size", default=4, help="imgs per gpu")
parser.add_argument(
"--save_checkpoint_path", default="./res", help="imgs per gpu",
)
parser.add_argument("--lr", default=0.0025, help="learning_rate")
parser.add_argument("--mom", default=0.9, help="momentum")
parser.add_argument("--weight_decay", default=0.000, help="weight decay")
# for spatial_temporal module
parser.add_argument(
"--spatial_type", default="avg", help="data normalization value"
)
parser.add_argument("--spatial_size", default=7, help="data normalization value")
# for segmental consensus
parser.add_argument(
"--consensus_type", default="avg", help="data normalization value"
)
# for class head
parser.add_argument(
"--spatial_feature_size", default=1, help="data normalization value"
)
parser.add_argument("--dropout_ratio", default=0.4, help="data normalization value")
parser.add_argument("--num_classes", default=400, help="data normalization value")
parser.add_argument("--out", help="output result file", default="default.pkl")
parser.add_argument(
"--use_softmax", action="store_true", help="whether to use softmax score"
)
# only for TSN3D
parser.add_argument(
"--fcn_testing", action="store_true", help="use fcn testing for 3D convnet"
)
parser.add_argument("--local_rank", type=int, default=0)
args = parser.parse_args()
return args
def top_k_hit(score, lb_set, k=3):
idx = np.argsort(score)[-k:]
return len(lb_set.intersection(idx)) > 0, 1
def top_k_accuracy(scores, labels, k=(1,)):
res = []
for kk in k:
hits = []
for x, y in zip(scores, labels):
y = [y] if isinstance(y, int) else y
hits.append(top_k_hit(x, set(y), k=kk)[0])
res.append(np.mean(hits))
return res
def adjust_learning_rate(optimizer, epoch, lr_steps):
"""Sets the learning rate to the initial LR decayed by lr steps"""
decay = 0.1 ** (sum(epoch >= np.array(lr_steps)))
lr = args.lr * decay
for param_group in optimizer.param_groups:
param_group["lr"] = lr
def main():
flow.InitEagerGlobalSession()
global args
args = parse_args()
model = TSN(
args.spatial_feature_size, args.dropout_ratio, args.num_classes, args.pretrained
)
train_ann_file = (
args.data_dir + "/kinetics400/kinetics400_train_list_videos_sub_frames.txt"
)
train_prefix = args.data_dir + "/kinetics400/rawframes_train"
val_ann_file = (
args.data_dir + "/kinetics400/kinetics400_val_list_videos_sub_frames.txt"
)
val_prefix = args.data_dir + "/kinetics400/rawframes_val"
train_dataset = TSNDataSet(
"",
train_ann_file,
num_segments=3,
video_dir=train_prefix,
new_length=1,
image_tmpl="img_{:05d}.jpg",
flip=GroupMultiScaleCrop(224, [1, 0.875, 0.75, 0.66]),
crop=GroupRandomHorizontalFlip(is_flow=False),
stack=Stack(roll=False),
Normalize=GroupNormalize(args.input_mean, args.input_std),
batch_size=args.batch_size,
)
val_dataset = TSNDataSet(
"",
val_ann_file,
num_segments=3,
video_dir=val_prefix,
new_length=1,
image_tmpl="img_{:05d}.jpg",
test_mode=True,
sample=GroupOverSample(224, 256),
stack=Stack(roll=False),
Normalize=GroupNormalize(args.input_mean, args.input_std),
batch_size=args.batch_size,
)
if args.load_checkpoint != "":
model.load_state_dict(flow.load(args.load_checkpoint))
of_corss_entropy = flow.nn.CrossEntropyLoss()
of_corss_entropy.to("cuda")
model.to("cuda")
optimizer = flow.optim.SGD(model.parameters(), lr=args.lr, momentum=args.mom)
losses = []
print_interval = 50
val_interval = 1
for epoch in range(args.epochs):
adjust_learning_rate(optimizer, epoch, args.lr_steps)
model.train()
for idx in range(int(len(train_dataset) / args.batch_size)):
data, label = train_dataset[idx]
num_modalities = 1
img_group = data.reshape(
args.batch_size,
int(data.shape[0] / args.batch_size),
data.shape[1],
data.shape[2],
data.shape[3],
)
start_t = time.time()
img_group = flow.Tensor(img_group, device=flow.device("cuda"))
gt_label = flow.Tensor(label, device=flow.device("cuda"))
gt_label = gt_label.squeeze().to(dtype=flow.int32)
predeicts = model(num_modalities, gt_label, img_group, True)
loss = of_corss_entropy(predeicts, gt_label)
loss.backward()
optimizer.step()
optimizer.zero_grad()
end_t = time.time()
if idx % print_interval == 0:
l = loss.numpy()
losses.append(l)
tmp_lr = optimizer.param_groups[0]["lr"]
print(
"epoch {} train iter {} oneflow loss {}, lr: {}, train time : {}".format(
epoch, idx, l, tmp_lr, end_t - start_t
)
)
if epoch % val_interval == 0:
model.eval()
results = []
for idx in range(int(len(val_dataset) / args.batch_size)):
data, label = val_dataset[idx]
num_modalities = 1
img_meta = label
img_group = data.reshape(
args.batch_size,
int(data.shape[0] / args.batch_size),
data.shape[1],
data.shape[2],
data.shape[3],
)
img_group = flow.Tensor(img_group, device=flow.device("cuda"))
with flow.no_grad():
result = model(num_modalities, img_meta, img_group)
results.append(result)
outputs = []
for res in results:
for idx in range(0, args.batch_size):
outputs.append(res[idx])
gt_labels = []
for i in range(len(val_dataset)):
ann = val_dataset.get_ann_info(i)
gt_labels.append(ann["label"])
top1, top5 = top_k_accuracy(outputs, gt_labels, k=(1, 5))
print(
"epoch %d, oneflow top1 val acc: %f, top5 val acc: %f"
% (epoch, top1 * 100, top5 * 100)
)
flow.save(
model.state_dict(),
os.path.join(
args.save_checkpoint_path,
"epoch_%d_val_acc_%f_%f" % (epoch, top1, top5),
),
)
writer = open("of_losses.txt", "w")
for o in losses:
writer.write("%f\n" % o)
writer.close()
if __name__ == "__main__":
main()
| [
"oneflow.load",
"oneflow.no_grad",
"oneflow.nn.CrossEntropyLoss",
"oneflow.InitEagerGlobalSession",
"oneflow.device"
] | [((259, 314), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (282, 314), False, 'import warnings\n'), ((348, 412), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test an action recognizer"""'}), "(description='Test an action recognizer')\n", (371, 412), False, 'import argparse\n'), ((3407, 3436), 'oneflow.InitEagerGlobalSession', 'flow.InitEagerGlobalSession', ([], {}), '()\n', (3434, 3436), True, 'import oneflow as flow\n'), ((3491, 3581), 'tsn.models.TSN.TSN', 'TSN', (['args.spatial_feature_size', 'args.dropout_ratio', 'args.num_classes', 'args.pretrained'], {}), '(args.spatial_feature_size, args.dropout_ratio, args.num_classes, args.\n pretrained)\n', (3494, 3581), False, 'from tsn.models.TSN import TSN\n'), ((4895, 4921), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (4919, 4921), True, 'import oneflow as flow\n'), ((2744, 2761), 'numpy.argsort', 'np.argsort', (['score'], {}), '(score)\n', (2754, 2761), True, 'import numpy as np\n'), ((3073, 3086), 'numpy.mean', 'np.mean', (['hits'], {}), '(hits)\n', (3080, 3086), True, 'import numpy as np\n'), ((4838, 4869), 'oneflow.load', 'flow.load', (['args.load_checkpoint'], {}), '(args.load_checkpoint)\n', (4847, 4869), True, 'import oneflow as flow\n'), ((5642, 5653), 'time.time', 'time.time', ([], {}), '()\n', (5651, 5653), False, 'import time\n'), ((6104, 6115), 'time.time', 'time.time', ([], {}), '()\n', (6113, 6115), False, 'import time\n'), ((3263, 3281), 'numpy.array', 'np.array', (['lr_steps'], {}), '(lr_steps)\n', (3271, 3281), True, 'import numpy as np\n'), ((7893, 7984), 'os.path.join', 'os.path.join', (['args.save_checkpoint_path', "('epoch_%d_val_acc_%f_%f' % (epoch, top1, top5))"], {}), "(args.save_checkpoint_path, 'epoch_%d_val_acc_%f_%f' % (epoch,\n top1, top5))\n", (7905, 7984), False, 'import os\n'), ((5708, 5727), 'oneflow.device', 
'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (5719, 5727), True, 'import oneflow as flow\n'), ((5778, 5797), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (5789, 5797), True, 'import oneflow as flow\n'), ((7138, 7152), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (7150, 7152), True, 'import oneflow as flow\n'), ((7096, 7115), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (7107, 7115), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import logging
import numpy as np
from onnx import onnx_pb, helper
from oneflow.python.framework import id_util
from oneflow.python.onnx import util
from oneflow.python.onnx.handler import flow_op
logger = logging.getLogger(__name__)
# pylint: disable=unused-argument,missing-docstring
@flow_op("reduce_min", onnx_op="ReduceMin")
# reduce_max is not user op
# @flow_op("reduce_max", onnx_op="ReduceMax")
@flow_op("reduce_sum", onnx_op="ReduceSum")
@flow_op("reduce_prod", onnx_op="ReduceProd")
class ReduceOpBase:
@classmethod
def Version_1(cls, ctx, node, **kwargs):
axes = node.get_attr_value("axis")
input_shape = ctx.get_shape(node.input[0])
if input_shape is None:
if any([val < 0 for val in axes]):
raise ValueError(
"reduce_op: cannot have negative axis because we don't know input rank"
)
else:
input_rank = len(ctx.get_shape(node.input[0]))
axes = [val + input_rank if val < 0 else val for val in axes]
# axes == [] means reducing all axes, which is also the default value of onnx
if len(axes) > 0:
node.set_attr("axes", axes)
keep_dims = node.get_attr("keepdims")
if keep_dims:
del node.attr["keepdims"]
node.set_attr("keepdims", keep_dims.i)
@classmethod
def Version_11(cls, ctx, node, **kwargs):
# Opset 11 supports negative axis, but core logic is same
cls.Version_1(ctx, node, **kwargs)
@flow_op(["argmax", "argmin"], ["ArgMax", "ArgMin"])
class ArgMax:
@classmethod
def Version_1(cls, ctx, node, **kwargs):
# output_type output = ArgMin(T input, Tidx dimension, @type Tidx, @type output_type)
# tensor(int32) reduced = ArgMin(T data, @INT axis, @INT keepdims)
input_shape = ctx.get_shape(node.input[0])
dim_count = len(input_shape) if input_shape else 0
axis = dim_count - 1
# Onnx ArgMin/ArgMax only supports int64 output, add cast if needed
if ctx.get_dtype(node.output[0]) == onnx_pb.TensorProto.INT32:
# current node will return int64 after conversion, which differs from previous dtype got from oneflow
ctx.set_dtype(node.output[0], onnx_pb.TensorProto.INT64)
op_name = id_util.UniqueStr("Cast")
cast_node = ctx.InsertNewNodeOnOutput(
"Cast", node.output[0], name=op_name, to=onnx_pb.TensorProto.INT32
)
ctx.set_dtype(cast_node.output[0], onnx_pb.TensorProto.INT32)
ctx.CopyShape(node.output[0], cast_node.output[0])
node.set_attr("axis", axis)
node.set_attr("keepdims", 0)
ctx.RemoveInput(node, node.input[1])
@classmethod
def Version_11(cls, ctx, node, **kwargs):
# Opset 11 supports negative axis, but core logic same
cls.Version_1(ctx, node, **kwargs)
| [
"oneflow.python.onnx.handler.flow_op",
"oneflow.python.framework.id_util.UniqueStr"
] | [((1044, 1071), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1061, 1071), False, 'import logging\n'), ((1129, 1171), 'oneflow.python.onnx.handler.flow_op', 'flow_op', (['"""reduce_min"""'], {'onnx_op': '"""ReduceMin"""'}), "('reduce_min', onnx_op='ReduceMin')\n", (1136, 1171), False, 'from oneflow.python.onnx.handler import flow_op\n'), ((1247, 1289), 'oneflow.python.onnx.handler.flow_op', 'flow_op', (['"""reduce_sum"""'], {'onnx_op': '"""ReduceSum"""'}), "('reduce_sum', onnx_op='ReduceSum')\n", (1254, 1289), False, 'from oneflow.python.onnx.handler import flow_op\n'), ((1291, 1335), 'oneflow.python.onnx.handler.flow_op', 'flow_op', (['"""reduce_prod"""'], {'onnx_op': '"""ReduceProd"""'}), "('reduce_prod', onnx_op='ReduceProd')\n", (1298, 1335), False, 'from oneflow.python.onnx.handler import flow_op\n'), ((2368, 2419), 'oneflow.python.onnx.handler.flow_op', 'flow_op', (["['argmax', 'argmin']", "['ArgMax', 'ArgMin']"], {}), "(['argmax', 'argmin'], ['ArgMax', 'ArgMin'])\n", (2375, 2419), False, 'from oneflow.python.onnx.handler import flow_op\n'), ((3157, 3182), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Cast"""'], {}), "('Cast')\n", (3174, 3182), False, 'from oneflow.python.framework import id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from oneflow.test_utils.test_util import GenArgDict
import numpy as np
import oneflow as flow
from oneflow.test_utils.automated_test_util import *
# Number of ranks/devices every global tensor in this file is spread across.
parallel_num = 2
# Ids are drawn from [0, max_id); also sizes the numpy reference tables below.
max_id = 1000
def get_tensors(batch_size, num_tables):
    """Build a random global (ids, table_ids) tensor pair, split over all ranks.

    Ids are int64 in [0, max_id); table ids are int32 and derived as
    ``id % num_tables`` so equal ids always map to the same table id.
    """
    device_placement = flow.placement(type="cuda", ranks=list(range(parallel_num)))
    split0 = flow.sbp.split(0)

    def _to_global(arr):
        # Wrap a numpy array as a non-grad global tensor split on dim 0.
        return flow.tensor(arr, requires_grad=False).to_global(
            placement=device_placement, sbp=split0
        )

    id_arr = np.random.randint(0, max_id, (batch_size, num_tables), dtype=np.int64)
    # Same id must get the same table id, so derive table ids from the ids.
    table_id_arr = (id_arr % num_tables).astype(np.int32)
    return _to_global(id_arr), _to_global(table_id_arr)
def _test_id_shuffle(test_case, has_table_id, num_tables):
    """Check flow._C.one_embedding_id_shuffle against a numpy np.unique reference.

    Runs the op inside an nn.Graph on `parallel_num` cuda ranks, then verifies:
      * the total number of unique ids summed over ranks,
      * the multiset of unique ids gathered from all ranks,
      * (when has_table_id) the table id attached to each unique id.
    """
    batch_size = int(1024 / parallel_num)
    placement = flow.placement(type="cuda", ranks=list(range(parallel_num)))
    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
        def build(self, ids, table_ids):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                cur_rank_num_unique,
                cur_rank_unique_ids,
                cur_rank_unique_table_ids,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, table_ids, num_tables)
            # Cast every output to int32 so they can be fetched and compared uniformly.
            return (
                flow.cast(num_unique_matrix, flow.int32),
                flow.cast(inverse_unique_partition_indices, flow.int32),
                flow.cast(cur_rank_num_unique, flow.int32),
                flow.cast(cur_rank_unique_ids, flow.int32),
                flow.cast(cur_rank_unique_table_ids, flow.int32),
                flow.cast(cur_rank_inverse_indices, flow.int32),
            )
    graph = TestGraph()
    # NOTE(review): this loop only re-samples inputs; graph() runs once after it.
    # Presumably the graph call was meant to be inside the loop as a warm-up — confirm.
    for i in range(10):
        ids_tensor, table_ids_tensor = get_tensors(batch_size, num_tables)
    if not has_table_id:
        table_ids_tensor = None
    graph(ids_tensor, table_ids_tensor)
    (
        num_unique_matrix,
        inverse_unique_partition_indices,
        local_cur_rank_num_unique,
        cur_rank_unique_ids,
        cur_rank_unique_table_ids,
        cur_rank_inverse_indices,
    ) = graph(ids_tensor, table_ids_tensor)
    # Re-globalize the per-rank unique counts so every rank's count is visible here.
    cur_rank_num_unique = local_cur_rank_num_unique.to_local().to_global(
        placement=placement, sbp=flow.sbp.split(0)
    )
    cur_rank_num_unique_list = []
    cur_rank_unique_ids_list = []
    cur_rank_unique_table_ids_list = []
    # Each rank's output slot is padded to the worst case (all ids unique).
    cur_rank_num_ids = batch_size * num_tables * parallel_num
    for i in range(parallel_num):
        num_unique_i = cur_rank_num_unique.numpy()[i]
        unique_ids_i = cur_rank_unique_ids.numpy()[
            cur_rank_num_ids * i : cur_rank_num_ids * (i + 1)
        ]
        unique_table_ids_i = cur_rank_unique_table_ids.numpy()[
            cur_rank_num_ids * i : cur_rank_num_ids * (i + 1)
        ]
        cur_rank_num_unique_list.append(num_unique_i)
        # Only the first num_unique_i entries of the padded slice are valid.
        cur_rank_unique_ids_list.append(np.array(unique_ids_i[0:num_unique_i]))
        cur_rank_unique_table_ids_list.append(
            np.array(unique_table_ids_i[0:num_unique_i])
        )
    global_ids = ids_tensor.numpy()
    np_unique_ids, np_unique_index, np_inverse = np.unique(
        global_ids, return_index=True, return_inverse=True
    )
    np_num_unique = np_unique_ids.size
    # test num unique
    test_case.assertTrue(
        np.array_equal(np_num_unique, np.array(cur_rank_num_unique_list).sum())
    )
    # test unique ids (order-insensitive: sort both sides before comparing)
    unique_ids = np.concatenate(cur_rank_unique_ids_list)
    unique_ids.sort()
    np_unique_ids.sort()
    test_case.assertTrue(np.array_equal(unique_ids, np_unique_ids))
    if has_table_id:
        # test unique table ids
        unique_table_ids = np.concatenate(cur_rank_unique_table_ids_list)
        unique_table_ids.sort()
        global_table_ids = table_ids_tensor.numpy()
        # np_unique_index gives the first occurrence of each unique id; since
        # table id is a function of id, that occurrence's table id is canonical.
        np_unique_table_ids = global_table_ids.flatten()[np_unique_index]
        np_unique_table_ids.sort()
        test_case.assertTrue(np.array_equal(unique_table_ids, np_unique_table_ids))
def round_half_away_from_zero(x):
    """Round to the nearest integer, breaking .5 ties away from zero.

    E.g. 2.5 -> 3.0 and -2.5 -> -3.0 (unlike numpy's banker's rounding).
    Works elementwise on arrays as well as on scalars.
    """
    # floor(|x| + 0.5) rounds the magnitude half-up; the sign restores direction.
    return np.floor(np.abs(x) + 0.5) * np.sign(x)
def embedding_shuffle_quantize(np_data, np_dtype):
    """Numpy reference of the quantize/dequantize round-trip that embedding
    shuffle applies when quantized communication is enabled.

    Per (batch, table) row, the embedding vector is scaled into int8 range
    [-127, 127] by its abs-max, rounded half away from zero, then scaled back
    and cast to `np_dtype` (the transport dtype, float16 or float32).

    :param np_data: array of shape (batch, tables, embedding_size)
        (axis=2 reduction below assumes rank 3).
    :param np_dtype: numpy dtype the result is cast to (np.float16/np.float32).
    :return: the dequantized array in `np_dtype`.
    """
    # When the transport dtype is float16, computation still happens in float32.
    np_reduce_data = np_data.astype(np.float32)
    abs_max_factor = np.max(np.abs(np_reduce_data), axis=2)
    abs_max_factor = np.expand_dims(abs_max_factor, axis=2)
    # The scale travels with the data in the transport dtype, so dequantize may
    # see a lower-precision copy of the factor than quantize used.
    transport_quantize_factor = abs_max_factor.astype(np_dtype)
    # np.full matches how _test_embedding_gradient_shuffle builds the same constant.
    int8_factor = np.full(abs_max_factor.shape, 127.0, dtype=np.float32)
    quantize_factor = int8_factor / abs_max_factor
    # Convert to compute type. (The original discarded this astype's result;
    # numpy promotion made that harmless, but the intent is now explicit.)
    np_data = np_data.astype(np.float32)
    np_data = np_data * quantize_factor
    np_data = round_half_away_from_zero(np_data)
    np_data = np_data.astype(np.int8)
    # Back to compute type for dequantization.
    np_data = np_data.astype(np.float32)
    dequantize_factor = transport_quantize_factor.astype(np.float32) / int8_factor
    np_data = np_data * dequantize_factor
    np_data = np_data.astype(np_dtype)
    return np_data
def _test_embedding_shuffle(test_case, dtype, enable_quantize):
    """Check flow._C.one_embedding_embedding_shuffle against a numpy gather.

    Pipeline under test: id_shuffle -> gather rows from a dense table ->
    embedding_shuffle. The result must equal global_data[global_ids], after an
    optional quantize/dequantize round-trip when quantized comm is enabled.
    """
    batch_size = int(1024 / parallel_num)
    placement = flow.placement(type="cuda", ranks=list(range(parallel_num)))
    num_tables = 26
    embedding_size = 128
    # Presumably quantized comm is limited to embedding_size < 1025 — this
    # mirrors the same condition in _test_embedding_gradient_shuffle.
    enable_quantized_comm = enable_quantize and embedding_size < 1025
    # NOTE(review): `os` is not imported at the top of this file; it likely
    # arrives via the automated_test_util star import — confirm.
    if enable_quantized_comm:
        os.environ["ONEFLOW_ONE_EMBEDDING_ENABLE_QUANTIZED_COMM"] = "1"
    else:
        os.environ["ONEFLOW_ONE_EMBEDDING_ENABLE_QUANTIZED_COMM"] = "0"
    if dtype == flow.float16:
        np_dtype = np.float16
    else:
        np_dtype = np.float32
    # Dense reference table: one embedding row per possible id.
    data = np.random.rand(max_id, embedding_size).astype(np_dtype)
    data_tensor = flow.tensor(data, requires_grad=False).to_global(
        placement=placement, sbp=flow.sbp.broadcast()
    )
    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
        def build(self, ids, table_ids, data):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                _,
                cur_rank_unique_ids,
                _,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, table_ids, num_tables)
            unique_embeddings = flow._C.gather(data, cur_rank_unique_ids, axis=0)
            embeddings = flow._C.one_embedding_embedding_shuffle(
                unique_embeddings,
                flow._C.identity(num_unique_matrix),
                flow._C.identity(cur_rank_inverse_indices),
                flow._C.identity(inverse_unique_partition_indices),
            )
            return embeddings
    graph = TestGraph()
    # NOTE(review): this loop only re-samples inputs; the graph calls sit
    # outside it — confirm whether graph() was meant to run per iteration.
    for i in range(10):
        ids_tensor, table_ids_tensor = get_tensors(batch_size, num_tables)
    graph(ids_tensor, table_ids_tensor, data_tensor)
    embeddings = graph(ids_tensor, table_ids_tensor, data_tensor)
    global_ids = ids_tensor.numpy()
    global_data = data_tensor.numpy()
    # Numpy reference: plain fancy-index gather of the dense table.
    np_embeddings = global_data[global_ids]
    # Quantized numpy embedding.
    if enable_quantized_comm:
        np_embeddings = embedding_shuffle_quantize(np_embeddings, np_dtype)
    test_case.assertTrue(np.array_equal(embeddings.numpy(), np_embeddings))
def _test_embedding_gradient_shuffle(test_case, enable_quantize, fp16, embedding_size):
    """Check flow._C.one_embedding_embedding_gradient_shuffle against numpy.

    The reference sums, per unique id, all gradient rows carrying that id;
    the op's per-rank outputs are scattered into a dense (max_id, emb) table
    and compared with np.allclose under a mode-dependent tolerance.
    """
    np_tolerance = 0
    batch_size = int(1024 / parallel_num)
    placement = flow.placement(type="cuda", ranks=list(range(parallel_num)))
    num_tables = 26
    enable_quantized_comm = enable_quantize and embedding_size < 1025
    # Tolerance is loosest for int8-quantized comm, tighter for fp16, tightest for fp32.
    if enable_quantized_comm:
        np_tolerance = 0.5
        os.environ["ONEFLOW_ONE_EMBEDDING_ENABLE_QUANTIZED_COMM"] = "1"
    else:
        if fp16:
            np_tolerance = 1e-2
        else:
            np_tolerance = 1e-4
        os.environ["ONEFLOW_ONE_EMBEDDING_ENABLE_QUANTIZED_COMM"] = "0"
    embedding_grad = np.random.rand(batch_size, num_tables, embedding_size).astype(
        np.float32
    )
    embedding_grad_tensor = flow.tensor(embedding_grad, requires_grad=False).to_global(
        placement=placement, sbp=flow.sbp.split(0)
    )
    class TestGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
        def build(self, ids, table_ids, embedding_grad):
            (
                num_unique_matrix,
                inverse_unique_partition_indices,
                cur_rank_num_unique,
                cur_rank_unique_ids,
                _,
                cur_rank_inverse_indices,
            ) = flow._C.one_embedding_id_shuffle(ids, table_ids, num_tables)
            # In fp16 mode the gradient is shuffled in half precision and cast back.
            if fp16:
                embedding_grad = flow.cast(embedding_grad, flow.float16)
            cur_rank_unique_embedding_grad = flow._C.one_embedding_embedding_gradient_shuffle(
                embedding_grad,
                num_unique_matrix,
                cur_rank_inverse_indices,
                inverse_unique_partition_indices,
            )
            if fp16:
                cur_rank_unique_embedding_grad = flow.cast(
                    cur_rank_unique_embedding_grad, flow.float32
                )
            return (
                cur_rank_unique_embedding_grad,
                flow.cast(cur_rank_num_unique, flow.int32),
                cur_rank_unique_ids,
            )
    graph = TestGraph()
    # NOTE(review): this loop only re-samples inputs; the graph call sits
    # outside it — confirm whether graph() was meant to run per iteration.
    for i in range(10):
        ids_tensor, table_ids_tensor = get_tensors(batch_size, num_tables)
    graph(ids_tensor, table_ids_tensor, embedding_grad_tensor)
    ids_tensor, table_ids_tensor = get_tensors(batch_size, num_tables)
    (
        cur_rank_unique_embedding_grad,
        local_cur_rank_num_unique,
        cur_rank_unique_ids,
    ) = graph(ids_tensor, table_ids_tensor, embedding_grad_tensor)
    # Re-globalize the per-rank unique counts so every rank's count is visible here.
    cur_rank_num_unique = local_cur_rank_num_unique.to_local().to_global(
        placement=placement, sbp=flow.sbp.split(0)
    )
    global_ids = ids_tensor.numpy()
    global_embedding_grad = embedding_grad_tensor.numpy()
    np_unique_ids = np.unique(global_ids)
    np_num_unique = np_unique_ids.size
    np_cur_rank_unique_embedding_grad = np.zeros((max_id, embedding_size))
    # Mimic the fp16 path: gradients are reduced in half precision.
    if fp16:
        global_embedding_grad = global_embedding_grad.astype(np.float16)
    for k in range(np_num_unique):
        unique_id = np_unique_ids[k]
        # Sum every gradient row whose id equals unique_id.
        np_data = sum(
            global_embedding_grad.reshape(-1, embedding_size)[
                np.where(global_ids.flatten() == unique_id)[0]
            ]
        )
        # Quantize Embedding Gradient.
        if enable_quantized_comm:
            abs_max_factor = np.max(np.abs(np_data))
            int8_factor = np.full(abs_max_factor.shape, 127.0, dtype=np.float32)
            quantize_factor = int8_factor / abs_max_factor
            np_data = np_data * quantize_factor
            np_data = round_half_away_from_zero(np_data)
            np_data = np_data.astype(np.int8)
            np_data = np_data.astype(np.float32)
            dequantize_factor = abs_max_factor / int8_factor
            np_data = np_data * dequantize_factor
        np_cur_rank_unique_embedding_grad[unique_id, :] = np_data
    if fp16:
        np_cur_rank_unique_embedding_grad = np_cur_rank_unique_embedding_grad.astype(
            np.float32
        )
    # Each rank's output slot is padded to the worst case (all ids unique).
    cur_rank_num_ids = batch_size * num_tables * parallel_num
    of_unique_embedding_grad = np.zeros((max_id, embedding_size))
    for i in range(parallel_num):
        num_unique_i = cur_rank_num_unique.numpy()[i]
        unique_ids_i = cur_rank_unique_ids.numpy()[
            cur_rank_num_ids * i : cur_rank_num_ids * (i + 1)
        ]
        unique_embedding_grad_i = cur_rank_unique_embedding_grad.numpy()[
            cur_rank_num_ids * i : cur_rank_num_ids * (i + 1)
        ]
        # Scatter each rank's valid rows into the dense comparison table.
        for j in range(num_unique_i):
            unique_id = unique_ids_i[j]
            of_unique_embedding_grad[unique_id, :] = unique_embedding_grad_i[j, :]
    test_case.assertTrue(
        np.allclose(
            of_unique_embedding_grad,
            np_cur_rank_unique_embedding_grad,
            atol=np_tolerance,
            rtol=np_tolerance,
        ),
    )
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n2d()
class DataShuffleTestCase(flow.unittest.TestCase):
    """Sweeps each data-shuffle check over its argument grid on 1 node / 2 devices."""

    def test_id_shuffle(test_case):
        grid = OrderedDict(
            [("has_table_id", [True, False]), ("num_tables", [1, 26])]
        )
        for kwargs in GenArgDict(grid):
            _test_id_shuffle(test_case, **kwargs)

    def test_embedding_shuffle(test_case):
        grid = OrderedDict(
            [
                ("dtype", [flow.float32, flow.float16]),
                ("enable_quantize", [True, False]),
            ]
        )
        for kwargs in GenArgDict(grid):
            _test_embedding_shuffle(test_case, **kwargs)

    def test_embedding_gradient_shuffle(test_case):
        grid = OrderedDict(
            [
                ("enable_quantize", [True, False]),
                ("fp16", [True, False]),
                ("embedding_size", [128, 17]),
            ]
        )
        for kwargs in GenArgDict(grid):
            _test_embedding_gradient_shuffle(test_case, **kwargs)
# Run the unittest cases when executed directly (requires 1 node / 2 devices).
if __name__ == "__main__":
    unittest.main()
| [
"oneflow._C.one_embedding_id_shuffle",
"oneflow._C.identity",
"oneflow.test_utils.test_util.GenArgDict",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.tensor",
"oneflow.sbp.split",
"oneflow.sbp.broadcast",
"oneflow._C.gather",
"oneflow._C.one_embedding_embedding_gradient_shuffle",
"oneflow.cast"
] | [((13125, 13157), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (13155, 13157), True, 'import oneflow as flow\n'), ((953, 1023), 'numpy.random.randint', 'np.random.randint', (['(0)', 'max_id', '(batch_size, num_tables)'], {'dtype': 'np.int64'}), '(0, max_id, (batch_size, num_tables), dtype=np.int64)\n', (970, 1023), True, 'import numpy as np\n'), ((4001, 4062), 'numpy.unique', 'np.unique', (['global_ids'], {'return_index': '(True)', 'return_inverse': '(True)'}), '(global_ids, return_index=True, return_inverse=True)\n', (4010, 4062), True, 'import numpy as np\n'), ((4289, 4329), 'numpy.concatenate', 'np.concatenate', (['cur_rank_unique_ids_list'], {}), '(cur_rank_unique_ids_list)\n', (4303, 4329), True, 'import numpy as np\n'), ((4896, 4906), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (4903, 4906), True, 'import numpy as np\n'), ((4921, 4930), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (4927, 4930), True, 'import numpy as np\n'), ((4966, 4983), 'numpy.floor', 'np.floor', (['abs_val'], {}), '(abs_val)\n', (4974, 4983), True, 'import numpy as np\n'), ((5264, 5302), 'numpy.expand_dims', 'np.expand_dims', (['abs_max_factor'], {'axis': '(2)'}), '(abs_max_factor, axis=2)\n', (5278, 5302), True, 'import numpy as np\n'), ((10927, 10948), 'numpy.unique', 'np.unique', (['global_ids'], {}), '(global_ids)\n', (10936, 10948), True, 'import numpy as np\n'), ((11028, 11062), 'numpy.zeros', 'np.zeros', (['(max_id, embedding_size)'], {}), '((max_id, embedding_size))\n', (11036, 11062), True, 'import numpy as np\n'), ((12280, 12314), 'numpy.zeros', 'np.zeros', (['(max_id, embedding_size)'], {}), '((max_id, embedding_size))\n', (12288, 12314), True, 'import numpy as np\n'), ((14119, 14134), 'unittest.main', 'unittest.main', ([], {}), '()\n', (14132, 14134), False, 'import unittest\n'), ((4402, 4443), 'numpy.array_equal', 'np.array_equal', (['unique_ids', 'np_unique_ids'], {}), '(unique_ids, np_unique_ids)\n', (4416, 4443), True, 
'import numpy as np\n'), ((4525, 4571), 'numpy.concatenate', 'np.concatenate', (['cur_rank_unique_table_ids_list'], {}), '(cur_rank_unique_table_ids_list)\n', (4539, 4571), True, 'import numpy as np\n'), ((5211, 5233), 'numpy.abs', 'np.abs', (['np_reduce_data'], {}), '(np_reduce_data)\n', (5217, 5233), True, 'import numpy as np\n'), ((5385, 5432), 'numpy.ones', 'np.ones', (['abs_max_factor.shape'], {'dtype': 'np.float32'}), '(abs_max_factor.shape, dtype=np.float32)\n', (5392, 5432), True, 'import numpy as np\n'), ((12869, 12983), 'numpy.allclose', 'np.allclose', (['of_unique_embedding_grad', 'np_cur_rank_unique_embedding_grad'], {'atol': 'np_tolerance', 'rtol': 'np_tolerance'}), '(of_unique_embedding_grad, np_cur_rank_unique_embedding_grad,\n atol=np_tolerance, rtol=np_tolerance)\n', (12880, 12983), True, 'import numpy as np\n'), ((13264, 13277), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13275, 13277), False, 'from collections import OrderedDict\n'), ((13390, 13410), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (13400, 13410), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((13525, 13538), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13536, 13538), False, 'from collections import OrderedDict\n'), ((13671, 13691), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (13681, 13691), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((13822, 13835), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13833, 13835), False, 'from collections import OrderedDict\n'), ((13998, 14018), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (14008, 14018), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((1041, 1078), 'oneflow.tensor', 'flow.tensor', (['ids'], {'requires_grad': '(False)'}), '(ids, requires_grad=False)\n', (1052, 1078), True, 'import oneflow 
as flow\n'), ((1123, 1140), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1137, 1140), True, 'import oneflow as flow\n'), ((1404, 1421), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (1418, 1421), True, 'import oneflow as flow\n'), ((2055, 2115), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'table_ids', 'num_tables'], {}), '(ids, table_ids, num_tables)\n', (2087, 2115), True, 'import oneflow as flow\n'), ((3125, 3142), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (3139, 3142), True, 'import oneflow as flow\n'), ((3761, 3799), 'numpy.array', 'np.array', (['unique_ids_i[0:num_unique_i]'], {}), '(unique_ids_i[0:num_unique_i])\n', (3769, 3799), True, 'import numpy as np\n'), ((3860, 3904), 'numpy.array', 'np.array', (['unique_table_ids_i[0:num_unique_i]'], {}), '(unique_table_ids_i[0:num_unique_i])\n', (3868, 3904), True, 'import numpy as np\n'), ((4794, 4847), 'numpy.array_equal', 'np.array_equal', (['unique_table_ids', 'np_unique_table_ids'], {}), '(unique_table_ids, np_unique_table_ids)\n', (4808, 4847), True, 'import numpy as np\n'), ((6581, 6619), 'numpy.random.rand', 'np.random.rand', (['max_id', 'embedding_size'], {}), '(max_id, embedding_size)\n', (6595, 6619), True, 'import numpy as np\n'), ((6655, 6693), 'oneflow.tensor', 'flow.tensor', (['data'], {'requires_grad': '(False)'}), '(data, requires_grad=False)\n', (6666, 6693), True, 'import oneflow as flow\n'), ((6738, 6758), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (6756, 6758), True, 'import oneflow as flow\n'), ((7141, 7201), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'table_ids', 'num_tables'], {}), '(ids, table_ids, num_tables)\n', (7173, 7201), True, 'import oneflow as flow\n'), ((7234, 7283), 'oneflow._C.gather', 'flow._C.gather', (['data', 'cur_rank_unique_ids'], {'axis': '(0)'}), '(data, cur_rank_unique_ids, axis=0)\n', (7248, 7283), True, 'import 
oneflow as flow\n'), ((8839, 8893), 'numpy.random.rand', 'np.random.rand', (['batch_size', 'num_tables', 'embedding_size'], {}), '(batch_size, num_tables, embedding_size)\n', (8853, 8893), True, 'import numpy as np\n'), ((8955, 9003), 'oneflow.tensor', 'flow.tensor', (['embedding_grad'], {'requires_grad': '(False)'}), '(embedding_grad, requires_grad=False)\n', (8966, 9003), True, 'import oneflow as flow\n'), ((9048, 9065), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (9062, 9065), True, 'import oneflow as flow\n'), ((9476, 9536), 'oneflow._C.one_embedding_id_shuffle', 'flow._C.one_embedding_id_shuffle', (['ids', 'table_ids', 'num_tables'], {}), '(ids, table_ids, num_tables)\n', (9508, 9536), True, 'import oneflow as flow\n'), ((9676, 9827), 'oneflow._C.one_embedding_embedding_gradient_shuffle', 'flow._C.one_embedding_embedding_gradient_shuffle', (['embedding_grad', 'num_unique_matrix', 'cur_rank_inverse_indices', 'inverse_unique_partition_indices'], {}), '(embedding_grad,\n num_unique_matrix, cur_rank_inverse_indices,\n inverse_unique_partition_indices)\n', (9724, 9827), True, 'import oneflow as flow\n'), ((10789, 10806), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (10803, 10806), True, 'import oneflow as flow\n'), ((11546, 11600), 'numpy.full', 'np.full', (['abs_max_factor.shape', '(127.0)'], {'dtype': 'np.float32'}), '(abs_max_factor.shape, 127.0, dtype=np.float32)\n', (11553, 11600), True, 'import numpy as np\n'), ((2153, 2193), 'oneflow.cast', 'flow.cast', (['num_unique_matrix', 'flow.int32'], {}), '(num_unique_matrix, flow.int32)\n', (2162, 2193), True, 'import oneflow as flow\n'), ((2211, 2266), 'oneflow.cast', 'flow.cast', (['inverse_unique_partition_indices', 'flow.int32'], {}), '(inverse_unique_partition_indices, flow.int32)\n', (2220, 2266), True, 'import oneflow as flow\n'), ((2284, 2326), 'oneflow.cast', 'flow.cast', (['cur_rank_num_unique', 'flow.int32'], {}), '(cur_rank_num_unique, flow.int32)\n', (2293, 2326), 
True, 'import oneflow as flow\n'), ((2344, 2386), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_ids', 'flow.int32'], {}), '(cur_rank_unique_ids, flow.int32)\n', (2353, 2386), True, 'import oneflow as flow\n'), ((2404, 2452), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_table_ids', 'flow.int32'], {}), '(cur_rank_unique_table_ids, flow.int32)\n', (2413, 2452), True, 'import oneflow as flow\n'), ((2470, 2517), 'oneflow.cast', 'flow.cast', (['cur_rank_inverse_indices', 'flow.int32'], {}), '(cur_rank_inverse_indices, flow.int32)\n', (2479, 2517), True, 'import oneflow as flow\n'), ((7401, 7436), 'oneflow._C.identity', 'flow._C.identity', (['num_unique_matrix'], {}), '(num_unique_matrix)\n', (7417, 7436), True, 'import oneflow as flow\n'), ((7454, 7496), 'oneflow._C.identity', 'flow._C.identity', (['cur_rank_inverse_indices'], {}), '(cur_rank_inverse_indices)\n', (7470, 7496), True, 'import oneflow as flow\n'), ((7514, 7564), 'oneflow._C.identity', 'flow._C.identity', (['inverse_unique_partition_indices'], {}), '(inverse_unique_partition_indices)\n', (7530, 7564), True, 'import oneflow as flow\n'), ((9591, 9630), 'oneflow.cast', 'flow.cast', (['embedding_grad', 'flow.float16'], {}), '(embedding_grad, flow.float16)\n', (9600, 9630), True, 'import oneflow as flow\n'), ((9969, 10024), 'oneflow.cast', 'flow.cast', (['cur_rank_unique_embedding_grad', 'flow.float32'], {}), '(cur_rank_unique_embedding_grad, flow.float32)\n', (9978, 10024), True, 'import oneflow as flow\n'), ((10148, 10190), 'oneflow.cast', 'flow.cast', (['cur_rank_num_unique', 'flow.int32'], {}), '(cur_rank_num_unique, flow.int32)\n', (10157, 10190), True, 'import oneflow as flow\n'), ((11503, 11518), 'numpy.abs', 'np.abs', (['np_data'], {}), '(np_data)\n', (11509, 11518), True, 'import numpy as np\n'), ((4202, 4236), 'numpy.array', 'np.array', (['cur_rank_num_unique_list'], {}), '(cur_rank_num_unique_list)\n', (4210, 4236), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import random
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
# Maps numpy scalar types to the matching oneflow dtypes. test_numpy_type uses
# it to predict when flow.as_tensor needs no dtype conversion.
numpy_dtype_to_oneflow_dtype_dict = {
    np.int32: flow.int32,
    np.int64: flow.int64,
    np.int8: flow.int8,
    np.uint8: flow.uint8,
    # np.bool was deprecated in NumPy 1.20 and removed in 1.24; np.bool_ is
    # the actual boolean scalar type and works on all NumPy versions.
    np.bool_: flow.bool,
    np.float64: flow.float64,
    np.float32: flow.float32,
    np.float16: flow.float16,
}
@flow.unittest.skip_unless_1n1d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test gpu cases")
class TestAsTensor(flow.unittest.TestCase):
    # Verifies flow.as_tensor semantics: when no dtype/device conversion is
    # needed the result shares storage with the input; otherwise a copy is made.
    def test_tensor_type(test_case):
        # Same dtype/device: as_tensor must return the very same object, so a
        # write through y is visible through x.
        x = flow.randn(2, 3)
        y = flow.as_tensor(x)
        y[0] = 2.0
        test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))
        test_case.assertTrue(np.array_equal(id(x), id(y)))
        x = flow.randn(2, 3)
        x = x.to("cuda")
        y = flow.as_tensor(x)
        y[0] = 2.0
        test_case.assertTrue(np.array_equal(x.numpy(), y.numpy()))
        test_case.assertTrue(np.array_equal(id(x), id(y)))
        # Moving a cpu tensor to cuda must produce a new object.
        x = flow.randn(2, 3)
        y = flow.as_tensor(x, device=flow.device("cuda:0"))
        test_case.assertTrue(id(x) != id(y))
        # Any dtype conversion from float32 must also produce a new object.
        for dtype in [
            flow.float64,
            flow.float16,
            flow.int64,
            flow.int32,
            flow.int8,
            flow.uint8,
        ]:
            x = flow.randn(2, 3)
            y = flow.as_tensor(x, dtype=dtype)
            test_case.assertTrue(id(x) != id(y))
    def test_numpy_type(test_case):
        # Cross-product of source numpy dtypes and target oneflow dtypes.
        for device in [flow.device("cpu"), flow.device("cuda:0"), None]:
            for np_dtype in [
                np.float64,
                np.float32,
                np.float16,
                np.int64,
                np.int32,
                np.int8,
                np.uint8,
            ]:
                for flow_dtype in [
                    flow.float64,
                    flow.float16,
                    flow.int64,
                    flow.int32,
                    flow.int8,
                    flow.uint8,
                ]:
                    np_arr = np.ones((2, 3), dtype=np_dtype)
                    try:
                        tensor = flow.as_tensor(np_arr, dtype=flow_dtype)
                        # NOTE(review): `is not` compares identity against a
                        # freshly constructed device object; unless flow.device
                        # caches instances this condition is always True for
                        # non-None devices — confirm `==` was not intended.
                        if numpy_dtype_to_oneflow_dtype_dict[
                            np_arr.dtype
                        ] == flow_dtype and device is not flow.device("cuda:0"):
                            # Shared-memory case: mutating the tensor must be
                            # visible in the source numpy array.
                            tensor[0][0] += 1.0
                            test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
                        else:
                            test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
                    except Exception as e:
                        # Ignore cast or kernel mismatch error in test example
                        pass
    def test_other_type(test_case):
        # np_dtype is unused below; the inner body only exercises tuple/list/
        # scalar inputs for each target flow_dtype.
        for device in [flow.device("cpu"), flow.device("cuda:0"), None]:
            for np_dtype in [
                np.float64,
                np.float32,
                np.float16,
                np.int64,
                np.int32,
                np.int8,
                np.uint8,
            ]:
                for flow_dtype in [
                    flow.float64,
                    flow.float16,
                    flow.int64,
                    flow.int32,
                    flow.int8,
                    flow.uint8,
                ]:
                    # tuple
                    np_arr = (1.0, 2.0, 3.0)
                    try:
                        tensor = flow.as_tensor(np_arr, dtype=flow_dtype)
                        test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
                    except Exception as e:
                        # Ignore cast or kernel mismatch error in test example
                        pass
                    # list
                    np_arr = [1.0, 2.0, 3.0]
                    try:
                        tensor = flow.as_tensor(np_arr, dtype=flow_dtype)
                        test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
                    except Exception as e:
                        # Ignore cast or kernel mismatch error in test example
                        pass
                    # scalar
                    np_arr = 4.0
                    try:
                        tensor = flow.as_tensor(np_arr, dtype=flow_dtype)
                        test_case.assertTrue(np.array_equal(np_arr, tensor.numpy()))
                    except Exception as e:
                        # Ignore cast or kernel mismatch error in test example
                        pass
# Run the unittest cases when executed directly (GPU required unless skipped).
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.as_tensor",
"oneflow.randn",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.device"
] | [((959, 991), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (989, 991), True, 'import oneflow as flow\n'), ((1009, 1043), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1018, 1043), False, 'import os\n'), ((5210, 5225), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5223, 5225), False, 'import unittest\n'), ((1161, 1177), 'oneflow.randn', 'flow.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1171, 1177), True, 'import oneflow as flow\n'), ((1190, 1207), 'oneflow.as_tensor', 'flow.as_tensor', (['x'], {}), '(x)\n', (1204, 1207), True, 'import oneflow as flow\n'), ((1366, 1382), 'oneflow.randn', 'flow.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1376, 1382), True, 'import oneflow as flow\n'), ((1420, 1437), 'oneflow.as_tensor', 'flow.as_tensor', (['x'], {}), '(x)\n', (1434, 1437), True, 'import oneflow as flow\n'), ((1596, 1612), 'oneflow.randn', 'flow.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1606, 1612), True, 'import oneflow as flow\n'), ((1916, 1932), 'oneflow.randn', 'flow.randn', (['(2)', '(3)'], {}), '(2, 3)\n', (1926, 1932), True, 'import oneflow as flow\n'), ((1949, 1979), 'oneflow.as_tensor', 'flow.as_tensor', (['x'], {'dtype': 'dtype'}), '(x, dtype=dtype)\n', (1963, 1979), True, 'import oneflow as flow\n'), ((2089, 2107), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (2100, 2107), True, 'import oneflow as flow\n'), ((2109, 2130), 'oneflow.device', 'flow.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2120, 2130), True, 'import oneflow as flow\n'), ((3432, 3450), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (3443, 3450), True, 'import oneflow as flow\n'), ((3452, 3473), 'oneflow.device', 'flow.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (3463, 3473), True, 'import oneflow as flow\n'), ((1650, 1671), 'oneflow.device', 'flow.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (1661, 1671), True, 'import oneflow as 
flow\n'), ((2650, 2681), 'numpy.ones', 'np.ones', (['(2, 3)'], {'dtype': 'np_dtype'}), '((2, 3), dtype=np_dtype)\n', (2657, 2681), True, 'import numpy as np\n'), ((2740, 2780), 'oneflow.as_tensor', 'flow.as_tensor', (['np_arr'], {'dtype': 'flow_dtype'}), '(np_arr, dtype=flow_dtype)\n', (2754, 2780), True, 'import oneflow as flow\n'), ((4095, 4135), 'oneflow.as_tensor', 'flow.as_tensor', (['np_arr'], {'dtype': 'flow_dtype'}), '(np_arr, dtype=flow_dtype)\n', (4109, 4135), True, 'import oneflow as flow\n'), ((4503, 4543), 'oneflow.as_tensor', 'flow.as_tensor', (['np_arr'], {'dtype': 'flow_dtype'}), '(np_arr, dtype=flow_dtype)\n', (4517, 4543), True, 'import oneflow as flow\n'), ((4900, 4940), 'oneflow.as_tensor', 'flow.as_tensor', (['np_arr'], {'dtype': 'flow_dtype'}), '(np_arr, dtype=flow_dtype)\n', (4914, 4940), True, 'import oneflow as flow\n'), ((2942, 2963), 'oneflow.device', 'flow.device', (['"""cuda:0"""'], {}), "('cuda:0')\n", (2953, 2963), True, 'import oneflow as flow\n')] |
import math
import oneflow as flow
import oneflow.nn as nn
class LSTMText(nn.Module):
    """Text classifier: embedding -> BiLSTM -> per-step flatten -> linear -> softmax."""

    def __init__(self, emb_sz, emb_dim, hidden_size, nfc, n_classes, batch_size):
        super(LSTMText, self).__init__()
        self.emb_sz = emb_sz
        self.emb_dim = emb_dim
        self.n_classes = n_classes
        self.hidden_size = hidden_size
        self.nfc = nfc
        self.batch_size = batch_size
        self.bilstm = BiLSTM(emb_dim, hidden_size, batch_size=batch_size)
        self.embedding = nn.Embedding(self.emb_sz, self.emb_dim)
        self.linear = nn.Linear(hidden_size * 2 * nfc, n_classes)
        self.softmax = nn.Softmax(dim=1)

    def forward(self, inputs, is_train=1):
        """Return class probabilities; `is_train` is accepted but unused."""
        features = self.bilstm(self.embedding(inputs))
        # Flatten each of the first `batch_size` slices along dim 1 into a
        # (1, 1, -1) chunk, then stack the chunks along dim 0.
        chunks = [
            features[:, t, :].reshape(1, -1).unsqueeze(0)
            for t in range(self.batch_size)
        ]
        stacked = flow.cat(chunks, dim=0)
        probs = self.softmax(self.linear(stacked).squeeze(1))
        return probs
def reverse(inputs, dim=0):
    """Return a cuda tensor with a 3-D tensor's entries reversed along `dim`.

    For `dim` outside {0, 1, 2} the data is left untouched (only copied to cuda).
    """
    arr = inputs.numpy()
    if 0 <= dim <= 2:
        # Build a slicing tuple: full slices on every axis, step -1 on `dim`.
        index = [slice(None)] * 3
        index[dim] = slice(None, None, -1)
        arr = arr[tuple(index)]
    return flow.Tensor(arr).to("cuda")
class CustomLSTM(nn.Module):
    """Single-layer LSTM written out with explicit gate math.

    Parameters W (input->gates), U (hidden->gates) and bias are laid out as
    four stacked gate blocks of width hidden_sz: [input | forget | cell | output].
    """
    def __init__(self, input_sz, hidden_sz, batch_size=1, num_layers=1):
        # batch_size and num_layers are accepted but not used by this module.
        super().__init__()
        self.input_sz = input_sz
        self.hidden_size = hidden_sz
        self.W = nn.Parameter(flow.Tensor(input_sz, hidden_sz * 4))
        self.U = nn.Parameter(flow.Tensor(hidden_sz, hidden_sz * 4))
        self.bias = nn.Parameter(flow.Tensor(hidden_sz * 4))
        self.init_weights()
    def init_weights(self):
        # Uniform(-1/sqrt(H), 1/sqrt(H)) init, as in standard LSTM implementations.
        stdv = 1.0 / math.sqrt(self.hidden_size)
        for weight in self.parameters():
            weight.data.uniform_(-stdv, stdv)
    def forward(self, x, init_states=None):
        """Assumes x is of shape (batch, sequence, feature).

        Returns (hidden_seq, (h_t, c_t)) where hidden_seq stacks the per-step
        hidden states along dim 0, i.e. shape (sequence, batch, hidden).
        """
        bs, seq_sz, _ = x.size()
        hidden_seq = []
        if init_states is None:
            # Zero-initialized hidden/cell state on cuda when none is provided.
            h_t, c_t = (
                flow.zeros((bs, self.hidden_size)).to("cuda"),
                flow.zeros((bs, self.hidden_size)).to("cuda"),
            )
        else:
            h_t, c_t = init_states
        HS = self.hidden_size
        for t in range(seq_sz):
            x_t = x[:, t, :].reshape(x.shape[0], x.shape[2])
            # One fused affine transform produces all four gates at once.
            gates = flow.matmul(x_t, self.W) + flow.matmul(h_t, self.U) + self.bias
            i_t, f_t, g_t, o_t = (
                flow.sigmoid(gates[:, :HS]),
                flow.sigmoid(gates[:, HS : HS * 2]),
                flow.tanh(gates[:, HS * 2 : HS * 3]),
                flow.sigmoid(gates[:, HS * 3 :]),
            )
            # Standard LSTM cell update.
            c_t = f_t * c_t + i_t * g_t
            h_t = o_t * flow.tanh(c_t)
            hidden_seq.append(h_t.unsqueeze(0))
        hidden_seq = flow.cat(hidden_seq, dim=0)
        return hidden_seq, (h_t, c_t)
class BiLSTM(nn.Module):
    """Bidirectional wrapper around CustomLSTM.

    When bi_flag is truthy, the input is also processed in time-reversed
    order by a second CustomLSTM and both outputs are concatenated on the
    feature dimension.
    """
    def __init__(self, input_dim, hidden_dim, batch_size=32, num_layers=1, bi_flag=1):
        super(BiLSTM, self).__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.batch_size = batch_size
        self.num_layers = num_layers
        # bi_num is the number of directions (2 when bidirectional).
        if bi_flag:
            self.bi_num = 2
        else:
            self.bi_num = 1
        self.biFlag = bi_flag
        self.layer1 = nn.ModuleList()
        self.layer1.append(
            CustomLSTM(
                input_dim, hidden_dim, num_layers=num_layers, batch_size=batch_size
            )
        )
        if bi_flag:
            # add reverse layer.
            self.layer1.append(
                CustomLSTM(
                    input_dim, hidden_dim, num_layers=num_layers, batch_size=batch_size
                )
            )
    def init_hidden(self, batch_size):
        # Fresh zero (h, c) pair on cuda for one direction.
        return (
            flow.zeros((batch_size, self.hidden_dim)).to("cuda"),
            flow.zeros((batch_size, self.hidden_dim)).to("cuda"),
        )
    def forward(self, data):  # data: B*L*F B = batch_size, L is the fixed sequence length, F the feature size
        batch_size = data.shape[0]
        max_length = data.shape[1]
        hidden = [self.init_hidden(batch_size) for _ in range(self.bi_num)]
        # Forward direction sees the data as-is; backward sees time reversed (dim 1).
        reverse_inputs = reverse(data, dim=1)
        out = [data, reverse_inputs]
        for l in range(self.bi_num):
            out[l], hidden[l] = self.layer1[l](out[l], hidden[l])
            # reverse output
            # NOTE(review): CustomLSTM returns (sequence, batch, hidden), so the
            # backward output is un-reversed along dim 0 (the time axis) — confirm.
            if l == 1:
                out[l] = reverse(out[l], dim=0)
        if self.bi_num == 1:
            out = out[0]
        else:
            # Concatenate forward/backward features on the last dimension.
            out = flow.cat(out, 2)
        return out
| [
"oneflow.Tensor",
"oneflow.cat",
"oneflow.nn.ModuleList",
"oneflow.matmul",
"oneflow.sigmoid",
"oneflow.zeros",
"oneflow.nn.Embedding",
"oneflow.nn.Softmax",
"oneflow.tanh",
"oneflow.nn.Linear"
] | [((505, 544), 'oneflow.nn.Embedding', 'nn.Embedding', (['self.emb_sz', 'self.emb_dim'], {}), '(self.emb_sz, self.emb_dim)\n', (517, 544), True, 'import oneflow.nn as nn\n'), ((567, 610), 'oneflow.nn.Linear', 'nn.Linear', (['(hidden_size * 2 * nfc)', 'n_classes'], {}), '(hidden_size * 2 * nfc, n_classes)\n', (576, 610), True, 'import oneflow.nn as nn\n'), ((634, 651), 'oneflow.nn.Softmax', 'nn.Softmax', ([], {'dim': '(1)'}), '(dim=1)\n', (644, 651), True, 'import oneflow.nn as nn\n'), ((944, 969), 'oneflow.cat', 'flow.cat', (['datalist'], {'dim': '(0)'}), '(datalist, dim=0)\n', (952, 969), True, 'import oneflow as flow\n'), ((2992, 3019), 'oneflow.cat', 'flow.cat', (['hidden_seq'], {'dim': '(0)'}), '(hidden_seq, dim=0)\n', (3000, 3019), True, 'import oneflow as flow\n'), ((3499, 3514), 'oneflow.nn.ModuleList', 'nn.ModuleList', ([], {}), '()\n', (3512, 3514), True, 'import oneflow.nn as nn\n'), ((1328, 1345), 'oneflow.Tensor', 'flow.Tensor', (['temp'], {}), '(temp)\n', (1339, 1345), True, 'import oneflow as flow\n'), ((1614, 1650), 'oneflow.Tensor', 'flow.Tensor', (['input_sz', '(hidden_sz * 4)'], {}), '(input_sz, hidden_sz * 4)\n', (1625, 1650), True, 'import oneflow as flow\n'), ((1682, 1719), 'oneflow.Tensor', 'flow.Tensor', (['hidden_sz', '(hidden_sz * 4)'], {}), '(hidden_sz, hidden_sz * 4)\n', (1693, 1719), True, 'import oneflow as flow\n'), ((1754, 1780), 'oneflow.Tensor', 'flow.Tensor', (['(hidden_sz * 4)'], {}), '(hidden_sz * 4)\n', (1765, 1780), True, 'import oneflow as flow\n'), ((1860, 1887), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (1869, 1887), False, 'import math\n'), ((4704, 4720), 'oneflow.cat', 'flow.cat', (['out', '(2)'], {}), '(out, 2)\n', (4712, 4720), True, 'import oneflow as flow\n'), ((2644, 2671), 'oneflow.sigmoid', 'flow.sigmoid', (['gates[:, :HS]'], {}), '(gates[:, :HS])\n', (2656, 2671), True, 'import oneflow as flow\n'), ((2689, 2722), 'oneflow.sigmoid', 'flow.sigmoid', (['gates[:, HS:HS * 2]'], 
{}), '(gates[:, HS:HS * 2])\n', (2701, 2722), True, 'import oneflow as flow\n'), ((2742, 2776), 'oneflow.tanh', 'flow.tanh', (['gates[:, HS * 2:HS * 3]'], {}), '(gates[:, HS * 2:HS * 3])\n', (2751, 2776), True, 'import oneflow as flow\n'), ((2796, 2827), 'oneflow.sigmoid', 'flow.sigmoid', (['gates[:, HS * 3:]'], {}), '(gates[:, HS * 3:])\n', (2808, 2827), True, 'import oneflow as flow\n'), ((2908, 2922), 'oneflow.tanh', 'flow.tanh', (['c_t'], {}), '(c_t)\n', (2917, 2922), True, 'import oneflow as flow\n'), ((2529, 2553), 'oneflow.matmul', 'flow.matmul', (['x_t', 'self.W'], {}), '(x_t, self.W)\n', (2540, 2553), True, 'import oneflow as flow\n'), ((2556, 2580), 'oneflow.matmul', 'flow.matmul', (['h_t', 'self.U'], {}), '(h_t, self.U)\n', (2567, 2580), True, 'import oneflow as flow\n'), ((3977, 4018), 'oneflow.zeros', 'flow.zeros', (['(batch_size, self.hidden_dim)'], {}), '((batch_size, self.hidden_dim))\n', (3987, 4018), True, 'import oneflow as flow\n'), ((4043, 4084), 'oneflow.zeros', 'flow.zeros', (['(batch_size, self.hidden_dim)'], {}), '((batch_size, self.hidden_dim))\n', (4053, 4084), True, 'import oneflow as flow\n'), ((2213, 2247), 'oneflow.zeros', 'flow.zeros', (['(bs, self.hidden_size)'], {}), '((bs, self.hidden_size))\n', (2223, 2247), True, 'import oneflow as flow\n'), ((2276, 2310), 'oneflow.zeros', 'flow.zeros', (['(bs, self.hidden_size)'], {}), '((bs, self.hidden_size))\n', (2286, 2310), True, 'import oneflow as flow\n')] |
import h5py
import oneflow as flow
import shutil
import numpy as np
import os
def save_net(fname, net):
    """Serialize every parameter of ``net`` into an HDF5 file at ``fname``.

    Each entry of ``net.state_dict()`` becomes one dataset keyed by the
    parameter name; tensors are moved to CPU and converted to numpy first.
    """
    state = net.state_dict()
    with h5py.File(fname, "w") as archive:
        for name, tensor in state.items():
            archive.create_dataset(name, data=tensor.cpu().numpy())
def load_net(fname, net):
    """Restore the parameters of ``net`` in place from the HDF5 file ``fname``.

    For every key of ``net.state_dict()`` the dataset with the same name is
    read back, wrapped in a ``flow.Tensor`` and copied into the live tensor.
    """
    with h5py.File(fname, "r") as archive:
        for name, tensor in net.state_dict().items():
            loaded = flow.Tensor(np.asarray(archive[name]))
            tensor.copy_(loaded)
def save_checkpoint(state, is_best, task_id, filename="checkpoints/"):
    """Persist a training checkpoint and optionally promote it to the best model.

    The previous task's checkpoint (``task_id`` - 1) is removed first, then
    ``state["state_dict"]`` is saved under ``filename + task_id``.

    :param state: dict holding at least a "state_dict" entry to save
    :param is_best: when True, also copy this checkpoint to the best-model dir
    :param task_id: current task index, as a string
    :param filename: directory prefix for checkpoints (expected to end with "/")
    :return: None
    """
    # Drop the previous task's checkpoint to bound disk usage.
    del_file(filename + str(int(task_id) - 1))
    flow.save(state["state_dict"], filename + task_id)
    if is_best:
        # Fix: the best-model folder was hard-coded to "checkpoints/model_best",
        # silently ignoring a non-default ``filename``. Derive it from the
        # prefix instead (identical result for the default argument).
        file_path = filename + "model_best"
        del_file(file_path)
        shutil.copytree(filename + task_id, file_path)
def del_file(filepath):
    """Recursively delete ``filepath`` and everything inside it.

    The directory itself is removed as well; a non-existent path is a no-op.

    :param filepath: path of the directory to remove
    :return: None
    """
    # shutil.rmtree already removes nested files and sub-directories, so the
    # original manual per-entry os.remove/rmtree loop was redundant: the final
    # rmtree(filepath) deleted whatever the loop had not. One call suffices.
    if os.path.exists(filepath):
        shutil.rmtree(filepath)
| [
"oneflow.save"
] | [((564, 614), 'oneflow.save', 'flow.save', (["state['state_dict']", '(filename + task_id)'], {}), "(state['state_dict'], filename + task_id)\n", (573, 614), True, 'import oneflow as flow\n'), ((890, 914), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (904, 914), False, 'import os\n'), ((115, 136), 'h5py.File', 'h5py.File', (['fname', '"""w"""'], {}), "(fname, 'w')\n", (124, 136), False, 'import h5py\n'), ((284, 305), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (293, 305), False, 'import h5py\n'), ((712, 758), 'shutil.copytree', 'shutil.copytree', (['(filename + task_id)', 'file_path'], {}), '(filename + task_id, file_path)\n', (727, 758), False, 'import shutil\n'), ((935, 955), 'os.listdir', 'os.listdir', (['filepath'], {}), '(filepath)\n', (945, 955), False, 'import os\n'), ((1204, 1227), 'shutil.rmtree', 'shutil.rmtree', (['filepath'], {}), '(filepath)\n', (1217, 1227), False, 'import shutil\n'), ((1007, 1032), 'os.path.join', 'os.path.join', (['filepath', 'f'], {}), '(filepath, f)\n', (1019, 1032), False, 'import os\n'), ((1048, 1073), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1062, 1073), False, 'import os\n'), ((392, 410), 'numpy.asarray', 'np.asarray', (['h5f[k]'], {}), '(h5f[k])\n', (402, 410), True, 'import numpy as np\n'), ((1091, 1111), 'os.remove', 'os.remove', (['file_path'], {}), '(file_path)\n', (1100, 1111), False, 'import os\n'), ((1129, 1153), 'os.path.isdir', 'os.path.isdir', (['file_path'], {}), '(file_path)\n', (1142, 1153), False, 'import os\n'), ((1171, 1195), 'shutil.rmtree', 'shutil.rmtree', (['file_path'], {}), '(file_path)\n', (1184, 1195), False, 'import shutil\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.deconv1d,
r"""
conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose1d.html
Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called “deconvolution”.
See :class:`~oneflow.nn.ConvTranspose1d` for details and output shape.
Args:
input: input tensor of shape :math:`(\text{minibatch} , \text{in_channels} , iW)`
weight: filters of shape :math:`(\text{in_channels} , \frac{\text{out_channels}}{\text{groups}} , kW)`
bias: optional bias of shape :math:`(\text{out_channels})`. Default: None.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sW,)`. Default: 1
padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padW,)`. Default: 0
output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padW)`. Default: 0
groups: split input into groups, :math:`\text{in_channels}` should be divisible by the
number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dW,)`. Default: 1
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import oneflow.nn.functional as F
>>> inputs = flow.randn(20, 16, 50)
>>> weights = flow.randn(16, 33, 5)
>>> outputs = F.conv_transpose1d(inputs, weights)
""",
)
# Attach the conv_transpose2d reference docstring to the C-level deconv2d op.
# Fix: the reference link previously pointed at conv_transpose3d.html even
# though this docstring documents the 2D variant.
add_docstr(
    oneflow._C.deconv2d,
    r"""
    conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose2d.html
    Applies a 2D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.
    See :class:`~oneflow.nn.ConvTranspose2d` for details and output shape.
    Args:
        input: input tensor of shape :math:`(\text{minibatch} , \text{in_channels} , iH , iW)`
        weight: filters of shape :math:`(\text{in_channels} , \frac{\text{out_channels}}{\text{groups}} , kH , kW)`
        bias: optional bias of shape :math:`(\text{out_channels})`. Default: None.
        stride: the stride of the convolving kernel. Can be a single number or a
          tuple `(sH, sW)`. Default: 1
        padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padH, padW)`. Default: 0
        output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padH, out_padW)`. Default: 0
        groups: split input into groups, :math:`\text{in_channels}` should be divisible by the
          number of groups. Default: 1
        dilation: the spacing between kernel elements. Can be a single number or
          a tuple `(dH, dW)`. Default: 1
    For examples:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import oneflow.nn.functional as F
        >>> inputs = flow.randn(1, 4, 5, 5)
        >>> weights = flow.randn(4, 8, 3, 3)
        >>> outputs = F.conv_transpose2d(inputs, weights, padding=1)
    """,
)
add_docstr(
oneflow._C.deconv3d,
r"""
conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose3d.html
Applies a 3D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.
See :class:`~oneflow.nn.ConvTranspose3d` for details and output shape.
Args:
input: input tensor of shape
:math:`(\text{minibatch} , \text{in_channels} , iT , iH , iW)`
weight: filters of shape
:math:`(\text{in_channels} , \frac{\text{out_channels}}{\text{groups}} , kT , kH , kW)`
bias: optional bias of shape :math:`(\text{out_channels})`. Default: None.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sD, sH, sW)`. Default: 1
padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padT, padH, padW)`. Default: 0
output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padT, out_padH, out_padW)`. Default: 0
groups: split input into groups, :math:`\text{in_channels}` should be
divisible by the number of groups. Default: 1
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dT, dH, dW)`. Default: 1
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import oneflow.nn.functional as F
>>> inputs = flow.randn(20, 16, 50, 10, 20)
>>> weights = flow.randn(16, 33, 3, 3, 3)
>>> outputs = F.conv_transpose3d(inputs, weights)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2482), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.deconv1d', '"""\n conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose1d.html\n\n Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose1d` for details and output shape.\n\n Args:\n input: input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iW)`\n weight: filters of shape :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sW,)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padW,)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dW,)`. Default: 1\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(20, 16, 50)\n >>> weights = flow.randn(16, 33, 5)\n >>> outputs = F.conv_transpose1d(inputs, weights)\n """'], {}), '(oneflow._C.deconv1d,\n """\n conv_transpose1d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose1d.html\n\n Applies a 1D transposed convolution operator over an input signal composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose1d` for details and output shape.\n\n Args:\n input: input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iW)`\n weight: filters of shape :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sW,)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padW,)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dW,)`. Default: 1\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(20, 16, 50)\n >>> weights = flow.randn(16, 33, 5)\n >>> outputs = F.conv_transpose1d(inputs, weights)\n """\n )\n', (670, 2482), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2478, 4346), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.deconv2d', '"""\n conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose3d.html\n\n Applies a 2D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose2d` for details and output shape.\n\n Args:\n input: input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iH , iW)`\n weight: filters of shape :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kH , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sH, sW)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padH, padW)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padH, out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dH, dW)`. Default: 1\n \n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(1, 4, 5, 5)\n >>> weights = flow.randn(4, 8, 3, 3)\n >>> outputs = F.conv_transpose2d(inputs, weights, padding=1)\n """'], {}), '(oneflow._C.deconv2d,\n """\n conv_transpose2d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose3d.html\n\n Applies a 2D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose2d` for details and output shape.\n\n Args:\n input: input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iH , iW)`\n weight: filters of shape :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kH , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sH, sW)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padH, padW)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padH, out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dH, dW)`. Default: 1\n \n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(1, 4, 5, 5)\n >>> weights = flow.randn(4, 8, 3, 3)\n >>> outputs = F.conv_transpose2d(inputs, weights, padding=1)\n """\n )\n', (2488, 4346), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4342, 6266), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.deconv3d', '"""\n conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose3d.html\n\n Applies a 3D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose3d` for details and output shape.\n\n Args:\n input: input tensor of shape\n :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iT , iH , iW)`\n weight: filters of shape\n :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kT , kH , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sD, sH, sW)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padT, padH, padW)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padT, out_padH, out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be\n divisible by the number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dT, dH, dW)`. Default: 1\n \n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(20, 16, 50, 10, 20)\n >>> weights = flow.randn(16, 33, 3, 3, 3)\n >>> outputs = F.conv_transpose3d(inputs, weights)\n """'], {}), '(oneflow._C.deconv3d,\n """\n conv_transpose3d(input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv_transpose3d.html\n\n Applies a 3D transposed convolution operator over an input image composed of several input planes, sometimes also called “deconvolution”.\n\n See :class:`~oneflow.nn.ConvTranspose3d` for details and output shape.\n\n Args:\n input: input tensor of shape\n :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iT , iH , iW)`\n weight: filters of shape\n :math:`(\\\\text{in_channels} , \\\\frac{\\\\text{out_channels}}{\\\\text{groups}} , kT , kH , kW)`\n bias: optional bias of shape :math:`(\\\\text{out_channels})`. Default: None.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sD, sH, sW)`. Default: 1\n padding: `dilation * (kernel_size - 1) - padding` zero-padding will be added to both sides of each dimension in the input. Can be a single number or a tuple `(padT, padH, padW)`. Default: 0\n output_padding: additional size added to one side of each dimension in the output shape. Can be a single number or a tuple `(out_padT, out_padH, out_padW)`. Default: 0\n groups: split input into groups, :math:`\\\\text{in_channels}` should be\n divisible by the number of groups. Default: 1\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dT, dH, dW)`. Default: 1\n \n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import oneflow.nn.functional as F\n \n >>> inputs = flow.randn(20, 16, 50, 10, 20)\n >>> weights = flow.randn(16, 33, 3, 3, 3)\n >>> outputs = F.conv_transpose3d(inputs, weights)\n """\n )\n', (4352, 6266), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
class TestGenerator(flow.unittest.TestCase):
    """Behavioral checks for explicitly constructed flow.Generator objects."""

    def test_different_devices(test_case):
        # Generators created on different devices share the default seed.
        gen_auto = flow.Generator(device="auto")
        gen_cpu = flow.Generator(device="cpu")
        test_case.assertTrue(gen_cpu.initial_seed() == gen_auto.initial_seed())
        # An unknown device string must be rejected.
        with test_case.assertRaises(
            oneflow._oneflow_internal.exception.RuntimeException
        ) as context:
            flow.Generator(device="invalid")
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            gen_cuda = flow.Generator(device="cuda")
            test_case.assertTrue(gen_cuda.initial_seed() == gen_auto.initial_seed())

    def test_generator_manual_seed(test_case):
        # manual_seed(s) must be reflected by initial_seed() for each value.
        gen = flow.Generator()
        for seed in (1, 2):
            gen.manual_seed(seed)
            test_case.assertTrue(gen.initial_seed() == seed)

    def test_generator_in_dropout(test_case):
        # Dropout must accept an explicitly supplied generator on CPU and CUDA.
        source = flow.ones(2000000)
        flow._C.dropout(source, 0.1, True, flow.Generator(), addend=None).numpy()
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            flow._C.dropout(
                source.cuda(), 0.1, True, flow.Generator(), addend=None
            ).numpy()
class TestDefaultGenerator(flow.unittest.TestCase):
    """Exercise flow.default_generator and the global RNG-state helpers."""

    def test_different_devices(test_case):
        # The "auto" generator coexists with the default (CPU) generator.
        auto_gen = flow.Generator(device="auto")
        cpu_gen = flow.default_generator
        # An unknown device string is rejected with a RuntimeException.
        with test_case.assertRaises(
            oneflow._oneflow_internal.exception.RuntimeException
        ) as context:
            flow.Generator(device="invalid")
        # NOTE(review): "cpu:1000" is created without assertRaises, so a large
        # CPU device index is apparently accepted — confirm this is intended.
        flow.Generator(device="cpu:1000")
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            # An out-of-range CUDA ordinal must fail the device check.
            with test_case.assertRaises(
                oneflow._oneflow_internal.exception.CheckFailedException
            ) as context:
                flow.Generator(device="cuda:1000")
            cuda_gen = flow.Generator(device="cuda")
            cuda0_gen = flow.Generator(device="cuda:0")

    def test_generator_manual_seed(test_case):
        # manual_seed(s) must make initial_seed() report s on every generator.
        cpu_gen = flow.default_generator
        auto_gen = flow.Generator(device="auto")
        test_gens = [cpu_gen, auto_gen]
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            cuda_gen = flow.Generator(device="cuda")
            cuda0_gen = flow.Generator(device="cuda:0")
            test_gens += [cuda_gen, cuda0_gen]
        for seed in [1, 2]:
            for gen in test_gens:
                gen.manual_seed(seed)
                test_case.assertTrue(gen.initial_seed() == seed)

    def test_generator_seed(test_case):
        # seed() draws a fresh seed and must also update initial_seed().
        cpu_gen = flow.default_generator
        auto_gen = flow.Generator(device="auto")
        test_gens = [auto_gen, cpu_gen]
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            cuda_gen = flow.Generator(device="cuda")
            cuda0_gen = flow.Generator(device="cuda:0")
            test_gens += [cuda_gen, cuda0_gen]
        for gen in test_gens:
            seed = gen.seed()
            test_case.assertTrue(seed == gen.initial_seed())

    def test_generator_getstate(test_case):
        # get_state() must be callable on generators of every device kind.
        auto_gen = flow.Generator(device="auto")
        state = auto_gen.get_state()
        cpu_gen = flow.Generator(device="cpu")
        state = cpu_gen.get_state()
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            cuda_gen = flow.Generator(device="cuda")
            state = cuda_gen.get_state()

    def test_generator_setstate(test_case):
        # Drawing numbers advances the state; set_state() must restore it.
        cpu_gen = flow.default_generator
        flow.randn(100, 100, dtype=flow.float32, device="cpu", generator=cpu_gen)
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            cuda_gen = flow.Generator("cuda")
            flow.randn(100, 100, dtype=flow.float32, device="cuda", generator=cuda_gen)
        # Snapshot the state, then advance the generators once more.
        state = cpu_gen.get_state()
        flow.randn(100, 100, dtype=flow.float32, device="cpu", generator=cpu_gen)
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            cuda_state = cuda_gen.get_state()
            flow.randn(100, 100, dtype=flow.float32, device="cuda", generator=cuda_gen)
        # The advanced state must differ from the snapshot ...
        new_state = cpu_gen.get_state()
        test_case.assertTrue(not np.allclose(new_state.numpy(), state.numpy()))
        # ... and restoring the snapshot must reproduce it exactly.
        cpu_gen.set_state(state)
        new_state = cpu_gen.get_state()
        test_case.assertTrue(np.allclose(new_state.numpy(), state.numpy()))
        if not os.getenv("ONEFLOW_TEST_CPU_ONLY"):
            new_cuda_state = cuda_gen.get_state()
            test_case.assertTrue(
                not np.allclose(new_cuda_state.numpy(), cuda_state.numpy())
            )
            cuda_gen.set_state(cuda_state)
            new_cuda_state = cuda_gen.get_state()
            test_case.assertTrue(
                np.allclose(new_cuda_state.numpy(), cuda_state.numpy())
            )

    def test_get_rng_state(test_case):
        # flow.get_rng_state() must mirror the default generator's state.
        cpu_gen = flow.default_generator
        state = cpu_gen.get_state()
        rng_state = flow.get_rng_state()
        test_case.assertTrue(np.allclose(state.numpy(), rng_state.numpy()))
        # Still equal after the default generator has been advanced.
        flow.randn(100, 100, dtype=flow.float32, device="cpu", generator=cpu_gen)
        state = cpu_gen.get_state()
        rng_state = flow.get_rng_state()
        test_case.assertTrue(np.allclose(state.numpy(), rng_state.numpy()))

    def test_set_rng_state(test_case):
        # flow.set_rng_state() must restore a previously captured global state.
        flow.randn(100, 100)
        state = flow.get_rng_state()
        flow.randn(100, 100)
        new_state = flow.get_rng_state()
        test_case.assertTrue(not np.allclose(new_state.numpy(), state.numpy()))
        flow.set_rng_state(state)
        new_state = flow.get_rng_state()
        test_case.assertTrue(np.allclose(new_state.numpy(), state.numpy()))
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.set_rng_state",
"oneflow.ones",
"oneflow.Generator",
"oneflow.randn",
"oneflow.get_rng_state"
] | [((6398, 6413), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6411, 6413), False, 'import unittest\n'), ((793, 822), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""auto"""'}), "(device='auto')\n", (807, 822), True, 'import oneflow as flow\n'), ((841, 869), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cpu"""'}), "(device='cpu')\n", (855, 869), True, 'import oneflow as flow\n'), ((1376, 1392), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1390, 1392), True, 'import oneflow as flow\n'), ((1640, 1658), 'oneflow.ones', 'flow.ones', (['(2000000)'], {}), '(2000000)\n', (1649, 1658), True, 'import oneflow as flow\n'), ((2077, 2106), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""auto"""'}), "(device='auto')\n", (2091, 2106), True, 'import oneflow as flow\n'), ((2326, 2359), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cpu:1000"""'}), "(device='cpu:1000')\n", (2340, 2359), True, 'import oneflow as flow\n'), ((2819, 2848), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""auto"""'}), "(device='auto')\n", (2833, 2848), True, 'import oneflow as flow\n'), ((3362, 3391), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""auto"""'}), "(device='auto')\n", (3376, 3391), True, 'import oneflow as flow\n'), ((3824, 3853), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""auto"""'}), "(device='auto')\n", (3838, 3853), True, 'import oneflow as flow\n'), ((3909, 3937), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cpu"""'}), "(device='cpu')\n", (3923, 3937), True, 'import oneflow as flow\n'), ((4213, 4286), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {'dtype': 'flow.float32', 'device': '"""cpu"""', 'generator': 'cpu_gen'}), "(100, 100, dtype=flow.float32, device='cpu', generator=cpu_gen)\n", (4223, 4286), True, 'import oneflow as flow\n'), ((4516, 4589), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {'dtype': 'flow.float32', 'device': '"""cpu"""', 'generator': 
'cpu_gen'}), "(100, 100, dtype=flow.float32, device='cpu', generator=cpu_gen)\n", (4526, 4589), True, 'import oneflow as flow\n'), ((5623, 5643), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (5641, 5643), True, 'import oneflow as flow\n'), ((5729, 5802), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {'dtype': 'flow.float32', 'device': '"""cpu"""', 'generator': 'cpu_gen'}), "(100, 100, dtype=flow.float32, device='cpu', generator=cpu_gen)\n", (5739, 5802), True, 'import oneflow as flow\n'), ((5859, 5879), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (5877, 5879), True, 'import oneflow as flow\n'), ((6004, 6024), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {}), '(100, 100)\n', (6014, 6024), True, 'import oneflow as flow\n'), ((6041, 6061), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (6059, 6061), True, 'import oneflow as flow\n'), ((6070, 6090), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {}), '(100, 100)\n', (6080, 6090), True, 'import oneflow as flow\n'), ((6112, 6132), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (6130, 6132), True, 'import oneflow as flow\n'), ((6222, 6247), 'oneflow.set_rng_state', 'flow.set_rng_state', (['state'], {}), '(state)\n', (6240, 6247), True, 'import oneflow as flow\n'), ((6268, 6288), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (6286, 6288), True, 'import oneflow as flow\n'), ((1086, 1118), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""invalid"""'}), "(device='invalid')\n", (1100, 1118), True, 'import oneflow as flow\n'), ((1134, 1168), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1143, 1168), False, 'import os\n'), ((1193, 1222), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (1207, 1222), True, 'import oneflow as flow\n'), ((1708, 1724), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1722, 
1724), True, 'import oneflow as flow\n'), ((1777, 1811), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1786, 1811), False, 'import os\n'), ((2284, 2316), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""invalid"""'}), "(device='invalid')\n", (2298, 2316), True, 'import oneflow as flow\n'), ((2375, 2409), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2384, 2409), False, 'import os\n'), ((2625, 2654), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (2639, 2654), True, 'import oneflow as flow\n'), ((2679, 2710), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda:0"""'}), "(device='cuda:0')\n", (2693, 2710), True, 'import oneflow as flow\n'), ((2904, 2938), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2913, 2938), False, 'import os\n'), ((2963, 2992), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (2977, 2992), True, 'import oneflow as flow\n'), ((3017, 3048), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda:0"""'}), "(device='cuda:0')\n", (3031, 3048), True, 'import oneflow as flow\n'), ((3447, 3481), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3456, 3481), False, 'import os\n'), ((3506, 3535), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", (3520, 3535), True, 'import oneflow as flow\n'), ((3560, 3591), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda:0"""'}), "(device='cuda:0')\n", (3574, 3591), True, 'import oneflow as flow\n'), ((3989, 4023), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3998, 4023), False, 'import os\n'), ((4048, 4077), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda"""'}), "(device='cuda')\n", 
(4062, 4077), True, 'import oneflow as flow\n'), ((4302, 4336), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4311, 4336), False, 'import os\n'), ((4361, 4383), 'oneflow.Generator', 'flow.Generator', (['"""cuda"""'], {}), "('cuda')\n", (4375, 4383), True, 'import oneflow as flow\n'), ((4396, 4471), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {'dtype': 'flow.float32', 'device': '"""cuda"""', 'generator': 'cuda_gen'}), "(100, 100, dtype=flow.float32, device='cuda', generator=cuda_gen)\n", (4406, 4471), True, 'import oneflow as flow\n'), ((4605, 4639), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4614, 4639), False, 'import os\n'), ((4699, 4774), 'oneflow.randn', 'flow.randn', (['(100)', '(100)'], {'dtype': 'flow.float32', 'device': '"""cuda"""', 'generator': 'cuda_gen'}), "(100, 100, dtype=flow.float32, device='cuda', generator=cuda_gen)\n", (4709, 4774), True, 'import oneflow as flow\n'), ((5062, 5096), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (5071, 5096), False, 'import os\n'), ((1890, 1906), 'oneflow.Generator', 'flow.Generator', ([], {}), '()\n', (1904, 1906), True, 'import oneflow as flow\n'), ((2567, 2601), 'oneflow.Generator', 'flow.Generator', ([], {'device': '"""cuda:1000"""'}), "(device='cuda:1000')\n", (2581, 2601), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from util import convert_to_onnx_and_check
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
def test_gather_nd(test_case):
@flow.global_function(func_config)
def gather_nd():
x = flow.get_variable(
name="x",
shape=(2, 3, 4),
dtype=flow.float,
initializer=flow.random_uniform_initializer(),
)
y = flow.get_variable(
name="y",
shape=(2, 3),
dtype=flow.int64,
initializer=flow.random_uniform_initializer(0, 1, flow.int64),
)
return flow.gather_nd(x, y)
convert_to_onnx_and_check(gather_nd, opset=11)
| [
"oneflow.global_function",
"oneflow.FunctionConfig",
"oneflow.random_uniform_initializer",
"oneflow.gather_nd"
] | [((671, 692), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (690, 692), True, 'import oneflow as flow\n'), ((773, 806), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (793, 806), True, 'import oneflow as flow\n'), ((1244, 1290), 'util.convert_to_onnx_and_check', 'convert_to_onnx_and_check', (['gather_nd'], {'opset': '(11)'}), '(gather_nd, opset=11)\n', (1269, 1290), False, 'from util import convert_to_onnx_and_check\n'), ((1218, 1238), 'oneflow.gather_nd', 'flow.gather_nd', (['x', 'y'], {}), '(x, y)\n', (1232, 1238), True, 'import oneflow as flow\n'), ((964, 997), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (995, 997), True, 'import oneflow as flow\n'), ((1142, 1191), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', (['(0)', '(1)', 'flow.int64'], {}), '(0, 1, flow.int64)\n', (1173, 1191), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import test_global_storage
from test_util import GenArgList
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.consistent_view())
def _check(test_case, data, segment_ids, out_shape, out):
test_case.assertEqual(out.shape, out_shape)
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(segment_ids):
out_idx = list(idx)
out_idx[-1] = i
out_idx = tuple(out_idx)
ref[out_idx] += data[idx]
test_case.assertTrue(np.allclose(ref, out, atol=1e-05, rtol=1e-05))
def _check_bw(test_case, params, indices, out_shape, out):
ref = np.zeros_like(out)
for (idx, i) in np.ndenumerate(indices):
in_idx = list(idx)
in_idx[-1] = i
in_idx = tuple(in_idx)
ref[idx] += params[in_idx]
test_case.assertTrue(np.array_equal(ref, out))
def _gen_segment_ids(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
return np.random.randint(
low=0, high=out_shape[axis], size=segment_ids_shape, dtype=np.int32
)
def _gen_data(out_shape, num_segments, segment_ids_shape):
axis = len(segment_ids_shape) - 1
data_shape = out_shape[0:axis] + (segment_ids_shape[axis],) + out_shape[axis + 1 :]
return np.random.rand(*data_shape).astype(np.float32)
def _make_unsoted_segment_sum_fn(device, data, segment_ids, num_segments):
flow.clear_default_session()
@flow.global_function(type="train", function_config=func_config)
def unsorted_batch_segment_sum_job(
data: oft.Numpy.Placeholder(data.shape, dtype=flow.float),
segment_ids: oft.Numpy.Placeholder(segment_ids.shape, dtype=flow.int32),
):
with flow.scope.placement(device, "0:0"):
x = flow.get_variable(
"data",
shape=data.shape,
dtype=flow.float32,
initializer=flow.constant_initializer(0),
)
data = x + data
res = flow.math.unsorted_batch_segment_sum(
data=data, segment_ids=segment_ids, num_segments=num_segments
)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
).minimize(res)
flow.watch_diff(x, test_global_storage.Setter("x_diff"))
flow.watch_diff(res, test_global_storage.Setter("loss_diff"))
return res
return unsorted_batch_segment_sum_job(data, segment_ids)
def _run_test(test_case, device, out_shape, num_segments, segment_ids_shape):
segment_ids = _gen_segment_ids(out_shape, num_segments, segment_ids_shape)
data = _gen_data(out_shape, num_segments, segment_ids_shape)
unsorted_batch_segment_sum_out = _make_unsoted_segment_sum_fn(
device, data, segment_ids, num_segments
).get()
out_ndarray = unsorted_batch_segment_sum_out.numpy()
grad_in_ndarray = test_global_storage.Get("x_diff")
grad_out_ndarray = test_global_storage.Get("loss_diff")
_check(test_case, data, segment_ids, out_shape, out_ndarray)
_check_bw(
test_case, grad_out_ndarray, segment_ids, grad_in_ndarray.shape, grad_in_ndarray
)
@flow.unittest.skip_unless_1n1d()
class TestUnsortedBatchSegmentSum(flow.unittest.TestCase):
def test_unsorted_batch_segment_sum(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu", "gpu"]
arg_dict["out_shape"] = [(2, 4, 7, 6)]
arg_dict["num_segments"] = [7]
arg_dict["segment_ids_shape"] = [(2, 4, 5)]
for arg in GenArgList(arg_dict):
_run_test(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.math.unsorted_batch_segment_sum",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single... | [((900, 921), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (919, 921), True, 'from oneflow.compatible import single_client as flow\n'), ((4044, 4076), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4074, 4076), True, 'from oneflow.compatible import single_client as flow\n'), ((997, 1025), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1023, 1025), True, 'from oneflow.compatible import single_client as flow\n'), ((1145, 1163), 'numpy.zeros_like', 'np.zeros_like', (['out'], {}), '(out)\n', (1158, 1163), True, 'import numpy as np\n'), ((1184, 1211), 'numpy.ndenumerate', 'np.ndenumerate', (['segment_ids'], {}), '(segment_ids)\n', (1198, 1211), True, 'import numpy as np\n'), ((1475, 1493), 'numpy.zeros_like', 'np.zeros_like', (['out'], {}), '(out)\n', (1488, 1493), True, 'import numpy as np\n'), ((1514, 1537), 'numpy.ndenumerate', 'np.ndenumerate', (['indices'], {}), '(indices)\n', (1528, 1537), True, 'import numpy as np\n'), ((1823, 1913), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': 'out_shape[axis]', 'size': 'segment_ids_shape', 'dtype': 'np.int32'}), '(low=0, high=out_shape[axis], size=segment_ids_shape,\n dtype=np.int32)\n', (1840, 1913), True, 'import numpy as np\n'), ((2250, 2278), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2276, 2278), True, 'from oneflow.compatible import single_client as flow\n'), ((2285, 2348), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2305, 2348), True, 'from oneflow.compatible import single_client as flow\n'), ((3772, 3805), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), 
"('x_diff')\n", (3795, 3805), False, 'import test_global_storage\n'), ((3829, 3865), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3852, 3865), False, 'import test_global_storage\n'), ((4521, 4536), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4534, 4536), False, 'import unittest\n'), ((1357, 1402), 'numpy.allclose', 'np.allclose', (['ref', 'out'], {'atol': '(1e-05)', 'rtol': '(1e-05)'}), '(ref, out, atol=1e-05, rtol=1e-05)\n', (1368, 1402), True, 'import numpy as np\n'), ((1680, 1704), 'numpy.array_equal', 'np.array_equal', (['ref', 'out'], {}), '(ref, out)\n', (1694, 1704), True, 'import numpy as np\n'), ((4207, 4220), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4218, 4220), False, 'from collections import OrderedDict\n'), ((4427, 4447), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4437, 4447), False, 'from test_util import GenArgList\n'), ((2122, 2149), 'numpy.random.rand', 'np.random.rand', (['*data_shape'], {}), '(*data_shape)\n', (2136, 2149), True, 'import numpy as np\n'), ((2403, 2454), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['data.shape'], {'dtype': 'flow.float'}), '(data.shape, dtype=flow.float)\n', (2424, 2454), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2477, 2535), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['segment_ids.shape'], {'dtype': 'flow.int32'}), '(segment_ids.shape, dtype=flow.int32)\n', (2498, 2535), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2557, 2592), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (2577, 2592), True, 'from oneflow.compatible import single_client as flow\n'), ((2841, 2944), 'oneflow.compatible.single_client.math.unsorted_batch_segment_sum', 'flow.math.unsorted_batch_segment_sum', ([], 
{'data': 'data', 'segment_ids': 'segment_ids', 'num_segments': 'num_segments'}), '(data=data, segment_ids=segment_ids,\n num_segments=num_segments)\n', (2877, 2944), True, 'from oneflow.compatible import single_client as flow\n'), ((3145, 3181), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (3171, 3181), False, 'import test_global_storage\n'), ((3216, 3255), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3242, 3255), False, 'import test_global_storage\n'), ((2751, 2779), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (2776, 2779), True, 'from oneflow.compatible import single_client as flow\n'), ((3019, 3073), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (3060, 3073), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# RUN: python3 %s | FileCheck %s
from typing import Tuple
import unittest
import numpy as np
from numpy.core.fromnumeric import shape
import oneflow.compatible.single_client as flow
import oneflow.compatible.single_client.typing as oft
import oneflow.framework.dtype as dtype_util
from test_util import GenArgDict
from collections import OrderedDict
def _get_regularizer(model_name):
# all decay
return flow.regularizers.l2(0.00004)
def _batch_norm(inputs, last=False):
initializer = flow.zeros_initializer() if last else flow.ones_initializer()
axis = 1
weight_regularizer = flow.regularizers.l2(0.5)
trainable = True
training = True
data_format = "NHWC"
if data_format == "NHWC":
axis = 3
return flow.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=0.9, # 97,
epsilon=1e-5,
center=True,
scale=True,
trainable=trainable,
training=training,
gamma_initializer=initializer,
moving_variance_initializer=initializer,
gamma_regularizer=weight_regularizer,
beta_regularizer=weight_regularizer,
)
@flow.unittest.skip_unless_1n1d()
class TestMLIROptimizations(flow.unittest.TestCase):
@unittest.skip("")
def test_cpu(self):
d = OrderedDict(
{"shape": [(2, 96, 96, 3)], "in_type": [flow.float32], "device": ["cpu"],}
)
for arg in GenArgDict(d):
self.run_job(**arg)
def test_gpu(self):
d = OrderedDict(
{"shape": [(2, 96, 96, 3)], "in_type": [flow.float32], "device": ["gpu"],}
)
for arg in GenArgDict(d):
self.run_job(**arg)
def run_job(test_case, device=None, in_type=None, shape=None):
assert shape is not None
flow.clear_default_session()
func_config = flow.FunctionConfig()
@flow.global_function(type="train", function_config=func_config)
def FuseBnAddReluJob(
x: oft.Numpy.Placeholder(shape, dtype=in_type)
) -> oft.Numpy:
addend = flow.constant_like(x, 2)
with flow.scope.placement(device, "0:0-0"):
x = (
flow.get_variable(
"x1",
shape=shape,
dtype=in_type,
initializer=flow.random_uniform_initializer(
minval=-10, maxval=10
),
trainable=True,
)
+ x
)
loss = flow.nn.relu(_batch_norm(x, last=False) + addend) + 1
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
).minimize(loss)
return loss
np_in_type = dtype_util.convert_oneflow_dtype_to_numpy_dtype(in_type)
x = (np.random.rand(*shape) * 10).astype(np_in_type)
FuseBnAddReluJob(x)
# CHECK: %y, %reserve_space, %mean, %inv_variance = "oneflow.normalization_add_relu"
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.random_uniform_initializer",
"oneflow.compatible.single_client.... | [((1751, 1783), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1781, 1783), True, 'import oneflow.compatible.single_client as flow\n'), ((1003, 1030), 'oneflow.compatible.single_client.regularizers.l2', 'flow.regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (1023, 1030), True, 'import oneflow.compatible.single_client as flow\n'), ((1190, 1215), 'oneflow.compatible.single_client.regularizers.l2', 'flow.regularizers.l2', (['(0.5)'], {}), '(0.5)\n', (1210, 1215), True, 'import oneflow.compatible.single_client as flow\n'), ((1340, 1657), 'oneflow.compatible.single_client.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'trainable', 'training': 'training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'weight_regularizer', 'beta_regularizer': 'weight_regularizer'}), '(inputs=inputs, axis=axis, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=trainable, training=\n training, gamma_initializer=initializer, moving_variance_initializer=\n initializer, gamma_regularizer=weight_regularizer, beta_regularizer=\n weight_regularizer)\n', (1371, 1657), True, 'import oneflow.compatible.single_client as flow\n'), ((1842, 1859), 'unittest.skip', 'unittest.skip', (['""""""'], {}), "('')\n", (1855, 1859), False, 'import unittest\n'), ((3722, 3737), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3735, 3737), False, 'import unittest\n'), ((1090, 1114), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1112, 1114), True, 'import oneflow.compatible.single_client as flow\n'), ((1128, 1151), 'oneflow.compatible.single_client.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (1149, 1151), True, 
'import oneflow.compatible.single_client as flow\n'), ((1896, 1986), 'collections.OrderedDict', 'OrderedDict', (["{'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device': ['cpu']}"], {}), "({'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device':\n ['cpu']})\n", (1907, 1986), False, 'from collections import OrderedDict\n'), ((2025, 2038), 'test_util.GenArgDict', 'GenArgDict', (['d'], {}), '(d)\n', (2035, 2038), False, 'from test_util import GenArgDict\n'), ((2109, 2199), 'collections.OrderedDict', 'OrderedDict', (["{'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device': ['gpu']}"], {}), "({'shape': [(2, 96, 96, 3)], 'in_type': [flow.float32], 'device':\n ['gpu']})\n", (2120, 2199), False, 'from collections import OrderedDict\n'), ((2238, 2251), 'test_util.GenArgDict', 'GenArgDict', (['d'], {}), '(d)\n', (2248, 2251), False, 'from test_util import GenArgDict\n'), ((2394, 2422), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2420, 2422), True, 'import oneflow.compatible.single_client as flow\n'), ((2445, 2466), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2464, 2466), True, 'import oneflow.compatible.single_client as flow\n'), ((2477, 2540), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2497, 2540), True, 'import oneflow.compatible.single_client as flow\n'), ((3457, 3513), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['in_type'], {}), '(in_type)\n', (3504, 3513), True, 'import oneflow.framework.dtype as dtype_util\n'), ((2675, 2699), 'oneflow.compatible.single_client.constant_like', 'flow.constant_like', (['x', '(2)'], {}), '(x, 2)\n', (2693, 2699), True, 'import oneflow.compatible.single_client as flow\n'), ((2586, 2629), 
'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['shape'], {'dtype': 'in_type'}), '(shape, dtype=in_type)\n', (2607, 2629), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((2717, 2754), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device', '"""0:0-0"""'], {}), "(device, '0:0-0')\n", (2737, 2754), True, 'import oneflow.compatible.single_client as flow\n'), ((3527, 3549), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (3541, 3549), True, 'import numpy as np\n'), ((2959, 3013), 'oneflow.compatible.single_client.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (2990, 3013), True, 'import oneflow.compatible.single_client as flow\n'), ((3306, 3361), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (3347, 3361), True, 'import oneflow.compatible.single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Optional, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
def arange_op(
start: int = 0,
end: int = None,
step: int = 1,
dtype: flow.dtype = flow.int64,
device: Union[str, flow.device] = None,
placement: flow.placement = None,
sbp: Union[flow.sbp.sbp, List[flow.sbp.sbp]] = None,
requires_grad: bool = False,
):
"""
Returns a 1-D tensor of size :math:`\\left\\lfloor \\frac{\\text{end} - \\text{start}}{\\text{step}} \\right\\rfloor + 1`
with values from :attr:`start` to :attr:`end` with step :attr:`step`. Step is
the gap between two values in the tensor.
.. math::
\\text{out}_{i+1} = \\text{out}_i + \\text{step}.
Args:
start (int): the starting value for the set of points. Default: ``0``.
end (int): the ending value for the set of points
step (int): the gap between each pair of adjacent points. Default: ``1``.
Keyword args:
dtype(flow.dtype, optional): If `dtype` is not given, the `dtype` is inferred to be `flow.int64`.
device(flow.device, optional): the desired device of returned tensor. Default: if None, uses the current device for the default tensor.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: `False`.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> y = flow.arange(0, 5)
>>> y
tensor([0, 1, 2, 3, 4], dtype=oneflow.int64)
"""
if end is None:
end = start
start = 0
if placement is None:
if isinstance(device, str):
device = flow.device(device)
res = flow.F.arange(start, end, step, dtype, device)
else:
assert isinstance(
placement, flow._oneflow_internal.placement
), "placement should be oneflow._oneflow_internal.placement type."
assert isinstance(sbp, (flow.sbp.sbp, tuple, list)), "sbp: %s" % sbp
if isinstance(sbp, flow.sbp.sbp):
sbp = (sbp,)
else:
for elem in sbp:
assert isinstance(elem, flow.sbp.sbp), "sbp: %s" % sbp
assert len(sbp) == len(placement.hierarchy)
res = flow.F.consistent_arange(start, end, step, dtype, placement, sbp)
res.requires_grad = requires_grad
return res
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.F.arange",
"oneflow.F.consistent_arange",
"oneflow.device"
] | [((3076, 3112), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (3091, 3112), False, 'import doctest\n'), ((2364, 2410), 'oneflow.F.arange', 'flow.F.arange', (['start', 'end', 'step', 'dtype', 'device'], {}), '(start, end, step, dtype, device)\n', (2377, 2410), True, 'import oneflow as flow\n'), ((2903, 2968), 'oneflow.F.consistent_arange', 'flow.F.consistent_arange', (['start', 'end', 'step', 'dtype', 'placement', 'sbp'], {}), '(start, end, step, dtype, placement, sbp)\n', (2927, 2968), True, 'import oneflow as flow\n'), ((2330, 2349), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2341, 2349), True, 'import oneflow as flow\n')] |
import math
import oneflow
import oneflow as flow
import oneflow.nn as nn
import numpy as np
class SparseDispatcher(object):
"""Helper for implementing a mixture of experts.
The purpose of this class is to create input minibatches for the
experts and to combine the results of the experts to form a unified
output tensor.
There are two functions:
dispatch - take an input Tensor and create input Tensors for each expert.
combine - take output Tensors from each expert and form a combined output
Tensor. Outputs from different experts for the same batch element are
summed together, weighted by the provided "gates".
The class is initialized with a "gates" Tensor, which specifies which
batch elements go to which experts, and the weights to use when combining
the outputs. Batch element b is sent to expert e iff gates[b, e] != 0.
The inputs and outputs are all two-dimensional [batch, depth].
Caller is responsible for collapsing additional dimensions prior to
calling this class and reshaping the output to the original shape.
See common_layers.reshape_like().
Example use:
gates: a float32 `Tensor` with shape `[batch_size, num_experts]`
inputs: a float32 `Tensor` with shape `[batch_size, input_size]`
experts: a list of length `num_experts` containing sub-networks.
dispatcher = SparseDispatcher(num_experts, gates)
expert_inputs = dispatcher.dispatch(inputs)
expert_outputs = [experts[i](expert_inputs[i]) for i in range(num_experts)]
outputs = dispatcher.combine(expert_outputs)
The preceding code sets the output for a particular example b to:
output[b] = Sum_i(gates[b, i] * experts[i](inputs[b]))
This class takes advantage of sparsity in the gate matrix by including in the
`Tensor`s for expert i only the batch elements for which `gates[b, i] > 0`.
"""
def __init__(self, num_experts, gates):
"""Create a SparseDispatcher."""
self._gates = gates
self._num_experts = num_experts
# sort experts
sorted_experts, index_sorted_experts = flow.nonzero(gates).sort(0)
# drop indices
_, self._expert_index = sorted_experts.split(1, dim=1)
# get according batch index for each expert
self._batch_index = sorted_experts[index_sorted_experts[:, 1], 0]
# calculate num samples that each expert gets
self._part_sizes = list((gates > 0).sum(0).numpy())
# TODO workaround
for i in range(len(self._part_sizes)):
self._part_sizes[i] = self._part_sizes[i].item()
# expand gates to match with self._batch_index
gates_exp = gates[self._batch_index.flatten()]
self._nonzero_gates = flow.gather(gates_exp, 1, self._expert_index)
def dispatch(self, inp):
"""Create one input Tensor for each expert.
The `Tensor` for a expert `i` contains the slices of `inp` corresponding
to the batch elements `b` where `gates[b, i] > 0`.
Args:
inp: a `Tensor` of shape "[batch_size, <extra_input_dims>]`
Returns:
a list of `num_experts` `Tensor`s with shapes
`[expert_batch_size_i, <extra_input_dims>]`.
"""
# assigns samples to experts whose gate is nonzero
# expand according to batch index so we can just split by _part_sizes
inp_exp = inp[self._batch_index].squeeze(1)
return flow.split(inp_exp, self._part_sizes, dim=0)
def combine(self, expert_out, multiply_by_gates=True):
"""Sum together the expert output, weighted by the gates.
The slice corresponding to a particular batch element `b` is computed
as the sum over all experts `i` of the expert output, weighted by the
corresponding gate values. If `multiply_by_gates` is set to False, the
gate values are ignored.
Args:
expert_out: a list of `num_experts` `Tensor`s, each with shape
`[expert_batch_size_i, <extra_output_dims>]`.
multiply_by_gates: a boolean
Returns:
a `Tensor` with shape `[batch_size, <extra_output_dims>]`.
"""
# apply exp to expert outputs, so we are not longer in log space
stitched = flow.cat(expert_out, 0).exp()
if multiply_by_gates:
stitched = stitched.mul(self._nonzero_gates)
zeros = flow.zeros(
self._gates.size(0),
expert_out[-1].size(1),
requires_grad=True,
device=stitched.device,
)
# spanning a index matrix
batch_index = np.zeros([stitched.shape[0], stitched.shape[1]])
for i in range(stitched.shape[0]):
batch_index[i, :] = (
np.ones(batch_index.shape[1]) * self._batch_index[i].item()
)
batch_index_ = flow.Tensor(batch_index, device=stitched.device)
batch_index_ = batch_index_.int()
batch_index_.requires_grad = False
combined = flow.scatter_add(
zeros, dim=0, index=batch_index_, src=stitched.float()
)
# add eps to all zero values in order to avoid nans when going back to log space
combined[combined == 0] = np.finfo(float).eps
# back to log space
return combined.log()
def expert_to_gates(self):
"""Gate values corresponding to the examples in the per-expert `Tensor`s.
Returns:
a list of `num_experts` one-dimensional `Tensor`s with type `tf.float32`
and shapes `[expert_batch_size_i]`
"""
# split nonzero gates for each expert
return flow.split(self._nonzero_gates, self._part_sizes, dim=0)
# should be aware of the placement
def cdf(value, loc=flow.tensor([0.0]), scale=flow.tensor([1.0])):
loc = loc.to(value.device)
scale = scale.to(value.device)
return 0.5 * (1 + oneflow.erf((value - loc) * scale.reciprocal() / math.sqrt(2)))
class MoE(nn.Module):
"""Call a Sparsely gated mixture of experts layer with 1-layer Feed-Forward networks as experts.
Args:
input_size: integer - size of the input
output_size: integer - size of the input
num_experts: an integer - number of experts
hidden_size: an integer - hidden size of the expertsm, FFN
noisy_gating: a boolean
k: an integer - how many experts to use for each batch element
"""
def __init__(
self, model, input_size, output_size, num_experts, noisy_gating=True, k=4
):
super(MoE, self).__init__()
self.noisy_gating = noisy_gating
self.num_experts = num_experts
self.output_size = output_size
self.input_size = input_size
self.k = k
# instantiate experts
self.experts = nn.ModuleList([model for i in range(self.num_experts)])
self.w_gate = nn.Parameter(
flow.zeros(input_size, num_experts), requires_grad=True
)
self.w_noise = nn.Parameter(
flow.zeros(input_size, num_experts), requires_grad=True
)
self.softplus = nn.Softplus()
self.softmax = nn.Softmax(1)
assert self.k <= self.num_experts
def cv_squared(self, x):
"""The squared coefficient of variation of a sample.
Useful as a loss to encourage a positive distribution to be more uniform.
Epsilons added for numerical stability.
Returns 0 for an empty Tensor.
Args:
x: a `Tensor`.
Returns:
a `Scalar`.
"""
eps = 1e-10
# if only num_experts = 1
if x.shape[0] == 1:
return flow.Tensor([0])
return x.float().var() / (x.float().mean() ** 2 + eps)
def _gates_to_load(self, gates):
"""Compute the true load per expert, given the gates.
The load is the number of examples for which the corresponding gate is >0.
Args:
gates: a `Tensor` of shape [batch_size, n]
Returns:
a float32 `Tensor` of shape [n]
"""
return (gates > 0).sum(0)
def _prob_in_top_k(
self, clean_values, noisy_values, noise_stddev, noisy_top_values
):
"""Helper function to NoisyTopKGating.
Computes the probability that value is in top k, given different random noise.
This gives us a way of backpropagating from a loss that balances the number
of times each expert is in the top k experts per example.
In the case of no noise, pass in None for noise_stddev, and the result will
not be differentiable.
Args:
clean_values: a `Tensor` of shape [batch, n].
noisy_values: a `Tensor` of shape [batch, n]. Equal to clean values plus
normally distributed noise with standard deviation noise_stddev.
noise_stddev: a `Tensor` of shape [batch, n], or None
noisy_top_values: a `Tensor` of shape [batch, m].
"values" Output of tf.top_k(noisy_top_values, m). m >= k+1
Returns:
a `Tensor` of shape [batch, n].
"""
batch = clean_values.size(0)
m = noisy_top_values.size(1)
top_values_flat = noisy_top_values.flatten()
threshold_positions_if_in = (
flow.arange(batch, device=noisy_values.device) * m + self.k
)
threshold_if_in = flow.unsqueeze(
flow.gather(top_values_flat, 0, threshold_positions_if_in), 1
)
is_in = flow.gt(noisy_values, threshold_if_in)
threshold_positions_if_out = threshold_positions_if_in - 1
threshold_if_out = flow.unsqueeze(
flow.gather(top_values_flat, 0, threshold_positions_if_out), 1
)
# is each value currently in the top k.
prob_if_in = cdf((clean_values - threshold_if_in) / noise_stddev)
prob_if_out = cdf((clean_values - threshold_if_out) / noise_stddev)
prob = flow.where(is_in, prob_if_in, prob_if_out)
return prob
    def noisy_top_k_gating(self, x, train, noise_epsilon=1e-2):
        """Noisy top-k gating.
        See paper: https://arxiv.org/abs/1701.06538.
        Args:
          x: input Tensor with shape [batch_size, input_size]
          train: a boolean - we only add noise at training time.
          noise_epsilon: a float
        Returns:
          gates: a Tensor with shape [batch_size, num_experts]
          load: a Tensor with shape [num_experts]
        """
        clean_logits = oneflow.matmul(x, self.w_gate)
        if self.noisy_gating:
            # Per-example, per-expert learned noise scale; softplus keeps it
            # positive and noise_epsilon avoids an exactly-zero stddev.
            raw_noise_stddev = oneflow.matmul(x, self.w_noise)
            # Multiplying by the boolean `train` (0/1) disables noise at eval time.
            noise_stddev = (self.softplus(raw_noise_stddev) + noise_epsilon) * train
            # noisy_logits = clean_logits + ( torch.randn(clean_logits.size()) * noise_stddev)
            # TODO, fix this after torch randn argument fixed
            noisy_logits = clean_logits + (
                flow.randn(
                    clean_logits.size()[0],
                    clean_logits.size()[1],
                    device=clean_logits.device,
                )
                * noise_stddev
            )
            logits = noisy_logits
        else:
            logits = clean_logits
        # calculate topk + 1 that will be needed for the noisy gates
        top_logits, top_indices = logits.topk(min(self.k + 1, self.num_experts), dim=1)
        top_k_logits = top_logits[:, : self.k]
        top_k_indices = top_indices[:, : self.k]
        # Normalize the selected k logits into gate weights.
        top_k_gates = self.softmax(top_k_logits)
        top_k_logits = top_k_logits.to(logits.device)
        top_indices = top_indices.to(logits.device)
        top_logits = top_logits.to(logits.device)
        # Dense [batch, num_experts] buffer; scatter places the k gate values at
        # their expert columns, leaving all other experts at zero.
        zeros = flow.zeros(
            logits.shape, dtype=logits.dtype, requires_grad=True, device=logits.device
        )
        gates = oneflow.scatter(zeros, 1, top_k_indices, top_k_gates)
        if self.noisy_gating and self.k < self.num_experts:
            # Differentiable (smooth) load estimate via the noise model.
            load = (
                self._prob_in_top_k(
                    clean_logits, noisy_logits, noise_stddev, top_logits
                )
            ).sum(0)
        else:
            # Hard count fallback; not differentiable.
            load = self._gates_to_load(gates)
        return gates, load
def forward(self, x, train=True, loss_coef=1e-2):
"""Args:
x: tensor shape [batch_size, input_size]
train: a boolean scalar.
loss_coef: a scalar - multiplier on load-balancing losses
Returns:
y: a tensor with shape [batch_size, output_size].
extra_training_loss: a scalar. This should be added into the overall
training loss of the model. The backpropagation of this loss
encourages all experts to be approximately equally used across a batch.
"""
gates, load = self.noisy_top_k_gating(x, train)
# calculate importance loss
importance = gates.sum(0)
loss = self.cv_squared(importance) + self.cv_squared(load)
loss *= loss_coef
dispatcher = SparseDispatcher(self.num_experts, gates)
expert_inputs = dispatcher.dispatch(x)
gates = dispatcher.expert_to_gates()
expert_outputs = []
# TODO, vectorize this part after fixing the zero dimension bug
for i in range(self.num_experts):
if expert_inputs[i].shape.numel() != 0:
expert_outputs.append(self.experts[i](expert_inputs[i]))
y = dispatcher.combine(expert_outputs)
return y, loss
| [
"oneflow.Tensor",
"oneflow.nn.Softplus",
"oneflow.cat",
"oneflow.scatter",
"oneflow.arange",
"oneflow.matmul",
"oneflow.gather",
"oneflow.where",
"oneflow.zeros",
"oneflow.nn.Softmax",
"oneflow.nonzero",
"oneflow.gt",
"oneflow.tensor",
"oneflow.split"
] | [((5752, 5770), 'oneflow.tensor', 'flow.tensor', (['[0.0]'], {}), '([0.0])\n', (5763, 5770), True, 'import oneflow as flow\n'), ((5778, 5796), 'oneflow.tensor', 'flow.tensor', (['[1.0]'], {}), '([1.0])\n', (5789, 5796), True, 'import oneflow as flow\n'), ((2748, 2793), 'oneflow.gather', 'flow.gather', (['gates_exp', '(1)', 'self._expert_index'], {}), '(gates_exp, 1, self._expert_index)\n', (2759, 2793), True, 'import oneflow as flow\n'), ((3447, 3491), 'oneflow.split', 'flow.split', (['inp_exp', 'self._part_sizes'], {'dim': '(0)'}), '(inp_exp, self._part_sizes, dim=0)\n', (3457, 3491), True, 'import oneflow as flow\n'), ((4612, 4660), 'numpy.zeros', 'np.zeros', (['[stitched.shape[0], stitched.shape[1]]'], {}), '([stitched.shape[0], stitched.shape[1]])\n', (4620, 4660), True, 'import numpy as np\n'), ((4852, 4900), 'oneflow.Tensor', 'flow.Tensor', (['batch_index'], {'device': 'stitched.device'}), '(batch_index, device=stitched.device)\n', (4863, 4900), True, 'import oneflow as flow\n'), ((5639, 5695), 'oneflow.split', 'flow.split', (['self._nonzero_gates', 'self._part_sizes'], {'dim': '(0)'}), '(self._nonzero_gates, self._part_sizes, dim=0)\n', (5649, 5695), True, 'import oneflow as flow\n'), ((7074, 7087), 'oneflow.nn.Softplus', 'nn.Softplus', ([], {}), '()\n', (7085, 7087), True, 'import oneflow.nn as nn\n'), ((7111, 7124), 'oneflow.nn.Softmax', 'nn.Softmax', (['(1)'], {}), '(1)\n', (7121, 7124), True, 'import oneflow.nn as nn\n'), ((9427, 9465), 'oneflow.gt', 'flow.gt', (['noisy_values', 'threshold_if_in'], {}), '(noisy_values, threshold_if_in)\n', (9434, 9465), True, 'import oneflow as flow\n'), ((9877, 9919), 'oneflow.where', 'flow.where', (['is_in', 'prob_if_in', 'prob_if_out'], {}), '(is_in, prob_if_in, prob_if_out)\n', (9887, 9919), True, 'import oneflow as flow\n'), ((10445, 10475), 'oneflow.matmul', 'oneflow.matmul', (['x', 'self.w_gate'], {}), '(x, self.w_gate)\n', (10459, 10475), False, 'import oneflow\n'), ((11654, 11745), 'oneflow.zeros', 
'flow.zeros', (['logits.shape'], {'dtype': 'logits.dtype', 'requires_grad': '(True)', 'device': 'logits.device'}), '(logits.shape, dtype=logits.dtype, requires_grad=True, device=\n logits.device)\n', (11664, 11745), True, 'import oneflow as flow\n'), ((11779, 11832), 'oneflow.scatter', 'oneflow.scatter', (['zeros', '(1)', 'top_k_indices', 'top_k_gates'], {}), '(zeros, 1, top_k_indices, top_k_gates)\n', (11794, 11832), False, 'import oneflow\n'), ((5225, 5240), 'numpy.finfo', 'np.finfo', (['float'], {}), '(float)\n', (5233, 5240), True, 'import numpy as np\n'), ((6868, 6903), 'oneflow.zeros', 'flow.zeros', (['input_size', 'num_experts'], {}), '(input_size, num_experts)\n', (6878, 6903), True, 'import oneflow as flow\n'), ((6983, 7018), 'oneflow.zeros', 'flow.zeros', (['input_size', 'num_experts'], {}), '(input_size, num_experts)\n', (6993, 7018), True, 'import oneflow as flow\n'), ((7615, 7631), 'oneflow.Tensor', 'flow.Tensor', (['[0]'], {}), '([0])\n', (7626, 7631), True, 'import oneflow as flow\n'), ((9339, 9397), 'oneflow.gather', 'flow.gather', (['top_values_flat', '(0)', 'threshold_positions_if_in'], {}), '(top_values_flat, 0, threshold_positions_if_in)\n', (9350, 9397), True, 'import oneflow as flow\n'), ((9589, 9648), 'oneflow.gather', 'flow.gather', (['top_values_flat', '(0)', 'threshold_positions_if_out'], {}), '(top_values_flat, 0, threshold_positions_if_out)\n', (9600, 9648), True, 'import oneflow as flow\n'), ((10538, 10569), 'oneflow.matmul', 'oneflow.matmul', (['x', 'self.w_noise'], {}), '(x, self.w_noise)\n', (10552, 10569), False, 'import oneflow\n'), ((2116, 2135), 'oneflow.nonzero', 'flow.nonzero', (['gates'], {}), '(gates)\n', (2128, 2135), True, 'import oneflow as flow\n'), ((4262, 4285), 'oneflow.cat', 'flow.cat', (['expert_out', '(0)'], {}), '(expert_out, 0)\n', (4270, 4285), True, 'import oneflow as flow\n'), ((4755, 4784), 'numpy.ones', 'np.ones', (['batch_index.shape[1]'], {}), '(batch_index.shape[1])\n', (4762, 4784), True, 'import numpy as 
np\n'), ((9214, 9260), 'oneflow.arange', 'flow.arange', (['batch'], {'device': 'noisy_values.device'}), '(batch, device=noisy_values.device)\n', (9225, 9260), True, 'import oneflow as flow\n'), ((5936, 5948), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (5945, 5948), False, 'import math\n')] |
#!/usr/bin/env python
import argparse
import json
import oneflow as flow
import kaldi_io
from transformer import Transformer
from utils import add_results_to_json, process_dict
from data import build_LFR_features
from decoder import Decoder
from encoder import Encoder
parser = argparse.ArgumentParser("End-to-End Automatic Speech Recognition Decoding.")
# data: input utterance json, character dictionary, and output label file
parser.add_argument(
    "--recog-json",
    type=str,
    default="../../egs/aishell/dump/test/deltafalse/data_simple.json",
    help="Filename of recognition data (json)",
)
parser.add_argument(
    "--dict",
    type=str,
    default="../../egs/aishell/data/lang_1char/train_chars.txt",
    help="Dictionary which should include <unk> <sos> <eos>",
)
parser.add_argument(
    "--result-label",
    type=str,
    default="exp/decode_test/data.json",
    help="Filename of result label data (json)",
)
# model: trained checkpoint location
parser.add_argument(
    "--model-path",
    type=str,
    default="exp/temp/final.pth.tar",
    help="Path to model file created by training",
)
# decode: beam-search behaviour
parser.add_argument("--beam-size", default=5, type=int, help="Beam size")
parser.add_argument("--nbest", default=1, type=int, help="Nbest size")
parser.add_argument(
    "--decode-max-len",
    default=0,
    type=int,
    help="Max output length. If ==0 (default), it uses a "
    "end-detect function to automatically find maximum "
    "hypothesis lengths",
)
# Low Frame Rate front end (must match training-time settings)
parser.add_argument(
    "--LFR_m", default=4, type=int, help="Low Frame Rate: number of frames to stack"
)
parser.add_argument(
    "--LFR_n", default=3, type=int, help="Low Frame Rate: number of frames to skip"
)
# encoder hyper-parameters (must match the checkpoint)
# TODO: automatically infer input dim
parser.add_argument(
    "--d_input", default=80, type=int, help="Dim of encoder input (before LFR)"
)
parser.add_argument(
    "--n_layers_enc", default=6, type=int, help="Number of encoder stacks"
)
parser.add_argument(
    "--n_head", default=8, type=int, help="Number of Multi Head Attention (MHA)"
)
parser.add_argument("--d_k", default=64, type=int, help="Dimension of key")
parser.add_argument("--d_v", default=64, type=int, help="Dimension of value")
parser.add_argument("--d_model", default=512, type=int, help="Dimension of model")
parser.add_argument("--d_inner", default=2048, type=int, help="Dimension of inner")
parser.add_argument("--dropout", default=0.1, type=float, help="Dropout rate")
parser.add_argument(
    "--pe_maxlen", default=5000, type=int, help="Positional Encoding max len"
)
# decoder hyper-parameters (must match the checkpoint)
parser.add_argument(
    "--d_word_vec", default=512, type=int, help="Dim of decoder embedding"
)
parser.add_argument(
    "--n_layers_dec", default=6, type=int, help="Number of decoder stacks"
)
parser.add_argument(
    "--tgt_emb_prj_weight_sharing",
    default=1,
    type=int,
    help="share decoder embedding with decoder projection",
)
def recognize(args):
    """Decode every utterance in ``args.recog_json`` with a trained Transformer.

    Builds the encoder/decoder from the CLI hyper-parameters (which must match
    the checkpoint), restores weights from ``args.model_path``, runs
    beam-search recognition per utterance, and writes the n-best hypotheses to
    ``args.result_label`` as JSON.

    Args:
        args: parsed command-line namespace (see the argparse flags above).
    """
    # Vocabulary: character list plus the <sos>/<eos> ids.
    # (The original code called process_dict twice; once is enough.)
    char_list, sos_id, eos_id = process_dict(args.dict)
    vocab_size = len(char_list)
    encoder = Encoder(
        args.d_input * args.LFR_m,
        args.n_layers_enc,
        args.n_head,
        args.d_k,
        args.d_v,
        args.d_model,
        args.d_inner,
        dropout=args.dropout,
        pe_maxlen=args.pe_maxlen,
    )
    decoder = Decoder(
        sos_id,
        eos_id,
        vocab_size,
        args.d_word_vec,
        args.n_layers_dec,
        args.n_head,
        args.d_k,
        args.d_v,
        args.d_model,
        args.d_inner,
        dropout=args.dropout,
        tgt_emb_prj_weight_sharing=args.tgt_emb_prj_weight_sharing,
        pe_maxlen=args.pe_maxlen,
    )
    model = Transformer(encoder, decoder)
    model.load_state_dict(flow.load(args.model_path))
    device = flow.device("cuda")
    model.eval()
    model.to(device)
    # Sanity check: the checkpoint's special-token ids must agree with the dict.
    assert model.decoder.sos_id == sos_id and model.decoder.eos_id == eos_id
    # read json data
    with open(args.recog_json, "rb") as f:
        js = json.load(f)["utts"]
    # decode each utterance
    new_js = {}
    with flow.no_grad():
        for idx, name in enumerate(js.keys(), 1):
            print("(%d/%d) decoding %s" % (idx, len(js), name), flush=True)
            feature = kaldi_io.read_mat(js[name]["input"][0]["feat"])
            # Stack/skip frames for the Low Frame Rate front end.
            feature = build_LFR_features(feature, args.LFR_m, args.LFR_n)
            feature = flow.tensor(feature).to(dtype=flow.float32)
            feature_length = flow.tensor([feature.size(0)], dtype=flow.int64)
            feature = feature.to(device)
            feature_length = feature_length.to(device)
            nbest_hyps = model.recognize(feature, feature_length, char_list, args)
            new_js[name] = add_results_to_json(js[name], nbest_hyps, char_list)
    with open(args.result_label, "wb") as f:
        f.write(json.dumps({"utts": new_js}, indent=4, sort_keys=True).encode("utf_8"))
if __name__ == "__main__":
    # Guard the entry point: importing this module (e.g. to reuse recognize())
    # must not parse argv or start decoding.
    args = parser.parse_args()
    print(args, flush=True)
    recognize(args)
| [
"oneflow.load",
"oneflow.no_grad",
"oneflow.tensor",
"oneflow.device"
] | [((280, 356), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""End-to-End Automatic Speech Recognition Decoding."""'], {}), "('End-to-End Automatic Speech Recognition Decoding.')\n", (303, 356), False, 'import argparse\n'), ((2883, 2906), 'utils.process_dict', 'process_dict', (['args.dict'], {}), '(args.dict)\n', (2895, 2906), False, 'from utils import add_results_to_json, process_dict\n'), ((2953, 3124), 'encoder.Encoder', 'Encoder', (['(args.d_input * args.LFR_m)', 'args.n_layers_enc', 'args.n_head', 'args.d_k', 'args.d_v', 'args.d_model', 'args.d_inner'], {'dropout': 'args.dropout', 'pe_maxlen': 'args.pe_maxlen'}), '(args.d_input * args.LFR_m, args.n_layers_enc, args.n_head, args.d_k,\n args.d_v, args.d_model, args.d_inner, dropout=args.dropout, pe_maxlen=\n args.pe_maxlen)\n', (2960, 3124), False, 'from encoder import Encoder\n'), ((3209, 3463), 'decoder.Decoder', 'Decoder', (['sos_id', 'eos_id', 'vocab_size', 'args.d_word_vec', 'args.n_layers_dec', 'args.n_head', 'args.d_k', 'args.d_v', 'args.d_model', 'args.d_inner'], {'dropout': 'args.dropout', 'tgt_emb_prj_weight_sharing': 'args.tgt_emb_prj_weight_sharing', 'pe_maxlen': 'args.pe_maxlen'}), '(sos_id, eos_id, vocab_size, args.d_word_vec, args.n_layers_dec,\n args.n_head, args.d_k, args.d_v, args.d_model, args.d_inner, dropout=\n args.dropout, tgt_emb_prj_weight_sharing=args.\n tgt_emb_prj_weight_sharing, pe_maxlen=args.pe_maxlen)\n', (3216, 3463), False, 'from decoder import Decoder\n'), ((3573, 3602), 'transformer.Transformer', 'Transformer', (['encoder', 'decoder'], {}), '(encoder, decoder)\n', (3584, 3602), False, 'from transformer import Transformer\n'), ((3670, 3689), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (3681, 3689), True, 'import oneflow as flow\n'), ((3806, 3829), 'utils.process_dict', 'process_dict', (['args.dict'], {}), '(args.dict)\n', (3818, 3829), False, 'from utils import add_results_to_json, process_dict\n'), ((3629, 3655), 'oneflow.load', 
'flow.load', (['args.model_path'], {}), '(args.model_path)\n', (3638, 3655), True, 'import oneflow as flow\n'), ((4060, 4074), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (4072, 4074), True, 'import oneflow as flow\n'), ((3985, 3997), 'json.load', 'json.load', (['f'], {}), '(f)\n', (3994, 3997), False, 'import json\n'), ((4229, 4276), 'kaldi_io.read_mat', 'kaldi_io.read_mat', (["js[name]['input'][0]['feat']"], {}), "(js[name]['input'][0]['feat'])\n", (4246, 4276), False, 'import kaldi_io\n'), ((4297, 4336), 'data.build_LFR_features', 'build_LFR_features', (['input', 'LFR_m', 'LFR_n'], {}), '(input, LFR_m, LFR_n)\n', (4315, 4336), False, 'from data import build_LFR_features\n'), ((4667, 4719), 'utils.add_results_to_json', 'add_results_to_json', (['js[name]', 'nbest_hyps', 'char_list'], {}), '(js[name], nbest_hyps, char_list)\n', (4686, 4719), False, 'from utils import add_results_to_json, process_dict\n'), ((4357, 4375), 'oneflow.tensor', 'flow.tensor', (['input'], {}), '(input)\n', (4368, 4375), True, 'import oneflow as flow\n'), ((4782, 4836), 'json.dumps', 'json.dumps', (["{'utts': new_js}"], {'indent': '(4)', 'sort_keys': '(True)'}), "({'utts': new_js}, indent=4, sort_keys=True)\n", (4792, 4836), False, 'import json\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from oneflow.python.ops.transpose_util import (
get_perm_when_transpose_axis_to_last_dim,
get_inversed_perm,
)
class Argmax(Module):
    """Module computing the index of the maximum value along a dimension.

    Args:
        dim: dimension to reduce over; ``None`` means argmax over the
            flattened input (the result is then a 1-element index tensor).
        keepdim: whether the reduced dimension is retained in the output.
    """

    def __init__(self, dim: int = None, keepdim: bool = False) -> None:
        super().__init__()
        # Kernel that computes argmax along the LAST axis only.
        self._op_softmax_last_dim = (
            flow.builtin_op("argmax").Input("in").Output("out").Build()
        )
        self._flatten = (
            flow.builtin_op("flatten")
            .Input("in")
            .Output("out")
            .Attr("start_dim", 0)
            .Attr("end_dim", -1)
            .Build()
        )
        self._transpose_op = (
            flow.builtin_op("transpose")
            .Input("input")
            .Output("output")
            .Attr("perm", [])
            .Build()
        )
        self.dim = dim
        self.keepdim = keepdim

    def forward(self, input):
        # Work on a local copy of dim. The original code assigned
        # `self.dim = 0` here, permanently mutating the module after the
        # first call with dim=None and breaking every subsequent call
        # (the flatten branch was never taken again).
        dim = self.dim
        if dim is None:
            input = self._flatten(input)[0]
            dim = 0
        num_axes = len(input.shape)
        axis = dim if dim >= 0 else dim + num_axes
        assert 0 <= axis < num_axes, "axis out of range"
        if axis == num_axes - 1:
            # Fast path: the kernel already reduces over the last axis.
            x = self._op_softmax_last_dim(input)[0]
            if self.keepdim == True:
                x = flow.experimental.unsqueeze(x, -1)
            return x
        else:
            # Move the target axis to the last position, reduce, then
            # restore the original axis order.
            perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis)
            x = self._transpose_op(input, perm=perm)[0]
            x = self._op_softmax_last_dim(x)[0]
            x = flow.experimental.unsqueeze(x, -1)
            x = self._transpose_op(x, perm=get_inversed_perm(perm))[0]
            if self.keepdim == False:
                x = x.squeeze(dim=[axis])
            return x
@oneflow_export("argmax")
@register_tensor_op("argmax")
@experimental_api
def argmax_op(input, dim: int = None, keepdim: bool = False):
    """The op computes the index with the largest value of a Tensor at specified axis.
    Args:
        input (oneflow.Tensor): Input Tensor
        dim (int, optional): dimension to be calculated. Defaults to None, in which
            case the argmax of the flattened input is returned (see the first
            example below).
        keepdim (bool optional): whether the output tensor has dim retained or not. Ignored if dim=None.
    Returns:
        oneflow.Tensor: A Tensor(dtype=int32) contains the index with the largest value of `input`
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> x = np.array([[1, 3, 8, 7, 2],
        ...            [1, 9, 4, 3, 2]], dtype=np.float32)
        >>> out = flow.argmax(flow.Tensor(x))
        >>> print(out.numpy())
        [6]
        >>> out = flow.argmax(flow.Tensor(x), dim=1)
        >>> print(out.numpy())
        [2 1]
    """
    return Argmax(dim=dim, keepdim=keepdim)(input)
# Run the docstring examples above as tests when executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| [
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.builtin_op",
"oneflow.python.ops.transpose_util.get_inversed_perm",
"oneflow.python.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim",
"oneflow.experimental.unsqueeze",
"oneflow.python.oneflow_export.oneflow_export"
] | [((2599, 2623), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""argmax"""'], {}), "('argmax')\n", (2613, 2623), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((2626, 2654), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""argmax"""'], {}), "('argmax')\n", (2644, 2654), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((3795, 3812), 'doctest.testmod', 'doctest.testmod', ([], {}), '()\n', (3810, 3812), False, 'import doctest\n'), ((2202, 2258), 'oneflow.python.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_axes', 'axis'], {}), '(num_axes, axis)\n', (2242, 2258), False, 'from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim, get_inversed_perm\n'), ((2382, 2416), 'oneflow.experimental.unsqueeze', 'flow.experimental.unsqueeze', (['x', '(-1)'], {}), '(x, -1)\n', (2409, 2416), True, 'import oneflow as flow\n'), ((2110, 2144), 'oneflow.experimental.unsqueeze', 'flow.experimental.unsqueeze', (['x', '(-1)'], {}), '(x, -1)\n', (2137, 2144), True, 'import oneflow as flow\n'), ((2461, 2484), 'oneflow.python.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (2478, 2484), False, 'from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim, get_inversed_perm\n'), ((1116, 1141), 'oneflow.builtin_op', 'flow.builtin_op', (['"""argmax"""'], {}), "('argmax')\n", (1131, 1141), True, 'import oneflow as flow\n'), ((1455, 1483), 'oneflow.builtin_op', 'flow.builtin_op', (['"""transpose"""'], {}), "('transpose')\n", (1470, 1483), True, 'import oneflow as flow\n'), ((1227, 1253), 'oneflow.builtin_op', 'flow.builtin_op', (['"""flatten"""'], {}), "('flatten')\n", (1242, 1253), True, 'import oneflow as flow\n')] |
import numpy as np
import sys
import math
import oneflow as flow
import oneflow.nn as nn
sys.path.append("../")
from transformer import TransformerEncoder
class Embeddings(nn.Module):
    """Token embedding lookup scaled by sqrt(d_model).

    The scaling keeps embedding magnitudes comparable to the positional
    encodings that are added to them afterwards (standard Transformer
    convention).
    """

    def __init__(self, vocab, d_model):
        super(Embeddings, self).__init__()
        self.lut = nn.Embedding(vocab, d_model)
        self.d_model = d_model

    def forward(self, x):
        scale = math.sqrt(self.d_model)
        return self.lut(x) * scale
class PositionalEncoding(nn.Module):
    """Sinusoidal positional encoding added to token embeddings.

    Builds a (max_len, 1, d_model)-shaped table once in __init__ and adds the
    first ``x.size(0)`` rows to the input in forward (sequence-first layout).
    """
    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(p=dropout)
        pe = flow.zeros((max_len, d_model))
        position = flow.arange(0, max_len, dtype=flow.float).unsqueeze(1)
        # 1/10000^(2i/d_model) frequencies, computed in log space.
        div_term = flow.exp(flow.arange(
            0, d_model, 2).to(flow.float) * (-math.log(10000.0) / d_model)).unsqueeze(0)
        # Even feature indices get sine, odd get cosine.
        pe[:, 0::2] = flow.sin(position * div_term)
        pe[:, 1::2] = flow.cos(position * div_term)
        pe = pe.unsqueeze(0).transpose(0, 1)
        self.pe = pe
        # self.register_buffer('pe', pe)
        # NOTE(review): because register_buffer is commented out, `pe` is a
        # plain attribute — it is not saved in state_dict and is not moved by
        # Module.to(); forward compensates by re-moving it on every call.
    def forward(self, x):
        # Lazily move the table to the input's device (see note above).
        self.pe = self.pe.to(x.device)
        x = x + self.pe[:x.size(0), :]
        return self.dropout(x)
class TransformerEncoderModel(nn.Module):
    """Sequence classifier: embedding + positional encoding + Transformer
    encoder stack + linear projection to ``n_classes``.
    """
    def __init__(self, emb_sz, n_classes, d_model, nhead, num_encoder_layers, dim_feedforward, dropout, batch_first):
        super(TransformerEncoderModel, self).__init__()
        self.transformer_encoder = TransformerEncoder(
            num_encoder_layers, d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout, batch_first=batch_first)
        self.src_embedding = Embeddings(emb_sz, d_model)
        self.pos = PositionalEncoding(d_model, dropout)
        self.linear = nn.Linear(d_model, n_classes)
    def generate_subsequent_mask(self, tgt_len, src_len):
        # Additive causal mask: -inf strictly above the diagonal, 0 elsewhere.
        # NOTE(review): masked_fill is given an int32 mask rather than a bool
        # one — confirm the oneflow version in use accepts that.
        mask = flow.triu(flow.ones((tgt_len, src_len)), 1)
        mask = mask.masked_fill(mask.to(flow.int32), float('-inf'))
        return mask
    def make_len_mask(self, inp):
        # Padding mask: 1 where the token id is 0 (assumes 0 is the pad id —
        # TODO confirm against the tokenizer). Round-trips through numpy,
        # which forces a host sync.
        inp = (inp.numpy() == 0).astype(np.int32)
        inp = flow.tensor(inp, dtype=flow.int32)
        return inp
    def forward(self, src):
        src_key_padding_mask = self.make_len_mask(src).to(src.device)
        src_mask = None
        src = self.src_embedding(src)
        src = self.pos(src)
        out = self.transformer_encoder(src, src_mask, src_key_padding_mask)
        # Max-pool over the sequence dimension before classification.
        # NOTE(review): if flow.max(x, dim=...) returns (values, indices) in
        # this oneflow version, a tuple would reach self.linear — confirm.
        out = flow.max(out, dim=1)
        out = self.linear(out)
        return out
| [
"oneflow.arange",
"oneflow.sin",
"oneflow.zeros",
"oneflow.ones",
"oneflow.nn.Dropout",
"oneflow.tensor",
"oneflow.nn.Embedding",
"oneflow.max",
"oneflow.nn.Linear",
"oneflow.cos"
] | [((91, 113), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (106, 113), False, 'import sys\n'), ((289, 317), 'oneflow.nn.Embedding', 'nn.Embedding', (['vocab', 'd_model'], {}), '(vocab, d_model)\n', (301, 317), True, 'import oneflow.nn as nn\n'), ((603, 624), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'dropout'}), '(p=dropout)\n', (613, 624), True, 'import oneflow.nn as nn\n'), ((639, 669), 'oneflow.zeros', 'flow.zeros', (['(max_len, d_model)'], {}), '((max_len, d_model))\n', (649, 669), True, 'import oneflow as flow\n'), ((896, 925), 'oneflow.sin', 'flow.sin', (['(position * div_term)'], {}), '(position * div_term)\n', (904, 925), True, 'import oneflow as flow\n'), ((948, 977), 'oneflow.cos', 'flow.cos', (['(position * div_term)'], {}), '(position * div_term)\n', (956, 977), True, 'import oneflow as flow\n'), ((1475, 1614), 'transformer.TransformerEncoder', 'TransformerEncoder', (['num_encoder_layers', 'd_model'], {'nhead': 'nhead', 'dim_feedforward': 'dim_feedforward', 'dropout': 'dropout', 'batch_first': 'batch_first'}), '(num_encoder_layers, d_model, nhead=nhead,\n dim_feedforward=dim_feedforward, dropout=dropout, batch_first=batch_first)\n', (1493, 1614), False, 'from transformer import TransformerEncoder\n'), ((1759, 1788), 'oneflow.nn.Linear', 'nn.Linear', (['d_model', 'n_classes'], {}), '(d_model, n_classes)\n', (1768, 1788), True, 'import oneflow.nn as nn\n'), ((2094, 2128), 'oneflow.tensor', 'flow.tensor', (['inp'], {'dtype': 'flow.int32'}), '(inp, dtype=flow.int32)\n', (2105, 2128), True, 'import oneflow as flow\n'), ((2429, 2449), 'oneflow.max', 'flow.max', (['out'], {'dim': '(1)'}), '(out, dim=1)\n', (2437, 2449), True, 'import oneflow as flow\n'), ((405, 428), 'math.sqrt', 'math.sqrt', (['self.d_model'], {}), '(self.d_model)\n', (414, 428), False, 'import math\n'), ((1873, 1902), 'oneflow.ones', 'flow.ones', (['(tgt_len, src_len)'], {}), '((tgt_len, src_len))\n', (1882, 1902), True, 'import oneflow as flow\n'), ((689, 
730), 'oneflow.arange', 'flow.arange', (['(0)', 'max_len'], {'dtype': 'flow.float'}), '(0, max_len, dtype=flow.float)\n', (700, 730), True, 'import oneflow as flow\n'), ((772, 798), 'oneflow.arange', 'flow.arange', (['(0)', 'd_model', '(2)'], {}), '(0, d_model, 2)\n', (783, 798), True, 'import oneflow as flow\n'), ((831, 848), 'math.log', 'math.log', (['(10000.0)'], {}), '(10000.0)\n', (839, 848), False, 'import math\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _of_broadcast_to_compatible_with(x, compatible_shape, x_shape=None):
    """Run flow.broadcast_to_compatible_with on ``x`` against variables of the
    given ``compatible_shape`` list and return the resulting numpy array.

    ``x_shape`` may be a larger static shape than ``x.shape`` to exercise the
    dynamic (mirrored) path.
    """
    assert isinstance(compatible_shape, (list, tuple))
    if x_shape is None:
        x_shape = x.shape
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.mirrored_view())
    @flow.global_function(function_config=func_config)
    def broadcast_to_compatible_with_fn(
        x_def: oft.ListNumpy.Placeholder(shape=x_shape, dtype=flow.float)
    ):
        # One non-trainable variable per compatible shape; only their shapes
        # matter for the broadcast, not their (random) values.
        compatible_var = [
            flow.get_variable(
                "compatible_var_{}".format(i),
                shape=cp_shape,
                dtype=flow.float,
                initializer=flow.random_normal_initializer(),
                trainable=False,
            )
            for (i, cp_shape) in enumerate(compatible_shape)
        ]
        return flow.broadcast_to_compatible_with(x_def, compatible_var)
    return broadcast_to_compatible_with_fn([x]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_dynamic(
    x, a, b, x_shape=None, a_shape=None, b_shape=None
):
    """Run flow.broadcast_to_compatible_with on mirrored (dynamic) inputs.

    ``x`` is broadcast against the runtime shapes of ``a`` and ``b``; each
    ``*_shape`` argument, when given, is the static placeholder shape (may be
    larger than the actual array). Returns the result as a numpy array.
    """
    x_shape = x.shape if x_shape is None else x_shape
    a_shape = a.shape if a_shape is None else a_shape
    b_shape = b.shape if b_shape is None else b_shape
    flow.clear_default_session()
    cfg = flow.FunctionConfig()
    cfg.default_data_type(flow.float)
    cfg.default_logical_view(flow.scope.mirrored_view())

    @flow.global_function(function_config=cfg)
    def broadcast_job(
        x_def: oft.ListNumpy.Placeholder(x_shape, dtype=flow.float),
        a_def: oft.ListNumpy.Placeholder(a_shape, dtype=flow.float),
        b_def: oft.ListNumpy.Placeholder(b_shape, dtype=flow.float),
    ):
        # identity() materializes the dynamic shapes of a/b inside the job.
        compatibles = [flow.identity(a_def), flow.identity(b_def)]
        return flow.broadcast_to_compatible_with(x_def, compatibles)

    return broadcast_job([x], [a], [b]).get().numpy_list()[0]
def _of_broadcast_to_compatible_with_grad(x, compatible_shape, dx_watcher):
    """Run a train job that broadcasts a variable (+ input ``x``) against the
    given compatible shapes, invoking ``dx_watcher`` with the gradient that
    flows back to the pre-broadcast tensor. Returns the forward result.
    """
    assert isinstance(compatible_shape, (list, tuple))
    assert callable(dx_watcher)
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.default_logical_view(flow.scope.consistent_view())
    @flow.global_function(type="train", function_config=func_config)
    def broadcast_to_compatible_with_fn(
        x_def: oft.Numpy.Placeholder(x.shape, dtype=flow.float)
    ):
        # Trainable zero variable so there is a gradient path to watch;
        # adding x_def below makes the forward value equal to x.
        x_var = flow.get_variable(
            "x_var",
            shape=x.shape,
            dtype=flow.float,
            initializer=flow.constant_initializer(0),
            trainable=True,
        )
        compatible_var = [
            flow.get_variable(
                "compatible_var_{}".format(i),
                shape=cp_shape,
                dtype=flow.float,
                initializer=flow.random_normal_initializer(),
                trainable=False,
            )
            for (i, cp_shape) in enumerate(compatible_shape)
        ]
        x_var = x_var + x_def
        y = flow.broadcast_to_compatible_with(x_var, compatible_var)
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [0.001]), momentum=0
        ).minimize(y)
        # dx_watcher receives d(y)/d(x_var): the broadcast axes summed out.
        flow.watch_diff(x_var, dx_watcher)
        return y
    return broadcast_to_compatible_with_fn(x).get().numpy()
@flow.unittest.skip_unless_1n1d()
class TestBroadcastToCompatibleWith(flow.unittest.TestCase):
def test_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((5, 2)).astype(np.float32)
compatible_shape = [[4, 5, 2], [4, 5, 1]]
ret = _of_broadcast_to_compatible_with(x, compatible_shape)
expected_ret = np.broadcast_to(x, [4, 5, 2])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with(test_case):
x = np.random.standard_normal((10, 6)).astype(np.float32)
x_static_shape = (15, 6)
a = np.random.standard_normal((3, 10, 6)).astype(np.float32)
a_static_shape = (3, 15, 6)
b = np.random.standard_normal((3, 10, 1)).astype(np.float32)
b_static_shape = (3, 15, 1)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [3, 10, 6])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_dynamic_broadcast_to_compatible_with_case_2(test_case):
x = np.random.standard_normal((20, 1, 1)).astype(np.float32)
x_static_shape = (23, 1, 1)
a = np.random.standard_normal((11, 1)).astype(np.float32)
a_static_shape = (15, 1)
b = np.random.standard_normal((7,)).astype(np.float32)
b_static_shape = (8,)
ret = _of_broadcast_to_compatible_with_dynamic(
x, a, b, x_static_shape, a_static_shape, b_static_shape
)
expected_ret = np.broadcast_to(x, [20, 11, 7])
test_case.assertTrue(np.array_equal(expected_ret, ret))
def test_broadcast_to_compatible_with_grad(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[7, 1, 4], [5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_grad_case_2(test_case):
x = np.random.standard_normal((7, 1, 4)).astype(np.float32)
compatible_shape = [[1, 7, 5, 4]]
def compare_dy(dx_blob):
dx = np.ones([7, 5, 4], dtype=np.float32).sum(axis=1).reshape(x.shape)
test_case.assertTrue(np.array_equal(dx, dx_blob.numpy()))
ret = _of_broadcast_to_compatible_with_grad(x, compatible_shape, compare_dy)
exp_ret = np.broadcast_to(x, [1, 7, 5, 4])
test_case.assertTrue(np.array_equal(exp_ret, ret))
def test_broadcast_to_compatible_with_no_broadcast(test_case):
    """When all compatible shapes already match, the input passes through unchanged."""
    x = np.random.standard_normal((9, 9, 6)).astype(np.float32)
    actual = _of_broadcast_to_compatible_with(x, [[6], [9, 1]], (10, 9, 6))
    test_case.assertTrue(np.array_equal(x, actual))
# Allow running this test module directly with the stock unittest runner.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.typing.ListNumpy.Placeholder",
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.watch_diff",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.broadcast_to_compatible_with",
"oneflow.compatible.s... | [((4221, 4253), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4251, 4253), True, 'from oneflow.compatible import single_client as flow\n'), ((973, 1001), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (999, 1001), True, 'from oneflow.compatible import single_client as flow\n'), ((1020, 1041), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1039, 1041), True, 'from oneflow.compatible import single_client as flow\n'), ((1159, 1208), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1179, 1208), True, 'from oneflow.compatible import single_client as flow\n'), ((2084, 2112), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2110, 2112), True, 'from oneflow.compatible import single_client as flow\n'), ((2131, 2152), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2150, 2152), True, 'from oneflow.compatible import single_client as flow\n'), ((2270, 2319), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2290, 2319), True, 'from oneflow.compatible import single_client as flow\n'), ((2949, 2977), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2975, 2977), True, 'from oneflow.compatible import single_client as flow\n'), ((2996, 3017), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3015, 3017), True, 'from oneflow.compatible import single_client as flow\n'), ((3137, 3200), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': 
'"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (3157, 3200), True, 'from oneflow.compatible import single_client as flow\n'), ((7411, 7426), 'unittest.main', 'unittest.main', ([], {}), '()\n', (7424, 7426), False, 'import unittest\n'), ((1125, 1151), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1149, 1151), True, 'from oneflow.compatible import single_client as flow\n'), ((1697, 1753), 'oneflow.compatible.single_client.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['x_def', 'compatible_var'], {}), '(x_def, compatible_var)\n', (1730, 1753), True, 'from oneflow.compatible import single_client as flow\n'), ((2236, 2262), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2260, 2262), True, 'from oneflow.compatible import single_client as flow\n'), ((3101, 3129), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3127, 3129), True, 'from oneflow.compatible import single_client as flow\n'), ((3911, 3967), 'oneflow.compatible.single_client.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['x_var', 'compatible_var'], {}), '(x_var, compatible_var)\n', (3944, 3967), True, 'from oneflow.compatible import single_client as flow\n'), ((4105, 4139), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x_var', 'dx_watcher'], {}), '(x_var, dx_watcher)\n', (4120, 4139), True, 'from oneflow.compatible import single_client as flow\n'), ((4575, 4604), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[4, 5, 2]'], {}), '(x, [4, 5, 2])\n', (4590, 4604), True, 'import numpy as np\n'), ((5198, 5228), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[3, 10, 6]'], {}), '(x, [3, 10, 6])\n', (5213, 5228), True, 'import numpy as np\n'), ((5817, 5848), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[20, 11, 7]'], {}), 
'(x, [20, 11, 7])\n', (5832, 5848), True, 'import numpy as np\n'), ((6379, 6408), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[7, 5, 4]'], {}), '(x, [7, 5, 4])\n', (6394, 6408), True, 'import numpy as np\n'), ((6936, 6968), 'numpy.broadcast_to', 'np.broadcast_to', (['x', '[1, 7, 5, 4]'], {}), '(x, [1, 7, 5, 4])\n', (6951, 6968), True, 'import numpy as np\n'), ((1265, 1323), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', ([], {'shape': 'x_shape', 'dtype': 'flow.float'}), '(shape=x_shape, dtype=flow.float)\n', (1290, 1323), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2376, 2428), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['x_shape'], {'dtype': 'flow.float'}), '(x_shape, dtype=flow.float)\n', (2401, 2428), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2445, 2497), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['a_shape'], {'dtype': 'flow.float'}), '(a_shape, dtype=flow.float)\n', (2470, 2497), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2514, 2566), 'oneflow.compatible.single_client.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['b_shape'], {'dtype': 'flow.float'}), '(b_shape, dtype=flow.float)\n', (2539, 2566), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3257, 3305), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'flow.float'}), '(x.shape, dtype=flow.float)\n', (3278, 3305), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((4634, 4667), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (4648, 4667), True, 'import numpy as np\n'), ((5258, 5291), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (5272, 5291), True, 
'import numpy as np\n'), ((5878, 5911), 'numpy.array_equal', 'np.array_equal', (['expected_ret', 'ret'], {}), '(expected_ret, ret)\n', (5892, 5911), True, 'import numpy as np\n'), ((6438, 6466), 'numpy.array_equal', 'np.array_equal', (['exp_ret', 'ret'], {}), '(exp_ret, ret)\n', (6452, 6466), True, 'import numpy as np\n'), ((6998, 7026), 'numpy.array_equal', 'np.array_equal', (['exp_ret', 'ret'], {}), '(exp_ret, ret)\n', (7012, 7026), True, 'import numpy as np\n'), ((7354, 7376), 'numpy.array_equal', 'np.array_equal', (['x', 'ret'], {}), '(x, ret)\n', (7368, 7376), True, 'import numpy as np\n'), ((2645, 2665), 'oneflow.compatible.single_client.identity', 'flow.identity', (['a_def'], {}), '(a_def)\n', (2658, 2665), True, 'from oneflow.compatible import single_client as flow\n'), ((2667, 2687), 'oneflow.compatible.single_client.identity', 'flow.identity', (['b_def'], {}), '(b_def)\n', (2680, 2687), True, 'from oneflow.compatible import single_client as flow\n'), ((3450, 3478), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (3475, 3478), True, 'from oneflow.compatible import single_client as flow\n'), ((4381, 4414), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(5, 2)'], {}), '((5, 2))\n', (4406, 4414), True, 'import numpy as np\n'), ((4744, 4778), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(10, 6)'], {}), '((10, 6))\n', (4769, 4778), True, 'import numpy as np\n'), ((4843, 4880), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 10, 6)'], {}), '((3, 10, 6))\n', (4868, 4880), True, 'import numpy as np\n'), ((4948, 4985), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(3, 10, 1)'], {}), '((3, 10, 1))\n', (4973, 4985), True, 'import numpy as np\n'), ((5375, 5412), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(20, 1, 1)'], {}), '((20, 1, 1))\n', (5400, 5412), True, 'import numpy as np\n'), ((5480, 5514), 
'numpy.random.standard_normal', 'np.random.standard_normal', (['(11, 1)'], {}), '((11, 1))\n', (5505, 5514), True, 'import numpy as np\n'), ((5579, 5610), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7,)'], {}), '((7,))\n', (5604, 5610), True, 'import numpy as np\n'), ((5985, 6021), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7, 1, 4)'], {}), '((7, 1, 4))\n', (6010, 6021), True, 'import numpy as np\n'), ((6547, 6583), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(7, 1, 4)'], {}), '((7, 1, 4))\n', (6572, 6583), True, 'import numpy as np\n'), ((7108, 7144), 'numpy.random.standard_normal', 'np.random.standard_normal', (['(9, 9, 6)'], {}), '((9, 9, 6))\n', (7133, 7144), True, 'import numpy as np\n'), ((1530, 1562), 'oneflow.compatible.single_client.random_normal_initializer', 'flow.random_normal_initializer', ([], {}), '()\n', (1560, 1562), True, 'from oneflow.compatible import single_client as flow\n'), ((3717, 3749), 'oneflow.compatible.single_client.random_normal_initializer', 'flow.random_normal_initializer', ([], {}), '()\n', (3747, 3749), True, 'from oneflow.compatible import single_client as flow\n'), ((4008, 4062), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (4049, 4062), True, 'from oneflow.compatible import single_client as flow\n'), ((6139, 6175), 'numpy.ones', 'np.ones', (['[7, 5, 4]'], {'dtype': 'np.float32'}), '([7, 5, 4], dtype=np.float32)\n', (6146, 6175), True, 'import numpy as np\n'), ((6696, 6732), 'numpy.ones', 'np.ones', (['[7, 5, 4]'], {'dtype': 'np.float32'}), '([7, 5, 4], dtype=np.float32)\n', (6703, 6732), True, 'import numpy as np\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import oneflow as flow
from omegaconf import OmegaConf
from oneflow.utils.data import DataLoader, TensorDataset
sys.path.append(".")
from libai.config import LazyCall, default_argument_parser
from libai.engine import DefaultTrainer, default_setup
from libai.optim import get_default_optimizer_params
from libai.scheduler import WarmupMultiStepLR
from tests.layers.test_trainer_model import build_graph, build_model
def setup(args):
    """Build the demo training config and run LiBai's basic setup.

    Returns an OmegaConf node with ``train``, ``optim`` and ``graph``
    sections consumed by :class:`DemoTrainer`.
    """
    cfg = OmegaConf.create()
    # Training hyperparameters for a tiny single-device demo run.
    cfg.train = dict(
        output_dir="./demo_output",
        train_micro_batch_size=32,
        test_micro_batch_size=32,
        # All parallelism degrees are 1: plain single-GPU execution.
        dist=dict(
            data_parallel_size=1,
            tensor_parallel_size=1,
            pipeline_parallel_size=1,
            pipeline_num_layers=4,
        ),
        start_iter=0,
        train_iter=20,
        train_epoch=1,
        warmup_ratio=0.05,
        lr_warmup_fraction=0.01,
        lr_decay_iter=6000,
        eval_period=1000,
        log_period=1,
        checkpointer=dict(period=100),
        nccl_fusion_threshold_mb=16,
        nccl_fusion_max_ops=24,
        # LazyCall defers instantiation until the trainer builds the scheduler.
        scheduler=LazyCall(WarmupMultiStepLR)(
            warmup_factor=0.001,
            # alpha=0.01,
            warmup_method="linear",
            milestones=[0.1, 0.2],
        ),
    )
    # AdamW with gradient clipping; likewise lazily instantiated.
    cfg.optim = LazyCall(flow.optim.AdamW)(
        parameters=LazyCall(get_default_optimizer_params)(
            # parameters.model is meant to be set to the model object, before
            # instantiating the optimizer.
            clip_grad_max_norm=1.0,
            clip_grad_norm_type=2.0,
            weight_decay_norm=0.0,
            weight_decay_bias=0.0,
        ),
        lr=1e-4,
        weight_decay=0.01,
        betas=(0.9, 0.999),
        do_bias_correction=True,
    )
    # Run in nn.Graph (static graph) mode rather than eager mode.
    cfg.graph = dict(
        enabled=True,
    )
    default_setup(cfg, args)
    return cfg
class DemoTrainer(DefaultTrainer):
    """Minimal trainer for the demo model: overrides only the model/data hooks."""

    @classmethod
    def build_model(cls, cfg):
        """Return the demo ``flow.nn.Module`` described by *cfg*."""
        return build_model(cfg)

    @classmethod
    def build_graph(cls, cfg, model, optimizer=None, lr_scheduler=None, is_train=True):
        """Wrap *model* (plus optional optimizer/scheduler) in the demo graph."""
        return build_graph(cfg, model, optimizer, lr_scheduler)

    @classmethod
    def get_batch(cls, data):
        """Ignore *data* and synthesize one random (32, 512) batch on GPU 0."""
        batch = flow.randn(
            32,
            512,
            sbp=flow.sbp.split(0),
            placement=flow.placement("cuda", [0]),
        )
        return [batch]

    @classmethod
    def build_train_loader(cls, cfg, tokenizer=None):
        """Return (train_loader, None, None) over 1000 random scalars."""
        dataset = TensorDataset(flow.randn(1000))
        loader = DataLoader(dataset, batch_size=cfg.train.train_micro_batch_size)
        return (loader, None, None)

    @classmethod
    def build_test_loader(cls, cfg):
        """The demo has no evaluation data."""
        return []
def main(args):
    """Build the demo trainer from parsed *args* and run its training loop."""
    cfg = setup(args)
    trainer = DemoTrainer(cfg)
    # NOTE: checkpoint resumption would call
    # trainer.resume_or_load(resume=args.resume) here.
    return trainer.train()
# Script entry point: parse LiBai's standard CLI arguments and train.
if __name__ == "__main__":
    args = default_argument_parser().parse_args()
    main(args)
| [
"oneflow.placement",
"oneflow.sbp.split",
"oneflow.randn"
] | [((746, 766), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (761, 766), False, 'import sys\n'), ((1140, 1158), 'omegaconf.OmegaConf.create', 'OmegaConf.create', ([], {}), '()\n', (1156, 1158), False, 'from omegaconf import OmegaConf\n'), ((2512, 2536), 'libai.engine.default_setup', 'default_setup', (['cfg', 'args'], {}), '(cfg, args)\n', (2525, 2536), False, 'from libai.engine import DefaultTrainer, default_setup\n'), ((1983, 2009), 'libai.config.LazyCall', 'LazyCall', (['flow.optim.AdamW'], {}), '(flow.optim.AdamW)\n', (1991, 2009), False, 'from libai.config import LazyCall, default_argument_parser\n'), ((2831, 2847), 'tests.layers.test_trainer_model.build_model', 'build_model', (['cfg'], {}), '(cfg)\n', (2842, 2847), False, 'from tests.layers.test_trainer_model import build_graph, build_model\n'), ((2990, 3038), 'tests.layers.test_trainer_model.build_graph', 'build_graph', (['cfg', 'model', 'optimizer', 'lr_scheduler'], {}), '(cfg, model, optimizer, lr_scheduler)\n', (3001, 3038), False, 'from tests.layers.test_trainer_model import build_graph, build_model\n'), ((3816, 3841), 'libai.config.default_argument_parser', 'default_argument_parser', ([], {}), '()\n', (3839, 3841), False, 'from libai.config import LazyCall, default_argument_parser\n'), ((1790, 1817), 'libai.config.LazyCall', 'LazyCall', (['WarmupMultiStepLR'], {}), '(WarmupMultiStepLR)\n', (1798, 1817), False, 'from libai.config import LazyCall, default_argument_parser\n'), ((2030, 2068), 'libai.config.LazyCall', 'LazyCall', (['get_default_optimizer_params'], {}), '(get_default_optimizer_params)\n', (2038, 2068), False, 'from libai.config import LazyCall, default_argument_parser\n'), ((3189, 3206), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (3203, 3206), True, 'import oneflow as flow\n'), ((3234, 3261), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), "('cuda', [0])\n", (3248, 3261), True, 'import oneflow as flow\n'), ((3430, 3446), 
'oneflow.randn', 'flow.randn', (['(1000)'], {}), '(1000)\n', (3440, 3446), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.distribute as distribute_util
# Shared weight initializer for every layer below; the commented lines are
# alternative initializers kept from development.
# init = flow.glorot_uniform_initializer(data_format="NCHW")
# init = flow.glorot_normal_initializer(data_format="NCHW")
# init = flow.random_uniform_initializer()
init = flow.random_normal_initializer(stddev=0.02)
def get_const_initializer():
    # Tiny constant initializer used when deterministic weights are requested.
    return flow.constant_initializer(0.00002)
def deconv2d(
    input,
    filters,
    size,
    name,
    strides=2,
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=False,
):
    """2-D transposed convolution (NCHW) upsampling H and W by ``strides``.

    Args:
        input: NCHW input blob of shape (N, C, H, W).
        filters: number of output channels.
        size: square kernel height/width.
        name: base name for the op and its variables; the op name gets a
            "_reuse" suffix when ``reuse`` is True.
        strides: spatial upsampling factor.
        trainable: whether weight/bias participate in training.
        reuse: reuse previously created variables instead of creating new ones.
        const_init: use ``get_const_initializer()`` instead of the module init.
        use_bias: add a per-output-channel bias after the deconvolution.

    Returns:
        Blob of shape (N, filters, H * strides, W * strides).
    """
    name_ = name if reuse == False else name + "_reuse"
    # weight layout for conv2d_transpose: [in_channels, out_channels, height, width]
    weight_shape = (input.shape[1], filters, size, size)
    # Fix: the output channel count is `filters` (the weight's out_channels),
    # not the input channel count. The original only worked when they matched.
    output_shape = (
        input.shape[0],
        filters,
        input.shape[2] * strides,
        input.shape[3] * strides,
    )
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=init
        if not const_init
        else get_const_initializer(),
        trainable=trainable,
        # Consistency fix: conv2d/dense already thread `reuse` through to
        # get_variable; without it, reuse=True silently recreated the weight.
        reuse=reuse,
    )
    output = flow.nn.conv2d_transpose(
        input,
        weight,
        strides=[strides, strides],
        output_shape=output_shape,
        padding="SAME",
        data_format="NCHW",
        name=name_,
    )
    if use_bias:
        bias = flow.get_variable(
            name + "-bias",
            shape=(filters,),
            dtype=input.dtype,
            initializer=flow.constant_initializer(0.0),
            trainable=trainable,
            reuse=reuse,
        )
        output = flow.nn.bias_add(output, bias, "NCHW")
    return output
def conv2d(
    input,
    filters,
    size,
    name,
    strides=2,
    padding="same",
    trainable=True,
    reuse=False,
    const_init=False,
    use_bias=True,
):
    """2-D convolution (NCHW) with optional per-channel bias.

    ``reuse`` both suffixes the op name with "_reuse" and reuses the
    existing weight/bias variables.
    """
    op_name = name + "_reuse" if reuse else name
    # Weight layout: (out_channels, in_channels, k_h, k_w);
    # it would be (filters, k_h, k_w, in_channels) for NHWC.
    weight = flow.get_variable(
        name + "-weight",
        shape=(filters, input.shape[1], size, size),
        dtype=input.dtype,
        initializer=get_const_initializer() if const_init else init,
        trainable=trainable,
        reuse=reuse,
    )
    output = flow.nn.compat_conv2d(
        input,
        weight,
        strides=[strides, strides],
        padding=padding,
        data_format="NCHW",
        name=op_name,
    )
    if not use_bias:
        return output
    bias = flow.get_variable(
        name + "-bias",
        shape=(filters,),
        dtype=input.dtype,
        initializer=flow.constant_initializer(0.0),
        trainable=trainable,
        reuse=reuse,
    )
    return flow.nn.bias_add(output, bias, "NCHW")
def _batch_norm(inputs, name, trainable=True, training=True):
    """Channel-axis (axis=1) batch normalization built directly on the user op.

    Creates beta/gamma and the moving statistics under a namespace scope,
    then emits a "normalization" op. Returns the normalized output blob.
    """
    # One scale/shift parameter per channel (NCHW -> shape[1]).
    params_shape = [inputs.shape[1]]
    # Float32 required to avoid precision-loss when using fp16 input/output
    params_dtype = flow.float32 if inputs.dtype == flow.float16 else inputs.dtype
    # Force inference mode when the enclosing function is not trainable.
    if not flow.current_global_function_desc().IsTrainable() or not trainable:
        training = False
    with flow.scope.namespace(name):
        beta = flow.get_variable(
            name="beta",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.zeros_initializer(),
            trainable=trainable,
            distribute=distribute_util.broadcast(),
        )
        gamma = flow.get_variable(
            name="gamma",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.ones_initializer(),
            trainable=trainable,
            distribute=distribute_util.broadcast(),
        )
        # Moving statistics are updated by the op itself, never by gradients.
        moving_mean = flow.get_variable(
            name="moving_mean",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.zeros_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
        )
        moving_variance = flow.get_variable(
            name="moving_variance",
            shape=params_shape,
            dtype=params_dtype,
            initializer=flow.ones_initializer(),
            trainable=False,
            distribute=distribute_util.broadcast(),
        )
    builder = (
        flow.user_op_builder(name)
        .Op("normalization")
        .Input("x", [inputs])
        .Input("moving_mean", [moving_mean])
        .Input("moving_variance", [moving_variance])
        .Input("gamma", [gamma])
        .Input("beta", [beta])
        .Output("y")
        .Attr("axis", 1)
        .Attr("epsilon", 1.001e-5)
        .Attr("training", training)
        .Attr("momentum", 0.997)
    )
    # Training mode additionally exposes batch mean / inverse variance outputs
    # (required by the backward pass).
    if trainable and training:
        builder = builder.Output("mean").Output("inv_variance")
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def batchnorm(input, name, axis=1, reuse=False, trainable=True):
    """Batch norm over the channel axis; ``axis`` is accepted for API parity but unused."""
    if reuse:
        name = name + "_reuse"
    return _batch_norm(input, name, trainable=trainable)
def dense(
    input, units, name, use_bias=False, trainable=True, reuse=False, const_init=False
):
    """Fully-connected layer: flattens leading axes, computes x @ W^T (+ bias).

    Inputs with more than two axes are reshaped to 2-D for the matmul and the
    original leading shape is restored afterwards.
    """
    op_name = name + "_reuse" if reuse else name
    in_shape = input.shape
    num_axes = len(in_shape)
    assert num_axes >= 2
    # Collapse every leading axis so the matmul always sees a 2-D operand.
    if num_axes > 2:
        flat = flow.reshape(input, (-1, in_shape[-1]))
    else:
        flat = input
    weight = flow.get_variable(
        name="{}-weight".format(name),
        shape=(units, flat.shape[1]),
        dtype=flat.dtype,
        initializer=get_const_initializer() if const_init else init,
        trainable=trainable,
        reuse=reuse,
        model_name="weight",
    )
    out = flow.matmul(a=flat, b=weight, transpose_b=True, name=op_name + "matmul")
    if use_bias:
        bias = flow.get_variable(
            name="{}-bias".format(name),
            shape=(units,),
            dtype=flat.dtype,
            initializer=get_const_initializer()
            if const_init
            else flow.constant_initializer(0.0),
            trainable=trainable,
            reuse=reuse,
            model_name="bias",
        )
        out = flow.nn.bias_add(out, bias, name=op_name + "_bias_add")
    # Restore the original leading shape with the new feature dimension.
    if num_axes > 2:
        out = flow.reshape(out, in_shape[:-1] + (units,))
    return out
| [
"oneflow.distribute.broadcast",
"oneflow.current_global_function_desc",
"oneflow.scope.namespace",
"oneflow.matmul",
"oneflow.ones_initializer",
"oneflow.nn.compat_conv2d",
"oneflow.nn.conv2d_transpose",
"oneflow.constant_initializer",
"oneflow.zeros_initializer",
"oneflow.reshape",
"oneflow.use... | [((827, 870), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'stddev': '(0.02)'}), '(stddev=0.02)\n', (857, 870), True, 'import oneflow as flow\n'), ((912, 944), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(2e-05)'], {}), '(2e-05)\n', (937, 944), True, 'import oneflow as flow\n'), ((1669, 1815), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': '[strides, strides]', 'output_shape': 'output_shape', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides],\n output_shape=output_shape, padding='SAME', data_format='NCHW', name=name_)\n", (1693, 1815), True, 'import oneflow as flow\n'), ((2803, 2921), 'oneflow.nn.compat_conv2d', 'flow.nn.compat_conv2d', (['input', 'weight'], {'strides': '[strides, strides]', 'padding': 'padding', 'data_format': '"""NCHW"""', 'name': 'name_'}), "(input, weight, strides=[strides, strides], padding=\n padding, data_format='NCHW', name=name_)\n", (2824, 2921), True, 'import oneflow as flow\n'), ((6229, 6301), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'inputs', 'b': 'weight', 'transpose_b': '(True)', 'name': "(name_ + 'matmul')"}), "(a=inputs, b=weight, transpose_b=True, name=name_ + 'matmul')\n", (6240, 6301), True, 'import oneflow as flow\n'), ((2133, 2171), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (2149, 2171), True, 'import oneflow as flow\n'), ((3255, 3293), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', '"""NCHW"""'], {}), "(output, bias, 'NCHW')\n", (3271, 3293), True, 'import oneflow as flow\n'), ((3683, 3709), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (3703, 3709), True, 'import oneflow as flow\n'), ((5825, 5864), 'oneflow.reshape', 'flow.reshape', (['input', '(-1, in_shape[-1])'], {}), '(input, (-1, in_shape[-1]))\n', (5837, 5864), True, 
'import oneflow as flow\n'), ((6755, 6808), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['out', 'bias'], {'name': "(name_ + '_bias_add')"}), "(out, bias, name=name_ + '_bias_add')\n", (6771, 6808), True, 'import oneflow as flow\n'), ((6820, 6863), 'oneflow.reshape', 'flow.reshape', (['out', '(in_shape[:-1] + (units,))'], {}), '(out, in_shape[:-1] + (units,))\n', (6832, 6863), True, 'import oneflow as flow\n'), ((2040, 2070), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (2065, 2070), True, 'import oneflow as flow\n'), ((3137, 3167), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (3162, 3167), True, 'import oneflow as flow\n'), ((3858, 3882), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3880, 3882), True, 'import oneflow as flow\n'), ((3940, 3967), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (3965, 3967), True, 'import oneflow.distribute as distribute_util\n'), ((4128, 4151), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (4149, 4151), True, 'import oneflow as flow\n'), ((4209, 4236), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (4234, 4236), True, 'import oneflow.distribute as distribute_util\n'), ((4409, 4433), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (4431, 4433), True, 'import oneflow as flow\n'), ((4487, 4514), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (4512, 4514), True, 'import oneflow.distribute as distribute_util\n'), ((4695, 4718), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (4716, 4718), True, 'import oneflow as flow\n'), ((4772, 4799), 'oneflow.distribute.broadcast', 'distribute_util.broadcast', ([], {}), '()\n', (4797, 4799), True, 'import oneflow.distribute as distribute_util\n'), ((3581, 3616), 'oneflow.current_global_function_desc', 
'flow.current_global_function_desc', ([], {}), '()\n', (3614, 3616), True, 'import oneflow as flow\n'), ((6539, 6569), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (6564, 6569), True, 'import oneflow as flow\n'), ((4835, 4861), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (4855, 4861), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import oneflow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.eager.boxing_util as boxing_util
import oneflow.framework.hob as hob
import oneflow.framework.id_util as id_util
import oneflow.framework.interpret_util as interpret_util
import oneflow.framework.placement_context as placement_ctx
import oneflow.framework.remote_blob as remote_blob_util
import oneflow.support.enable_if as enable_if
def assign(ref, value, dtype=None, name=None):
    """Copy *value* into *ref* with the "assign" user op.

    ``dtype`` is accepted for API compatibility but not used here.
    """
    if name is None:
        name = id_util.UniqueStr("Assign_")
    builder = oneflow.consistent_user_op_builder(name).Op("assign")
    builder = builder.Input("ref", [ref]).Input("value", [value])
    builder.Build().InferAndTryRun()
def api_system_assign(ref, value, validate_shape=None, use_locking=None, name=None):
    """Dispatch to the lazy- or eager-mode system assign, whichever is enabled."""
    impl = enable_if.unique([lazy_system_assign, eager_system_assign])
    return impl(
        ref, value, validate_shape=validate_shape, use_locking=use_locking, name=name
    )
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def lazy_system_assign(ref, value, validate_shape=None, use_locking=None, name=None):
    """Lazy-mode assign: run the system assign op under *ref*'s placement.

    ``validate_shape`` and ``use_locking`` are accepted for TF-style API
    compatibility but are ignored here.
    """
    op_conf = _SystemAssignOpConf(ref, value, name=name)
    (
        device_tag,
        machine_device_ids,
        hierarchy,
    ) = oneflow._oneflow_internal.GetDeviceTagAndMachineDeviceIdsAndHierarchy(
        ref.parallel_conf
    )
    if hierarchy is not None:
        hierarchy = tuple(hierarchy.dim())
    # Execute the op on the same devices that hold `ref`.
    with oneflow.scope.placement(device_tag, machine_device_ids, hierarchy):
        interpret_util.Forward(op_conf)
    return ref
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def eager_system_assign(ref, value, validate_shape=None, use_locking=None, name=None):
    """Eager-mode assign: immediately issue the assign instruction for *ref*."""
    op_conf = _SystemAssignOpConf(ref, value, name=name)

    def _build(builder):
        # Moves `value`'s blob into `ref`'s blob object via the boxing util.
        return boxing_util.BuildAssignInstruction(
            builder, ref.blob_object, value.blob_object, op_conf
        )

    oneflow._oneflow_internal.deprecated.LogicalRun(_build)
    return ref
def api_one_to_one_assign(ref, value):
    """Eager-only one-to-one assign between two blobs' underlying objects."""
    assert hob.eager_execution_enabled(None)

    def _build(builder):
        return builder.Build121AssignInstruction(ref.blob_object, value.blob_object)

    oneflow._oneflow_internal.deprecated.LogicalRun(_build)
    return ref
def _SystemAssignOpConf(ref, value, name=None):
    """Build the proto OperatorConf for a system assign of *value* into *ref*."""
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("Assign_") if name is None else name
    op_conf.assign_conf.ref = ref.unique_name
    op_conf.assign_conf.value = value.unique_name
    return op_conf
| [
"oneflow.framework.hob.eager_execution_enabled",
"oneflow.framework.interpret_util.Forward",
"oneflow.eager.boxing_util.BuildAssignInstruction",
"oneflow._oneflow_internal.GetDeviceTagAndMachineDeviceIdsAndHierarchy",
"oneflow.support.enable_if.unique",
"oneflow.support.enable_if.condition",
"oneflow.co... | [((1668, 1738), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (1687, 1738), True, 'import oneflow.support.enable_if as enable_if\n'), ((2274, 2343), 'oneflow.support.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (2293, 2343), True, 'import oneflow.support.enable_if as enable_if\n'), ((1497, 1556), 'oneflow.support.enable_if.unique', 'enable_if.unique', (['[lazy_system_assign, eager_system_assign]'], {}), '([lazy_system_assign, eager_system_assign])\n', (1513, 1556), True, 'import oneflow.support.enable_if as enable_if\n'), ((1963, 2056), 'oneflow._oneflow_internal.GetDeviceTagAndMachineDeviceIdsAndHierarchy', 'oneflow._oneflow_internal.GetDeviceTagAndMachineDeviceIdsAndHierarchy', (['ref.parallel_conf'], {}), '(ref.\n parallel_conf)\n', (2032, 2056), False, 'import oneflow\n'), ((2749, 2782), 'oneflow.framework.hob.eager_execution_enabled', 'hob.eager_execution_enabled', (['None'], {}), '(None)\n', (2776, 2782), True, 'import oneflow.framework.hob as hob\n'), ((3102, 3129), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (3127, 3129), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((1180, 1208), 'oneflow.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Assign_"""'], {}), "('Assign_')\n", (1197, 1208), True, 'import oneflow.framework.id_util as id_util\n'), ((2148, 2214), 'oneflow.scope.placement', 'oneflow.scope.placement', (['device_tag', 'machine_device_ids', 'hierarchy'], {}), '(device_tag, machine_device_ids, hierarchy)\n', (2171, 2214), False, 'import oneflow\n'), ((2224, 2255), 'oneflow.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (2246, 2255), True, 'import 
oneflow.framework.interpret_util as interpret_util\n'), ((3059, 3087), 'oneflow.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Assign_"""'], {}), "('Assign_')\n", (3076, 3087), True, 'import oneflow.framework.id_util as id_util\n'), ((2565, 2658), 'oneflow.eager.boxing_util.BuildAssignInstruction', 'boxing_util.BuildAssignInstruction', (['builder', 'ref.blob_object', 'value.blob_object', 'op_conf'], {}), '(builder, ref.blob_object, value.\n blob_object, op_conf)\n', (2599, 2658), True, 'import oneflow.eager.boxing_util as boxing_util\n'), ((1228, 1268), 'oneflow.consistent_user_op_builder', 'oneflow.consistent_user_op_builder', (['name'], {}), '(name)\n', (1262, 1268), False, 'import oneflow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import shutil
import numpy as np
import google.protobuf.text_format as text_format
import oneflow as flow
import oneflow.core.serving.saved_model_pb2 as saved_model_pb
from alexnet import load_data, alexnet
from ofrecord_dataset import ImageNetRecordDataset
# Defaults for the AlexNet save/load round-trip test below.
DEFAULT_BATCH_SIZE = 8
# OFRecord dataset locations and their part-file counts.
DEFAULT_TRAIN_DATA_PATH = "/dataset/imagenet_227/train/32/"
DEFAULT_TRAIN_DATA_PART_NUM = 32
DEFAULT_INFER_DATA_PATH = "/dataset/imagenet_227/train/32/"
DEFAULT_INFER_DATA_PART_NUM = 32
# Pre-trained AlexNet checkpoint loaded before saving / after re-loading.
DEFAULT_CHECKPOINT_DIR = "/dataset/PNGS/cnns_model_for_test/alexnet/models/of_model_bk"
DEFAULT_IMAGE_SIZE = 227  # AlexNet input resolution (227x227)
def init_env():
    """Configure a fresh single-node, single-CPU/GPU OneFlow environment.

    Debug mode is enabled so job graphs can be inspected when needed.
    """
    flow.env.init()
    # Single machine, one CPU device, one GPU device.
    for configure, value in (
        (flow.config.machine_num, 1),
        (flow.config.cpu_device_num, 1),
        (flow.config.gpu_device_num, 1),
        (flow.config.enable_debug_mode, True),
    ):
        configure(value)
def make_alexnet_train_func(batch_size, data_dir, data_part_num):
    """Build and return a OneFlow training job for AlexNet.

    Args:
        batch_size: samples consumed per training step.
        data_dir: directory holding the OFRecord training data.
        data_part_num: number of OFRecord part files in ``data_dir``.

    Returns:
        The compiled global training function; each call runs one SGD
        step and returns the loss as a numpy array.
    """
    @flow.global_function(type="train")
    def alexnet_train() -> flow.typing.Numpy:
        images, labels = load_data(batch_size, data_dir, data_part_num)
        loss = alexnet(images, labels)
        # Constant 1e-5 learning rate, plain SGD without momentum.
        scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.00001])
        flow.optimizer.SGD(scheduler, momentum=0).minimize(loss)
        return loss
    return alexnet_train
def make_alexnet_infer_func(batch_size, image_size):
    """Build an AlexNet inference job and record its input/output blob names.

    Args:
        batch_size: samples per inference call.
        image_size: (H, W, C) of the NHWC input images.

    Returns:
        (inference_fn, input_lbns, output_lbns); the two dicts map logical
        names ("image", "label", "output") to logical blob names and are
        filled in when the job graph is traced.
    """
    input_lbns = {}
    output_lbns = {}
    image_shape = (batch_size,) + tuple(image_size)
    label_shape = (batch_size,)
    @flow.global_function(type="predict")
    def alexnet_inference(
        image: flow.typing.Numpy.Placeholder(image_shape, dtype=flow.float32),
        label: flow.typing.Numpy.Placeholder(label_shape, dtype=flow.int32),
    ) -> flow.typing.Numpy:
        input_lbns["image"] = image.logical_blob_name
        input_lbns["label"] = label.logical_blob_name
        # NHWC -> NCHW: the conv layers expect channels-first input.
        image = flow.transpose(image, perm=(0, 3, 1, 2))
        loss = alexnet(image, label, trainable=False)
        # reduce_mean calculate reduce_count in python api, we should only set attribute for op in python,
        # so reduce_count is out of date when we have loaded model and set new batch_size.
        # We will modify implementation of reduce_mean
        # output = flow.math.reduce_mean(loss)
        output = loss
        output_lbns["output"] = output.logical_blob_name
        return output
    return alexnet_inference, input_lbns, output_lbns
def load_saved_model(model_meta_file_path):
    """Parse and return the ``SavedModel`` text-format protobuf at the given path."""
    proto = saved_model_pb.SavedModel()
    with open(model_meta_file_path, "rb") as meta_file:
        text_format.Merge(meta_file.read(), proto)
    return proto
@flow.unittest.skip_unless_1n1d()
class TestSaveAndLoadModel(flow.unittest.TestCase):
    """Round-trip test: save an AlexNet inference model, reload it via the
    serving InferenceSession, and check both produce identical outputs."""
    def test_alexnet(test_case, batch_size=DEFAULT_BATCH_SIZE, num_batchs=6):
        """Save the model, re-run inference through the loaded copy, and
        assert the outputs match the direct inference results."""
        init_env()
        alexnet_infer, input_lbns, output_lbns = make_alexnet_infer_func(
            batch_size, (DEFAULT_IMAGE_SIZE, DEFAULT_IMAGE_SIZE, 3)
        )
        flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
        # save model
        saved_model_path = "alexnet_models"
        model_name = "alexnet"
        model_version = 1
        model_version_path = os.path.join(saved_model_path, str(model_version))
        # Remove any leftover model directory from a previous run.
        if os.path.exists(saved_model_path) and os.path.isdir(saved_model_path):
            print(
                "WARNING: The model version path '{}' already exist"
                ", old version directory will be removed".format(model_version_path)
            )
            shutil.rmtree(saved_model_path)
        saved_model_builder = flow.saved_model.ModelBuilder(saved_model_path)
        signature_builder = (
            saved_model_builder.ModelName(model_name)
            .Version(model_version)
            .AddFunction(alexnet_infer)
            .AddSignature("regress")
        )
        for input_name, lbn in input_lbns.items():
            signature_builder.Input(input_name, lbn)
        for output_name, lbn in output_lbns.items():
            signature_builder.Output(output_name, lbn)
        saved_model_builder.Save()
        # test data
        # Use a different (halved) batch size to verify the loaded model is
        # not tied to the batch size it was saved with.
        new_batch_size = int(batch_size / 2)
        dataset = ImageNetRecordDataset(
            batch_size=new_batch_size,
            image_resize_size=DEFAULT_IMAGE_SIZE,
            data_format="NHWC",
        )
        image_list, label_list = dataset.load_batchs(num_batchs)
        assert image_list[0].shape[0] == new_batch_size
        image_size = tuple(image_list[0].shape[1:])
        # Reference run: direct inference with a freshly built job.
        flow.clear_default_session()
        alexnet_infer, _, _ = make_alexnet_infer_func(new_batch_size, image_size)
        flow.load_variables(flow.checkpoint.get(DEFAULT_CHECKPOINT_DIR))
        print("alexnet inference result:")
        origin_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            output = alexnet_infer(image, label)
            # origin_outputs.append(output.item())
            # print("iter#{:<6} output:".format(i), output.item())
            origin_outputs.append(output)
            print("iter#{:<6} output:".format(i), output)
        origin_outputs = np.array(origin_outputs, dtype=np.float32)
        # load model and run
        flow.clear_default_session()
        model_meta_file_path = os.path.join(
            saved_model_path, str(model_version), "saved_model.prototxt"
        )
        saved_model_proto = load_saved_model(model_meta_file_path)
        sess = flow.serving.InferenceSession()
        checkpoint_path = os.path.join(
            saved_model_path, str(model_version), saved_model_proto.checkpoint_dir
        )
        sess.set_checkpoint_path(checkpoint_path)
        graph_name = saved_model_proto.default_graph_name
        graph_def = saved_model_proto.graphs[graph_name]
        signature_def = graph_def.signatures[graph_def.default_signature_name]
        with sess.open(graph_name, signature_def, new_batch_size):
            sess.compile(graph_def.op_list)
        # sess.print_job_set()
        sess.launch()
        job_name = sess.list_jobs()[0]
        input_names = sess.list_inputs()
        print("input names:", input_names)
        for input_name in input_names:
            print(
                'input "{}" info: {}'.format(
                    input_name, sess.input_info(input_name, job_name)
                )
            )
        output_names = sess.list_outputs()
        print("output names:", output_names)
        for output_name in output_names:
            print(
                'output "{}" info: {}'.format(
                    output_name, sess.output_info(output_name, job_name)
                )
            )
        print("load saved alexnet and inference result:")
        print_input_info = False
        cmp_outputs = []
        for i, (image, label) in enumerate(zip(image_list, label_list)):
            if print_input_info:
                print("image shape: {}, dtype: {}".format(image.shape, image.dtype))
                print(
                    "label shape: {}, dtype: {}, data: {}".format(
                        label.shape, label.dtype, label
                    )
                )
                if i > 1:
                    print((image - image_list[i - 1]).mean())
            outputs = sess.run(alexnet_infer.__name__, image=image, label=label)
            # cmp_outputs.append(outputs[0].item())
            # print("iter#{:<6} output:".format(i), outputs[0].item())
            cmp_outputs.append(outputs[0])
            print("iter#{:<6} output:".format(i), outputs[0])
        cmp_outputs = np.array(cmp_outputs, dtype=np.float32)
        # The loaded model must reproduce the reference outputs.
        test_case.assertTrue(np.allclose(origin_outputs, cmp_outputs))
        sess.close()
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    unittest.main()
| [
"oneflow.config.machine_num",
"oneflow.serving.InferenceSession",
"oneflow.typing.Numpy.Placeholder",
"oneflow.clear_default_session",
"oneflow.config.cpu_device_num",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.transpose",
"oneflow.config.gpu_device_num",
"oneflow.saved_model.ModelBuilder",
"on... | [((3159, 3191), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3189, 3191), True, 'import oneflow as flow\n'), ((1221, 1236), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (1234, 1236), True, 'import oneflow as flow\n'), ((1241, 1267), 'oneflow.config.machine_num', 'flow.config.machine_num', (['(1)'], {}), '(1)\n', (1264, 1267), True, 'import oneflow as flow\n'), ((1272, 1301), 'oneflow.config.cpu_device_num', 'flow.config.cpu_device_num', (['(1)'], {}), '(1)\n', (1298, 1301), True, 'import oneflow as flow\n'), ((1306, 1335), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1332, 1335), True, 'import oneflow as flow\n'), ((1340, 1375), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (1369, 1375), True, 'import oneflow as flow\n'), ((1449, 1483), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""'}), "(type='train')\n", (1469, 1483), True, 'import oneflow as flow\n'), ((2003, 2039), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""'}), "(type='predict')\n", (2023, 2039), True, 'import oneflow as flow\n'), ((2996, 3023), 'oneflow.core.serving.saved_model_pb2.SavedModel', 'saved_model_pb.SavedModel', ([], {}), '()\n', (3021, 3023), True, 'import oneflow.core.serving.saved_model_pb2 as saved_model_pb\n'), ((8262, 8277), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8275, 8277), False, 'import unittest\n'), ((1553, 1599), 'alexnet.load_data', 'load_data', (['batch_size', 'data_dir', 'data_part_num'], {}), '(batch_size, data_dir, data_part_num)\n', (1562, 1599), False, 'from alexnet import load_data, alexnet\n'), ((1615, 1636), 'alexnet.alexnet', 'alexnet', (['image', 'label'], {}), '(image, label)\n', (1622, 1636), False, 'from alexnet import load_data, alexnet\n'), ((2375, 2415), 'oneflow.transpose', 'flow.transpose', (['image'], {'perm': '(0, 3, 1, 2)'}), 
'(image, perm=(0, 3, 1, 2))\n', (2389, 2415), True, 'import oneflow as flow\n'), ((2431, 2469), 'alexnet.alexnet', 'alexnet', (['image', 'label'], {'trainable': '(False)'}), '(image, label, trainable=False)\n', (2438, 2469), False, 'from alexnet import load_data, alexnet\n'), ((4113, 4160), 'oneflow.saved_model.ModelBuilder', 'flow.saved_model.ModelBuilder', (['saved_model_path'], {}), '(saved_model_path)\n', (4142, 4160), True, 'import oneflow as flow\n'), ((4699, 4810), 'ofrecord_dataset.ImageNetRecordDataset', 'ImageNetRecordDataset', ([], {'batch_size': 'new_batch_size', 'image_resize_size': 'DEFAULT_IMAGE_SIZE', 'data_format': '"""NHWC"""'}), "(batch_size=new_batch_size, image_resize_size=\n DEFAULT_IMAGE_SIZE, data_format='NHWC')\n", (4720, 4810), False, 'from ofrecord_dataset import ImageNetRecordDataset\n'), ((5035, 5063), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5061, 5063), True, 'import oneflow as flow\n'), ((5656, 5698), 'numpy.array', 'np.array', (['origin_outputs'], {'dtype': 'np.float32'}), '(origin_outputs, dtype=np.float32)\n', (5664, 5698), True, 'import numpy as np\n'), ((5737, 5765), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (5763, 5765), True, 'import oneflow as flow\n'), ((5976, 6007), 'oneflow.serving.InferenceSession', 'flow.serving.InferenceSession', ([], {}), '()\n', (6005, 6007), True, 'import oneflow as flow\n'), ((8097, 8136), 'numpy.array', 'np.array', (['cmp_outputs'], {'dtype': 'np.float32'}), '(cmp_outputs, dtype=np.float32)\n', (8105, 8136), True, 'import numpy as np\n'), ((2082, 2144), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['image_shape'], {'dtype': 'flow.float32'}), '(image_shape, dtype=flow.float32)\n', (2111, 2144), True, 'import oneflow as flow\n'), ((2161, 2221), 'oneflow.typing.Numpy.Placeholder', 'flow.typing.Numpy.Placeholder', (['label_shape'], {'dtype': 'flow.int32'}), '(label_shape, dtype=flow.int32)\n', 
(2190, 2221), True, 'import oneflow as flow\n'), ((3521, 3564), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['DEFAULT_CHECKPOINT_DIR'], {}), '(DEFAULT_CHECKPOINT_DIR)\n', (3540, 3564), True, 'import oneflow as flow\n'), ((3781, 3813), 'os.path.exists', 'os.path.exists', (['saved_model_path'], {}), '(saved_model_path)\n', (3795, 3813), False, 'import os\n'), ((3818, 3849), 'os.path.isdir', 'os.path.isdir', (['saved_model_path'], {}), '(saved_model_path)\n', (3831, 3849), False, 'import os\n'), ((4050, 4081), 'shutil.rmtree', 'shutil.rmtree', (['saved_model_path'], {}), '(saved_model_path)\n', (4063, 4081), False, 'import shutil\n'), ((5174, 5217), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['DEFAULT_CHECKPOINT_DIR'], {}), '(DEFAULT_CHECKPOINT_DIR)\n', (5193, 5217), True, 'import oneflow as flow\n'), ((8166, 8206), 'numpy.allclose', 'np.allclose', (['origin_outputs', 'cmp_outputs'], {}), '(origin_outputs, cmp_outputs)\n', (8177, 8206), True, 'import numpy as np\n'), ((1677, 1731), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[1e-05]'], {}), '([], [1e-05])\n', (1718, 1731), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.blob_trait as blob_trait
from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.hob as hob
import oneflow.python.eager.eager_blob_util as eager_blob_util
import oneflow.python.eager.blob_register as blob_register_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow_api.oneflow.core.job.placement as placement_cfg
import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util
import oneflow_api
import traceback
import sys
blob_register = blob_register_util.GetDefaultBlobRegister()
def RemoteBlob(lbi, **kw):
    """Construct the remote blob matching the current execution mode.

    Exactly one of ``EagerLogicalBlob`` / ``LazyRemoteBlob`` is enabled at
    any time; ``enable_if.unique`` selects and returns it.
    """
    make_blob = enable_if.unique([EagerLogicalBlob, LazyRemoteBlob])
    return make_blob(lbi, **kw)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerLogicalBlob(lbi, **kw):
    """Build an eager blob (mirrored or consistent) for logical blob id ``lbi``.

    Recognized keyword args: ``job_name``, ``blob_object``, ``distribute``.
    """
    current_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    # Normalize a protobuf lbi into the cfg LogicalBlobId the C++ API expects.
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(current_job, lbn):
        blob_type = oneflow_api.EagerMirroredBlob
    else:
        blob_type = oneflow_api.EagerConsistentBlob
    job_name = kw["job_name"] if kw.get("job_name") is not None else ""
    blob_object = kw.get("blob_object")
    if "distribute" in kw:
        distribute = kw["distribute"]
    else:
        distribute = oneflow_api.distribute.auto()
    return blob_type(lbi, blob_object, blob_register, job_name, distribute)
@enable_if.condition(~hob.eager_execution_enabled)
def LazyRemoteBlob(lbi, **kw):
    """Build a lazy blob (mirrored or consistent) for logical blob id ``lbi``.

    Recognized keyword args: ``job_name``, ``distribute``.
    """
    current_job = oneflow_api.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(current_job, lbn):
        blob_type = oneflow_api.LazyMirroredBlob
    else:
        blob_type = oneflow_api.LazyConsistentBlob
    # Normalize a protobuf lbi into the cfg LogicalBlobId the C++ API expects.
    if not isinstance(lbi, lbi_util.LogicalBlobId):
        cfg_lbi = lbi_util.LogicalBlobId()
        cfg_lbi.set_op_name(lbi.op_name)
        cfg_lbi.set_blob_name(lbi.blob_name)
        lbi = cfg_lbi
    job_name = kw["job_name"] if kw.get("job_name") is not None else ""
    if "distribute" in kw:
        distribute = kw["distribute"]
    else:
        distribute = oneflow_api.distribute.auto()
    return blob_type(lbi, job_name, distribute)
@property
def dtype(self):
    """This blob's element type as a ``oneflow.dtype`` subclass."""
    oneflow_dtype = convert_proto_dtype_to_oneflow_dtype(self.get_dtype())
    assert issubclass(oneflow_dtype, dtype_util.dtype)
    return oneflow_dtype
def with_distribute(self, distribute):
    """Return a copy of this blob whose distribute strategy is ``distribute``."""
    clone = type(self)(self.lbi, self.job_name, oneflow_api.distribute.auto())
    clone.set_distribute(distribute)
    return clone
def with_gradient_distribute(self, distribute):
    # Route gradients through a parallel_cast that applies `distribute`.
    return oneflow.parallel_cast(self, gradient_distribute=distribute)
def get_lazy_shape_log_warning(self):
    """Warning text for reading a consistent blob's shape under mirrored view;
    empty string when the mirrored view is not active."""
    if not oneflow.scope.mirrored_view_enabled():
        return ""
    return "\n".join(
        (
            "WARNING:",
            "You access a consistent blob shape in mirrored view, there may be problems,",
            "you should add 'x = flow.cast_to_current_logical_view(x)'.",
        )
    )
def get_mirror_shape_log_warning(self):
    """Warning text for reading a mirrored blob's shape under consistent view;
    empty string when the consistent view is not active."""
    if not oneflow.scope.consistent_view_enabled():
        return ""
    return "\n".join(
        (
            "WARNING:",
            "You access a mirrored blob shape in consistent view, there may be problems,",
            "you should add 'x = flow.cast_to_current_logical_view(x)'.",
        )
    )
def RegisterMethod4BlobDef(blob_class):
    """Attach the shared blob accessors defined above to ``blob_class``."""
    for attr_name, attr in (
        ("dtype", dtype),
        ("with_distribute", with_distribute),
        ("with_gradient_distribute", with_gradient_distribute),
    ):
        setattr(blob_class, attr_name, attr)
def RegisterMethod4LazyConsistentBlob():
    # Install the shared accessors plus the consistent-view shape warning.
    RegisterMethod4BlobDef(oneflow_api.LazyConsistentBlob)
    oneflow_api.LazyConsistentBlob.get_lazy_shape_log_warning = (
        get_lazy_shape_log_warning
    )
def RegisterMethod4LazyMirroredBlob():
    # Install the shared accessors plus the mirrored-view shape warning.
    RegisterMethod4BlobDef(oneflow_api.LazyMirroredBlob)
    oneflow_api.LazyMirroredBlob.get_mirror_shape_log_warning = (
        get_mirror_shape_log_warning
    )
@property
def sub_consistent_blob_list(self):
    # Always raises: eager blobs do not provide sub consistent blobs.
    raise NotImplementedError
def numpy(self, rank=None):
    """Fetch this blob's data as an ndarray.

    Args:
        rank: device rank to read from; ``None`` means the whole blob
            (single-rank blobs read rank 0, multi-rank static blobs are
            fetched as one consistent array).
    """
    if rank is not None:
        return self._NumpyAt(rank)
    if self.numpy_size() == 1:
        return self._NumpyAt(0)
    # Whole-blob fetch only works for static, non-tensor-list blobs.
    assert not self.is_dynamic
    assert not self.is_tensor_list
    return self._Numpy()
def numpy_list(self, rank=None):
    """Fetch a dynamic tensor-list blob's data.

    Args:
        rank: device rank to select; ``None`` returns the full per-rank list.
    """
    assert self.is_tensor_list
    assert self.is_dynamic
    mirrored_list = self._NumpyMirroredList()
    if rank is None:
        return mirrored_list
    else:
        # NOTE(review): this reads `self.blob_object_` while `_NumpyAt` uses
        # `self.blob_object` -- confirm the trailing-underscore attribute
        # actually exists on the bound class.
        parallel_num = self.blob_object_.parallel_desc_symbol.parallel_num
        assert rank >= 0
        assert rank < parallel_num
        assert len(mirrored_list) == parallel_num
        return mirrored_list[rank]
def _NumpyAt(self, rank):
    """Fetch this (non-tensor-list) blob's ndarray for device ``rank``."""
    assert self.is_tensor_list is not True
    assert 0 <= rank < self.blob_object.parallel_desc_symbol.parallel_num
    return self._NumpyMirroredList()[rank]
def _Numpy(self):
    """Fetch the whole blob as one ndarray by boxing it onto a single device."""
    assert self.is_tensor_list is not True
    def FetchBlobNumpy(blob_object):
        consistent_blob_name = None
        def BoxingToSingleDevice(builder):
            # Target placement: device 0 of machine 0, same device tag as the blob.
            parallel_conf = placement_cfg.ParallelConf()
            parallel_conf.set_device_tag(blob_object.parallel_desc_symbol.device_tag)
            parallel_conf.add_device_name("{}:{}".format(0, 0))
            tmp_parallel_desc_symbol = builder.GetParallelDescSymbol(parallel_conf)
            tmp_op_arg_parallel_attr = oneflow_api.OpArgParallelAttribute(
                tmp_parallel_desc_symbol,
                str(blob_object.op_arg_parallel_attr.sbp_parallel),
                str(blob_object.op_arg_parallel_attr.opt_mirrored_parallel),
            )
            # Box under the blob's own placement scope.
            with oneflow.scope.placement(
                self.parallel_conf.device_tag(), list(self.parallel_conf.device_name()),
            ):
                tmp_blob_object = boxing_util.BoxingTo(
                    builder, blob_object, tmp_op_arg_parallel_attr
                )
            nonlocal consistent_blob_name
            consistent_blob_name = "{}-consistent".format(self.logical_blob_name)
            # Register the boxed object once under its "-consistent" name.
            if not blob_register.HasObject4BlobName(consistent_blob_name):
                blob_register.SetObject4BlobName(consistent_blob_name, tmp_blob_object)
        vm_util.LogicalRun(BoxingToSingleDevice)
        return oneflow_api.EagerPhysicalBlob(
            consistent_blob_name,
            blob_register,
            eager_blob_util._GetPhysicalBlobHeaderCache,
        ).numpy()
    # Cache per blob object so repeated fetches reuse the copied data.
    blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
    return blob_cache.GetCachedNumpy(FetchBlobNumpy)
def _NumpyMirroredList(self):
    """Fetch one ndarray (or ndarray nest, for tensor lists) per device rank."""
    physical_blob_objects = []
    def UnpackLogicalBlobToPhysicalBlobs(builder):
        # Split the logical blob into its per-device physical blob objects.
        nonlocal physical_blob_objects
        physical_blob_objects = builder.UnpackLogicalBlobToPhysicalBlobs(
            self.blob_object
        )
    def GetPhyBlobNumpy(i, phy_blob_object):
        # Register the physical blob under a per-rank name, then pull its data.
        name = "{}/{}".format(self.logical_blob_name, i)
        blob_register.SetObject4BlobName(name, phy_blob_object)
        return (
            oneflow_api.EagerPhysicalBlob(
                name, blob_register, eager_blob_util._GetPhysicalBlobHeaderCache
            ).numpy_list()
            if self.is_tensor_list
            else oneflow_api.EagerPhysicalBlob(
                name, blob_register, eager_blob_util._GetPhysicalBlobHeaderCache
            ).numpy()
        )
    def FetchBlobNumpyMirroredList(blob_object):
        vm_util.LogicalRun(UnpackLogicalBlobToPhysicalBlobs)
        return [
            GetPhyBlobNumpy(i, phy_blob_object)
            for i, phy_blob_object in enumerate(physical_blob_objects)
        ]
    # Cache per blob object so repeated fetches reuse the copied data.
    blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
    return blob_cache.GetCachedNumpyMirroredList(FetchBlobNumpyMirroredList)
def RegisterMethod4EagerBlobTrait():
    """Attach the numpy fetch helpers defined above to ``oneflow_api.EagerBlobTrait``."""
    trait = oneflow_api.EagerBlobTrait
    trait.sub_consistent_blob_list = sub_consistent_blob_list
    trait.dtype = dtype
    trait._NumpyMirroredList = _NumpyMirroredList
    trait._Numpy = _Numpy
    trait._NumpyAt = _NumpyAt
    trait.numpy_list = numpy_list
    trait.numpy = numpy
def eager_with_distribute(self, distribute):
    """Return a copy of this eager blob with ``distribute`` as its strategy."""
    clone = type(self)(
        self.lbi,
        blob_object=self.blob_object,
        blob_register=blob_register,
        job_name=self.job_name,
        distribute=self.distribute,
    )
    clone.set_distribute(distribute)
    return clone
def RegisterMethod4EagerConsistentBlob():
    """Attach dtype/distribute helpers to ``oneflow_api.EagerConsistentBlob``."""
    blob_cls = oneflow_api.EagerConsistentBlob
    blob_cls.dtype = dtype
    blob_cls.with_distribute = eager_with_distribute
    blob_cls.with_gradient_distribute = with_gradient_distribute
| [
"oneflow.python.eager.boxing_util.BoxingTo",
"oneflow.python.lib.core.enable_if.condition",
"oneflow.python.eager.blob_register.GetDefaultBlobRegister",
"oneflow.scope.mirrored_view_enabled",
"oneflow.python.eager.vm_util.LogicalRun",
"oneflow.parallel_cast",
"oneflow.python.eager.blob_cache.FindOrCreat... | [((1670, 1713), 'oneflow.python.eager.blob_register.GetDefaultBlobRegister', 'blob_register_util.GetDefaultBlobRegister', ([], {}), '()\n', (1711, 1713), True, 'import oneflow.python.eager.blob_register as blob_register_util\n'), ((1835, 1904), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (1854, 1904), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2813, 2862), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(~hob.eager_execution_enabled)'], {}), '(~hob.eager_execution_enabled)\n', (2832, 2862), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1753, 1805), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerLogicalBlob, LazyRemoteBlob]'], {}), '([EagerLogicalBlob, LazyRemoteBlob])\n', (1769, 1805), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1953, 2004), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2002, 2004), False, 'import oneflow_api\n'), ((2307, 2367), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (2352, 2367), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((2639, 2668), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (2666, 2668), False, 'import oneflow_api\n'), ((2909, 2960), 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow_api.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2958, 2960), False, 'import oneflow_api\n'), ((3059, 3119), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), 
'(job_name, lbn)\n', (3104, 3119), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((3502, 3531), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (3529, 3531), False, 'import oneflow_api\n'), ((4028, 4087), 'oneflow.parallel_cast', 'oneflow.parallel_cast', (['self'], {'gradient_distribute': 'distribute'}), '(self, gradient_distribute=distribute)\n', (4049, 4087), False, 'import oneflow\n'), ((4135, 4172), 'oneflow.scope.mirrored_view_enabled', 'oneflow.scope.mirrored_view_enabled', ([], {}), '()\n', (4170, 4172), False, 'import oneflow\n'), ((4484, 4523), 'oneflow.scope.consistent_view_enabled', 'oneflow.scope.consistent_view_enabled', ([], {}), '()\n', (4521, 4523), False, 'import oneflow\n'), ((7990, 8045), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (8027, 8045), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((9172, 9227), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (9209, 9227), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((2119, 2143), 'oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (2141, 2143), True, 'import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3240, 3264), 'oneflow_api.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (3262, 3264), True, 'import oneflow_api.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3886, 3915), 'oneflow_api.distribute.auto', 'oneflow_api.distribute.auto', ([], {}), '()\n', (3913, 3915), False, 'import oneflow_api\n'), ((7749, 7789), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BoxingToSingleDevice'], {}), '(BoxingToSingleDevice)\n', (7767, 7789), True, 
'import oneflow.python.eager.vm_util as vm_util\n'), ((8955, 9007), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['UnpackLogicalBlobToPhysicalBlobs'], {}), '(UnpackLogicalBlobToPhysicalBlobs)\n', (8973, 9007), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((6627, 6655), 'oneflow_api.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (6653, 6655), True, 'import oneflow_api.oneflow.core.job.placement as placement_cfg\n'), ((7346, 7414), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['builder', 'blob_object', 'tmp_op_arg_parallel_attr'], {}), '(builder, blob_object, tmp_op_arg_parallel_attr)\n', (7366, 7414), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((7805, 7920), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['consistent_blob_name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(consistent_blob_name, blob_register,\n eager_blob_util._GetPhysicalBlobHeaderCache)\n', (7834, 7920), False, 'import oneflow_api\n'), ((8562, 8662), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(name, blob_register, eager_blob_util.\n _GetPhysicalBlobHeaderCache)\n', (8591, 8662), False, 'import oneflow_api\n'), ((8753, 8853), 'oneflow_api.EagerPhysicalBlob', 'oneflow_api.EagerPhysicalBlob', (['name', 'blob_register', 'eager_blob_util._GetPhysicalBlobHeaderCache'], {}), '(name, blob_register, eager_blob_util.\n _GetPhysicalBlobHeaderCache)\n', (8782, 8853), False, 'import oneflow_api\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.