code stringlengths 118 171k | apis list | extract_api stringlengths 145 164k |
|---|---|---|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestGraphBlock(flow.unittest.TestCase):
def test_module_has_custom_func(test_case):
class CustomModuleHasFunc(flow.nn.Module):
def __init__(self):
super().__init__()
self.data_mem = 10
def forward(self, x):
return self._custom_func(x)
def _custom_func(self, x):
test_case.assertEqual(self.data_mem, 10)
return x
class CustomGraphHasFunc(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModuleHasFunc()
def build(self, x):
return self.m(x)
g = CustomGraphHasFunc()
x = np.ones((10, 10))
x = flow.tensor(x, dtype=flow.float32)
out = g(x)
test_case.assertTrue(np.array_equal(x.numpy(), out.numpy()))
def test_block_with_parameter(test_case):
device = "cuda"
linear = flow.nn.Linear(3, 8)
linear = linear.to(device)
flow.nn.init.constant_(linear.weight, 2.068758)
flow.nn.init.constant_(linear.bias, 0.23)
of_sgd = flow.optim.SGD(linear.parameters(), lr=0.001, momentum=0.9)
x = flow.tensor(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=flow.float32,
device=device,
requires_grad=False,
)
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.linear = linear
def forward(self, x):
return self._forward_impl(x)
def _forward_impl(self, x):
test_case.assertTrue(isinstance(self.linear, flow.nn.graph.Block))
return self.linear(x)
class LinearTrainGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModule()
self.add_optimizer(of_sgd)
def build(self, x):
out = self.m(x)
out = out.sum()
out.backward()
test_case.assertTrue(self.m.linear.weight.is_lazy)
return out
linear_t_g = LinearTrainGraph()
linear_t_g(x)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.Linear",
"oneflow.tensor",
"oneflow.nn.init.constant_"
] | [((763, 795), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (793, 795), True, 'import oneflow as flow\n'), ((703, 737), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (712, 737), False, 'import os\n'), ((3510, 3525), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3523, 3525), False, 'import unittest\n'), ((1520, 1537), 'numpy.ones', 'np.ones', (['(10, 10)'], {}), '((10, 10))\n', (1527, 1537), True, 'import numpy as np\n'), ((1550, 1584), 'oneflow.tensor', 'flow.tensor', (['x'], {'dtype': 'flow.float32'}), '(x, dtype=flow.float32)\n', (1561, 1584), True, 'import oneflow as flow\n'), ((1761, 1781), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(8)'], {}), '(3, 8)\n', (1775, 1781), True, 'import oneflow as flow\n'), ((1825, 1872), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.068758)'], {}), '(linear.weight, 2.068758)\n', (1847, 1872), True, 'import oneflow as flow\n'), ((1881, 1922), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.bias', '(0.23)'], {}), '(linear.bias, 0.23)\n', (1903, 1922), True, 'import oneflow as flow\n'), ((2013, 2424), 'oneflow.tensor', 'flow.tensor', (['[[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987, -\n 2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]]'], {'dtype': 'flow.float32', 'device': 'device', 'requires_grad': '(False)'}), '([[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -\n 0.28708987, -2.18369248], [0.35217619, -0.67095644, -1.58943879], [\n 0.08086036, -1.81075924, 1.20752494], [0.8901075, -0.49976737, -\n 1.07153746], [-0.44872912, -1.07275683, 0.06256855], [-0.22556897, \n 0.74798368, 0.90416439], [0.48339456, -2.32742195, -0.59321527]], dtype\n 
=flow.float32, device=device, requires_grad=False)\n', (2024, 2424), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def flip_op(input, dims):
"""
Reverse the order of a n-D tensor along given axis in dims.
.. note::
`flow.flip` makes a copy of :attr:`input`'s data. This is different from NumPy's `np.flip`,
which returns a view in constant time. Since copying a tensor's data is more work than viewing that data,
`flow.flip` is expected to be slower than `np.flip`.
Args:
input (Tensor): the input tensor
dims (a list or tuple): axis to flip on
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> np_arr = np.arange(0, 8).reshape((2, 2, 2)).astype(np.float32)
>>> input = flow.Tensor(np_arr)
>>> out = flow.flip(input, [0, 1])
>>> out
tensor([[[6., 7.],
[4., 5.]],
<BLANKLINE>
[[2., 3.],
[0., 1.]]], dtype=oneflow.float32)
"""
assert isinstance(dims, (int, list, tuple)), f"dims must be int, list or tuple"
if isinstance(dims, int):
dims = [dims]
input_len = len(input.shape)
assert len(dims) <= input_len, f"len of dims must less than len of input tensor"
new_dims = []
for i in dims:
if i < 0:
i += input_len
assert (
i < input_len
), f"IndexError: Dimension out of range (expected to be in range of {input_len}, but got {i})"
new_dims.append(i)
return flow.F.flip(input, new_dims)
@register_tensor_op("flip")
def flip_op_tensor(input, dims):
"""
See :func:`oneflow.flip`
"""
return flip_op(input, dims)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.framework.tensor.register_tensor_op",
"oneflow.F.flip"
] | [((2184, 2210), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""flip"""'], {}), "('flip')\n", (2202, 2210), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((2152, 2180), 'oneflow.F.flip', 'flow.F.flip', (['input', 'new_dims'], {}), '(input, new_dims)\n', (2163, 2180), True, 'import oneflow as flow\n'), ((2374, 2410), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2389, 2410), False, 'import doctest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.core.operator import op_conf_pb2 as op_conf_util
def _get_initializer():
return flow.variance_scaling_initializer(2.0, "fan_out", "random_normal", "NCHW")
def _get_regularizer():
return flow.regularizers.l2(0.0005)
def _conv2d_layer(
name,
input,
filters,
kernel_size=3,
strides=1,
padding="SAME",
group_num=1,
data_format="NCHW",
dilation_rate=1,
activation=None,
use_bias=False,
weight_initializer=_get_initializer(),
bias_initializer=flow.zeros_initializer(),
weight_regularizer=_get_regularizer(),
bias_regularizer=_get_regularizer(),
):
weight_shape = (
int(filters),
int(input.shape[1] / group_num),
int(kernel_size),
int(kernel_size),
)
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
dtype=input.dtype,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
output = flow.nn.conv2d(
input,
weight,
strides,
padding,
None,
data_format,
dilation_rate,
groups=group_num,
name=name,
)
if use_bias:
bias = flow.get_variable(
name + "-bias",
shape=(filters,),
dtype=input.dtype,
initializer=bias_initializer,
regularizer=bias_regularizer,
)
output = flow.nn.bias_add(output, bias, data_format)
if activation is not None:
if activation == op_conf_util.kRelu:
output = flow.math.relu(output)
else:
raise NotImplementedError
return output
def _batch_norm(
inputs,
epsilon,
center=True,
scale=True,
trainable=True,
is_training=True,
name=None,
):
return flow.layers.batch_normalization(
inputs=inputs,
axis=1,
momentum=0.9,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer=flow.zeros_initializer(),
gamma_initializer=flow.ones_initializer(),
beta_regularizer=_get_regularizer(),
gamma_regularizer=_get_regularizer(),
moving_mean_initializer=flow.zeros_initializer(),
moving_variance_initializer=flow.ones_initializer(),
trainable=trainable,
training=is_training,
name=name,
)
def _prelu(inputs, name=None):
return flow.layers.prelu(
inputs,
alpha_initializer=flow.constant_initializer(0.25),
alpha_regularizer=_get_regularizer(),
shared_axes=[2, 3],
name=name,
)
def _avg_pool(inputs, pool_size, strides, padding, name=None):
return flow.nn.avg_pool2d(
input=inputs, ksize=pool_size, strides=strides, padding=padding,
)
def _dropout(input_blob, dropout_prob):
return flow.nn.dropout(input_blob, rate=dropout_prob)
def Linear(
input_blob,
num_filter=1,
kernel=None,
stride=None,
pad="valid",
num_group=1,
bn_is_training=True,
name=None,
suffix="",
):
conv = _conv2d_layer(
name="%s%s_conv2d" % (name, suffix),
input=input_blob,
filters=num_filter,
kernel_size=kernel,
strides=stride,
padding=pad,
group_num=num_group,
use_bias=False,
dilation_rate=1,
activation=None,
)
bn = _batch_norm(
conv,
epsilon=0.001,
is_training=bn_is_training,
name="%s%s_batchnorm" % (name, suffix),
)
return bn
def residual_unit_v3(in_data, num_filter, stride, dim_match, bn_is_training, name):
suffix = ""
use_se = 0
bn1 = _batch_norm(
in_data,
epsilon=2e-5,
is_training=bn_is_training,
name="%s%s_bn1" % (name, suffix),
)
conv1 = _conv2d_layer(
name="%s%s_conv1" % (name, suffix),
input=bn1,
filters=num_filter,
kernel_size=3,
strides=[1, 1],
padding="same",
use_bias=False,
dilation_rate=1,
activation=None,
)
bn2 = _batch_norm(
conv1,
epsilon=2e-5,
is_training=bn_is_training,
name="%s%s_bn2" % (name, suffix),
)
prelu = _prelu(bn2, name="%s%s_relu1" % (name, suffix))
conv2 = _conv2d_layer(
name="%s%s_conv2" % (name, suffix),
input=prelu,
filters=num_filter,
kernel_size=3,
strides=stride,
padding="same",
use_bias=False,
dilation_rate=1,
activation=None,
)
bn3 = _batch_norm(
conv2,
epsilon=2e-5,
is_training=bn_is_training,
name="%s%s_bn3" % (name, suffix),
)
if use_se:
# se begin
input_blob = _avg_pool(bn3, pool_size=[7, 7], strides=[1, 1], padding="VALID")
input_blob = _conv2d_layer(
name="%s%s_se_conv1" % (name, suffix),
input=input_blob,
filters=num_filter // 16,
kernel_size=1,
strides=[1, 1],
padding="valid",
use_bias=True,
dilation_rate=1,
activation=None,
)
input_blob = _prelu(input_blob, name="%s%s_se_relu1" % (name, suffix))
input_blob = _conv2d_layer(
name="%s%s_se_conv2" % (name, suffix),
input=input_blob,
filters=num_filter,
kernel_size=1,
strides=[1, 1],
padding="valid",
use_bias=True,
dilation_rate=1,
activation=None,
)
input_blob = flow.math.sigmoid(input=input_blob)
bn3 = flow.math.multiply(x=input_blob, y=bn3)
# se end
if dim_match:
input_blob = in_data
else:
input_blob = _conv2d_layer(
name="%s%s_conv1sc" % (name, suffix),
input=in_data,
filters=num_filter,
kernel_size=1,
strides=stride,
padding="valid",
use_bias=False,
dilation_rate=1,
activation=None,
)
input_blob = _batch_norm(
input_blob,
epsilon=2e-5,
is_training=bn_is_training,
name="%s%s_sc" % (name, suffix),
)
identity = flow.math.add(x=bn3, y=input_blob)
return identity
def Resnet100(input_blob, embedding_size, fc_type="GDC", bn_is_training=True, **kw):
filter_list = [64, 64, 128, 256, 512]
num_stages = 4
units = [3, 13, 30, 3]
input_blob = _conv2d_layer(
name="conv0",
input=input_blob,
filters=filter_list[0],
kernel_size=3,
strides=[1, 1],
padding="same",
use_bias=False,
dilation_rate=1,
activation=None,
)
input_blob = _batch_norm(
input_blob, epsilon=2e-5, is_training=bn_is_training, name="bn0"
)
input_blob = _prelu(input_blob, name="relu0")
for i in range(num_stages):
input_blob = residual_unit_v3(
input_blob,
filter_list[i + 1],
[2, 2],
False,
bn_is_training=bn_is_training,
name="stage%d_unit%d" % (i + 1, 1),
)
for j in range(units[i] - 1):
input_blob = residual_unit_v3(
input_blob,
filter_list[i + 1],
[1, 1],
True,
bn_is_training=bn_is_training,
name="stage%d_unit%d" % (i + 1, j + 2),
)
if fc_type == "GDC":
input_blob = Linear(
input_blob,
num_filter=512,
num_group=512,
kernel=7,
pad="valid",
stride=[1, 1],
bn_is_training=bn_is_training,
name="conv_6dw7_7",
)
input_blob = flow.reshape(input_blob, (input_blob.shape[0], -1))
pre_fc1 = flow.layers.dense(
inputs=input_blob,
units=embedding_size,
activation=None,
use_bias=True,
kernel_initializer=_get_initializer(),
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=_get_regularizer(),
bias_regularizer=_get_regularizer(),
trainable=True,
name="pre_fc1",
)
fc1 = _batch_norm(
pre_fc1,
epsilon=2e-5,
center=True,
scale=False,
is_training=bn_is_training,
name="fc1",
)
elif fc_type == "E":
input_blob = _batch_norm(
input_blob, epsilon=2e-5, is_training=bn_is_training, name="bn1"
)
input_blob = _dropout(input_blob, dropout_prob=0.4)
input_blob = flow.reshape(input_blob, (input_blob.shape[0], -1))
pre_fc1 = flow.layers.dense(
inputs=input_blob,
units=embedding_size,
activation=None,
use_bias=True,
kernel_initializer=_get_initializer(),
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=_get_regularizer(),
bias_regularizer=_get_regularizer(),
trainable=True,
name="pre_fc1",
)
fc1 = _batch_norm(
pre_fc1,
epsilon=2e-5,
center=True,
scale=False,
is_training=bn_is_training,
name="fc1",
)
elif fc_type == "FC":
input_blob = _batch_norm(
input_blob, epsilon=2e-5, is_training=bn_is_training, name="bn1"
)
input_blob = flow.reshape(input_blob, (input_blob.shape[0], -1))
pre_fc1 = flow.layers.dense(
inputs=input_blob,
units=embedding_size,
activation=None,
use_bias=True,
kernel_initializer=_get_initializer(),
bias_initializer=flow.zeros_initializer(),
kernel_regularizer=_get_regularizer(),
bias_regularizer=_get_regularizer(),
trainable=True,
name="pre_fc1",
)
fc1 = _batch_norm(
pre_fc1,
epsilon=2e-5,
center=True,
scale=False,
is_training=bn_is_training,
name="fc1",
)
else:
print("unimplemented")
return fc1
| [
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.math.multiply",
"oneflow.compatible.single_client.regularizers.l2",
"oneflow.compatible.single_client.math.relu",
"oneflow.compatible.single_client.math.sigmoid",
"oneflow.compatible.single_client.nn.conv2d",
"oneflo... | [((767, 841), 'oneflow.compatible.single_client.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2.0)', '"""fan_out"""', '"""random_normal"""', '"""NCHW"""'], {}), "(2.0, 'fan_out', 'random_normal', 'NCHW')\n", (800, 841), True, 'from oneflow.compatible import single_client as flow\n'), ((879, 907), 'oneflow.compatible.single_client.regularizers.l2', 'flow.regularizers.l2', (['(0.0005)'], {}), '(0.0005)\n', (899, 907), True, 'from oneflow.compatible import single_client as flow\n'), ((1184, 1208), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1206, 1208), True, 'from oneflow.compatible import single_client as flow\n'), ((1452, 1594), 'oneflow.compatible.single_client.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer', 'regularizer': 'weight_regularizer'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer, regularizer=weight_regularizer)\n", (1469, 1594), True, 'from oneflow.compatible import single_client as flow\n'), ((1651, 1765), 'oneflow.compatible.single_client.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'None', 'data_format', 'dilation_rate'], {'groups': 'group_num', 'name': 'name'}), '(input, weight, strides, padding, None, data_format,\n dilation_rate, groups=group_num, name=name)\n', (1665, 1765), True, 'from oneflow.compatible import single_client as flow\n'), ((3348, 3436), 'oneflow.compatible.single_client.nn.avg_pool2d', 'flow.nn.avg_pool2d', ([], {'input': 'inputs', 'ksize': 'pool_size', 'strides': 'strides', 'padding': 'padding'}), '(input=inputs, ksize=pool_size, strides=strides, padding=\n padding)\n', (3366, 3436), True, 'from oneflow.compatible import single_client as flow\n'), ((3500, 3546), 'oneflow.compatible.single_client.nn.dropout', 'flow.nn.dropout', (['input_blob'], {'rate': 
'dropout_prob'}), '(input_blob, rate=dropout_prob)\n', (3515, 3546), True, 'from oneflow.compatible import single_client as flow\n'), ((6912, 6946), 'oneflow.compatible.single_client.math.add', 'flow.math.add', ([], {'x': 'bn3', 'y': 'input_blob'}), '(x=bn3, y=input_blob)\n', (6925, 6946), True, 'from oneflow.compatible import single_client as flow\n'), ((1873, 2007), 'oneflow.compatible.single_client.get_variable', 'flow.get_variable', (["(name + '-bias')"], {'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer', 'regularizer': 'bias_regularizer'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer, regularizer=bias_regularizer)\n", (1890, 2007), True, 'from oneflow.compatible import single_client as flow\n'), ((2092, 2135), 'oneflow.compatible.single_client.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (2108, 2135), True, 'from oneflow.compatible import single_client as flow\n'), ((6227, 6262), 'oneflow.compatible.single_client.math.sigmoid', 'flow.math.sigmoid', ([], {'input': 'input_blob'}), '(input=input_blob)\n', (6244, 6262), True, 'from oneflow.compatible import single_client as flow\n'), ((6277, 6316), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', ([], {'x': 'input_blob', 'y': 'bn3'}), '(x=input_blob, y=bn3)\n', (6295, 6316), True, 'from oneflow.compatible import single_client as flow\n'), ((8454, 8505), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['input_blob', '(input_blob.shape[0], -1)'], {}), '(input_blob, (input_blob.shape[0], -1))\n', (8466, 8505), True, 'from oneflow.compatible import single_client as flow\n'), ((2234, 2256), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['output'], {}), '(output)\n', (2248, 2256), True, 'from oneflow.compatible import single_client as flow\n'), ((2664, 2688), 'oneflow.compatible.single_client.zeros_initializer', 
'flow.zeros_initializer', ([], {}), '()\n', (2686, 2688), True, 'from oneflow.compatible import single_client as flow\n'), ((2716, 2739), 'oneflow.compatible.single_client.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (2737, 2739), True, 'from oneflow.compatible import single_client as flow\n'), ((2864, 2888), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2886, 2888), True, 'from oneflow.compatible import single_client as flow\n'), ((2926, 2949), 'oneflow.compatible.single_client.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (2947, 2949), True, 'from oneflow.compatible import single_client as flow\n'), ((3140, 3171), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0.25)'], {}), '(0.25)\n', (3165, 3171), True, 'from oneflow.compatible import single_client as flow\n'), ((9362, 9413), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['input_blob', '(input_blob.shape[0], -1)'], {}), '(input_blob, (input_blob.shape[0], -1))\n', (9374, 9413), True, 'from oneflow.compatible import single_client as flow\n'), ((8744, 8768), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (8766, 8768), True, 'from oneflow.compatible import single_client as flow\n'), ((10210, 10261), 'oneflow.compatible.single_client.reshape', 'flow.reshape', (['input_blob', '(input_blob.shape[0], -1)'], {}), '(input_blob, (input_blob.shape[0], -1))\n', (10222, 10261), True, 'from oneflow.compatible import single_client as flow\n'), ((9652, 9676), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (9674, 9676), True, 'from oneflow.compatible import single_client as flow\n'), ((10500, 10524), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (10522, 10524), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestMaxModule(flow.unittest.TestCase):
@autotest(n=5, check_allclose=False, check_graph=False)
def test_max_reduce_random_dim(test_case):
device = random_device()
ndim = random().to(int).value()
x = random_tensor(ndim=ndim, dim0=random(1, 8))
y = x.to(device)
dim = random(-ndim, ndim).to(int).value()
keep_dims = random_bool().value()
y = torch.max(x, dim=dim, keepdim=keep_dims)
# pytorch result is an instance of class 'torch.return_types.max', but oneflow is tuple
test_case.assertTrue(
np.allclose(
y.oneflow[0].detach().cpu().numpy(),
y.pytorch.values.detach().cpu().numpy(),
rtol=0.0001,
atol=1e-05,
)
)
test_case.assertTrue(
np.allclose(
y.oneflow[1].detach().cpu().numpy(),
y.pytorch.indices.detach().cpu().numpy(),
rtol=0.0001,
atol=1e-05,
)
)
y.oneflow[0].sum().backward()
y.pytorch.values.sum().backward()
test_case.assertTrue(
np.allclose(
x.oneflow.grad.detach().cpu().numpy(),
x.pytorch.grad.detach().cpu().numpy(),
rtol=0.0001,
atol=1e-05,
)
)
@autotest(n=5, check_graph=False)
def test_max_reduce_all_dim(test_case):
device = random_device()
ndim = random().to(int).value()
x = random_tensor(ndim=ndim, dim0=random(1, 8)).to(device)
return torch.max(x)
@autotest(n=5, check_graph=False)
def test_max_elementwise(test_case):
device = random_device()
ndim = random().to(int).value()
dims = [random(1, 8) for _ in range(ndim)]
x = random_tensor(ndim, *dims).to(device)
y = random_tensor(ndim, *dims).to(device)
return torch.max(x, y)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((730, 762), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (760, 762), True, 'import oneflow as flow\n'), ((2750, 2765), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2763, 2765), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow
import oneflow as flow
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.unittest
class SubModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = flow.nn.Conv2d(1, 1, 5)
self.relu = flow.nn.ReLU()
def forward(self, x):
x = self.conv1(x)
x = self.relu(x)
return x
class CustomModule(flow.nn.Module):
def __init__(self):
super().__init__()
self.layer = SubModule()
self.fc1 = flow.nn.Linear(36, 4)
self.register_buffer("dummy_buff", flow.Tensor(1, 4))
def forward(self, x):
x = self.layer(x)
x = oneflow._C.flatten(x, 1)
x = self.fc1(x) + self.dummy_buff
return x
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestGraphWithSysConf(flow.unittest.TestCase):
def test_graph_config(test_case):
flow.boxing.enable_fusion(True)
flow.boxing.nccl.set_fusion_threshold_mbytes(800)
flow.boxing.nccl.set_fusion_max_ops_num(10)
flow.boxing.nccl.allow_fuse_all_reduce(True)
flow.boxing.nccl.allow_fuse_reduce_scatter(True)
flow.boxing.nccl.allow_fuse_all_gather(True)
flow.boxing.nccl.allow_fuse_reduce(True)
flow.boxing.nccl.allow_fuse_broadcast(True)
flow.boxing.nccl.allow_fuse_mixed_ops(True)
flow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce(True)
flow.boxing.nccl.set_stream_num(3)
flow.boxing.nccl.enable_all_to_all(True)
flow.boxing.nccl.enable_use_compute_stream(True)
flow.backends.cudnn.set_reserved_mem_mbytes(1000)
flow.backends.cudnn.enable_fused_normalization_add_relu(True)
flow.utils.load_library("")
class CustomGraphSysConf(flow.nn.Graph):
def __init__(self):
super().__init__()
self.m = CustomModule()
# amp
self.config.enable_amp(True)
grad_scaler = flow.amp.GradScaler(
init_scale=3000,
growth_factor=2.0,
backoff_factor=0.5,
growth_interval=1000,
)
self.set_grad_scaler(grad_scaler)
self.config.allow_fuse_model_update_ops(True)
self.config.allow_fuse_add_to_output(True)
self.config.allow_fuse_cast_scale(True)
self.config.set_gradient_accumulation_steps(100)
def build(self, x):
x = self.m(x)
return x
g = CustomGraphSysConf()
print("optimization conf: \n", g._optimization_conf_proto)
g._generate_config_proto()
print("graph conf: \n", g._config_proto)
| [
"oneflow.backends.cudnn.enable_fused_normalization_add_relu",
"oneflow.boxing.nccl.enable_use_compute_stream",
"oneflow._C.flatten",
"oneflow.boxing.nccl.allow_fuse_all_gather",
"oneflow.boxing.nccl.allow_fuse_mixed_ops",
"oneflow.boxing.nccl.allow_fuse_all_reduce",
"oneflow.nn.Linear",
"oneflow.boxin... | [((1475, 1507), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1505, 1507), True, 'import oneflow as flow\n'), ((1415, 1449), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1424, 1449), False, 'import os\n'), ((868, 891), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(5)'], {}), '(1, 1, 5)\n', (882, 891), True, 'import oneflow as flow\n'), ((912, 926), 'oneflow.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (924, 926), True, 'import oneflow as flow\n'), ((1163, 1184), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(36)', '(4)'], {}), '(36, 4)\n', (1177, 1184), True, 'import oneflow as flow\n'), ((1312, 1336), 'oneflow._C.flatten', 'oneflow._C.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1330, 1336), False, 'import oneflow\n'), ((1606, 1637), 'oneflow.boxing.enable_fusion', 'flow.boxing.enable_fusion', (['(True)'], {}), '(True)\n', (1631, 1637), True, 'import oneflow as flow\n'), ((1647, 1696), 'oneflow.boxing.nccl.set_fusion_threshold_mbytes', 'flow.boxing.nccl.set_fusion_threshold_mbytes', (['(800)'], {}), '(800)\n', (1691, 1696), True, 'import oneflow as flow\n'), ((1705, 1748), 'oneflow.boxing.nccl.set_fusion_max_ops_num', 'flow.boxing.nccl.set_fusion_max_ops_num', (['(10)'], {}), '(10)\n', (1744, 1748), True, 'import oneflow as flow\n'), ((1757, 1801), 'oneflow.boxing.nccl.allow_fuse_all_reduce', 'flow.boxing.nccl.allow_fuse_all_reduce', (['(True)'], {}), '(True)\n', (1795, 1801), True, 'import oneflow as flow\n'), ((1810, 1858), 'oneflow.boxing.nccl.allow_fuse_reduce_scatter', 'flow.boxing.nccl.allow_fuse_reduce_scatter', (['(True)'], {}), '(True)\n', (1852, 1858), True, 'import oneflow as flow\n'), ((1867, 1911), 'oneflow.boxing.nccl.allow_fuse_all_gather', 'flow.boxing.nccl.allow_fuse_all_gather', (['(True)'], {}), '(True)\n', (1905, 1911), True, 'import oneflow as flow\n'), ((1920, 1960), 'oneflow.boxing.nccl.allow_fuse_reduce', 
'flow.boxing.nccl.allow_fuse_reduce', (['(True)'], {}), '(True)\n', (1954, 1960), True, 'import oneflow as flow\n'), ((1969, 2012), 'oneflow.boxing.nccl.allow_fuse_broadcast', 'flow.boxing.nccl.allow_fuse_broadcast', (['(True)'], {}), '(True)\n', (2006, 2012), True, 'import oneflow as flow\n'), ((2021, 2064), 'oneflow.boxing.nccl.allow_fuse_mixed_ops', 'flow.boxing.nccl.allow_fuse_mixed_ops', (['(True)'], {}), '(True)\n', (2058, 2064), True, 'import oneflow as flow\n'), ((2073, 2132), 'oneflow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce', 'flow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce', (['(True)'], {}), '(True)\n', (2126, 2132), True, 'import oneflow as flow\n'), ((2141, 2175), 'oneflow.boxing.nccl.set_stream_num', 'flow.boxing.nccl.set_stream_num', (['(3)'], {}), '(3)\n', (2172, 2175), True, 'import oneflow as flow\n'), ((2184, 2224), 'oneflow.boxing.nccl.enable_all_to_all', 'flow.boxing.nccl.enable_all_to_all', (['(True)'], {}), '(True)\n', (2218, 2224), True, 'import oneflow as flow\n'), ((2233, 2281), 'oneflow.boxing.nccl.enable_use_compute_stream', 'flow.boxing.nccl.enable_use_compute_stream', (['(True)'], {}), '(True)\n', (2275, 2281), True, 'import oneflow as flow\n'), ((2291, 2340), 'oneflow.backends.cudnn.set_reserved_mem_mbytes', 'flow.backends.cudnn.set_reserved_mem_mbytes', (['(1000)'], {}), '(1000)\n', (2334, 2340), True, 'import oneflow as flow\n'), ((2349, 2410), 'oneflow.backends.cudnn.enable_fused_normalization_add_relu', 'flow.backends.cudnn.enable_fused_normalization_add_relu', (['(True)'], {}), '(True)\n', (2404, 2410), True, 'import oneflow as flow\n'), ((2420, 2447), 'oneflow.utils.load_library', 'flow.utils.load_library', (['""""""'], {}), "('')\n", (2443, 2447), True, 'import oneflow as flow\n'), ((1228, 1245), 'oneflow.Tensor', 'flow.Tensor', (['(1)', '(4)'], {}), '(1, 4)\n', (1239, 1245), True, 'import oneflow as flow\n'), ((2703, 2804), 'oneflow.amp.GradScaler', 'flow.amp.GradScaler', ([], {'init_scale': '(3000)', 
'growth_factor': '(2.0)', 'backoff_factor': '(0.5)', 'growth_interval': '(1000)'}), '(init_scale=3000, growth_factor=2.0, backoff_factor=0.5,\n growth_interval=1000)\n', (2722, 2804), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
def _test_consistent_normal(
test_case, placement, sbp, mean, std, shape, dtype, requires_grad
):
dtype = type_name_to_flow_type[dtype]
x = flow.normal(
mean,
std,
*shape,
placement=placement,
sbp=sbp,
dtype=dtype,
requires_grad=requires_grad,
)
test_case.assertEqual(x.shape, shape)
test_case.assertEqual(x.dtype, dtype)
test_case.assertEqual(x.sbp, sbp)
test_case.assertEqual(x.placement, placement)
test_case.assertEqual(x.requires_grad, requires_grad)
class TestNormalConsistent(flow.unittest.TestCase):
@globaltest
def test_normal_consistent(test_case):
arg_dict = OrderedDict()
arg_dict["mean"] = [-1, 0, 1]
arg_dict["std"] = [1, 2, 8]
arg_dict["shape"] = [(8, 8), (8, 8, 8), (8, 8, 8, 8)]
arg_dict["dtype"] = ["float32", "double"]
arg_dict["requires_grad"] = [True, False]
for arg in GenArgList(arg_dict):
for placement in all_placement():
for sbp in all_sbp(
placement, max_dim=len(arg[2]), except_partial_sum=True
):
_test_consistent_normal(test_case, placement, sbp, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.normal",
"oneflow.test_utils.test_util.GenArgList"
] | [((949, 1055), 'oneflow.normal', 'flow.normal', (['mean', 'std', '*shape'], {'placement': 'placement', 'sbp': 'sbp', 'dtype': 'dtype', 'requires_grad': 'requires_grad'}), '(mean, std, *shape, placement=placement, sbp=sbp, dtype=dtype,\n requires_grad=requires_grad)\n', (960, 1055), True, 'import oneflow as flow\n'), ((2056, 2071), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2069, 2071), False, 'import unittest\n'), ((1478, 1491), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1489, 1491), False, 'from collections import OrderedDict\n'), ((1747, 1767), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (1757, 1767), False, 'from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import traceback
import oneflow._oneflow_internal
from oneflow.compatible.single_client.framework import hob as hob
from oneflow.compatible.single_client.framework import session_context as session_ctx
from oneflow.compatible.single_client.support import enable_if as enable_if
def api_load_library(val: str) -> None:
"""Load necessary library for job
Args:
val (str): path to shared object file
"""
return enable_if.unique([load_library, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def load_library(val):
assert type(val) is str
sess = session_ctx.GetDefaultSession()
sess.config_proto.load_lib_path.append(val)
def api_load_library_now(val: str) -> None:
"""Load necessary library for job now
Args:
val (str): path to shared object file
"""
return enable_if.unique([load_library_now, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def load_library_now(val):
assert type(val) is str
oneflow._oneflow_internal.LoadLibraryNow(val)
def api_machine_num(val: int) -> None:
"""Set available number of machine/node for running job .
Args:
val (int): available number of machines
"""
return enable_if.unique([machine_num, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def machine_num(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.machine_num = val
def api_gpu_device_num(val: int) -> None:
"""Set number of GPUs on each machine to run oneflow on.
Args:
val (int): number of GPUs. It is identical on every machine. In other words,
you can't specify different number of GPUs you would like to use on each machine.
"""
if oneflow._oneflow_internal.flags.with_cuda():
return enable_if.unique([gpu_device_num, do_nothing])(val)
else:
print(
"INFO: for CPU-only OneFlow, oneflow.compatible.single_client.config.gpu_device_num is equivalent to oneflow.compatible.single_client.config.cpu_device_num"
)
print(traceback.format_stack()[-2])
return enable_if.unique([cpu_device_num, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def gpu_device_num(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.gpu_device_num = val
def api_cpu_device_num(val: int) -> None:
"""Set number of CPUs on each machine to run oneflow on. Usually you don't need to set this.
Args:
val (int): number of CPUs. It is identical on every machine.
"""
return enable_if.unique([cpu_device_num, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def cpu_device_num(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.cpu_device_num = val
def api_comm_net_worker_num(val: int) -> None:
"""Set up the workers number in epoll mode network,
If use RDMA mode network, then doesn't need.
Args:
val (int): number of workers
"""
return enable_if.unique([comm_net_worker_num, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def comm_net_worker_num(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.comm_net_worker_num = val
def api_max_mdsave_worker_num(val: int) -> None:
"""Set up max number of workers for mdsave process.
Args:
val (int): max number of workers
"""
return enable_if.unique([max_mdsave_worker_num, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def max_mdsave_worker_num(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.max_mdsave_worker_num = val
def api_numa_aware_cuda_malloc_host(val: bool = True) -> None:
"""Whether or not let numa know that cuda allocated host's memory.
Args:
val (bool, optional): True or False. Defaults to True.
"""
print(
"'enable_numa_aware_cuda_malloc_host' has been deprecated, has no effect and will be removed in the future."
)
def api_compute_thread_pool_size(val: int) -> None:
"""Set up the size of compute thread pool
Args:
val (int): size of thread pool
"""
return enable_if.unique([compute_thread_pool_size, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def compute_thread_pool_size(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.compute_thread_pool_size = val
def api_rdma_mem_block_mbyte(val: int) -> None:
"""Set up the memory block size in rdma mode.
Args:
val (int): size of block, e.g. 1024(mb)
"""
print(
"'rdma_mem_block_mbyte' has been deprecated, has no effect and will be removed in the future. Use environment variable 'ONEFLOW_COMM_NET_IB_MEM_BLOCK_SIZE' instead."
)
def api_rdma_recv_msg_buf_mbyte(val: int) -> None:
"""Set up the buffer size for receiving messages in rama mode
Args:
val (int): buffer size, e.g. 1024(mb)
"""
print(
"'rdma_recv_msg_buf_mbyte' has been deprecated, has no effect and will be removed in the future."
)
def api_reserved_host_mem_mbyte(val: int) -> None:
"""Set up the memory size of reserved host
Args:
val (int): memory size, e.g. 1024(mb)
"""
return enable_if.unique([reserved_host_mem_mbyte, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def reserved_host_mem_mbyte(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.reserved_host_mem_mbyte = val
def api_reserved_device_mem_mbyte(val: int) -> None:
"""Set up the memory size of reserved device
Args:
val (int): memory size, e.g. 1024(mb)
"""
return enable_if.unique([reserved_device_mem_mbyte, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def reserved_device_mem_mbyte(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.reserved_device_mem_mbyte = val
def api_use_rdma(val: bool = True) -> None:
"""Whether use RDMA to speed up data transmission in cluster nodes or not.
if not, then use normal epoll mode.
Args:
val (bool, optional): Defaults to True.
"""
print(
"'use_rdma' has been deprecated, has no effect and will be removed in the future. Use environment variable 'ONEFLOW_COMM_NET_IB_ENABLE' instead."
)
def api_thread_enable_local_message_queue(val: bool) -> None:
"""Whether or not enable thread using local message queue.
Args:
val (bool): True or False
"""
print(
"'thread_enable_local_message_queue' has been deprecated, has no effect and will be removed in the future. Use environment variable 'ONEFLOW_THREAD_ENABLE_LOCAL_MESSAGE_QUEUE' instead."
)
def api_enable_debug_mode(val: bool) -> None:
"""Whether use debug mode or not.
Args:
val (bool): True or False
"""
return enable_if.unique([enable_debug_mode, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_debug_mode(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.enable_debug_mode = val
def api_legacy_model_io_enabled():
sess = session_ctx.GetDefaultSession()
return sess.config_proto.resource.enable_legacy_model_io
def api_enable_legacy_model_io(val: bool = True):
"""Whether or not use legacy model io.
Args:
val ([type]): True or False
"""
return enable_if.unique([enable_legacy_model_io, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_legacy_model_io(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.enable_legacy_model_io = val
def api_enable_model_io_v2(val):
"""Whether or not use version2 of model input/output function.
Args:
val ([type]): True or False
"""
return enable_if.unique([enable_model_io_v2, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_model_io_v2(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.enable_model_io_v2 = val
def api_collect_act_event(val: bool = True) -> None:
"""Whether or not collect active event.
Args:
val (bool, optional): True or False. Defaults to True.
"""
print(
"'collect_act_event' has been deprecated, has no effect and will be removed in the future."
)
def api_enable_fusion(val: bool = True) -> None:
"""Whether or not allow fusion the operators
Args:
val (bool, optional): True or False. Defaults to True.
"""
return enable_if.unique([enable_fusion, do_nothing])(val=val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_fusion(val=True):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.enable_fusion = val
def api_num_callback_threads(val: int) -> None:
"""Set up number of callback threads for boxing process.
Boxing is used to convert between different parallel properties of logical tensor
Args:
val (int): number of callback threads
"""
return enable_if.unique([num_callback_threads, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def num_callback_threads(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.collective_boxing_conf.num_callback_threads = val
def api_enable_tensor_float_32_compute(val: bool = True) -> None:
"""Whether or not to enable Tensor-float-32 on supported GPUs
Args:
val (bool, optional): True or False. Defaults to True.
"""
return enable_if.unique([enable_tensor_float_32_compute, do_nothing])(val=val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_tensor_float_32_compute(val=True):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.enable_tensor_float_32_compute = val
if not val:
os.environ["ONEFLOW_EP_CUDA_ENABLE_TF32_EXECUTION"] = "0"
def api_enable_mem_chain_merge(val: bool = True) -> None:
"""Whether or not to enable MemChain merge.
Args:
val (bool, optional): True or False. Defaults to True.
"""
return enable_if.unique([enable_mem_chain_merge, do_nothing])(val=val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def enable_mem_chain_merge(val=True):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.enable_mem_chain_merge = val
def api_nccl_use_compute_stream(val: bool = False) -> None:
"""Whether or not nccl use compute stream to reuse nccl memory and speedup
Args:
val (bool, optional): True or False. Defaults to False.
"""
return enable_if.unique([nccl_use_compute_stream, do_nothing])(val=val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_use_compute_stream(val=False):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.nccl_use_compute_stream = val
def api_disable_group_boxing_by_dst_parallel(val: bool = False) -> None:
"""Whether or not disable group boxing by dst parallel pass to reduce boxing memory life cycle.
Args:
val (bool, optional): True or False. Defaults to False.
"""
return enable_if.unique([disable_group_boxing_by_dst_parallel, do_nothing])(val=val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def disable_group_boxing_by_dst_parallel(val=False):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.disable_group_boxing_by_dst_parallel = val
def api_nccl_num_streams(val: int) -> None:
"""Set up the number of nccl parallel streams while use boxing
Args:
val (int): number of streams
"""
return enable_if.unique([nccl_num_streams, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_num_streams(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.collective_boxing_conf.nccl_num_streams = val
def api_nccl_fusion_threshold_mb(val: int) -> None:
"""Set up threshold for oprators fusion
Args:
val (int): int number, e.g. 10(mb)
"""
return enable_if.unique([nccl_fusion_threshold_mb, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_threshold_mb(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_threshold_mb = val
def api_nccl_fusion_all_reduce_use_buffer(val: bool) -> None:
"""Whether or not use buffer during nccl fusion progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_all_reduce_use_buffer, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_all_reduce_use_buffer(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_all_reduce_use_buffer = (
val
)
def api_nccl_fusion_all_reduce(val: bool) -> None:
"""Whether or not use nccl fusion during all reduce progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_all_reduce, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_all_reduce(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_all_reduce = val
def api_nccl_fusion_reduce_scatter(val: bool) -> None:
"""Whether or not use nccl fusion during reduce scatter progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_reduce_scatter, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_reduce_scatter(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_reduce_scatter = val
def api_nccl_fusion_all_gather(val: bool) -> None:
"""Whether or not use nccl fusion during all gather progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_all_gather, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_all_gather(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_all_gather = val
def api_nccl_fusion_reduce(val: bool) -> None:
"""Whether or not use nccl fusion during reduce progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_reduce, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_reduce(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_reduce = val
def api_nccl_fusion_broadcast(val: bool) -> None:
"""Whether or not use nccl fusion during broadcast progress
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_fusion_broadcast, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_broadcast(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_broadcast = val
def api_nccl_fusion_max_ops(val: int) -> None:
"""Maximum number of ops for nccl fusion.
Args:
val (int): Maximum number of ops
"""
return enable_if.unique([nccl_fusion_max_ops, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_fusion_max_ops(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is int
sess.config_proto.resource.collective_boxing_conf.nccl_fusion_max_ops = val
def api_nccl_enable_all_to_all(val: bool) -> None:
"""Whether or not use nccl all2all during s2s boxing
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_enable_all_to_all, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_enable_all_to_all(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_enable_all_to_all = val
def api_nccl_enable_mixed_fusion(val: bool) -> None:
"""Whether or not use nccl mixed fusion
Args:
val (bool): True or False
"""
return enable_if.unique([nccl_enable_mixed_fusion, do_nothing])(val)
@enable_if.condition(hob.in_normal_mode & ~hob.session_initialized)
def nccl_enable_mixed_fusion(val):
sess = session_ctx.GetDefaultSession()
assert type(val) is bool
sess.config_proto.resource.collective_boxing_conf.nccl_enable_mixed_fusion = val
@enable_if.condition(hob.in_normal_mode & hob.session_initialized)
def do_nothing(*args, **kwargs):
print("Nothing happened because the session is running")
return False
| [
"oneflow.compatible.single_client.support.enable_if.condition",
"oneflow.compatible.single_client.support.enable_if.unique",
"oneflow.compatible.single_client.framework.session_context.GetDefaultSession"
] | [((1088, 1154), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (1107, 1154), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1518, 1584), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (1537, 1584), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1924, 1990), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (1943, 1990), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((2869, 2935), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (2888, 2935), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((3379, 3445), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (3398, 3445), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((3884, 3950), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (3903, 3950), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((4350, 4416), 'oneflow.compatible.single_client.support.enable_if.condition', 
'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (4369, 4416), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((5168, 5234), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (5187, 5234), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((6309, 6375), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (6328, 6375), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((6789, 6855), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (6808, 6855), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((8038, 8104), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (8057, 8104), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((8625, 8691), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (8644, 8691), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((9085, 9151), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode 
& ~hob.session_initialized)\n', (9104, 9151), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((9858, 9924), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (9877, 9924), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((10443, 10509), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (10462, 10509), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((10995, 11061), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (11014, 11061), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((11598, 11664), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (11617, 11664), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((12138, 12204), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (12157, 12204), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((12728, 12794), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (12747, 12794), True, 'from 
oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((13231, 13297), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (13250, 13297), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((13709, 13775), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (13728, 13775), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((14230, 14296), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (14249, 14296), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((14769, 14835), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (14788, 14835), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((15282, 15348), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (15301, 15348), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((15791, 15857), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (15810, 15857), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), 
((16279, 16345), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (16298, 16345), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((16768, 16834), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (16787, 16834), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((17247, 17313), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (17266, 17313), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((17732, 17798), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (17751, 17798), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((18215, 18281), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & ~hob.session_initialized)'], {}), '(hob.in_normal_mode & ~hob.session_initialized)\n', (18234, 18281), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((18477, 18542), 'oneflow.compatible.single_client.support.enable_if.condition', 'enable_if.condition', (['(hob.in_normal_mode & hob.session_initialized)'], {}), '(hob.in_normal_mode & hob.session_initialized)\n', (18496, 18542), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1217, 1248), 
'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1246, 1248), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((2024, 2055), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2053, 2055), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((2972, 3003), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (3001, 3003), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((3482, 3513), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (3511, 3513), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((3992, 4023), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4021, 4023), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((4460, 4491), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4489, 4491), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((5281, 5312), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (5310, 5312), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((6421, 6452), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (6450, 6452), True, 'from 
oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((6903, 6934), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (6932, 6934), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((8144, 8175), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (8173, 8175), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((8308, 8339), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (8337, 8339), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((8736, 8767), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (8765, 8767), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((9192, 9223), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (9221, 9223), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((9965, 9996), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (9994, 9996), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((10552, 10583), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (10581, 10583), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((11119, 11150), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 
'session_ctx.GetDefaultSession', ([], {}), '()\n', (11148, 11150), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((11714, 11745), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (11743, 11745), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((12256, 12287), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (12285, 12287), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((12859, 12890), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (12888, 12890), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((13336, 13367), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (13365, 13367), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((13822, 13853), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (13851, 13853), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((14352, 14383), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (14381, 14383), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((14880, 14911), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (14909, 14911), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), 
((15397, 15428), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (15426, 15428), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((15902, 15933), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (15931, 15933), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((16386, 16417), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (16415, 16417), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((16878, 16909), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (16907, 16909), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((17355, 17386), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (17384, 17386), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((17843, 17874), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (17872, 17874), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((18328, 18359), 'oneflow.compatible.single_client.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (18357, 18359), True, 'from oneflow.compatible.single_client.framework import session_context as session_ctx\n'), ((1035, 1079), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[load_library, do_nothing]'], {}), '([load_library, 
do_nothing])\n', (1051, 1079), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1461, 1509), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[load_library_now, do_nothing]'], {}), '([load_library_now, do_nothing])\n', (1477, 1509), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((1872, 1915), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[machine_num, do_nothing]'], {}), '([machine_num, do_nothing])\n', (1888, 1915), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((3324, 3370), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[cpu_device_num, do_nothing]'], {}), '([cpu_device_num, do_nothing])\n', (3340, 3370), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((3824, 3875), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[comm_net_worker_num, do_nothing]'], {}), '([comm_net_worker_num, do_nothing])\n', (3840, 3875), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((4288, 4341), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[max_mdsave_worker_num, do_nothing]'], {}), '([max_mdsave_worker_num, do_nothing])\n', (4304, 4341), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((5103, 5159), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[compute_thread_pool_size, do_nothing]'], {}), '([compute_thread_pool_size, do_nothing])\n', (5119, 5159), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((6245, 6300), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[reserved_host_mem_mbyte, do_nothing]'], {}), '([reserved_host_mem_mbyte, do_nothing])\n', 
(6261, 6300), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((6723, 6780), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[reserved_device_mem_mbyte, do_nothing]'], {}), '([reserved_device_mem_mbyte, do_nothing])\n', (6739, 6780), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((7980, 8029), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_debug_mode, do_nothing]'], {}), '([enable_debug_mode, do_nothing])\n', (7996, 8029), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((8562, 8616), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_legacy_model_io, do_nothing]'], {}), '([enable_legacy_model_io, do_nothing])\n', (8578, 8616), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((9026, 9076), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_model_io_v2, do_nothing]'], {}), '([enable_model_io_v2, do_nothing])\n', (9042, 9076), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((9800, 9845), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_fusion, do_nothing]'], {}), '([enable_fusion, do_nothing])\n', (9816, 9845), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((10382, 10434), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[num_callback_threads, do_nothing]'], {}), '([num_callback_threads, do_nothing])\n', (10398, 10434), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((10920, 10982), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_tensor_float_32_compute, do_nothing]'], {}), 
'([enable_tensor_float_32_compute, do_nothing])\n', (10936, 10982), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((11531, 11585), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[enable_mem_chain_merge, do_nothing]'], {}), '([enable_mem_chain_merge, do_nothing])\n', (11547, 11585), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((12070, 12125), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_use_compute_stream, do_nothing]'], {}), '([nccl_use_compute_stream, do_nothing])\n', (12086, 12125), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((12647, 12715), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[disable_group_boxing_by_dst_parallel, do_nothing]'], {}), '([disable_group_boxing_by_dst_parallel, do_nothing])\n', (12663, 12715), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((13174, 13222), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_num_streams, do_nothing]'], {}), '([nccl_num_streams, do_nothing])\n', (13190, 13222), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((13644, 13700), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_threshold_mb, do_nothing]'], {}), '([nccl_fusion_threshold_mb, do_nothing])\n', (13660, 13700), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((14156, 14221), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_all_reduce_use_buffer, do_nothing]'], {}), '([nccl_fusion_all_reduce_use_buffer, do_nothing])\n', (14172, 14221), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((14706, 14760), 
'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_all_reduce, do_nothing]'], {}), '([nccl_fusion_all_reduce, do_nothing])\n', (14722, 14760), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((15215, 15273), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_reduce_scatter, do_nothing]'], {}), '([nccl_fusion_reduce_scatter, do_nothing])\n', (15231, 15273), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((15728, 15782), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_all_gather, do_nothing]'], {}), '([nccl_fusion_all_gather, do_nothing])\n', (15744, 15782), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((16220, 16270), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_reduce, do_nothing]'], {}), '([nccl_fusion_reduce, do_nothing])\n', (16236, 16270), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((16706, 16759), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_broadcast, do_nothing]'], {}), '([nccl_fusion_broadcast, do_nothing])\n', (16722, 16759), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((17187, 17238), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_fusion_max_ops, do_nothing]'], {}), '([nccl_fusion_max_ops, do_nothing])\n', (17203, 17238), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((17669, 17723), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_enable_all_to_all, do_nothing]'], {}), '([nccl_enable_all_to_all, do_nothing])\n', (17685, 17723), True, 'from oneflow.compatible.single_client.support 
import enable_if as enable_if\n'), ((18150, 18206), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[nccl_enable_mixed_fusion, do_nothing]'], {}), '([nccl_enable_mixed_fusion, do_nothing])\n', (18166, 18206), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((2499, 2545), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[gpu_device_num, do_nothing]'], {}), '([gpu_device_num, do_nothing])\n', (2515, 2545), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((2814, 2860), 'oneflow.compatible.single_client.support.enable_if.unique', 'enable_if.unique', (['[cpu_device_num, do_nothing]'], {}), '([cpu_device_num, do_nothing])\n', (2830, 2860), True, 'from oneflow.compatible.single_client.support import enable_if as enable_if\n'), ((2769, 2793), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (2791, 2793), False, 'import traceback\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from contextlib import contextmanager
import os
import warnings
from typing import Any, Callable, Dict, Iterable, List, Optional, Sequence, Tuple, Union
from pathlib import Path
import pickle
import numpy as np
from google.protobuf import text_format
import oneflow
import oneflow as flow
import oneflow._oneflow_internal
import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb
import oneflow.framework.dtype as dtype_util
import oneflow.framework.id_util as id_util
from oneflow.framework.tensor import Tensor
import oneflow.nn.graph.graph as graph_util
import pickle
# File names used inside a snapshot directory.
SNAPSHOT_DONE_FILENAME = "snapshot_done"  # marker file: snapshot finished writing
META_INFO_FILENAME = "meta"  # text-format VariableMetaInfo (shape + dtype)
PICKLE_FILENAME = "pickled_data"  # pickled payload written by flow.save
DATA_FILENAME = "out"  # raw tensor bytes
PROTOCOL_VERSION = 1  # bump when the pickled layout changes incompatibly
class FileBackendVariableBlob:
    """A variable whose data lives on disk as a raw byte file plus optional meta info.

    Directory layout: ``<var_dir>/out`` (raw tensor bytes, DATA_FILENAME) and
    optionally ``<var_dir>/meta`` (text-format VariableMetaInfo with shape and
    dtype, META_INFO_FILENAME). Without meta info — neither on disk nor passed
    by the caller — the blob is opaque: ``shape``/``dtype``/``numpy()`` cannot
    be used.
    """
    def __init__(
        self,
        var_dir: str,
        dtype: Optional[oneflow.dtype] = None,
        shape: Optional[Sequence[int]] = None,
    ):
        data_path = os.path.join(var_dir, DATA_FILENAME)
        if not os.path.isfile(data_path):
            raise FileNotFoundError()
        self.var_dir_ = var_dir
        meta_info_path = os.path.join(self.var_dir_, META_INFO_FILENAME)
        if os.path.exists(meta_info_path):
            # Parse the on-disk meta file (protobuf text format).
            meta_info = variable_meta_info_pb.VariableMetaInfo()
            with open(meta_info_path) as f:
                text_format.Parse(f.read(), meta_info)
            self.has_meta_info_ = True
        else:
            self.has_meta_info_ = False
        if self.has_meta_info_:
            # Meta info on disk wins; callers must not also pass shape/dtype.
            assert dtype is None and shape is None
            self.shape_ = tuple(meta_info.shape.dim)
            self.dtype_ = dtype_util.convert_proto_dtype_to_oneflow_dtype(
                meta_info.data_type
            )
        elif shape is not None and dtype is not None:
            # No meta file, but the caller supplied both pieces of info.
            self.shape_ = shape
            self.dtype_ = dtype
            self.has_meta_info_ = True
        elif shape is not None or dtype is not None:
            raise RuntimeError("both or neither of shape and dtype should be None")
        else:
            # No meta info at all: shape_/dtype_ intentionally stay undefined.
            pass
        if self.has_meta_info_:
            # Sanity check: file size must equal prod(shape) * dtype itemsize.
            itemsize = np.dtype(
                dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype_)
            ).itemsize
            assert os.path.getsize(data_path) == np.prod(self.shape).item() * itemsize
    @property
    def file_path(self) -> str:
        # Path of the raw data file inside the variable directory.
        return os.path.join(self.var_dir_, DATA_FILENAME)
    @property
    def shape(self) -> Tuple[int, ...]:
        return self.shape_
    @property
    def quant_info(self):
        # Quantization info is not supported for file-backed blobs.
        raise NotImplementedError()
    @property
    def dtype(self) -> oneflow.dtype:
        return self.dtype_
    def numpy(self) -> np.ndarray:
        """Read the blob's bytes from disk into a numpy array of ``self.shape``."""
        if not self.has_meta_info_:
            raise RuntimeError("This variable does not have meta info")
        return np.fromfile(
            self.file_path,
            dtype=dtype_util.convert_oneflow_dtype_to_numpy_dtype(self.dtype),
        ).reshape(self.shape)
def _save_tensor_to_disk(tensor: "oneflow.Tensor", dir_name: Union[str, Path]) -> None:
    """Persist *tensor* under *dir_name* as a raw data file plus a meta file.

    The data file (DATA_FILENAME) holds the tensor's bytes; the meta file
    (META_INFO_FILENAME) records shape and dtype in protobuf text format so
    the blob can later be reloaded by FileBackendVariableBlob.
    """
    os.makedirs(dir_name, exist_ok=True)
    # Describe shape and dtype in a VariableMetaInfo proto.
    meta = variable_meta_info_pb.VariableMetaInfo()
    meta.shape.dim[:] = tensor.shape
    meta.data_type = oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(
        tensor.dtype
    )
    # Raw tensor bytes.
    with open(os.path.join(dir_name, DATA_FILENAME), "wb") as data_file:
        data_file.write(tensor.numpy().tobytes())
    # Human-readable meta info alongside the data.
    with open(os.path.join(dir_name, META_INFO_FILENAME), "w") as meta_file:
        meta_file.write(text_format.MessageToString(meta))
# Anything that can stand in for a stored variable's value.
ValueContainer = Union[FileBackendVariableBlob, np.ndarray, "oneflow.Tensor"]
def _LoadSingleVariable(
    path: Optional[str], global_src_rank: Optional[int] = None
) -> "flow.Tensor":
    """Load one variable blob from *path*.

    When *global_src_rank* is given, only that rank reads the file; the data
    is then broadcast so every rank holds a global tensor placed on
    ``flow.placement("cuda", [global_src_rank])``. Otherwise a plain local
    tensor is returned.
    """
    if global_src_rank is None:
        assert isinstance(path, str)
        return flow.tensor(FileBackendVariableBlob(path).numpy())
    # Global case: only the source rank actually touches the file system.
    if flow.env.get_rank() == global_src_rank:
        assert isinstance(path, str)
        blob = FileBackendVariableBlob(path)
        local_part = flow.tensor(
            blob.numpy(), dtype=blob.dtype
        ).to("cuda")
    else:
        local_part = flow.tensor([]).to("cuda")
    # The broadcast turns the rank-local payload into a consistent tensor.
    return local_part.to_global(
        flow.placement("cuda", [global_src_rank]), flow.sbp.broadcast
    )
def _broadcast_py_object(obj, src: int = 0):
    """Pickle *obj* on rank *src* and broadcast it to every rank.

    Non-source ranks should pass any placeholder (typically ``None``); all
    ranks return the unpickled copy of the source rank's object.
    """
    if flow.env.get_rank() == src:
        payload = pickle.dumps(obj)
        return pickle.loads(flow._oneflow_internal.cpu_broadcast(payload, src))
    return pickle.loads(flow._oneflow_internal.cpu_broadcast(None, src))
# NOTE(jianhao):
# (de)serializing a container of global tensors requires the order
# of those tensors are the same across all ranks.
def tensor_getstate(self):
    """Pickle hook for Tensor (installed by RegisterMethods).

    Inside flow.save/flow.load (signaled by the module-global
    ``save_load_path``) the tensor's payload is written to disk and only a
    relative directory name is pickled; otherwise the payload is embedded
    directly in the returned dict.
    """
    if save_load_path is not None:
        # save_load_path is not None means setstate/getstate is called inside
        # flow.save or flow.load
        assert isinstance(save_load_path, Path)
        if global_src_dsk_rank is None:
            assert self.is_local
            rel_dir_name = id_util.UniqueStr("tensor_")
            abs_dir_name = save_load_path / rel_dir_name
            tensor = self
        else:
            assert not self.is_local
            # Name by global id so every rank agrees on the directory name.
            rel_dir_name = f"global_tensor_{self.global_id()}"
            abs_dir_name = save_load_path / rel_dir_name
            # Gather the full tensor onto the designated disk rank (CPU).
            tensor = self.to_global(
                sbp=flow.sbp.broadcast,
                placement=flow.placement("cpu", [global_src_dsk_rank]),
            ).to_local()
        # Only the designated rank writes (every rank writes in the local case).
        if global_src_dsk_rank is None or global_src_dsk_rank == flow.env.get_rank():
            _save_tensor_to_disk(tensor, abs_dir_name)
        return {"path": rel_dir_name}
    else:
        # save_load_path is None means setstate/getstate is called inside
        # methods other than flow.save/load, for example, copy.deepcopy
        if self.is_local:
            if self.is_cuda:
                device = "cuda"
            else:
                device = "cpu"
            return {"data": self.numpy(), "dtype": self.dtype, "device": device}
        else:
            return {
                "data": self.numpy(),
                "dtype": self.dtype,
                "placement": self.placement,
                "sbp": self.sbp,
            }
def tensor_setstate(self, pickle_dict):
    """Pickle hook for Tensor: rebuild from the dict made by ``tensor_getstate``."""
    if save_load_path is not None:
        # Inside flow.save/flow.load: the payload lives on disk under
        # save_load_path / pickle_dict["path"].
        assert isinstance(save_load_path, Path)
        tensor_dir = save_load_path / pickle_dict["path"]
        return self.__init__(
            _LoadSingleVariable(str(tensor_dir), global_src_dsk_rank)
        )
    # Ordinary pickling (e.g. copy.deepcopy): rebuild from the embedded data.
    if "placement" in pickle_dict:
        restored = flow.tensor(
            pickle_dict["data"],
            dtype=pickle_dict["dtype"],
            placement=pickle_dict["placement"],
            sbp=pickle_dict["sbp"],
        )
    else:
        restored = flow.tensor(
            pickle_dict["data"],
            dtype=pickle_dict["dtype"],
            device=pickle_dict["device"],
        )
    return self.__init__(restored)
def placement_getstate(self):
    """Pickle hook for flow.placement: capture device type and rank layout."""
    return {"type": self.type, "ranks": self.ranks}
def placement_setstate(self, state):
    """Pickle hook for flow.placement: re-run __init__ with the saved fields."""
    device_type = state["type"]
    ranks = state["ranks"]
    return self.__init__(device_type, ranks)
def RegisterMethods():
    """Install the pickle hooks on Tensor and on flow placement objects."""
    Tensor.__getstate__ = tensor_getstate
    Tensor.__setstate__ = tensor_setstate
    placement_cls = flow._oneflow_internal.placement
    placement_cls.__getstate__ = placement_getstate
    placement_cls.__setstate__ = placement_setstate
def legacy_load(
    path: Union[str, Path], global_src_rank: Optional[int] = None,
) -> Dict[str, "flow.Tensor"]:
    """Load a legacy (pre-pickle) snapshot directory into a name -> tensor dict.

    Each sub-directory of *path* holds one variable. When *global_src_rank*
    is given, only that rank lists and reads the files; other ranks receive
    the file list by broadcast and end up with global tensors.
    """
    assert os.path.isdir(path), "Directory {} doesn't exist!".format(path)
    my_rank = flow.env.get_rank()
    if global_src_rank is None or my_rank == global_src_rank:
        entries = os.listdir(path)
        # The done-marker proves the snapshot finished; it is not a variable.
        assert SNAPSHOT_DONE_FILENAME in entries
        entries.remove(SNAPSHOT_DONE_FILENAME)
        if global_src_rank is not None:
            _broadcast_py_object(entries, global_src_rank)
    else:
        entries = _broadcast_py_object(None, global_src_rank)
    var_dict = {}
    for name in entries:
        var_dir = os.path.join(path, name)
        try:
            var_dict[name] = _LoadSingleVariable(var_dir, global_src_rank)
        except FileNotFoundError:
            warnings.warn(
                f"'{var_dir}' does not have valid tensor data. Please check it if it is unexpected.",
                stacklevel=2,
            )
    return var_dict
@contextmanager
def tensor_pickling_context(path: Path, global_src_dst_rank: int):
    """Expose *path* and *global_src_dst_rank* to the tensor pickle hooks.

    ``tensor_getstate``/``tensor_setstate`` consult these module globals to
    detect that they are running inside ``flow.save``/``flow.load``.
    """
    global save_load_path, global_src_dsk_rank
    save_load_path = path
    global_src_dsk_rank = global_src_dst_rank
    try:
        yield
    finally:
        # Reset so ordinary pickling outside save/load is unaffected.
        save_load_path = None
        global_src_dsk_rank = None
def load(path: str, global_src_rank: Optional[int] = None,) -> Any:
    r"""Load an object saved with ``oneflow.save()`` from a directory.

    Args:
        path (str): The directory containing the saved object.
        global_src_rank (int, optional): The source rank for loading global
            tensors. When specified, only the process whose rank ==
            global_src_rank will really read the files in `path`, and
            tensors in the loaded object will be consistent with
            placement = `flow.placement('cuda', [global_src_rank])`.

    Returns:
        The loaded object.
    """
    root = Path(path)
    assert root.is_dir(), "Directory {} doesn't exist!".format(root)
    pickle_file = root / PICKLE_FILENAME
    my_rank = flow.env.get_rank()
    # Snapshots written before the pickle-based format lack the pickle file;
    # only the I/O rank can check, so the answer is broadcast when needed.
    if global_src_rank is None or global_src_rank == my_rank:
        is_legacy = not pickle_file.exists()
        if global_src_rank is not None:
            _broadcast_py_object(is_legacy, global_src_rank)
    else:
        is_legacy = _broadcast_py_object(None, global_src_rank)
    if is_legacy:
        return legacy_load(root, global_src_rank)
    if global_src_rank is None:
        pickle_bytes = pickle_file.read_bytes()
    elif my_rank == global_src_rank:
        pickle_bytes = pickle_file.read_bytes()
        _broadcast_py_object(pickle_bytes, global_src_rank)
    else:
        pickle_bytes = _broadcast_py_object(None, global_src_rank)
    # Unpickling inside the context makes tensor_setstate read blob
    # directories under `root`.
    with tensor_pickling_context(root, global_src_rank):
        res = pickle.loads(pickle_bytes)
    assert res["protocol_version"] == PROTOCOL_VERSION
    return res["data"]
def save(
    obj: Any, path: Union[str, Path], global_dst_rank: Optional[int] = None,
) -> None:
    r"""Save an object to a directory.

    Args:
        obj: The object to be saved
        path (str): The directory in which the object is saved
        global_dst_rank (int, optional): The destination rank for
            saving global tensors. When specified, whole tensors
            will be saved by the process whose rank ==
            global_dst_rank, while other processes will not do any
            disk I/O.
    """
    path: Path = Path(path)
    if isinstance(obj, graph_util.Graph):
        # Graphs are serialized to IR plus one file per state tensor; they
        # do not go through pickle.
        graph: graph_util.Graph = obj
        if not graph._is_compiled:
            raise RuntimeError("graph must be compiled first.")
        path.mkdir(exist_ok=True)
        serialized_job = str(text_format.MessageToString(graph._forward_job_proto))
        oneflow._oneflow_internal.nn.graph.SaveJobToIR(serialized_job, str(path))
        for x in graph._state():
            _save_tensor_to_disk(x.origin, path / f"{x.name_prefix}{x.name}")
        return
    obj = {"protocol_version": PROTOCOL_VERSION, "data": obj}
    # Pickling inside this context makes tensor_getstate spill tensor
    # payloads into sub-directories of `path`.
    with tensor_pickling_context(path, global_dst_rank):
        pickled_bytes = pickle.dumps(obj)
    def write_to_path(path):
        # Write the pickled payload into the snapshot directory.
        path.mkdir(exist_ok=True)
        pickle_path = path / PICKLE_FILENAME
        pickle_path.write_bytes(pickled_bytes)
    if global_dst_rank is not None:
        assert isinstance(
            global_dst_rank, int
        ), f"global_dst_rank expected type int, but got {type(global_dst_rank)}."
        assert (
            global_dst_rank >= 0 and global_dst_rank < flow.env.get_world_size()
        ), f"out of range (expected to be in range of [0, {flow.env.get_world_size()}), but got {global_dst_rank})."
        # Only the destination rank touches the disk.
        if flow.env.get_rank() == global_dst_rank:
            write_to_path(path)
    else:
        # global_dst_rank is None
        write_to_path(path)
# Module-global pickling context; set only by tensor_pickling_context while
# flow.save/flow.load are (un)pickling, and None otherwise.
save_load_path = None
global_src_dsk_rank = None
| [
"oneflow.tensor",
"oneflow.env.get_rank",
"oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype",
"oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype",
"oneflow.env.get_world_size",
"oneflow.framework.dtype.convert_proto_dtype_to_oneflow_dtype",
"oneflow._oneflow_internal.cpu_broadcast",... | [((3639, 3675), 'os.makedirs', 'os.makedirs', (['dir_name'], {'exist_ok': '(True)'}), '(dir_name, exist_ok=True)\n', (3650, 3675), False, 'import os\n'), ((3692, 3732), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (3730, 3732), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((3801, 3873), 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', (['tensor.dtype'], {}), '(tensor.dtype)\n', (3859, 3873), False, 'import oneflow\n'), ((3904, 3941), 'os.path.join', 'os.path.join', (['dir_name', 'DATA_FILENAME'], {}), '(dir_name, DATA_FILENAME)\n', (3916, 3941), False, 'import os\n'), ((5033, 5052), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5050, 5052), True, 'import oneflow as flow\n'), ((8417, 8436), 'os.path.isdir', 'os.path.isdir', (['path'], {}), '(path)\n', (8430, 8436), False, 'import os\n'), ((8492, 8511), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (8509, 8511), True, 'import oneflow as flow\n'), ((10224, 10234), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (10228, 10234), False, 'from pathlib import Path\n'), ((10356, 10375), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (10373, 10375), True, 'import oneflow as flow\n'), ((11783, 11793), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (11787, 11793), False, 'from pathlib import Path\n'), ((1537, 1573), 'os.path.join', 'os.path.join', (['var_dir', 'DATA_FILENAME'], {}), '(var_dir, DATA_FILENAME)\n', (1549, 1573), False, 'import os\n'), ((1711, 1758), 'os.path.join', 'os.path.join', (['self.var_dir_', 'META_INFO_FILENAME'], {}), '(self.var_dir_, META_INFO_FILENAME)\n', (1723, 1758), False, 'import os\n'), ((1770, 1800), 'os.path.exists', 'os.path.exists', (['meta_info_path'], {}), '(meta_info_path)\n', (1784, 
1800), False, 'import os\n'), ((2959, 3001), 'os.path.join', 'os.path.join', (['self.var_dir_', 'DATA_FILENAME'], {}), '(self.var_dir_, DATA_FILENAME)\n', (2971, 3001), False, 'import os\n'), ((4388, 4407), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (4405, 4407), True, 'import oneflow as flow\n'), ((5093, 5110), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (5105, 5110), False, 'import pickle\n'), ((8609, 8625), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (8619, 8625), False, 'import os\n'), ((8943, 8964), 'os.path.join', 'os.path.join', (['path', 'f'], {}), '(path, f)\n', (8955, 8964), False, 'import os\n'), ((11127, 11153), 'pickle.loads', 'pickle.loads', (['pickle_bytes'], {}), '(pickle_bytes)\n', (11139, 11153), False, 'import pickle\n'), ((12448, 12465), 'pickle.dumps', 'pickle.dumps', (['obj'], {}), '(obj)\n', (12460, 12465), False, 'import pickle\n'), ((1589, 1614), 'os.path.isfile', 'os.path.isfile', (['data_path'], {}), '(data_path)\n', (1603, 1614), False, 'import os\n'), ((1826, 1866), 'oneflow.core.framework.variable_meta_info_pb2.VariableMetaInfo', 'variable_meta_info_pb.VariableMetaInfo', ([], {}), '()\n', (1864, 1866), True, 'import oneflow.core.framework.variable_meta_info_pb2 as variable_meta_info_pb\n'), ((2221, 2289), 'oneflow.framework.dtype.convert_proto_dtype_to_oneflow_dtype', 'dtype_util.convert_proto_dtype_to_oneflow_dtype', (['meta_info.data_type'], {}), '(meta_info.data_type)\n', (2268, 2289), True, 'import oneflow.framework.dtype as dtype_util\n'), ((4036, 4078), 'os.path.join', 'os.path.join', (['dir_name', 'META_INFO_FILENAME'], {}), '(dir_name, META_INFO_FILENAME)\n', (4048, 4078), False, 'import os\n'), ((4107, 4145), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['meta_info'], {}), '(meta_info)\n', (4134, 4145), False, 'from google.protobuf import text_format\n'), ((4785, 4826), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[global_src_rank]'], 
{}), "('cuda', [global_src_rank])\n", (4799, 4826), True, 'import oneflow as flow\n'), ((5139, 5191), 'oneflow._oneflow_internal.cpu_broadcast', 'flow._oneflow_internal.cpu_broadcast', (['obj_bytes', 'src'], {}), '(obj_bytes, src)\n', (5175, 5191), True, 'import oneflow as flow\n'), ((5231, 5278), 'oneflow._oneflow_internal.cpu_broadcast', 'flow._oneflow_internal.cpu_broadcast', (['None', 'src'], {}), '(None, src)\n', (5267, 5278), True, 'import oneflow as flow\n'), ((5737, 5765), 'oneflow.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""tensor_"""'], {}), "('tensor_')\n", (5754, 5765), True, 'import oneflow.framework.id_util as id_util\n'), ((12039, 12092), 'google.protobuf.text_format.MessageToString', 'text_format.MessageToString', (['graph._forward_job_proto'], {}), '(graph._forward_job_proto)\n', (12066, 12092), False, 'from google.protobuf import text_format\n'), ((13027, 13046), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (13044, 13046), True, 'import oneflow as flow\n'), ((2829, 2855), 'os.path.getsize', 'os.path.getsize', (['data_path'], {}), '(data_path)\n', (2844, 2855), False, 'import os\n'), ((6261, 6280), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (6278, 6280), True, 'import oneflow as flow\n'), ((7354, 7479), 'oneflow.tensor', 'flow.tensor', (["pickle_dict['data']"], {'dtype': "pickle_dict['dtype']", 'placement': "pickle_dict['placement']", 'sbp': "pickle_dict['sbp']"}), "(pickle_dict['data'], dtype=pickle_dict['dtype'], placement=\n pickle_dict['placement'], sbp=pickle_dict['sbp'])\n", (7365, 7479), True, 'import oneflow as flow\n'), ((7652, 7747), 'oneflow.tensor', 'flow.tensor', (["pickle_dict['data']"], {'dtype': "pickle_dict['dtype']", 'device': "pickle_dict['device']"}), "(pickle_dict['data'], dtype=pickle_dict['dtype'], device=\n pickle_dict['device'])\n", (7663, 7747), True, 'import oneflow as flow\n'), ((9096, 9219), 'warnings.warn', 'warnings.warn', (['f"""\'{var_dir}\' does not have valid 
tensor data. Please check it if it is unexpected."""'], {'stacklevel': '(2)'}), '(\n f"\'{var_dir}\' does not have valid tensor data. Please check it if it is unexpected."\n , stacklevel=2)\n', (9109, 9219), False, 'import warnings\n'), ((12873, 12898), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12896, 12898), True, 'import oneflow as flow\n'), ((12958, 12983), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (12981, 12983), True, 'import oneflow as flow\n'), ((2726, 2786), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype_'], {}), '(self.dtype_)\n', (2773, 2786), True, 'import oneflow.framework.dtype as dtype_util\n'), ((4711, 4726), 'oneflow.tensor', 'flow.tensor', (['[]'], {}), '([])\n', (4722, 4726), True, 'import oneflow as flow\n'), ((3454, 3513), 'oneflow.framework.dtype.convert_oneflow_dtype_to_numpy_dtype', 'dtype_util.convert_oneflow_dtype_to_numpy_dtype', (['self.dtype'], {}), '(self.dtype)\n', (3501, 3513), True, 'import oneflow.framework.dtype as dtype_util\n'), ((2859, 2878), 'numpy.prod', 'np.prod', (['self.shape'], {}), '(self.shape)\n', (2866, 2878), True, 'import numpy as np\n'), ((6125, 6169), 'oneflow.placement', 'flow.placement', (['"""cpu"""', '[global_src_dsk_rank]'], {}), "('cpu', [global_src_dsk_rank])\n", (6139, 6169), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.ones_like,
"""
Returns a tensor filled with the scalar value 1, with the same size as input.
flow.ones_like(input) is equivalent to flow.ones(input.shape, dtype=input.dtype)
Args:
other(Tensor): The size of input will determine size of the output tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)
>>> y = flow.ones_like(x)
>>> y
tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.zeros_like,
"""
Returns a tensor filled with the scalar value 0, with the same size as input.
flow.zeros_like(input) is equivalent to flow.zeros(input.shape, dtype=input.dtype)
Args:
other(Tensor): The size of input will determine size of the output tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)
>>> y = flow.zeros_like(x)
>>> y
tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.new_ones,
"""
new_ones(x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False) -> Tensor
Returns a Tensor of size size filled with 1. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.
Args:
size (int...): a list, tuple, or flow.Size of integers defining the shape of the output tensor.
dtype (flow.dtype, optional): the desired type of returned tensor. Default: if None, same flow.dtype as this tensor.
device (flow.device, optional): the desired device of returned tensor. Default: if None, same flow.device as this tensor.
placement (flow.placement, optional): the desired placement of returned consistent tensor. Default: if None, the returned tensor is local one using the argument `device`.
sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned consistent tensor. Default: if None, the returned tensor is local one using the argument `device`.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.Tensor(np.ones((1, 2, 3)))
>>> y = x.new_ones((2, 2))
>>> y
tensor([[1., 1.],
[1., 1.]], dtype=oneflow.float32)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1261), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.ones_like', '"""\n Returns a tensor filled with the scalar value 1, with the same size as input.\n flow.ones_like(input) is equivalent to flow.ones(input.shape, dtype=input.dtype)\n\n Args:\n other(Tensor): The size of input will determine size of the output tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)\n >>> y = flow.ones_like(x)\n >>> y\n tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.ones_like,\n """\n Returns a tensor filled with the scalar value 1, with the same size as input.\n flow.ones_like(input) is equivalent to flow.ones(input.shape, dtype=input.dtype)\n\n Args:\n other(Tensor): The size of input will determine size of the output tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)\n >>> y = flow.ones_like(x)\n >>> y\n tensor([1., 1., 1., 1., 1.], dtype=oneflow.float32)\n\n """\n )\n', (670, 1261), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1265, 1870), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.zeros_like', '"""\n Returns a tensor filled with the scalar value 0, with the same size as input.\n flow.zeros_like(input) is equivalent to flow.zeros(input.shape, dtype=input.dtype)\n\n Args:\n other(Tensor): The size of input will determine size of the output tensor.\n\n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)\n >>> y = flow.zeros_like(x)\n >>> y\n tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.zeros_like,\n """\n Returns a tensor filled with the scalar value 0, with the same size as input.\n flow.zeros_like(input) is equivalent to flow.zeros(input.shape, dtype=input.dtype)\n\n Args:\n other(Tensor): The size of input will determine size of the output tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> x = flow.tensor(np.random.rand(5), dtype=flow.float32)\n >>> y = flow.zeros_like(x)\n >>> y\n tensor([0., 0., 0., 0., 0.], dtype=oneflow.float32)\n\n """\n )\n', (1275, 1870), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1874, 3328), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.new_ones', '"""\n new_ones(x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False) -> Tensor\n\n Returns a Tensor of size size filled with 1. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.\n\n Args:\n size (int...): a list, tuple, or flow.Size of integers defining the shape of the output tensor.\n dtype (flow.dtype, optional): the desired type of returned tensor. Default: if None, same flow.dtype as this tensor.\n device (flow.device, optional): the desired device of returned tensor. Default: if None, same flow.device as this tensor.\n placement (flow.placement, optional): the desired placement of returned consistent tensor. Default: if None, the returned tensor is local one using the argument `device`.\n sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned consistent tensor. 
Default: if None, the returned tensor is local one using the argument `device`.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.ones((1, 2, 3)))\n >>> y = x.new_ones((2, 2))\n >>> y\n tensor([[1., 1.],\n [1., 1.]], dtype=oneflow.float32)\n """'], {}), '(oneflow.new_ones,\n """\n new_ones(x, size=None, dtype=None, device=None, placement=None, sbp=None, requires_grad=False) -> Tensor\n\n Returns a Tensor of size size filled with 1. By default, the returned Tensor has the same torch.dtype and torch.device as this tensor.\n\n Args:\n size (int...): a list, tuple, or flow.Size of integers defining the shape of the output tensor.\n dtype (flow.dtype, optional): the desired type of returned tensor. Default: if None, same flow.dtype as this tensor.\n device (flow.device, optional): the desired device of returned tensor. Default: if None, same flow.device as this tensor.\n placement (flow.placement, optional): the desired placement of returned consistent tensor. Default: if None, the returned tensor is local one using the argument `device`.\n sbp (flow.sbp.sbp or tuple of flow.sbp.sbp, optional): the desired sbp descriptor of returned consistent tensor. Default: if None, the returned tensor is local one using the argument `device`.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.Tensor(np.ones((1, 2, 3)))\n >>> y = x.new_ones((2, 2))\n >>> y\n tensor([[1., 1.],\n [1., 1.]], dtype=oneflow.float32)\n """\n )\n', (1884, 3328), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
#from oneflow_yolov3.ops.upsample_nearest import upsample_nearest
#from oneflow_yolov3.ops.yolo_detect import yolo_detect
#from oneflow_yolov3.ops.yolo_nms import yolo_nms
layer_number = 1
route_dict = {}
yolo_pos_result=[]
yolo_prob_result=[]
yolo_loss_result=[]
num_classes=80
ignore_thresh=0.7
truth_thresh=1.0
image_height=608
image_width=608
max_out_boxes=120
nms=True
#nms=False
nms_threshold=0.45
#anchor_boxes_size_list=[flow.detection.anchor_boxes_size(10, 13), flow.detection.anchor_boxes_size(16, 30), flow.detection.anchor_boxes_size(33, 23), flow.detection.anchor_boxes_size(30,61), flow.detection.anchor_boxes_size(62, 45), flow.detection.anchor_boxes_size(59, 119), flow.detection.anchor_boxes_size(116,90), flow.detection.anchor_boxes_size(156, 198), flow.detection.anchor_boxes_size(373, 326)]
anchor_boxes_size_list=[10, 13, 16, 30, 33, 23, 30,61, 62, 45, 59, 119, 116,90, 156, 198, 373, 326]
yolo_box_diff_conf=[{'image_height': image_height, 'image_width': image_width, 'layer_height': 19, 'layer_width': 19, 'ignore_thresh': ignore_thresh, 'truth_thresh': truth_thresh, 'anchor_boxes_size': anchor_boxes_size_list, 'box_mask': [6,7,8]},
{'image_height': image_height, 'image_width': image_width, 'layer_height': 38, 'layer_width': 38, 'ignore_thresh': ignore_thresh, 'truth_thresh': truth_thresh, 'anchor_boxes_size': anchor_boxes_size_list, 'box_mask': [3,4,5]},
{'image_height': image_height, 'image_width': image_width, 'layer_height': 76, 'layer_width': 76, 'ignore_thresh': ignore_thresh, 'truth_thresh': truth_thresh, 'anchor_boxes_size': anchor_boxes_size_list, 'box_mask': [0,1,2]}]
#to confirm wh pos, gr 12.19 check with 11 ~/yolov3/predict.job
yolo_conf=[{'layer_height': 19, 'layer_width': 19, 'prob_thresh': 0.5, 'num_classes': 80, 'anchor_boxes_size': [116,90, 156, 198, 373, 326]},
{'layer_height': 38, 'layer_width': 38, 'prob_thresh': 0.5, 'num_classes': 80, 'anchor_boxes_size': [30,61, 62, 45, 59, 119]},
{'layer_height': 76, 'layer_width': 76, 'prob_thresh': 0.5, 'num_classes': 80, 'anchor_boxes_size': [10, 13, 16, 30, 33, 23]}]
def _conv2d_layer(
name,
input,
filters,
kernel_size=3,
strides=1,
padding="SAME",
group_num = 1,
data_format="NCHW",
dilation_rate=1,
activation=op_conf_util.kRelu,
use_bias=False,
weight_initializer=flow.random_uniform_initializer(),
bias_initializer=flow.random_uniform_initializer(),
trainable=True,
):
if data_format == "NCHW":
weight_shape = (int(filters), int(input.shape[1]), int(kernel_size[0]), int(kernel_size[0]))
elif data_format == "NHWC":
weight_shape = (int(filters), int(kernel_size[0]), int(kernel_size[0]), int(input.static_shape[3]))
else:
raise ValueError('data_format must be "NCHW" or "NHWC".')
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
dtype=input.dtype,
initializer=weight_initializer,
trainable=trainable,
)
output = flow.nn.conv2d(
input, weight, strides, padding, data_format, dilation_rate, name=name
)
if use_bias:
bias = flow.get_variable(
name + "-bias",
shape=(filters,),
dtype=input.dtype,
initializer=bias_initializer,
model_name="bias",
trainable=trainable,
)
output = flow.nn.bias_add(output, bias, data_format)
if activation is not None:
if activation == op_conf_util.kRelu:
output = flow.keras.activations.relu(output)
else:
raise NotImplementedError
return output
def _batch_norm(inputs, axis, momentum, epsilon, center=True, scale=True, trainable=True, name=None):
return flow.layers.batch_normalization(
inputs=inputs,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
trainable=trainable,
name=name
)
def _leaky_relu(input, alpha=None, name=None):
return flow.nn.leaky_relu(input, alpha=alpha, name=None)
#return flow.math.relu(input)
def _upsample(input, name=None):
#return flow.detection.upsample_nearest(input, name=name, scale=2, data_format="channels_first")
return flow.layers.upsample_2d(input, size=2, data_format='NCHW', interpolation="nearest", name=name)
#return upsample_nearest(input, name=name, scale=2, data_format="channels_first")
def conv_unit(data, num_filter=1, kernel=(1, 1), stride=(1, 1), pad="same", data_format="NCHW", use_bias=False, trainable=True, prefix=''):
conv = _conv2d_layer(name=prefix + '-conv', input=data, filters=num_filter, kernel_size=kernel, strides=stride, padding='same', data_format=data_format, dilation_rate=1, activation=None, use_bias=use_bias, trainable=trainable)
bn = _batch_norm(conv, axis=1, momentum=0.99, epsilon = 1.0001e-5, trainable=trainable, name=prefix + '-bn')
leaky_relu = _leaky_relu(bn, alpha=0.1, name = prefix + '-leakyRelu')
return leaky_relu
def ResidualBlock(data, prefix, filter, trainable):
global layer_number
layer_number += 1
blob = conv_unit(data, num_filter=filter, kernel=[1,1], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
layer_number += 1
blob = conv_unit(blob, num_filter=filter*2, kernel=[3,3], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
layer_number += 1
shortcut = flow.math.add(data,blob, name= 'yolo-layer' + str(layer_number) + '-shortcut')
return shortcut
def ResidualStage(data, prefix, n, filters, trainable):
global layer_number
layer_number += 1
blob = conv_unit(data, num_filter=filters*2, kernel=[3,3], stride=[2,2], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
for i in range(n):
blob = ResidualBlock(blob,"%s_%d"%(prefix, i), filters, trainable=trainable)
return blob
def DarknetNetConvXBody(in_blob, trainable, on_stage_end=lambda x: x):
global layer_number
filter = [32, 64, 128, 256, 512]
block_counts = [1, 2, 8, 8, 4]
blob = in_blob
for i in range(len(block_counts)):
blob = ResidualStage(blob, "block%d"%i, block_counts[i],
filter[i], trainable=trainable)
if i == 2:
route_dict['layer_36'] = blob
if i == 3:
route_dict['layer_61'] = blob
if i == 4:
route_dict['layer_74'] = blob
on_stage_end(blob)
return blob
def YoloBlock(in_blob, prefix, filter, stage_idx, block_idx, trainable):
global layer_number
layer_number += 1
blob = conv_unit(in_blob, num_filter=filter, kernel=[1,1], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, prefix='yolo-layer' + str(layer_number))
if stage_idx == 0 and block_idx == 2:
route_dict['layer_79'] = blob
if stage_idx == 1 and block_idx == 2:
route_dict['layer_91'] = blob
layer_number += 1
blob = conv_unit(blob, num_filter=filter*2, kernel=[3,3], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, prefix='yolo-layer' + str(layer_number))
return blob
def YoloStage(in_blob, prefix, n, filters, stage_idx, trainable):
global layer_number
blob=in_blob
for i in range(n):
blob = YoloBlock(blob,"%s_%d"%(prefix, i), filters, stage_idx, i, trainable=trainable)
layer_number += 1
blob = _conv2d_layer(name='yolo-layer' + str(layer_number) + '-conv', input=blob, filters=255, kernel_size=[1,1], strides=[1,1], padding='same', data_format="NCHW", dilation_rate=1, activation=None, use_bias=True, trainable=trainable)
return blob
def YoloPredictLayer(in_blob, origin_image_info, i, trainable):
global layer_number
layer_name='yolo-layer' + str(layer_number)
#placeholder for a reshape from (n,h,w,255)->(n,h,w*3,85)
blob = flow.transpose(in_blob, name=layer_name + '-yolo_transpose', perm=[0, 2, 3, 1])
reshape_blob = flow.reshape(blob, shape=(blob.shape[0], -1, 85), name = layer_name + '-yolo_reshape')
position = flow.slice(reshape_blob, [None, 0, 0], [None, -1, 4], name = layer_name+'-yolo_slice_pos')
xy = flow.slice(position, [None, 0, 0], [None, -1, 2], name = layer_name + '-yolo_slice_xy')
wh = flow.slice(position, [None, 0, 2], [None, -1, 2], name = layer_name + '-yolo_slice_wh')
xy = flow.math.sigmoid(xy, name = layer_name + '-yolo_ligistic_xy')
position = flow.concat([xy, wh], axis=2, name = layer_name + '-yolo_concat')
confidence = flow.slice(reshape_blob, [None, 0, 4], [None, -1, 81], name = layer_name + '-yolo_slice_prob')
confidence = flow.math.sigmoid(confidence, name = layer_name+ '-yolo_ligistic_prob')
#[out_bbox, out_probs, valid_num] = flow.detection.yolo_detect(bbox=position, probs=confidence, origin_image_info=origin_image_info, image_height=608, image_width=608, layer_height=yolo_conf[i]['layer_height'], layer_width=yolo_conf[i]['layer_width'], prob_thresh=0.5, num_classes=80, max_out_boxes = max_out_boxes, anchor_boxes=yolo_conf[i]['anchor_boxes_size'])
[out_bbox, out_probs, valid_num] = flow.yolo_detect(bbox=position, probs=confidence, origin_image_info=origin_image_info, image_height=608, image_width=608, layer_height=yolo_conf[i]['layer_height'], layer_width=yolo_conf[i]['layer_width'], prob_thresh=0.5, num_classes=80, max_out_boxes = max_out_boxes, anchor_boxes=yolo_conf[i]['anchor_boxes_size'], name=str(layer_name)+"yolo_detect")
#print("out_bbox.shape",out_bbox.shape)
return out_bbox, out_probs, valid_num
def YoloTrainLayer(in_blob, gt_bbox_blob, gt_label_blob, gt_valid_num_blob, i):
global layer_number
layer_name='yolo-layer' + str(layer_number)
#placeholder for a reshape from (n,h,w,255)->(n,h,w*3,85)
blob = flow.transpose(in_blob, name=layer_name + '-yolo_transpose', perm=[0, 2, 3, 1])
reshape_blob = flow.reshape(blob, shape=(blob.shape[0], -1, 85), name = layer_name + '-yolo_reshape')
position = flow.slice(reshape_blob, [None, 0, 0], [None, -1, 4], name = layer_name+'-yolo_slice_pos')
xy = flow.slice(position, [None, 0, 0], [None, -1, 2], name = layer_name + '-yolo_slice_xy')
wh = flow.slice(position, [None, 0, 2], [None, -1, 2], name = layer_name + '-yolo_slice_wh')
xy = flow.math.logistic(xy, name = layer_name + '-yolo_ligistic_xy')
#xy = flow.math.sigmoid(xy, name = layer_name + '-yolo_ligistic_xy')
position = flow.concat([xy, wh], axis=2, name = layer_name + '-yolo_concat')
confidence = flow.slice(reshape_blob, [None, 0, 4], [None, -1, 81], name = layer_name + '-yolo_slice_prob')
confidence = flow.math.logistic(confidence, name = layer_name+ '-yolo_ligistic_prob')
#confidence = flow.math.sigmoid(confidence, name = layer_name+ '-yolo_ligistic_prob')
objness = flow.slice(confidence, [None, 0, 0], [None, -1, 1], name = layer_name + '-yolo_slice_objness')
clsprob = flow.slice(confidence, [None, 0, 1], [None, -1, 80], name = layer_name + '-yolo_slice_clsprob')
bbox_loc_diff, pos_inds, pos_cls_label, neg_inds, valid_num = flow.yolo_box_diff(position, gt_bbox_blob, gt_label_blob, gt_valid_num_blob, image_height=yolo_box_diff_conf[i]['image_height'], image_width=yolo_box_diff_conf[i]['image_width'], layer_height=yolo_box_diff_conf[i]['layer_height'], layer_width=yolo_box_diff_conf[i]['layer_width'], ignore_thresh=yolo_box_diff_conf[i]['ignore_thresh'], truth_thresh=yolo_box_diff_conf[i]['truth_thresh'], box_mask=yolo_box_diff_conf[i]['box_mask'], anchor_boxes_size= yolo_box_diff_conf[i]['anchor_boxes_size'], name = layer_name +'-yolo_box_loss') #placeholder for yolobox layer
bbox_objness_out, bbox_clsprob_out = flow.yolo_prob_loss(objness, clsprob, pos_inds, pos_cls_label, neg_inds, valid_num, num_classes = 80, name = layer_name +'-yolo_prob_loss')
bbox_loss = flow.concat([bbox_loc_diff, bbox_objness_out, bbox_clsprob_out], axis=2, name = layer_name + '-loss_concat')
bbox_loss_reduce_sum = flow.math.reduce_sum(bbox_loss, axis = [1,2], name = layer_name+ '-bbox_loss_reduce_sum')
return bbox_loss_reduce_sum
def YoloNetBody(in_blob, gt_bbox_blob=None, gt_label_blob=None,gt_valid_num_blob=None, origin_image_info=None, trainable=False):
global layer_number
filter = [512, 256, 128]
block_counts = [3, 3, 3]
blob=in_blob
yolo_result=[]
for i in range(len(filter)):
if i == 0:
blob = route_dict['layer_74']
elif i == 1:
layer_number += 1
#placeholder for route layer
layer_number += 1
blob = conv_unit(route_dict['layer_79'], num_filter=filter[i], kernel=[1,1], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
layer_number += 1
blob = _upsample(blob, name='upsample'+str(i))
layer_number += 1
blob = flow.concat([blob, route_dict['layer_61']], name='route'+str(i), axis=1)
elif i == 2:
layer_number += 1
#placeholder for route layer
layer_number += 1
blob = conv_unit(route_dict['layer_91'], num_filter=filter[i], kernel=[1,1], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
layer_number += 1
blob = _upsample(blob, name='upsample'+str(i))
layer_number += 1
blob = flow.concat([blob, route_dict['layer_36']], axis = 1, name='route'+str(i))
yolo_blob = YoloStage(blob, "%d"%i, block_counts[i],
filter[i], i, trainable=trainable)
layer_number += 1
if trainable == False:
yolo_position, yolo_prob, valid_num = YoloPredictLayer(yolo_blob, origin_image_info, i, trainable=trainable)
yolo_pos_result.append(yolo_position)
yolo_prob_result.append(yolo_prob)
else:
loss = YoloTrainLayer(yolo_blob, gt_bbox_blob, gt_label_blob, gt_valid_num_blob, i)
yolo_loss_result.append(loss)
if trainable == False:
yolo_positions = flow.concat(yolo_pos_result, axis=1, name="concat_pos") #(b, n_boxes, 4)
yolo_probs = flow.concat(yolo_prob_result, axis=1) #(b, n_boxes, 81)
#print(yolo_positions.shape)
#print(yolo_probs.shape)
if nms:
yolo_probs_transpose = flow.transpose(yolo_probs, perm=[0, 2, 1]) #(b, 81, n_boxes)
pre_nms_top_k_inds = flow.math.top_k(yolo_probs_transpose,k=20000) #(b, 81, n_boxes)
pre_nms_top_k_inds1 = flow.reshape(pre_nms_top_k_inds, shape=(pre_nms_top_k_inds.shape[0], pre_nms_top_k_inds.shape[1]*pre_nms_top_k_inds.shape[2]), name="reshape1")#(b, 81*n_boxes)
gathered_yolo_positions = flow.gather(yolo_positions, pre_nms_top_k_inds1, axis=1, batch_dims=1) #(b, 81*n_boxes, 4)
gathered_yolo_positions = flow.reshape(gathered_yolo_positions, shape=(gathered_yolo_positions.shape[0], yolo_probs.shape[2], yolo_positions.shape[1], yolo_positions.shape[2]), name="reshape2") #(b, 81, n_boxes, 4)
gathered_yolo_probs = flow.gather(yolo_probs_transpose, pre_nms_top_k_inds, axis=2, batch_dims=2)#(b, 81, n_boxes)
nms_val = flow.yolo_nms(gathered_yolo_positions, gathered_yolo_probs, iou_threshold=nms_threshold, keep_n=-1, batch_dims=2, name="nms") #b, 81, n_boxes
nms_val_cast = flow.cast(nms_val, dtype=flow.float) #(b, 81, n_boxes)
nms_val_reshape = flow.reshape(nms_val_cast, shape=(nms_val.shape[0], nms_val.shape[1], nms_val.shape[2], 1)) #(b, 81, n_boxes, 1)
final_boxes = flow.math.multiply(gathered_yolo_positions, nms_val_reshape) #(b, 81, 270, 4)
final_probs = flow.math.multiply(gathered_yolo_probs, nms_val_cast) #(b, 81, 270)
return final_boxes, final_probs
return yolo_positions, yolo_probs
else:
return yolo_loss_result
def YoloPredictNet(data, origin_image_info, trainable=False):
#print("nms:", nms)
global layer_number
#data = flow.transpose(data, perm=[0, 3, 1, 2])
blob = conv_unit(data, num_filter=32, kernel=[3,3], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
blob = DarknetNetConvXBody(blob, trainable, lambda x: x)
yolo_pos_result, yolo_prob_result=YoloNetBody(in_blob=blob, origin_image_info=origin_image_info, trainable=trainable)
return yolo_pos_result, yolo_prob_result
def YoloTrainNet(data, gt_box, gt_label,gt_valid_num, trainable=True):
global layer_number
#data = flow.transpose(data, perm=[0, 3, 1, 2])
blob = conv_unit(data, num_filter=32, kernel=[3,3], stride=[1,1], pad="same", data_format="NCHW", use_bias=False, trainable=trainable, prefix='yolo-layer' + str(layer_number))
blob = DarknetNetConvXBody(blob, trainable, lambda x: x)
yolo_loss_result=YoloNetBody(in_blob=blob, gt_bbox_blob=gt_box, gt_label_blob=gt_label,gt_valid_num_blob=gt_valid_num, trainable=trainable)
return yolo_loss_result
| [
"oneflow.nn.leaky_relu",
"oneflow.layers.upsample_2d",
"oneflow.keras.activations.relu",
"oneflow.transpose",
"oneflow.math.sigmoid",
"oneflow.yolo_box_diff",
"oneflow.math.logistic",
"oneflow.slice",
"oneflow.gather",
"oneflow.concat",
"oneflow.cast",
"oneflow.yolo_nms",
"oneflow.math.top_k... | [((3009, 3042), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (3040, 3042), True, 'import oneflow as flow\n'), ((3065, 3098), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (3096, 3098), True, 'import oneflow as flow\n'), ((3469, 3600), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer', 'trainable': 'trainable'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer, trainable=trainable)\n", (3486, 3600), True, 'import oneflow as flow\n'), ((3643, 3733), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, data_format, dilation_rate,\n name=name)\n', (3657, 3733), True, 'import oneflow as flow\n'), ((4345, 4507), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': 'momentum', 'epsilon': 'epsilon', 'center': 'center', 'scale': 'scale', 'trainable': 'trainable', 'name': 'name'}), '(inputs=inputs, axis=axis, momentum=momentum,\n epsilon=epsilon, center=center, scale=scale, trainable=trainable, name=name\n )\n', (4376, 4507), True, 'import oneflow as flow\n'), ((4628, 4677), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['input'], {'alpha': 'alpha', 'name': 'None'}), '(input, alpha=alpha, name=None)\n', (4646, 4677), True, 'import oneflow as flow\n'), ((4857, 4956), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', (['input'], {'size': '(2)', 'data_format': '"""NCHW"""', 'interpolation': '"""nearest"""', 'name': 'name'}), "(input, size=2, data_format='NCHW', interpolation=\n 'nearest', name=name)\n", (4880, 4956), True, 'import oneflow as flow\n'), ((8476, 8555), 'oneflow.transpose', 'flow.transpose', (['in_blob'], 
{'name': "(layer_name + '-yolo_transpose')", 'perm': '[0, 2, 3, 1]'}), "(in_blob, name=layer_name + '-yolo_transpose', perm=[0, 2, 3, 1])\n", (8490, 8555), True, 'import oneflow as flow\n'), ((8573, 8661), 'oneflow.reshape', 'flow.reshape', (['blob'], {'shape': '(blob.shape[0], -1, 85)', 'name': "(layer_name + '-yolo_reshape')"}), "(blob, shape=(blob.shape[0], -1, 85), name=layer_name +\n '-yolo_reshape')\n", (8585, 8661), True, 'import oneflow as flow\n'), ((8673, 8767), 'oneflow.slice', 'flow.slice', (['reshape_blob', '[None, 0, 0]', '[None, -1, 4]'], {'name': "(layer_name + '-yolo_slice_pos')"}), "(reshape_blob, [None, 0, 0], [None, -1, 4], name=layer_name +\n '-yolo_slice_pos')\n", (8683, 8767), True, 'import oneflow as flow\n'), ((8771, 8860), 'oneflow.slice', 'flow.slice', (['position', '[None, 0, 0]', '[None, -1, 2]'], {'name': "(layer_name + '-yolo_slice_xy')"}), "(position, [None, 0, 0], [None, -1, 2], name=layer_name +\n '-yolo_slice_xy')\n", (8781, 8860), True, 'import oneflow as flow\n'), ((8866, 8955), 'oneflow.slice', 'flow.slice', (['position', '[None, 0, 2]', '[None, -1, 2]'], {'name': "(layer_name + '-yolo_slice_wh')"}), "(position, [None, 0, 2], [None, -1, 2], name=layer_name +\n '-yolo_slice_wh')\n", (8876, 8955), True, 'import oneflow as flow\n'), ((8961, 9021), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['xy'], {'name': "(layer_name + '-yolo_ligistic_xy')"}), "(xy, name=layer_name + '-yolo_ligistic_xy')\n", (8978, 9021), True, 'import oneflow as flow\n'), ((9037, 9100), 'oneflow.concat', 'flow.concat', (['[xy, wh]'], {'axis': '(2)', 'name': "(layer_name + '-yolo_concat')"}), "([xy, wh], axis=2, name=layer_name + '-yolo_concat')\n", (9048, 9100), True, 'import oneflow as flow\n'), ((9118, 9214), 'oneflow.slice', 'flow.slice', (['reshape_blob', '[None, 0, 4]', '[None, -1, 81]'], {'name': "(layer_name + '-yolo_slice_prob')"}), "(reshape_blob, [None, 0, 4], [None, -1, 81], name=layer_name +\n '-yolo_slice_prob')\n", (9128, 9214), True, 'import 
oneflow as flow\n'), ((9228, 9298), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['confidence'], {'name': "(layer_name + '-yolo_ligistic_prob')"}), "(confidence, name=layer_name + '-yolo_ligistic_prob')\n", (9245, 9298), True, 'import oneflow as flow\n'), ((10358, 10437), 'oneflow.transpose', 'flow.transpose', (['in_blob'], {'name': "(layer_name + '-yolo_transpose')", 'perm': '[0, 2, 3, 1]'}), "(in_blob, name=layer_name + '-yolo_transpose', perm=[0, 2, 3, 1])\n", (10372, 10437), True, 'import oneflow as flow\n'), ((10455, 10543), 'oneflow.reshape', 'flow.reshape', (['blob'], {'shape': '(blob.shape[0], -1, 85)', 'name': "(layer_name + '-yolo_reshape')"}), "(blob, shape=(blob.shape[0], -1, 85), name=layer_name +\n '-yolo_reshape')\n", (10467, 10543), True, 'import oneflow as flow\n'), ((10555, 10649), 'oneflow.slice', 'flow.slice', (['reshape_blob', '[None, 0, 0]', '[None, -1, 4]'], {'name': "(layer_name + '-yolo_slice_pos')"}), "(reshape_blob, [None, 0, 0], [None, -1, 4], name=layer_name +\n '-yolo_slice_pos')\n", (10565, 10649), True, 'import oneflow as flow\n'), ((10653, 10742), 'oneflow.slice', 'flow.slice', (['position', '[None, 0, 0]', '[None, -1, 2]'], {'name': "(layer_name + '-yolo_slice_xy')"}), "(position, [None, 0, 0], [None, -1, 2], name=layer_name +\n '-yolo_slice_xy')\n", (10663, 10742), True, 'import oneflow as flow\n'), ((10748, 10837), 'oneflow.slice', 'flow.slice', (['position', '[None, 0, 2]', '[None, -1, 2]'], {'name': "(layer_name + '-yolo_slice_wh')"}), "(position, [None, 0, 2], [None, -1, 2], name=layer_name +\n '-yolo_slice_wh')\n", (10758, 10837), True, 'import oneflow as flow\n'), ((10843, 10904), 'oneflow.math.logistic', 'flow.math.logistic', (['xy'], {'name': "(layer_name + '-yolo_ligistic_xy')"}), "(xy, name=layer_name + '-yolo_ligistic_xy')\n", (10861, 10904), True, 'import oneflow as flow\n'), ((10991, 11054), 'oneflow.concat', 'flow.concat', (['[xy, wh]'], {'axis': '(2)', 'name': "(layer_name + '-yolo_concat')"}), "([xy, wh], axis=2, 
name=layer_name + '-yolo_concat')\n", (11002, 11054), True, 'import oneflow as flow\n'), ((11072, 11168), 'oneflow.slice', 'flow.slice', (['reshape_blob', '[None, 0, 4]', '[None, -1, 81]'], {'name': "(layer_name + '-yolo_slice_prob')"}), "(reshape_blob, [None, 0, 4], [None, -1, 81], name=layer_name +\n '-yolo_slice_prob')\n", (11082, 11168), True, 'import oneflow as flow\n'), ((11182, 11253), 'oneflow.math.logistic', 'flow.math.logistic', (['confidence'], {'name': "(layer_name + '-yolo_ligistic_prob')"}), "(confidence, name=layer_name + '-yolo_ligistic_prob')\n", (11200, 11253), True, 'import oneflow as flow\n'), ((11356, 11452), 'oneflow.slice', 'flow.slice', (['confidence', '[None, 0, 0]', '[None, -1, 1]'], {'name': "(layer_name + '-yolo_slice_objness')"}), "(confidence, [None, 0, 0], [None, -1, 1], name=layer_name +\n '-yolo_slice_objness')\n", (11366, 11452), True, 'import oneflow as flow\n'), ((11463, 11560), 'oneflow.slice', 'flow.slice', (['confidence', '[None, 0, 1]', '[None, -1, 80]'], {'name': "(layer_name + '-yolo_slice_clsprob')"}), "(confidence, [None, 0, 1], [None, -1, 80], name=layer_name +\n '-yolo_slice_clsprob')\n", (11473, 11560), True, 'import oneflow as flow\n'), ((11623, 12184), 'oneflow.yolo_box_diff', 'flow.yolo_box_diff', (['position', 'gt_bbox_blob', 'gt_label_blob', 'gt_valid_num_blob'], {'image_height': "yolo_box_diff_conf[i]['image_height']", 'image_width': "yolo_box_diff_conf[i]['image_width']", 'layer_height': "yolo_box_diff_conf[i]['layer_height']", 'layer_width': "yolo_box_diff_conf[i]['layer_width']", 'ignore_thresh': "yolo_box_diff_conf[i]['ignore_thresh']", 'truth_thresh': "yolo_box_diff_conf[i]['truth_thresh']", 'box_mask': "yolo_box_diff_conf[i]['box_mask']", 'anchor_boxes_size': "yolo_box_diff_conf[i]['anchor_boxes_size']", 'name': "(layer_name + '-yolo_box_loss')"}), "(position, gt_bbox_blob, gt_label_blob, gt_valid_num_blob,\n image_height=yolo_box_diff_conf[i]['image_height'], image_width=\n 
yolo_box_diff_conf[i]['image_width'], layer_height=yolo_box_diff_conf[i\n ]['layer_height'], layer_width=yolo_box_diff_conf[i]['layer_width'],\n ignore_thresh=yolo_box_diff_conf[i]['ignore_thresh'], truth_thresh=\n yolo_box_diff_conf[i]['truth_thresh'], box_mask=yolo_box_diff_conf[i][\n 'box_mask'], anchor_boxes_size=yolo_box_diff_conf[i][\n 'anchor_boxes_size'], name=layer_name + '-yolo_box_loss')\n", (11641, 12184), True, 'import oneflow as flow\n'), ((12224, 12364), 'oneflow.yolo_prob_loss', 'flow.yolo_prob_loss', (['objness', 'clsprob', 'pos_inds', 'pos_cls_label', 'neg_inds', 'valid_num'], {'num_classes': '(80)', 'name': "(layer_name + '-yolo_prob_loss')"}), "(objness, clsprob, pos_inds, pos_cls_label, neg_inds,\n valid_num, num_classes=80, name=layer_name + '-yolo_prob_loss')\n", (12243, 12364), True, 'import oneflow as flow\n'), ((12378, 12488), 'oneflow.concat', 'flow.concat', (['[bbox_loc_diff, bbox_objness_out, bbox_clsprob_out]'], {'axis': '(2)', 'name': "(layer_name + '-loss_concat')"}), "([bbox_loc_diff, bbox_objness_out, bbox_clsprob_out], axis=2,\n name=layer_name + '-loss_concat')\n", (12389, 12488), True, 'import oneflow as flow\n'), ((12512, 12603), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['bbox_loss'], {'axis': '[1, 2]', 'name': "(layer_name + '-bbox_loss_reduce_sum')"}), "(bbox_loss, axis=[1, 2], name=layer_name +\n '-bbox_loss_reduce_sum')\n", (12532, 12603), True, 'import oneflow as flow\n'), ((3768, 3912), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-bias')"], {'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer', 'model_name': '"""bias"""', 'trainable': 'trainable'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer, model_name='bias', trainable=trainable)\n", (3785, 3912), True, 'import oneflow as flow\n'), ((3993, 4036), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (4009, 4036), 
True, 'import oneflow as flow\n'), ((14495, 14550), 'oneflow.concat', 'flow.concat', (['yolo_pos_result'], {'axis': '(1)', 'name': '"""concat_pos"""'}), "(yolo_pos_result, axis=1, name='concat_pos')\n", (14506, 14550), True, 'import oneflow as flow\n'), ((14585, 14622), 'oneflow.concat', 'flow.concat', (['yolo_prob_result'], {'axis': '(1)'}), '(yolo_prob_result, axis=1)\n', (14596, 14622), True, 'import oneflow as flow\n'), ((4129, 4164), 'oneflow.keras.activations.relu', 'flow.keras.activations.relu', (['output'], {}), '(output)\n', (4156, 4164), True, 'import oneflow as flow\n'), ((14744, 14786), 'oneflow.transpose', 'flow.transpose', (['yolo_probs'], {'perm': '[0, 2, 1]'}), '(yolo_probs, perm=[0, 2, 1])\n', (14758, 14786), True, 'import oneflow as flow\n'), ((14832, 14878), 'oneflow.math.top_k', 'flow.math.top_k', (['yolo_probs_transpose'], {'k': '(20000)'}), '(yolo_probs_transpose, k=20000)\n', (14847, 14878), True, 'import oneflow as flow\n'), ((14924, 15079), 'oneflow.reshape', 'flow.reshape', (['pre_nms_top_k_inds'], {'shape': '(pre_nms_top_k_inds.shape[0], pre_nms_top_k_inds.shape[1] *\n pre_nms_top_k_inds.shape[2])', 'name': '"""reshape1"""'}), "(pre_nms_top_k_inds, shape=(pre_nms_top_k_inds.shape[0], \n pre_nms_top_k_inds.shape[1] * pre_nms_top_k_inds.shape[2]), name='reshape1'\n )\n", (14936, 15079), True, 'import oneflow as flow\n'), ((15116, 15186), 'oneflow.gather', 'flow.gather', (['yolo_positions', 'pre_nms_top_k_inds1'], {'axis': '(1)', 'batch_dims': '(1)'}), '(yolo_positions, pre_nms_top_k_inds1, axis=1, batch_dims=1)\n', (15127, 15186), True, 'import oneflow as flow\n'), ((15239, 15416), 'oneflow.reshape', 'flow.reshape', (['gathered_yolo_positions'], {'shape': '(gathered_yolo_positions.shape[0], yolo_probs.shape[2], yolo_positions.\n shape[1], yolo_positions.shape[2])', 'name': '"""reshape2"""'}), "(gathered_yolo_positions, shape=(gathered_yolo_positions.shape[\n 0], yolo_probs.shape[2], yolo_positions.shape[1], yolo_positions.shape[\n 2]), 
name='reshape2')\n", (15251, 15416), True, 'import oneflow as flow\n'), ((15456, 15531), 'oneflow.gather', 'flow.gather', (['yolo_probs_transpose', 'pre_nms_top_k_inds'], {'axis': '(2)', 'batch_dims': '(2)'}), '(yolo_probs_transpose, pre_nms_top_k_inds, axis=2, batch_dims=2)\n', (15467, 15531), True, 'import oneflow as flow\n'), ((15565, 15695), 'oneflow.yolo_nms', 'flow.yolo_nms', (['gathered_yolo_positions', 'gathered_yolo_probs'], {'iou_threshold': 'nms_threshold', 'keep_n': '(-1)', 'batch_dims': '(2)', 'name': '"""nms"""'}), "(gathered_yolo_positions, gathered_yolo_probs, iou_threshold=\n nms_threshold, keep_n=-1, batch_dims=2, name='nms')\n", (15578, 15695), True, 'import oneflow as flow\n'), ((15728, 15764), 'oneflow.cast', 'flow.cast', (['nms_val'], {'dtype': 'flow.float'}), '(nms_val, dtype=flow.float)\n', (15737, 15764), True, 'import oneflow as flow\n'), ((15807, 15902), 'oneflow.reshape', 'flow.reshape', (['nms_val_cast'], {'shape': '(nms_val.shape[0], nms_val.shape[1], nms_val.shape[2], 1)'}), '(nms_val_cast, shape=(nms_val.shape[0], nms_val.shape[1],\n nms_val.shape[2], 1))\n', (15819, 15902), True, 'import oneflow as flow\n'), ((15940, 16000), 'oneflow.math.multiply', 'flow.math.multiply', (['gathered_yolo_positions', 'nms_val_reshape'], {}), '(gathered_yolo_positions, nms_val_reshape)\n', (15958, 16000), True, 'import oneflow as flow\n'), ((16038, 16091), 'oneflow.math.multiply', 'flow.math.multiply', (['gathered_yolo_probs', 'nms_val_cast'], {}), '(gathered_yolo_probs, nms_val_cast)\n', (16056, 16091), True, 'import oneflow as flow\n')] |
import numpy as np
import oneflow as flow
import utils
def StyleEncoder(
    input, num_downsamples, num_filters, style_channels,
    padding_mode, activation_norm_type, weight_norm_type,
    nonlinearity, name="StyleEncoder"
):
    """Convolutional encoder that reduces its 4D input to a
    `style_channels`-channel 1x1 "style code" via strided conv blocks,
    global average pooling and a final 1x1 conv.

    Only the reflect / "none" norm / "" weight-norm / relu configuration
    is supported; any other combination fails the asserts below.
    """
    assert padding_mode == "reflect"
    assert activation_norm_type == "none"
    assert weight_norm_type == ""
    assert nonlinearity == "relu"
    namer = utils.namer_factory()

    def conv_block(feat, channels, kernel_size, strides, padding):
        # reflection pad -> conv (no normalization) -> relu
        padded = flow.reflection_pad2d(feat, padding=padding)
        conv = utils.conv2d_layer(
            padded, num_filters=channels, kernel_size=kernel_size,
            strides=strides, padding="VALID", name=namer("conv")
        )
        return flow.nn.relu(conv)

    feat = input
    with flow.scope.namespace(name):
        feat = conv_block(feat, num_filters, kernel_size=7, strides=1, padding=3)
        # Two strided blocks that double the channel count each time ...
        for _ in range(2):
            num_filters *= 2
            feat = conv_block(feat, num_filters, kernel_size=4, strides=2, padding=1)
        # ... then the remaining strided blocks at constant width.
        for _ in range(num_downsamples - 2):
            feat = conv_block(feat, num_filters, kernel_size=4, strides=2, padding=1)
        # Global average pooling, i.e. AdaptiveAvgPool2D(1): flatten the
        # spatial dims onto one axis and reduce it by mean.
        feat = flow.reshape(feat, shape=[*feat.shape[:2], 1, -1])
        feat = flow.math.reduce_mean(feat, axis=3, keepdims=True)
        feat = utils.conv2d_layer(
            feat, num_filters=style_channels, kernel_size=1, strides=1,
            padding="VALID", use_bias=False, name=namer("conv")
        )
    return feat
def ContentEncoder(
    input, num_downsamples, num_res_blocks, image_channels, num_filters,
    padding_mode, activation_norm_type, weight_norm_type, nonlinearity,
    name="ContentEncoder"
):
    """Encode the 4D input into a spatial content feature map: one 7x7
    conv block, `num_downsamples` strided (4x4, stride-2) conv blocks with
    channel doubling, then `num_res_blocks` residual blocks.

    Only the reflect / "instance" norm / "" weight-norm / relu
    configuration is supported (see asserts).
    """
    assert padding_mode == "reflect"
    assert activation_norm_type == "instance"
    assert weight_norm_type == ""
    assert nonlinearity == "relu"
    namer = utils.namer_factory()

    def conv_block(feat, channels, kernel_size, strides, padding):
        # reflection pad -> conv -> instance norm -> relu
        feat = flow.reflection_pad2d(feat, padding=padding)
        feat = utils.conv2d_layer(
            feat, num_filters=channels, kernel_size=kernel_size,
            strides=strides, padding="VALID", name=namer("conv")
        )
        feat = flow.nn.InstanceNorm2d(feat, affine=True)
        return flow.nn.relu(feat)

    def res_block(feat, channels):
        # Two (pad -> 3x3 conv -> instance norm -> relu) stages plus a
        # skip connection; the skip gets a 1x1 projection only when the
        # channel count changes.
        residual = feat
        for _ in range(2):
            residual = flow.reflection_pad2d(residual, padding=1)
            residual = utils.conv2d_layer(
                residual, num_filters=channels, kernel_size=3,
                strides=1, padding="VALID", name=namer("conv")
            )
            residual = flow.nn.InstanceNorm2d(residual, affine=True)
            residual = flow.nn.relu(residual)
        shortcut = feat
        if feat.shape[1] != residual.shape[1]:
            shortcut = utils.conv2d_layer(
                feat, num_filters=channels, kernel_size=1,
                strides=1, padding="VALID", name=namer("conv")
            )
        return shortcut + residual

    feat = input
    with flow.scope.namespace(name):
        feat = conv_block(feat, num_filters, kernel_size=7, strides=1, padding=3)
        for _ in range(num_downsamples):
            num_filters *= 2
            feat = conv_block(feat, num_filters, kernel_size=4, strides=2, padding=1)
        for _ in range(num_res_blocks):
            feat = res_block(feat, num_filters)
    return feat
def MLP(
    input, output_dim, latent_dim, num_layers,
    activation_norm_type, nonlinearity, name="MLP"
):
    """Fully connected stack: `num_layers - 2` hidden layers of width
    `latent_dim` followed by one layer of width `output_dim`, every layer
    ReLU-activated. `input` must be 2D (batch, features).
    """
    assert activation_norm_type == "none"
    assert nonlinearity == "relu"
    assert len(input.shape) == 2
    namer = utils.namer_factory()

    def dense_relu(feat, units):
        dense = utils.dense_layer(feat, units=units, name=namer("dense"))
        return flow.nn.relu(dense)

    feat = input
    with flow.scope.namespace(name):
        for _ in range(num_layers - 2):
            feat = dense_relu(feat, latent_dim)
        feat = dense_relu(feat, output_dim)
    return feat
def Decoder(
    input, style, num_enc_output_channels, num_image_channels=3,
    num_upsamples=4, padding_type="reflect", weight_norm_type="",
    nonlinearity="relu", name="Decoder",
    _persistent_namer=utils.namer_factory()
):
    """Decode a content feature map into an image, modulated by `style`.

    Two AdaIN residual blocks are followed by `num_upsamples` upsampling
    residual blocks (channel count halved at each stage) and a final 7x7
    conv with tanh activation.

    Args:
        input: 4D content feature map with `num_enc_output_channels` channels.
        style: 2D style code, shape (batch, style_dim).
        num_image_channels: channels of the produced image (default 3).
        _persistent_namer: NOTE(review) — a call-once default argument,
            apparently deliberate so that "upsample" layer names continue a
            single sequence across every Decoder call; do not pass explicitly.

    Returns:
        A tensor of `num_image_channels` channels, tanh-activated.
    """
    assert padding_type == "reflect"
    assert weight_norm_type == ""
    assert nonlinearity == "relu"
    namer = utils.namer_factory()

    def AdaptiveNorm(input, style):
        # Adaptive instance norm: normalize `input` without affine params,
        # then scale/shift per channel with (gamma, beta) predicted from
        # `style` by a single dense layer of width 2*C.
        assert len(input.shape) == 4
        assert len(style.shape) == 2
        out1 = flow.nn.InstanceNorm2d(input, affine=False)
        out2 = utils.dense_layer(
            style, units=input.shape[1] * 2,
            use_bias=False, name=namer("dense"))
        gamma = flow.slice(out2, begin=[None, 0], size=[None, input.shape[1]])
        gamma = flow.reshape(gamma, shape=(*gamma.shape, 1, 1))
        beta = flow.slice(
            out2, begin=[None, input.shape[1]], size=[None, input.shape[1]]
        )
        # BUG FIX: this line previously reshaped `gamma` again, so the
        # `beta` slice computed above was discarded and the additive shift
        # was actually a copy of gamma. Reshape `beta` instead.
        beta = flow.reshape(beta, shape=(*beta.shape, 1, 1))
        return out1 * (1 + gamma) + beta

    def base_res_block(input, style, num_filters):
        # Residual block: two (pad -> 3x3 conv -> AdaIN -> relu) stages,
        # with a 1x1 conv shortcut only when the channel count changes.
        out = input
        for _ in range(2):
            out = flow.reflection_pad2d(out, padding=1)
            out = utils.conv2d_layer(
                out, num_filters=num_filters, kernel_size=3,
                strides=1, padding="VALID", name=namer("conv")
            )
            out = AdaptiveNorm(out, style)
            out = flow.nn.relu(out)
        if input.shape[1] == out.shape[1]:
            return input + out
        else:
            shortcut_out = utils.conv2d_layer(
                input, num_filters=num_filters, kernel_size=1,
                strides=1, padding="VALID", name=namer("conv")
            )
            return shortcut_out + out

    def base_up_res_block(input, style, num_filters):
        # Upsampling residual block: 5x5 conv -> AdaIN -> relu -> upsample
        # -> 5x5 conv -> AdaIN -> relu, plus an upsampled shortcut that is
        # 1x1-projected and instance-normalized when channels change.
        out = flow.reflection_pad2d(input, padding=2)
        out = utils.conv2d_layer(
            out, num_filters=num_filters, kernel_size=5,
            strides=1, padding="VALID", name=namer("conv")
        )
        out = AdaptiveNorm(out, style)
        out = flow.nn.relu(out)
        out = flow.layers.upsample_2d(
            out, name=_persistent_namer("upsample")
        )
        out = flow.reflection_pad2d(out, padding=2)
        out = utils.conv2d_layer(
            out, num_filters=num_filters, kernel_size=5,
            strides=1, padding="VALID", name=namer("conv")
        )
        out = AdaptiveNorm(out, style)
        out = flow.nn.relu(out)
        shortcut = flow.layers.upsample_2d(
            input, name=_persistent_namer("upsample")
        )
        if shortcut.shape[1] != out.shape[1]:
            shortcut = utils.conv2d_layer(
                shortcut, num_filters=num_filters, kernel_size=1,
                strides=1, padding="VALID", name=namer("conv")
            )
            shortcut = flow.nn.InstanceNorm2d(shortcut, affine=True)
        return shortcut + out

    def Conv2dBlock(input, num_filters, kernel_size, strides, padding):
        # Final projection to image space: pad -> conv -> tanh.
        out = input
        out = flow.reflection_pad2d(out, padding=padding)
        out = utils.conv2d_layer(
            out, num_filters=num_filters, kernel_size=kernel_size,
            strides=strides, padding="VALID", name=namer("conv")
        )
        return flow.math.tanh(out)

    out = input
    with flow.scope.namespace(name):
        out = base_res_block(out, style, num_filters=num_enc_output_channels)
        out = base_res_block(out, style, num_filters=num_enc_output_channels)
        for _ in range(num_upsamples):
            num_enc_output_channels //= 2
            out = base_up_res_block(out, style, num_filters=num_enc_output_channels)
        out = Conv2dBlock(
            out, num_filters=num_image_channels,
            kernel_size=7, strides=1, padding=3
        )
    return out
| [
"oneflow.scope.namespace",
"oneflow.math.reduce_mean",
"oneflow.slice",
"oneflow.reflection_pad2d",
"oneflow.nn.relu",
"oneflow.math.tanh",
"oneflow.nn.InstanceNorm2d",
"oneflow.reshape"
] | [((393, 414), 'utils.namer_factory', 'utils.namer_factory', ([], {}), '()\n', (412, 414), False, 'import utils\n'), ((2083, 2104), 'utils.namer_factory', 'utils.namer_factory', ([], {}), '()\n', (2102, 2104), False, 'import utils\n'), ((4021, 4042), 'utils.namer_factory', 'utils.namer_factory', ([], {}), '()\n', (4040, 4042), False, 'import utils\n'), ((4658, 4679), 'utils.namer_factory', 'utils.namer_factory', ([], {}), '()\n', (4677, 4679), False, 'import utils\n'), ((4802, 4823), 'utils.namer_factory', 'utils.namer_factory', ([], {}), '()\n', (4821, 4823), False, 'import utils\n'), ((502, 547), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['input'], {'padding': 'padding'}), '(input, padding=padding)\n', (523, 547), True, 'import oneflow as flow\n'), ((739, 756), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (751, 756), True, 'import oneflow as flow\n'), ((802, 828), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (822, 828), True, 'import oneflow as flow\n'), ((1411, 1459), 'oneflow.reshape', 'flow.reshape', (['out'], {'shape': '[*out.shape[:2], 1, -1]'}), '(out, shape=[*out.shape[:2], 1, -1])\n', (1423, 1459), True, 'import oneflow as flow\n'), ((1474, 1523), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['out'], {'axis': '(3)', 'keepdims': '(True)'}), '(out, axis=3, keepdims=True)\n', (1495, 1523), True, 'import oneflow as flow\n'), ((2213, 2256), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': 'padding'}), '(out, padding=padding)\n', (2234, 2256), True, 'import oneflow as flow\n'), ((2448, 2488), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['out'], {'affine': '(True)'}), '(out, affine=True)\n', (2470, 2488), True, 'import oneflow as flow\n'), ((2503, 2520), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (2515, 2520), True, 'import oneflow as flow\n'), ((3299, 3325), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), 
'(name)\n', (3319, 3325), True, 'import oneflow as flow\n'), ((4180, 4197), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (4192, 4197), True, 'import oneflow as flow\n'), ((4243, 4269), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (4263, 4269), True, 'import oneflow as flow\n'), ((4951, 4994), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'affine': '(False)'}), '(input, affine=False)\n', (4973, 4994), True, 'import oneflow as flow\n'), ((5142, 5204), 'oneflow.slice', 'flow.slice', (['out2'], {'begin': '[None, 0]', 'size': '[None, input.shape[1]]'}), '(out2, begin=[None, 0], size=[None, input.shape[1]])\n', (5152, 5204), True, 'import oneflow as flow\n'), ((5221, 5268), 'oneflow.reshape', 'flow.reshape', (['gamma'], {'shape': '(*gamma.shape, 1, 1)'}), '(gamma, shape=(*gamma.shape, 1, 1))\n', (5233, 5268), True, 'import oneflow as flow\n'), ((5285, 5360), 'oneflow.slice', 'flow.slice', (['out2'], {'begin': '[None, input.shape[1]]', 'size': '[None, input.shape[1]]'}), '(out2, begin=[None, input.shape[1]], size=[None, input.shape[1]])\n', (5295, 5360), True, 'import oneflow as flow\n'), ((5398, 5444), 'oneflow.reshape', 'flow.reshape', (['gamma'], {'shape': '(*beta.shape, 1, 1)'}), '(gamma, shape=(*beta.shape, 1, 1))\n', (5410, 5444), True, 'import oneflow as flow\n'), ((6291, 6330), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['input'], {'padding': '(2)'}), '(input, padding=2)\n', (6312, 6330), True, 'import oneflow as flow\n'), ((6545, 6562), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (6557, 6562), True, 'import oneflow as flow\n'), ((6680, 6717), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '(2)'}), '(out, padding=2)\n', (6701, 6717), True, 'import oneflow as flow\n'), ((6932, 6949), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (6944, 6949), True, 'import oneflow as flow\n'), ((7513, 7556), 'oneflow.reflection_pad2d', 
'flow.reflection_pad2d', (['out'], {'padding': 'padding'}), '(out, padding=padding)\n', (7534, 7556), True, 'import oneflow as flow\n'), ((7750, 7769), 'oneflow.math.tanh', 'flow.math.tanh', (['out'], {}), '(out)\n', (7764, 7769), True, 'import oneflow as flow\n'), ((7796, 7822), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (7816, 7822), True, 'import oneflow as flow\n'), ((2648, 2685), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '(1)'}), '(out, padding=1)\n', (2669, 2685), True, 'import oneflow as flow\n'), ((2881, 2921), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['out'], {'affine': '(True)'}), '(out, affine=True)\n', (2903, 2921), True, 'import oneflow as flow\n'), ((2940, 2957), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (2952, 2957), True, 'import oneflow as flow\n'), ((5605, 5642), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '(1)'}), '(out, padding=1)\n', (5626, 5642), True, 'import oneflow as flow\n'), ((5881, 5898), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (5893, 5898), True, 'import oneflow as flow\n'), ((7324, 7369), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['shortcut'], {'affine': '(True)'}), '(shortcut, affine=True)\n', (7346, 7369), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
from oneflow.nn.parallel import DistributedDataParallel as ddp
import oneflow.unittest
import numpy as np
@flow.unittest.skip_unless_1n2d()
class TestDDP(flow.unittest.TestCase):
    """DistributedDataParallel gradient tests, run on 1 node / 2 ranks.

    Each test feeds rank 0 the input [1] and rank 1 the input [2] and
    checks that parameter gradients are synchronized across ranks.
    """

    def test_ddp_basic(test_case):
        class Mul(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = flow.nn.Parameter(flow.Tensor([1]))

            def forward(self, x):
                return x * self.w

        rank = flow.framework.distribute.get_rank()
        if rank not in (0, 1):
            raise ValueError()
        # rank 0 -> [1], rank 1 -> [2]
        inp = flow.Tensor([rank + 1]).to("cuda")
        model = ddp(Mul().to("cuda"))
        model(inp).backward()
        # d(x*w)/dw averaged over ranks: (1 + 2) / 2
        test_case.assertTrue(np.allclose(model.w.grad.numpy(), np.array([1.5])))

    def test_ddp_with_unused_param(test_case):
        class Model(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.w = flow.nn.Parameter(flow.Tensor([1]))
                self.used_only_in_rank0 = flow.nn.Parameter(flow.Tensor([2]))
                self.unused_in_all_ranks = flow.nn.Parameter(flow.Tensor([3]))

            def forward(self, x):
                x = x * self.w
                if flow.framework.distribute.get_rank() == 0:
                    x = x * self.used_only_in_rank0
                return x

        rank = flow.framework.distribute.get_rank()
        if rank not in (0, 1):
            raise ValueError()
        inp = flow.Tensor([rank + 1]).to("cuda")
        model = ddp(Model().to("cuda"))
        model(inp).backward()
        # Parameters unused on some (or all) ranks must still end up with
        # synchronized (possibly zero) gradients.
        for param, expected in (
            (model.w, [2]),
            (model.used_only_in_rank0, [0.5]),
            (model.unused_in_all_ranks, [0]),
        ):
            test_case.assertTrue(np.allclose(param.grad.numpy(), np.array(expected)))

    def test_out_of_order_execution(test_case):
        class Model(flow.nn.Module):
            def __init__(self):
                super().__init__()
                self.w1 = flow.nn.Parameter(flow.Tensor([1]))
                self.w2 = flow.nn.Parameter(flow.Tensor([2]))
                self.w3 = flow.nn.Parameter(flow.Tensor([3]))

            def forward(self, x):
                # The two ranks touch the parameters in opposite orders,
                # so their gradients become ready out of order.
                if flow.framework.distribute.get_rank() == 0:
                    x *= self.w1
                    x *= self.w2
                    x *= self.w3
                else:
                    x *= self.w3
                    x *= self.w2
                    x *= self.w1
                return x

        rank = flow.framework.distribute.get_rank()
        if rank not in (0, 1):
            raise ValueError()
        inp = flow.Tensor([rank + 1]).to("cuda")
        model = ddp(Model().to("cuda"))
        model(inp).backward()
        for param, expected in (
            (model.w1, [9]),
            (model.w2, [4.5]),
            (model.w3, [3]),
        ):
            test_case.assertTrue(np.allclose(param.grad.numpy(), np.array(expected)))
# Entry point: run the DDP test suite under the standard unittest runner
# (the class decorator above requires a 1-node / 2-rank launch).
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.nn.parallel.DistributedDataParallel",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.framework.distribute.get_rank",
"oneflow.Tensor"
] | [((739, 771), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (769, 771), True, 'import oneflow as flow\n'), ((3953, 3968), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3966, 3968), False, 'import unittest\n'), ((1094, 1130), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (1128, 1130), True, 'import oneflow as flow\n'), ((1355, 1361), 'oneflow.nn.parallel.DistributedDataParallel', 'ddp', (['m'], {}), '(m)\n', (1358, 1361), True, 'from oneflow.nn.parallel import DistributedDataParallel as ddp\n'), ((2069, 2105), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (2103, 2105), True, 'import oneflow as flow\n'), ((2332, 2338), 'oneflow.nn.parallel.DistributedDataParallel', 'ddp', (['m'], {}), '(m)\n', (2335, 2338), True, 'from oneflow.nn.parallel import DistributedDataParallel as ddp\n'), ((3381, 3417), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (3415, 3417), True, 'import oneflow as flow\n'), ((3644, 3650), 'oneflow.nn.parallel.DistributedDataParallel', 'ddp', (['m'], {}), '(m)\n', (3647, 3650), True, 'from oneflow.nn.parallel import DistributedDataParallel as ddp\n'), ((1169, 1185), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (1180, 1185), True, 'import oneflow as flow\n'), ((2144, 2160), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (2155, 2160), True, 'import oneflow as flow\n'), ((3456, 3472), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (3467, 3472), True, 'import oneflow as flow\n'), ((1226, 1242), 'oneflow.Tensor', 'flow.Tensor', (['[2]'], {}), '([2])\n', (1237, 1242), True, 'import oneflow as flow\n'), ((1460, 1475), 'numpy.array', 'np.array', (['[1.5]'], {}), '([1.5])\n', (1468, 1475), True, 'import numpy as np\n'), ((2201, 2217), 'oneflow.Tensor', 'flow.Tensor', (['[2]'], {}), '([2])\n', (2212, 2217), True, 'import 
oneflow as flow\n'), ((2437, 2450), 'numpy.array', 'np.array', (['[2]'], {}), '([2])\n', (2445, 2450), True, 'import numpy as np\n'), ((2542, 2557), 'numpy.array', 'np.array', (['[0.5]'], {}), '([0.5])\n', (2550, 2557), True, 'import numpy as np\n'), ((2659, 2672), 'numpy.array', 'np.array', (['[0]'], {}), '([0])\n', (2667, 2672), True, 'import numpy as np\n'), ((3513, 3529), 'oneflow.Tensor', 'flow.Tensor', (['[2]'], {}), '([2])\n', (3524, 3529), True, 'import oneflow as flow\n'), ((3750, 3763), 'numpy.array', 'np.array', (['[9]'], {}), '([9])\n', (3758, 3763), True, 'import numpy as np\n'), ((3826, 3841), 'numpy.array', 'np.array', (['[4.5]'], {}), '([4.5])\n', (3834, 3841), True, 'import numpy as np\n'), ((3904, 3917), 'numpy.array', 'np.array', (['[3]'], {}), '([3])\n', (3912, 3917), True, 'import numpy as np\n'), ((991, 1007), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (1002, 1007), True, 'import oneflow as flow\n'), ((1673, 1689), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (1684, 1689), True, 'import oneflow as flow\n'), ((1751, 1767), 'oneflow.Tensor', 'flow.Tensor', (['[2]'], {}), '([2])\n', (1762, 1767), True, 'import oneflow as flow\n'), ((1830, 1846), 'oneflow.Tensor', 'flow.Tensor', (['[3]'], {}), '([3])\n', (1841, 1846), True, 'import oneflow as flow\n'), ((1933, 1969), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (1967, 1969), True, 'import oneflow as flow\n'), ((2881, 2897), 'oneflow.Tensor', 'flow.Tensor', (['[1]'], {}), '([1])\n', (2892, 2897), True, 'import oneflow as flow\n'), ((2943, 2959), 'oneflow.Tensor', 'flow.Tensor', (['[2]'], {}), '([2])\n', (2954, 2959), True, 'import oneflow as flow\n'), ((3005, 3021), 'oneflow.Tensor', 'flow.Tensor', (['[3]'], {}), '([3])\n', (3016, 3021), True, 'import oneflow as flow\n'), ((3077, 3113), 'oneflow.framework.distribute.get_rank', 'flow.framework.distribute.get_rank', ([], {}), '()\n', (3111, 3113), True, 'import oneflow 
as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
# Fixtures for the basic Conv2d test: a fixed (3, 1, 3, 3) kernel, a
# (1, 1, 5, 5) input, and the hard-coded expected (1, 3, 3, 3) output
# (presumably captured from a reference run — values are verbatim).
test_conv2d_weight = np.array(
    [
        [
            [
                [0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
                [0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
                [1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
            ]
        ],
        [
            [
                [0.29670074582099915, 1.3111951351165771, 0.5035904049873352],
                [-1.1894450187683105, -0.5502137541770935, -1.591875672340393],
                [-1.1081947088241577, 0.07872020453214645, -0.9185634255409241],
            ]
        ],
        [
            [
                [-0.7457143664360046, -1.2080862522125244, 1.8140212297439575],
                [-1.5227429866790771, -2.515244960784912, -1.3549325466156006],
                [-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],
            ]
        ],
    ]
)
# (1, 1, 5, 5) input image.
test_conv2d_data = np.array(
    [
        [
            [
                [
                    1.1630785465240479,
                    0.4838046133518219,
                    0.299563467502594,
                    0.15302546322345734,
                    -1.168814778327942,
                ],
                [
                    1.5580710172653198,
                    -0.5459445714950562,
                    -2.3556296825408936,
                    0.5414402484893799,
                    2.678506374359131,
                ],
                [
                    1.2546343803405762,
                    -0.5487740635871887,
                    -0.6810643672943115,
                    -0.13531559705734253,
                    0.37723132967948914,
                ],
                [
                    0.41016456484794617,
                    0.5712682008743286,
                    -2.757962703704834,
                    1.0762799978256226,
                    -0.6141325235366821,
                ],
                [
                    1.830764889717102,
                    -1.1468064785003662,
                    0.053837940096855164,
                    -2.5074806213378906,
                    -0.5916498899459839,
                ],
            ]
        ]
    ]
)
# Expected (1, 3, 3, 3) convolution output.
test_conv2d_output = np.array(
    [
        [
            [
                [0.9699610471725464, -0.20758534967899323, 2.3857712745666504],
                [0.3666309118270874, 4.690882682800293, -8.203354835510254],
                [2.6072847843170166, -1.9033538103103638, 2.331153154373169],
            ],
            [
                [2.519343852996826, 2.3757898807525635, -1.6613528728485107],
                [0.5777544379234314, -3.5739502906799316, 5.349126815795898],
                [0.729295015335083, 1.5791023969650269, 3.7627718448638916],
            ],
            [
                [-0.27685487270355225, 6.446267127990723, -2.762883424758911],
                [-8.25644588470459, 9.616064071655273, 8.005367279052734],
                [-0.6944921016693115, 3.866114854812622, 4.788446426391602],
            ],
        ]
    ]
)
# Fixtures for the Conv2d-with-bias test: (3, 1, 3, 3) kernel, (3,) bias,
# (1, 1, 5, 5) input, and the expected (1, 3, 3, 3) output.
test_conv2d_with_bias_weight = np.array(
    [
        [
            [
                [1.8271433115005493, -1.0446699857711792, 1.0062190294265747],
                [0.5174201130867004, -0.806931734085083, 1.3769007921218872],
                [0.205885112285614, 0.9943519234657288, -0.23580588400363922],
            ]
        ],
        [
            [
                [0.29881811141967773, -1.9982075691223145, 0.3511354625225067],
                [-0.7644741535186768, 1.2594351768493652, -0.9629734754562378],
                [0.5080506205558777, 0.7561734318733215, 1.6839302778244019],
            ]
        ],
        [
            [
                [1.2573646306991577, 0.13123232126235962, 1.6403018236160278],
                [-1.2138012647628784, 2.399970531463623, -0.38509097695350647],
                [-0.9878040552139282, 0.9585888385772705, -1.4976465702056885],
            ]
        ],
    ]
)
# Per-output-channel bias, shape (3,).
test_conv2d_with_bias_bias = np.array(
    [0.6605162620544434, -0.18903568387031555, -0.27302607893943787]
)
# (1, 1, 5, 5) input image.
test_conv2d_with_bias_data = np.array(
    [
        [
            [
                [
                    -0.47827261686325073,
                    -1.1739492416381836,
                    -0.7921845316886902,
                    0.9321041703224182,
                    -3.1557741165161133,
                ],
                [
                    2.1935296058654785,
                    -0.5385921001434326,
                    -0.8611332774162292,
                    -1.881519079208374,
                    -0.7205708026885986,
                ],
                [
                    -0.35601571202278137,
                    -0.15963983535766602,
                    1.797447681427002,
                    0.19594945013523102,
                    -1.7376397848129272,
                ],
                [
                    0.047347065061330795,
                    0.14580930769443512,
                    0.32604914903640747,
                    0.4578782916069031,
                    -0.8942581415176392,
                ],
                [
                    0.49383941292762756,
                    -0.9043426513671875,
                    -1.2140793800354004,
                    2.1564064025878906,
                    1.0938222408294678,
                ],
            ]
        ]
    ]
)
# Expected (1, 3, 3, 3) output with bias applied.
test_conv2d_with_bias_output = np.array(
    [
        [
            [
                [-0.05607491731643677, -0.185230553150177, -3.8808679580688477],
                [6.861937046051025, -2.3341472148895264, -0.5597308874130249],
                [1.8299254179000854, -2.770848274230957, 2.1958212852478027],
            ],
            [
                [2.9348952770233154, 4.117504119873047, -6.278541088104248],
                [0.2638452351093292, 3.998856782913208, 2.612290620803833],
                [-1.9891828298568726, -1.6476304531097412, 3.39066219329834],
            ],
            [
                [-8.44466781616211, 0.5747121572494507, -8.501373291015625],
                [-0.036642804741859436, -0.23458999395370483, -2.370849370956421],
                [2.8372013568878174, -2.987276077270508, 1.8382092714309692],
            ],
        ]
    ]
)
# Fixtures for the grouped Conv2d test: (2, 1, 3, 3) kernel, (1, 2, 5, 5)
# input (two input channels), and the expected (1, 2, 3, 3) output.
test_conv2d_group_weight = np.array(
    [
        [
            [
                [-0.7248556613922119, 1.1119636297225952, -0.47827261686325073],
                [-1.1739492416381836, -0.7921845316886902, 0.9321041703224182],
                [-3.1557741165161133, 2.1935296058654785, -0.5385921001434326],
            ]
        ],
        [
            [
                [-0.8611332774162292, -1.881519079208374, -0.7205708026885986],
                [-0.35601571202278137, -0.15963983535766602, 1.797447681427002],
                [0.19594945013523102, -1.7376397848129272, 0.047347065061330795],
            ]
        ],
    ]
)
# (1, 2, 5, 5) two-channel input.
test_conv2d_group_data = np.array(
    [
        [
            [
                [
                    1.1630785465240479,
                    0.4838046133518219,
                    0.299563467502594,
                    0.15302546322345734,
                    -1.168814778327942,
                ],
                [
                    1.5580710172653198,
                    -0.5459445714950562,
                    -2.3556296825408936,
                    0.5414402484893799,
                    2.678506374359131,
                ],
                [
                    1.2546343803405762,
                    -0.5487740635871887,
                    -0.6810643672943115,
                    -0.13531559705734253,
                    0.37723132967948914,
                ],
                [
                    0.41016456484794617,
                    0.5712682008743286,
                    -2.757962703704834,
                    1.0762799978256226,
                    -0.6141325235366821,
                ],
                [
                    1.830764889717102,
                    -1.1468064785003662,
                    0.053837940096855164,
                    -2.5074806213378906,
                    -0.5916498899459839,
                ],
            ],
            [
                [
                    0.8586049675941467,
                    -0.2279418259859085,
                    0.2013147622346878,
                    0.35005471110343933,
                    0.5360521078109741,
                ],
                [
                    1.5194443464279175,
                    1.9040879011154175,
                    -1.5734431743621826,
                    -0.14007866382598877,
                    0.29670074582099915,
                ],
                [
                    1.3111951351165771,
                    0.5035904049873352,
                    -1.1894450187683105,
                    -0.5502137541770935,
                    -1.591875672340393,
                ],
                [
                    -1.1081947088241577,
                    0.07872020453214645,
                    -0.9185634255409241,
                    -0.7457143664360046,
                    -1.2080862522125244,
                ],
                [
                    1.8140212297439575,
                    -1.5227429866790771,
                    -2.515244960784912,
                    -1.3549325466156006,
                    -0.9574840068817139,
                ],
            ],
        ]
    ]
)
# Expected (1, 2, 3, 3) grouped-convolution output.
test_conv2d_group_output = np.array(
    [
        [
            [
                [-8.836943626403809, 3.2316627502441406, 6.994439601898193],
                [-0.8386597037315369, -9.857108116149902, 13.68197250366211],
                [-13.020713806152344, 7.310227870941162, -3.3760271072387695],
            ],
            [
                [-4.803101539611816, 1.026240587234497, 0.5452112555503845],
                [-6.839838027954102, 2.0195930004119873, 0.11328654736280441],
                [0.393694669008255, 4.987061023712158, 3.297354221343994],
            ],
        ]
    ]
)
# Fixtures for the padded Conv2d test: (1, 1, 3, 3) kernel, (1, 1, 5, 5)
# input, and the expected (1, 1, 5, 7) output (padding widens the result).
test_conv2d_padding_weight = np.array(
    [
        [
            [
                [0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
                [0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
                [1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
            ]
        ]
    ]
)
# (1, 1, 5, 5) input image (same values as the basic-test input).
test_conv2d_padding_data = np.array(
    [
        [
            [
                [
                    1.1630785465240479,
                    0.4838046133518219,
                    0.299563467502594,
                    0.15302546322345734,
                    -1.168814778327942,
                ],
                [
                    1.5580710172653198,
                    -0.5459445714950562,
                    -2.3556296825408936,
                    0.5414402484893799,
                    2.678506374359131,
                ],
                [
                    1.2546343803405762,
                    -0.5487740635871887,
                    -0.6810643672943115,
                    -0.13531559705734253,
                    0.37723132967948914,
                ],
                [
                    0.41016456484794617,
                    0.5712682008743286,
                    -2.757962703704834,
                    1.0762799978256226,
                    -0.6141325235366821,
                ],
                [
                    1.830764889717102,
                    -1.1468064785003662,
                    0.053837940096855164,
                    -2.5074806213378906,
                    -0.5916498899459839,
                ],
            ]
        ]
    ]
)
# Expected (1, 1, 5, 7) output.
test_conv2d_padding_output = np.array(
    [
        [
            [
                [
                    1.5489805936813354,
                    -1.0164761543273926,
                    5.277345657348633,
                    3.153532028198242,
                    -7.301508903503418,
                    -3.7565059661865234,
                    4.690962314605713,
                ],
                [
                    2.425799608230591,
                    -2.0592665672302246,
                    0.9699610471725464,
                    -0.20758534967899323,
                    2.3857712745666504,
                    1.1719579696655273,
                    0.6523551940917969,
                ],
                [
                    2.1625545024871826,
                    -1.3517316579818726,
                    0.3666309118270874,
                    4.690882682800293,
                    -8.203354835510254,
                    3.0248217582702637,
                    1.2624683380126953,
                ],
                [
                    0.6193475723266602,
                    -2.0285415649414062,
                    2.6072847843170166,
                    -1.9033538103103638,
                    2.331153154373169,
                    -3.998155355453491,
                    -1.0176407098770142,
                ],
                [
                    2.8643176555633545,
                    -0.7396122217178345,
                    -0.2253415733575821,
                    -2.846742630004883,
                    -4.961236476898193,
                    -0.1308247298002243,
                    -0.7344070672988892,
                ],
            ]
        ]
    ]
)
test_conv2d_stride_weight = np.array(
[
[
[
[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
[0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
[1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
]
]
]
)
test_conv2d_stride_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
],
[
1.5580710172653198,
-0.5459445714950562,
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
],
[
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
0.37723132967948914,
],
[
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
],
[
1.830764889717102,
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
],
]
]
]
)
test_conv2d_stride_output = np.array(
[
[
[
[-1.0164761543273926, -7.301508903503418],
[-1.3517316579818726, -8.203354835510254],
[-0.7396122217178345, -4.961236476898193],
]
]
]
)
test_conv2d_kernel_weight = np.array(
[
[
[
[
-0.9574840068817139,
-0.7248556613922119,
1.1119636297225952,
-0.47827261686325073,
-1.1739492416381836,
],
[
-0.7921845316886902,
0.9321041703224182,
-3.1557741165161133,
2.1935296058654785,
-0.5385921001434326,
],
[
-0.8611332774162292,
-1.881519079208374,
-0.7205708026885986,
-0.35601571202278137,
-0.15963983535766602,
],
]
]
]
)
test_conv2d_kernel_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
1.5580710172653198,
-0.5459445714950562,
],
[
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
],
[
0.37723132967948914,
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
1.830764889717102,
],
[
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
0.8586049675941467,
-0.2279418259859085,
0.2013147622346878,
],
[
0.35005471110343933,
0.5360521078109741,
1.5194443464279175,
1.9040879011154175,
-1.5734431743621826,
-0.14007866382598877,
0.29670074582099915,
],
[
1.3111951351165771,
0.5035904049873352,
-1.1894450187683105,
-0.5502137541770935,
-1.591875672340393,
-1.1081947088241577,
0.07872020453214645,
],
[
-0.9185634255409241,
-0.7457143664360046,
-1.2080862522125244,
1.8140212297439575,
-1.5227429866790771,
-2.515244960784912,
-1.3549325466156006,
],
]
]
]
)
test_conv2d_kernel_output = np.array(
[
[
[
[-3.5647754669189453, -4.234736919403076, 1.4046944379806519],
[-0.6964312791824341, 16.42838478088379, -9.649789810180664],
[4.312150478363037, -6.283960819244385, -4.8443922996521],
[-2.772286891937256, -4.483709812164307, 12.315184593200684],
[7.39893913269043, 1.305102825164795, -2.049992561340332],
]
]
]
)
test_conv2d_dilation_weight = np.array(
[
[
[
[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],
[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902],
[0.9321041703224182, -3.1557741165161133, 2.1935296058654785],
]
]
]
)
test_conv2d_dilation_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
1.5580710172653198,
-0.5459445714950562,
],
[
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
],
[
0.37723132967948914,
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
1.830764889717102,
],
[
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
0.8586049675941467,
-0.2279418259859085,
0.2013147622346878,
],
[
0.35005471110343933,
0.5360521078109741,
1.5194443464279175,
1.9040879011154175,
-1.5734431743621826,
-0.14007866382598877,
0.29670074582099915,
],
[
1.3111951351165771,
0.5035904049873352,
-1.1894450187683105,
-0.5502137541770935,
-1.591875672340393,
-1.1081947088241577,
0.07872020453214645,
],
[
-0.9185634255409241,
-0.7457143664360046,
-1.2080862522125244,
1.8140212297439575,
-1.5227429866790771,
-2.515244960784912,
-1.3549325466156006,
],
]
]
]
)
test_conv2d_dilation_output = np.array(
[[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]
)
def _test_conv2d(test_case, conv, data, output, weight, bias=None):
    """Load `weight` (and optional `bias`) into `conv`, apply it to `data`,
    and assert the result is close to the precomputed `output`."""
    conv.weight = flow.nn.Parameter(flow.Tensor(weight))
    if bias is not None:
        conv.bias = flow.nn.Parameter(flow.Tensor(bias))
    result = conv(flow.Tensor(data))
    test_case.assertTrue(np.allclose(result.numpy(), output, rtol=1e-4, atol=1e-8))
# TODO: skip this test, for conv doesn't have a cpu implementation
@unittest.skipIf(True, "CPU conv is not supported")
class TestConv2d(flow.unittest.TestCase):
    """Checks flow.nn.Conv2d against precomputed reference outputs."""

    def test_conv2d_default_init(test_case):
        """Default-initialized weight and bias must not be all zeros."""
        conv = flow.nn.Conv2d(1, 1, (3, 3), bias=True)
        weight_is_zero = np.allclose(
            conv.weight.numpy(), np.zeros((1, 1, 3, 3)), rtol=1e-9, atol=1e-10
        )
        bias_is_zero = np.allclose(
            conv.bias.numpy(), np.zeros((1,)), rtol=1e-9, atol=1e-10
        )
        test_case.assertFalse(weight_is_zero)
        test_case.assertFalse(bias_is_zero)

    def test_conv2d(test_case):
        """Plain 3x3 convolution, three output channels, no bias."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 3, (3, 3), bias=False),
            test_conv2d_data,
            test_conv2d_output,
            test_conv2d_weight,
        )

    def test_conv2d_with_bias(test_case):
        """3x3 convolution with an explicit bias term."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 3, (3, 3), bias=True),
            test_conv2d_with_bias_data,
            test_conv2d_with_bias_output,
            test_conv2d_with_bias_weight,
            test_conv2d_with_bias_bias,
        )

    def test_conv2d_group(test_case):
        """Grouped convolution (groups=2)."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(2, 2, (3, 3), groups=2, bias=False),
            test_conv2d_group_data,
            test_conv2d_group_output,
            test_conv2d_group_weight,
        )

    def test_conv2d_padding(test_case):
        """Asymmetric zero padding of (1, 2)."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 1, (3, 3), padding=(1, 2), bias=False),
            test_conv2d_padding_data,
            test_conv2d_padding_output,
            test_conv2d_padding_weight,
        )

    def test_conv2d_stride(test_case):
        """Strided convolution (stride=(2, 3)) with symmetric padding."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False),
            test_conv2d_stride_data,
            test_conv2d_stride_output,
            test_conv2d_stride_weight,
        )

    def test_conv2d_kernel(test_case):
        """Non-square 3x5 kernel."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 1, (3, 5), bias=False),
            test_conv2d_kernel_data,
            test_conv2d_kernel_output,
            test_conv2d_kernel_weight,
        )

    def test_conv2d_dilation(test_case):
        """Dilated convolution (dilation=(2, 3))."""
        _test_conv2d(
            test_case,
            flow.nn.Conv2d(1, 1, (3, 3), dilation=(2, 3), bias=False),
            test_conv2d_dilation_data,
            test_conv2d_dilation_output,
            test_conv2d_dilation_weight,
        )
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.Conv2d",
"oneflow.experimental.Tensor"
] | [((683, 1319), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]], [[[\n 0.29670074582099915, 1.3111951351165771, 0.5035904049873352], [-\n 1.1894450187683105, -0.5502137541770935, -1.591875672340393], [-\n 1.1081947088241577, 0.07872020453214645, -0.9185634255409241]]], [[[-\n 0.7457143664360046, -1.2080862522125244, 1.8140212297439575], [-\n 1.5227429866790771, -2.515244960784912, -1.3549325466156006], [-\n 0.9574840068817139, -0.7248556613922119, 1.1119636297225952]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]], [[[\n 0.29670074582099915, 1.3111951351165771, 0.5035904049873352], [-\n 1.1894450187683105, -0.5502137541770935, -1.591875672340393], [-\n 1.1081947088241577, 0.07872020453214645, -0.9185634255409241]]], [[[-\n 0.7457143664360046, -1.2080862522125244, 1.8140212297439575], [-\n 1.5227429866790771, -2.515244960784912, -1.3549325466156006], [-\n 0.9574840068817139, -0.7248556613922119, 1.1119636297225952]]]])\n', (691, 1319), True, 'import numpy as np\n'), ((1592, 2168), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, 
\n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (1600, 2168), True, 'import numpy as np\n'), ((2883, 3495), 'numpy.array', 'np.array', (['[[[[0.9699610471725464, -0.20758534967899323, 2.3857712745666504], [\n 0.3666309118270874, 4.690882682800293, -8.203354835510254], [\n 2.6072847843170166, -1.9033538103103638, 2.331153154373169]], [[\n 2.519343852996826, 2.3757898807525635, -1.6613528728485107], [\n 0.5777544379234314, -3.5739502906799316, 5.349126815795898], [\n 0.729295015335083, 1.5791023969650269, 3.7627718448638916]], [[-\n 0.27685487270355225, 6.446267127990723, -2.762883424758911], [-\n 8.25644588470459, 9.616064071655273, 8.005367279052734], [-\n 0.6944921016693115, 3.866114854812622, 4.788446426391602]]]]'], {}), '([[[[0.9699610471725464, -0.20758534967899323, 2.3857712745666504],\n [0.3666309118270874, 4.690882682800293, -8.203354835510254], [\n 2.6072847843170166, -1.9033538103103638, 2.331153154373169]], [[\n 2.519343852996826, 2.3757898807525635, -1.6613528728485107], [\n 0.5777544379234314, -3.5739502906799316, 5.349126815795898], [\n 0.729295015335083, 1.5791023969650269, 3.7627718448638916]], [[-\n 0.27685487270355225, 6.446267127990723, -2.762883424758911], [-\n 8.25644588470459, 9.616064071655273, 8.005367279052734], [-\n 0.6944921016693115, 3.866114854812622, 4.788446426391602]]]])\n', (2891, 3495), True, 'import numpy as np\n'), ((3745, 4375), 'numpy.array', 'np.array', (['[[[[1.8271433115005493, -1.0446699857711792, 1.0062190294265747], [\n 0.5174201130867004, -0.806931734085083, 1.3769007921218872], [\n 
0.205885112285614, 0.9943519234657288, -0.23580588400363922]]], [[[\n 0.29881811141967773, -1.9982075691223145, 0.3511354625225067], [-\n 0.7644741535186768, 1.2594351768493652, -0.9629734754562378], [\n 0.5080506205558777, 0.7561734318733215, 1.6839302778244019]]], [[[\n 1.2573646306991577, 0.13123232126235962, 1.6403018236160278], [-\n 1.2138012647628784, 2.399970531463623, -0.38509097695350647], [-\n 0.9878040552139282, 0.9585888385772705, -1.4976465702056885]]]]'], {}), '([[[[1.8271433115005493, -1.0446699857711792, 1.0062190294265747],\n [0.5174201130867004, -0.806931734085083, 1.3769007921218872], [\n 0.205885112285614, 0.9943519234657288, -0.23580588400363922]]], [[[\n 0.29881811141967773, -1.9982075691223145, 0.3511354625225067], [-\n 0.7644741535186768, 1.2594351768493652, -0.9629734754562378], [\n 0.5080506205558777, 0.7561734318733215, 1.6839302778244019]]], [[[\n 1.2573646306991577, 0.13123232126235962, 1.6403018236160278], [-\n 1.2138012647628784, 2.399970531463623, -0.38509097695350647], [-\n 0.9878040552139282, 0.9585888385772705, -1.4976465702056885]]]])\n', (3753, 4375), True, 'import numpy as np\n'), ((4658, 4732), 'numpy.array', 'np.array', (['[0.6605162620544434, -0.18903568387031555, -0.27302607893943787]'], {}), '([0.6605162620544434, -0.18903568387031555, -0.27302607893943787])\n', (4666, 4732), True, 'import numpy as np\n'), ((4768, 5352), 'numpy.array', 'np.array', (['[[[[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902, \n 0.9321041703224182, -3.1557741165161133], [2.1935296058654785, -\n 0.5385921001434326, -0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986], [-0.35601571202278137, -0.15963983535766602, \n 1.797447681427002, 0.19594945013523102, -1.7376397848129272], [\n 0.047347065061330795, 0.14580930769443512, 0.32604914903640747, \n 0.4578782916069031, -0.8942581415176392], [0.49383941292762756, -\n 0.9043426513671875, -1.2140793800354004, 2.1564064025878906, \n 1.0938222408294678]]]]'], {}), 
'([[[[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902,\n 0.9321041703224182, -3.1557741165161133], [2.1935296058654785, -\n 0.5385921001434326, -0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986], [-0.35601571202278137, -0.15963983535766602, \n 1.797447681427002, 0.19594945013523102, -1.7376397848129272], [\n 0.047347065061330795, 0.14580930769443512, 0.32604914903640747, \n 0.4578782916069031, -0.8942581415176392], [0.49383941292762756, -\n 0.9043426513671875, -1.2140793800354004, 2.1564064025878906, \n 1.0938222408294678]]]])\n', (4776, 5352), True, 'import numpy as np\n'), ((6078, 6698), 'numpy.array', 'np.array', (['[[[[-0.05607491731643677, -0.185230553150177, -3.8808679580688477], [\n 6.861937046051025, -2.3341472148895264, -0.5597308874130249], [\n 1.8299254179000854, -2.770848274230957, 2.1958212852478027]], [[\n 2.9348952770233154, 4.117504119873047, -6.278541088104248], [\n 0.2638452351093292, 3.998856782913208, 2.612290620803833], [-\n 1.9891828298568726, -1.6476304531097412, 3.39066219329834]], [[-\n 8.44466781616211, 0.5747121572494507, -8.501373291015625], [-\n 0.036642804741859436, -0.23458999395370483, -2.370849370956421], [\n 2.8372013568878174, -2.987276077270508, 1.8382092714309692]]]]'], {}), '([[[[-0.05607491731643677, -0.185230553150177, -3.8808679580688477],\n [6.861937046051025, -2.3341472148895264, -0.5597308874130249], [\n 1.8299254179000854, -2.770848274230957, 2.1958212852478027]], [[\n 2.9348952770233154, 4.117504119873047, -6.278541088104248], [\n 0.2638452351093292, 3.998856782913208, 2.612290620803833], [-\n 1.9891828298568726, -1.6476304531097412, 3.39066219329834]], [[-\n 8.44466781616211, 0.5747121572494507, -8.501373291015625], [-\n 0.036642804741859436, -0.23458999395370483, -2.370849370956421], [\n 2.8372013568878174, -2.987276077270508, 1.8382092714309692]]]])\n', (6086, 6698), True, 'import numpy as np\n'), ((6943, 7373), 'numpy.array', 'np.array', (['[[[[-0.7248556613922119, 1.1119636297225952, 
-0.47827261686325073], [-\n 1.1739492416381836, -0.7921845316886902, 0.9321041703224182], [-\n 3.1557741165161133, 2.1935296058654785, -0.5385921001434326]]], [[[-\n 0.8611332774162292, -1.881519079208374, -0.7205708026885986], [-\n 0.35601571202278137, -0.15963983535766602, 1.797447681427002], [\n 0.19594945013523102, -1.7376397848129272, 0.047347065061330795]]]]'], {}), '([[[[-0.7248556613922119, 1.1119636297225952, -0.47827261686325073],\n [-1.1739492416381836, -0.7921845316886902, 0.9321041703224182], [-\n 3.1557741165161133, 2.1935296058654785, -0.5385921001434326]]], [[[-\n 0.8611332774162292, -1.881519079208374, -0.7205708026885986], [-\n 0.35601571202278137, -0.15963983535766602, 1.797447681427002], [\n 0.19594945013523102, -1.7376397848129272, 0.047347065061330795]]]])\n', (6951, 7373), True, 'import numpy as np\n'), ((7575, 8719), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]], [[0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878, 0.35005471110343933, 0.5360521078109741], [\n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393], [-1.1081947088241577, 0.07872020453214645, -\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244], [\n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006, -0.9574840068817139]]]]'], {}), 
'([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]], [[0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878, 0.35005471110343933, 0.5360521078109741], [\n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393], [-1.1081947088241577, 0.07872020453214645, -\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244], [\n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006, -0.9574840068817139]]]])\n', (7583, 8719), True, 'import numpy as np\n'), ((10102, 10512), 'numpy.array', 'np.array', (['[[[[-8.836943626403809, 3.2316627502441406, 6.994439601898193], [-\n 0.8386597037315369, -9.857108116149902, 13.68197250366211], [-\n 13.020713806152344, 7.310227870941162, -3.3760271072387695]], [[-\n 4.803101539611816, 1.026240587234497, 0.5452112555503845], [-\n 6.839838027954102, 2.0195930004119873, 0.11328654736280441], [\n 0.393694669008255, 4.987061023712158, 3.297354221343994]]]]'], {}), '([[[[-8.836943626403809, 3.2316627502441406, 6.994439601898193], [-\n 0.8386597037315369, -9.857108116149902, 13.68197250366211], [-\n 13.020713806152344, 7.310227870941162, -3.3760271072387695]], [[-\n 4.803101539611816, 1.026240587234497, 0.5452112555503845], [-\n 6.839838027954102, 2.0195930004119873, 0.11328654736280441], [\n 0.393694669008255, 4.987061023712158, 3.297354221343994]]]])\n', 
(10110, 10512), True, 'import numpy as np\n'), ((10698, 10912), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]])\n', (10706, 10912), True, 'import numpy as np\n'), ((11036, 11612), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (11044, 11612), True, 'import numpy as np\n'), ((12334, 13120), 'numpy.array', 'np.array', (['[[[[1.5489805936813354, -1.0164761543273926, 5.277345657348633, \n 3.153532028198242, -7.301508903503418, -3.7565059661865234, \n 4.690962314605713], [2.425799608230591, 
-2.0592665672302246, \n 0.9699610471725464, -0.20758534967899323, 2.3857712745666504, \n 1.1719579696655273, 0.6523551940917969], [2.1625545024871826, -\n 1.3517316579818726, 0.3666309118270874, 4.690882682800293, -\n 8.203354835510254, 3.0248217582702637, 1.2624683380126953], [\n 0.6193475723266602, -2.0285415649414062, 2.6072847843170166, -\n 1.9033538103103638, 2.331153154373169, -3.998155355453491, -\n 1.0176407098770142], [2.8643176555633545, -0.7396122217178345, -\n 0.2253415733575821, -2.846742630004883, -4.961236476898193, -\n 0.1308247298002243, -0.7344070672988892]]]]'], {}), '([[[[1.5489805936813354, -1.0164761543273926, 5.277345657348633, \n 3.153532028198242, -7.301508903503418, -3.7565059661865234, \n 4.690962314605713], [2.425799608230591, -2.0592665672302246, \n 0.9699610471725464, -0.20758534967899323, 2.3857712745666504, \n 1.1719579696655273, 0.6523551940917969], [2.1625545024871826, -\n 1.3517316579818726, 0.3666309118270874, 4.690882682800293, -\n 8.203354835510254, 3.0248217582702637, 1.2624683380126953], [\n 0.6193475723266602, -2.0285415649414062, 2.6072847843170166, -\n 1.9033538103103638, 2.331153154373169, -3.998155355453491, -\n 1.0176407098770142], [2.8643176555633545, -0.7396122217178345, -\n 0.2253415733575821, -2.846742630004883, -4.961236476898193, -\n 0.1308247298002243, -0.7344070672988892]]]])\n', (12342, 13120), True, 'import numpy as np\n'), ((14027, 14241), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]])\n', (14035, 14241), True, 'import numpy as np\n'), ((14364, 14940), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 
0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (14372, 14940), True, 'import numpy as np\n'), ((15661, 15808), 'numpy.array', 'np.array', (['[[[[-1.0164761543273926, -7.301508903503418], [-1.3517316579818726, -\n 8.203354835510254], [-0.7396122217178345, -4.961236476898193]]]]'], {}), '([[[[-1.0164761543273926, -7.301508903503418], [-1.3517316579818726,\n -8.203354835510254], [-0.7396122217178345, -4.961236476898193]]]])\n', (15669, 15808), True, 'import numpy as np\n'), ((15938, 16292), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952, -\n 0.47827261686325073, -1.1739492416381836], [-0.7921845316886902, \n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785, -\n 0.5385921001434326], [-0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986, -0.35601571202278137, -0.15963983535766602]]]]'], {}), '([[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952, \n 
-0.47827261686325073, -1.1739492416381836], [-0.7921845316886902, \n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785, -\n 0.5385921001434326], [-0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986, -0.35601571202278137, -0.15963983535766602]]]])\n', (15946, 16292), True, 'import numpy as np\n'), ((16761, 17876), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 
0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]])\n', (16769, 17876), True, 'import numpy as np\n'), ((19108, 19446), 'numpy.array', 'np.array', (['[[[[-3.5647754669189453, -4.234736919403076, 1.4046944379806519], [-\n 0.6964312791824341, 16.42838478088379, -9.649789810180664], [\n 4.312150478363037, -6.283960819244385, -4.8443922996521], [-\n 2.772286891937256, -4.483709812164307, 12.315184593200684], [\n 7.39893913269043, 1.305102825164795, -2.049992561340332]]]]'], {}), '([[[[-3.5647754669189453, -4.234736919403076, 1.4046944379806519],\n [-0.6964312791824341, 16.42838478088379, -9.649789810180664], [\n 4.312150478363037, -6.283960819244385, -4.8443922996521], [-\n 2.772286891937256, -4.483709812164307, 12.315184593200684], [\n 7.39893913269043, 1.305102825164795, -2.049992561340332]]]])\n', (19116, 19446), True, 'import numpy as np\n'), ((19595, 19811), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952], [-\n 0.47827261686325073, -1.1739492416381836, -0.7921845316886902], [\n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785]]]]'], {}), '([[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],\n [-0.47827261686325073, -1.1739492416381836, -0.7921845316886902], [\n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785]]]])\n', (19603, 19811), True, 'import numpy as np\n'), ((19936, 21051), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, 
-1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 
1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]])\n', (19944, 21051), True, 'import numpy as np\n'), ((22284, 22369), 'numpy.array', 'np.array', (['[[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]'], {}), '([[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]\n )\n', (22292, 22369), True, 'import numpy as np\n'), ((22784, 22834), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""CPU conv is not supported"""'], {}), "(True, 'CPU conv is not supported')\n", (22799, 22834), False, 'import unittest\n'), ((22449, 22466), 'oneflow.experimental.Tensor', 'flow.Tensor', (['data'], {}), '(data)\n', (22460, 22466), True, 'import oneflow.experimental as flow\n'), ((25346, 25361), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25359, 25361), False, 'import unittest\n'), ((22503, 22522), 'oneflow.experimental.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (22514, 22522), True, 'import oneflow.experimental as flow\n'), ((22937, 22976), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'bias': '(True)'}), '(1, 1, (3, 3), bias=True)\n', (22951, 22976), True, 'import oneflow.experimental as flow\n'), ((23317, 23357), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(3)', '(3, 3)'], {'bias': '(False)'}), '(1, 3, (3, 3), bias=False)\n', (23331, 23357), True, 'import oneflow.experimental as flow\n'), ((23534, 23573), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(3)', '(3, 3)'], {'bias': '(True)'}), '(1, 3, (3, 3), bias=True)\n', (23548, 23573), True, 'import oneflow.experimental as flow\n'), ((23865, 23915), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(2)', '(2)', '(3, 3)'], {'groups': '(2)', 'bias': '(False)'}), '(2, 2, (3, 3), groups=2, bias=False)\n', (23879, 23915), True, 'import oneflow.experimental as flow\n'), ((24157, 24213), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], 
{'padding': '(1, 2)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 2), bias=False)\n', (24171, 24213), True, 'import oneflow.experimental as flow\n'), ((24460, 24531), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'padding': '(1, 1)', 'stride': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False)\n', (24474, 24531), True, 'import oneflow.experimental as flow\n'), ((24775, 24815), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 5)'], {'bias': '(False)'}), '(1, 1, (3, 5), bias=False)\n', (24789, 24815), True, 'import oneflow.experimental as flow\n'), ((25061, 25118), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'dilation': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), dilation=(2, 3), bias=False)\n', (25075, 25118), True, 'import oneflow.experimental as flow\n'), ((22587, 22604), 'oneflow.experimental.Tensor', 'flow.Tensor', (['bias'], {}), '(bias)\n', (22598, 22604), True, 'import oneflow.experimental as flow\n'), ((23073, 23095), 'numpy.zeros', 'np.zeros', (['(1, 1, 3, 3)'], {}), '((1, 1, 3, 3))\n', (23081, 23095), True, 'import numpy as np\n'), ((23220, 23234), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (23228, 23234), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from oneflow.compatible.single_client.core.common import data_type_pb2 as data_type_util
from oneflow.compatible.single_client.core.operator import op_conf_pb2 as op_conf_util
from oneflow.compatible.single_client.core.register import (
logical_blob_id_pb2 as logical_blob_id_util,
)
from oneflow.compatible.single_client.python.framework import (
compile_context as compile_context,
)
from oneflow.compatible.single_client.python.framework import (
distribute as distribute_util,
)
from oneflow.compatible.single_client.python.framework import id_util as id_util
from oneflow.compatible.single_client.python.framework import (
remote_blob as remote_blob_util,
)
from oneflow.compatible.single_client.python.framework import hob as hob
from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if
from oneflow.compatible.single_client.python.oneflow_export import (
oneflow_export,
stable_api,
)
from oneflow.compatible import single_client as flow
import oneflow._oneflow_internal
from typing import Union, Optional, Sequence
@oneflow_export("repeat")
@stable_api
def api_repeat(
    input: oneflow._oneflow_internal.BlobDesc,
    repeat_num: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Public ``repeat`` entry point.

    ``enable_if.unique`` selects the single implementation whose condition
    holds (lazy global mode) and the call is forwarded to it unchanged.
    """
    return enable_if.unique([repeat])(input, repeat_num, name=name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def repeat(input, repeat_num, name=None):
    """Lazy-mode implementation: emit a ``repeat`` user op, return its blob."""
    assert not flow.eager_execution_enabled()
    op_name = name if name is not None else id_util.UniqueStr("Repeat_")
    builder = flow.user_op_builder(op_name).Op("repeat")
    builder = builder.Input("in", [input]).Output("out")
    builder = builder.Attr("repeat_num", repeat_num)
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("acc")
def api_acc(
    one: oneflow._oneflow_internal.BlobDesc,
    max_acc_num: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Public ``acc`` entry point; dispatches to the enabled implementation."""
    return enable_if.unique([acc])(one, max_acc_num, name=name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def acc(one, max_acc_num, name=None):
    """Build an ``acc`` user op around ``one`` (lazy global mode only).

    ``max_acc_num`` is attached as the op's attribute of the same name; the
    op's single output blob is returned.
    """
    # This implementation is only selected in lazy mode (see decorator).
    assert not flow.eager_execution_enabled()
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Acc_"))
        .Op("acc")
        .Input("in", [one])
        .Output("out")
        .Attr("max_acc_num", max_acc_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("unpack")
def api_unpack(
    input: oneflow._oneflow_internal.BlobDesc,
    unpack_num: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Public ``unpack`` entry point; dispatches to the enabled implementation."""
    return enable_if.unique([unpack])(input, unpack_num, name=name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def unpack(input, unpack_num, name=None):
    """Build an ``unpack`` user op around ``input`` (lazy global mode only).

    ``unpack_num`` is attached as the op's attribute of the same name; the
    op's single output blob is returned.
    """
    # This implementation is only selected in lazy mode (see decorator).
    assert not flow.eager_execution_enabled()
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Unpack_"))
        .Op("unpack")
        .Input("in", [input])
        .Output("out")
        .Attr("unpack_num", unpack_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("pack")
def api_pack(
    input: oneflow._oneflow_internal.BlobDesc, pack_num: int, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    """Public ``pack`` entry point; dispatches to the enabled implementation."""
    return enable_if.unique([pack])(input, pack_num, name=name)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def pack(input, pack_num, name=None):
    """Build a ``pack`` user op around ``input`` (lazy global mode only).

    ``pack_num`` is attached as the op's attribute of the same name; the
    op's single output blob is returned.
    """
    # This implementation is only selected in lazy mode (see decorator).
    assert not flow.eager_execution_enabled()
    return (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Pack_"))
        .Op("pack")
        .Input("in", [input])
        .Output("out")
        .Attr("pack_num", pack_num)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("parallel_cast")
def api_parallel_cast(
    input: oneflow._oneflow_internal.BlobDesc,
    name: Optional[str] = None,
    distribute: Optional[oneflow._oneflow_internal.distribute.Distribute] = None,
    gradient_distribute: Optional[
        oneflow._oneflow_internal.distribute.Distribute
    ] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Cast ``input`` to the given SBP distribute (lazy global mode only).

    ``distribute`` / ``gradient_distribute`` accept split or broadcast
    distribute objects; ``None`` leaves the corresponding attribute empty.
    """
    func = enable_if.unique([parallel_cast])
    return func(
        input, name=name, distribute=distribute, gradient_distribute=gradient_distribute
    )
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def parallel_cast(input, name=None, distribute=None, gradient_distribute=None):
    """Build the ``parallel_cast`` user op (lazy global mode only)."""

    def _sbp_str(dist):
        # Encode a distribute object in the textual SBP notation.
        if dist is None:
            return ""
        if type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
            return "S({})".format(dist.axis)
        if type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
            return "B"
        raise ValueError("unsupported distribute")

    op_name = name if name is not None else id_util.UniqueStr("ParallelCast_")
    builder = flow.user_op_builder(op_name).Op("parallel_cast")
    builder = builder.Input("in", [input]).Output("out")
    builder = builder.Attr("sbp_parallel", _sbp_str(distribute))
    builder = builder.Attr("grad_sbp_parallel", _sbp_str(gradient_distribute))
    return builder.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("hierarchical_parallel_cast")
def api_hierarchical_parallel_cast(
    input: oneflow._oneflow_internal.BlobDesc,
    parallel_distribution: Sequence[str],
    grad_mode: Optional[str] = None,
    grad_parallel_distribution: Optional[Sequence[str]] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Cast ``input`` to a hierarchical (nd) SBP distribution.

    Dispatches to the lazy-mode implementation selected by ``enable_if``.
    """
    func = enable_if.unique([hierarchical_parallel_cast])
    return func(
        input,
        parallel_distribution=parallel_distribution,
        grad_mode=grad_mode,
        grad_parallel_distribution=grad_parallel_distribution,
        name=name,
    )
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def hierarchical_parallel_cast(
    input, parallel_distribution, grad_mode, grad_parallel_distribution, name,
):
    """Build the ``hierarchical_parallel_cast`` user op (lazy global mode).

    ``parallel_distribution`` entries may be strings or distribute objects;
    both are normalised to the textual SBP form before being attached.
    """
    if name is None:
        name = id_util.UniqueStr("HierarchicalParallelCast_")
    def distribute_to_str(dist):
        # Normalise a distribute spec (str / Split / Broadcast / None) to text.
        if dist is None:
            return ""
        elif type(dist) is str:
            return dist
        elif type(dist) is oneflow._oneflow_internal.distribute.SplitDistribute:
            return "S({})".format(dist.axis)
        elif type(dist) is oneflow._oneflow_internal.distribute.BroadcastDistribute:
            return "B"
        else:
            raise ValueError("unsupported distribute")
    op = (
        flow.user_op_builder(name)
        .Op("hierarchical_parallel_cast")
        .Input("in", [input])
        .Output("out")
        .Attr(
            "parallel_distribution", list(map(distribute_to_str, parallel_distribution))
        )
        # "restore" is the fallback gradient mode when none is given.
        .Attr("grad_mode", grad_mode or "restore")
        .Attr(
            "grad_parallel_distribution",
            list(map(distribute_to_str, grad_parallel_distribution))
            if grad_parallel_distribution
            else [],
        )
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
| [
"oneflow.compatible.single_client.python.lib.core.enable_if.condition",
"oneflow.compatible.single_client.python.framework.id_util.UniqueStr",
"oneflow.compatible.single_client.python.oneflow_export.oneflow_export",
"oneflow.compatible.single_client.user_op_builder",
"oneflow.compatible.single_client.eager_... | [((1707, 1731), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""repeat"""'], {}), "('repeat')\n", (1721, 1731), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((1988, 2058), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (2007, 2058), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((2445, 2466), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""acc"""'], {}), "('acc')\n", (2459, 2466), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((2703, 2773), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (2722, 2773), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((3150, 3174), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""unpack"""'], {}), "('unpack')\n", (3164, 3174), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((3419, 3489), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (3438, 3489), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((3876, 3898), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""pack"""'], {}), 
"('pack')\n", (3890, 3898), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((4126, 4196), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (4145, 4196), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((4571, 4602), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""parallel_cast"""'], {}), "('parallel_cast')\n", (4585, 4602), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((5093, 5163), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (5112, 5163), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((6161, 6205), 'oneflow.compatible.single_client.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""hierarchical_parallel_cast"""'], {}), "('hierarchical_parallel_cast')\n", (6175, 6205), False, 'from oneflow.compatible.single_client.python.oneflow_export import oneflow_export, stable_api\n'), ((6758, 6828), 'oneflow.compatible.single_client.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (6777, 6828), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((1912, 1938), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[repeat]'], {}), '([repeat])\n', (1928, 1938), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), 
((2631, 2654), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[acc]'], {}), '([acc])\n', (2647, 2654), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((3343, 3369), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[unpack]'], {}), '([unpack])\n', (3359, 3369), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((4054, 4078), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[pack]'], {}), '([pack])\n', (4070, 4078), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((4944, 4977), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[parallel_cast]'], {}), '([parallel_cast])\n', (4960, 4977), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((6506, 6552), 'oneflow.compatible.single_client.python.lib.core.enable_if.unique', 'enable_if.unique', (['[hierarchical_parallel_cast]'], {}), '([hierarchical_parallel_cast])\n', (6522, 6552), True, 'from oneflow.compatible.single_client.python.lib.core import enable_if as enable_if\n'), ((2116, 2146), 'oneflow.compatible.single_client.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (2144, 2146), True, 'from oneflow.compatible import single_client as flow\n'), ((2827, 2857), 'oneflow.compatible.single_client.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (2855, 2857), True, 'from oneflow.compatible import single_client as flow\n'), ((3547, 3577), 'oneflow.compatible.single_client.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (3575, 3577), True, 'from oneflow.compatible import single_client as flow\n'), ((4250, 4280), 'oneflow.compatible.single_client.eager_execution_enabled', 
'flow.eager_execution_enabled', ([], {}), '()\n', (4278, 4280), True, 'from oneflow.compatible import single_client as flow\n'), ((5280, 5314), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ParallelCast_"""'], {}), "('ParallelCast_')\n", (5297, 5314), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((6979, 7025), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""HierarchicalParallelCast_"""'], {}), "('HierarchicalParallelCast_')\n", (6996, 7025), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((5880, 5906), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (5900, 5906), True, 'from oneflow.compatible import single_client as flow\n'), ((7486, 7512), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (7506, 7512), True, 'from oneflow.compatible import single_client as flow\n'), ((2219, 2247), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Repeat_"""'], {}), "('Repeat_')\n", (2236, 2247), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((2930, 2955), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Acc_"""'], {}), "('Acc_')\n", (2947, 2955), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((3650, 3678), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Unpack_"""'], {}), "('Unpack_')\n", (3667, 3678), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n'), ((4353, 4379), 'oneflow.compatible.single_client.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Pack_"""'], {}), "('Pack_')\n", (4370, 
4379), True, 'from oneflow.compatible.single_client.python.framework import id_util as id_util\n')] |
# import torch
# import torch.nn as nn
# import torch.nn.functional as F
import oneflow as of
import oneflow.nn as nn
import oneflow.nn.functional as F
class TDNNLayer(nn.Module):
    """TDNN block: dilated 1-D convolution -> LeakyReLU -> BatchNorm.

    The paper "How to Improve Your Speaker Embeddings Extractor in Generic
    Toolkits" reports that BN after the activation works better than BN
    before it, hence the activation-then-BN ordering in forward().
    """

    def __init__(self, input_channel, output_channel, context, padding=0, stride=1):
        super(TDNNLayer, self).__init__()
        n_taps = len(context)
        # The spacing between context offsets defines the dilation rate.
        dilation = (context[-1] - context[0]) // (n_taps - 1) if n_taps > 1 else 1
        self.context_layer = nn.Conv1d(
            input_channel,
            output_channel,
            kernel_size=n_taps,
            stride=stride,
            padding=padding,
            dilation=dilation,
        )
        self.bn = nn.BatchNorm1d(output_channel)
        self.activation = nn.LeakyReLU(negative_slope=0.2)

    def forward(self, x):
        out = self.context_layer(x)
        out = self.activation(out)
        return self.bn(out)
class Conv3x3(nn.Conv2d):
    """Bias-free 3x3 convolution; padding equals dilation to keep spatial size."""

    def __init__(self, in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1):
        super(Conv3x3, self).__init__(
            in_planes,
            out_planes,
            kernel_size=3,
            stride=stride,
            padding=dilation,
            groups=groups,
            dilation=dilation,
            bias=False,
        )
class Conv1x1(nn.Conv2d):
    """Bias-free 1x1 (pointwise) convolution."""

    def __init__(self, in_planes: int, out_planes: int, stride: int = 1):
        super(Conv1x1, self).__init__(
            in_planes, out_planes, kernel_size=1, stride=stride, bias=False
        )
class FTDNNLayer(nn.Module):
    """Factorized-TDNN layer stub -- not implemented yet."""
    def __init__(self):
        super(FTDNNLayer, self).__init__()
    def forward(self):
        # NOTE(review): placeholder; takes no input and returns None.
        # Implement the factorized TDNN or remove this stub.
        pass
def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:
    """Build a bias-free 3x3 ``nn.Conv2d`` whose padding equals its dilation."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=dilation,
        groups=groups,
        dilation=dilation,
        bias=False,
    )
def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """Build a bias-free 1x1 (pointwise) ``nn.Conv2d``."""
    return nn.Conv2d(
        in_planes, out_planes, kernel_size=1, stride=stride, bias=False
    )
class ResidualTDNNGLU(nn.Module):
    """Residual TDNN block gated by a GLU-style tanh/sigmoid pair.

    The dilated Conv1d produces both gate halves at once: its output
    channels are split in two, combined as ``tanh(a) * sigmoid(b)``,
    projected back to the input width by a 1x1 convolution, and added to
    the input residually.
    """

    def __init__(self, input_channel, output_channel, context, stride=1):
        super(ResidualTDNNGLU, self).__init__()
        taps = len(context)
        dilation = (context[-1] - context[0]) // (taps - 1) if taps > 1 else 1
        # "Same" padding so the temporal length survives the dilated conv.
        same_pad = (taps - 1) * dilation // 2
        self.context_layer = nn.Conv1d(
            input_channel,
            output_channel,
            kernel_size=taps,
            stride=stride,
            padding=same_pad,
            dilation=dilation,
        )
        # Projects the gated half-width features back to the residual width.
        self.res_conv = nn.Conv1d(
            output_channel // 2,
            input_channel,
            kernel_size=1,
            padding=0,
            dilation=1,
            bias=False,
        )
        self.bn = nn.BatchNorm1d(input_channel)
        self.activation = nn.ReLU()

    def forward(self, x):
        """x: (B, C, T) -> same shape."""
        residual = x
        gates = self.context_layer(x)
        g_tanh, g_sigmoid = gates.split(gates.size(1) // 2, 1)
        gated = of.tanh(g_tanh) * of.sigmoid(g_sigmoid)
        out = self.bn(self.activation(self.res_conv(gated)))
        out += residual
        return out
| [
"oneflow.tanh",
"oneflow.nn.LeakyReLU",
"oneflow.sigmoid",
"oneflow.nn.Conv1d",
"oneflow.nn.Conv2d",
"oneflow.nn.BatchNorm1d",
"oneflow.nn.ReLU"
] | [((2159, 2290), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': 'dilation', 'groups': 'groups', 'bias': '(False)', 'dilation': 'dilation'}), '(in_planes, out_planes, kernel_size=3, stride=stride, padding=\n dilation, groups=groups, bias=False, dilation=dilation)\n', (2168, 2290), True, 'import oneflow.nn as nn\n'), ((2422, 2496), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_planes', 'out_planes'], {'kernel_size': '(1)', 'stride': 'stride', 'bias': '(False)'}), '(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n', (2431, 2496), True, 'import oneflow.nn as nn\n'), ((714, 835), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['input_channel', 'output_channel'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation'}), '(input_channel, output_channel, kernel_size=kernel_size, stride=\n stride, padding=padding, dilation=dilation)\n', (723, 835), True, 'import oneflow.nn as nn\n'), ((971, 1001), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['output_channel'], {}), '(output_channel)\n', (985, 1001), True, 'import oneflow.nn as nn\n'), ((1067, 1099), 'oneflow.nn.LeakyReLU', 'nn.LeakyReLU', ([], {'negative_slope': '(0.2)'}), '(negative_slope=0.2)\n', (1079, 1099), True, 'import oneflow.nn as nn\n'), ((3010, 3131), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['input_channel', 'output_channel'], {'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding', 'dilation': 'dilation'}), '(input_channel, output_channel, kernel_size=kernel_size, stride=\n stride, padding=padding, dilation=dilation)\n', (3019, 3131), True, 'import oneflow.nn as nn\n'), ((3273, 3372), 'oneflow.nn.Conv1d', 'nn.Conv1d', (['(output_channel // 2)', 'input_channel'], {'kernel_size': '(1)', 'padding': '(0)', 'dilation': '(1)', 'bias': '(False)'}), '(output_channel // 2, input_channel, kernel_size=1, padding=0,\n dilation=1, bias=False)\n', (3282, 3372), True, 'import oneflow.nn as nn\n'), 
((3463, 3492), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['input_channel'], {}), '(input_channel)\n', (3477, 3492), True, 'import oneflow.nn as nn\n'), ((3519, 3528), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3526, 3528), True, 'import oneflow.nn as nn\n'), ((3792, 3807), 'oneflow.tanh', 'of.tanh', (['g_tanh'], {}), '(g_tanh)\n', (3799, 3807), True, 'import oneflow as of\n'), ((3810, 3831), 'oneflow.sigmoid', 'of.sigmoid', (['g_sigmoid'], {}), '(g_sigmoid)\n', (3820, 3831), True, 'import oneflow as of\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import inspect
import unittest
from collections import OrderedDict
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _run_functional_doctest(
    test_case,
    globs=None,
    verbose=None,
    optionflags=0,
    raise_on_error=True,
    module=flow._C,
):
    """Run the doctests found in the docstrings of every member of *module*.

    Fixes relative to the previous version:
    - ``globs`` was accepted but silently ignored; it is now forwarded to
      ``DocTestParser.get_doctest`` so callers can inject names into the
      doctest examples.
    - ``module`` was accepted but ``flow._C`` was always inspected; the
      members of *module* itself are now inspected (the default keeps the
      old behavior).
    """
    import doctest

    parser = doctest.DocTestParser()
    # DebugRunner raises on the first failing example; DocTestRunner reports.
    if raise_on_error:
        runner = doctest.DebugRunner(verbose=verbose, optionflags=optionflags)
    else:
        runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)
    for name, fun in inspect.getmembers(module):
        if fun.__doc__ is not None:
            print("test on docstr of: ", ".".join([module.__name__, name]))
            test = parser.get_doctest(
                fun.__doc__, globs if globs is not None else {}, __name__, __file__, 0
            )
            runner.run(test)
@flow.unittest.skip_unless_1n1d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestFunctionalDocstrModule(flow.unittest.TestCase):
    """Runs every docstring example under ``oneflow._C`` as a doctest."""
    def test_functional_docstr(test_case):
        arg_dict = OrderedDict()
        arg_dict["module"] = [flow._C]
        for arg in GenArgList(arg_dict):
            # raise_on_error=True aborts on the first failing example.
            _run_functional_doctest(
                test_case, raise_on_error=True, verbose=True, module=arg[0]
            )
# Allow running this test file directly with the Python interpreter.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((1430, 1462), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1460, 1462), True, 'import oneflow as flow\n'), ((929, 952), 'doctest.DocTestParser', 'doctest.DocTestParser', ([], {}), '()\n', (950, 952), False, 'import doctest\n'), ((1154, 1181), 'inspect.getmembers', 'inspect.getmembers', (['flow._C'], {}), '(flow._C)\n', (1172, 1181), False, 'import inspect\n'), ((1480, 1514), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1489, 1514), False, 'import os\n'), ((1913, 1928), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1926, 1928), False, 'import unittest\n'), ((993, 1054), 'doctest.DebugRunner', 'doctest.DebugRunner', ([], {'verbose': 'verbose', 'optionflags': 'optionflags'}), '(verbose=verbose, optionflags=optionflags)\n', (1012, 1054), False, 'import doctest\n'), ((1082, 1145), 'doctest.DocTestRunner', 'doctest.DocTestRunner', ([], {'verbose': 'verbose', 'optionflags': 'optionflags'}), '(verbose=verbose, optionflags=optionflags)\n', (1103, 1145), False, 'import doctest\n'), ((1659, 1672), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1670, 1672), False, 'from collections import OrderedDict\n'), ((1731, 1751), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (1741, 1751), False, 'from test_util import GenArgList\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestMedian(flow.unittest.TestCase):
    """Checks the error messages ``flow.median`` raises for invalid dims."""

    def test_median_exception_dim_out_of_range(test_case):
        t = flow.tensor((2, 2))
        with test_case.assertRaises(IndexError) as ctx:
            flow.median(t, 1)
        expected = (
            "Dimension out of range (expected to be in range of [-1, 0], but got 1)"
        )
        test_case.assertIn(expected, str(ctx.exception))

    def test_median_exception_reduce_0dim(test_case):
        t = flow.randn(2, 0, 2)
        with test_case.assertRaises(IndexError) as ctx:
            flow.median(t, 1)
        expected = "IndexError: Expected reduction dim 1 to have non-zero size."
        test_case.assertIn(expected, str(ctx.exception))
# Allow running this test file directly with the Python interpreter.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.randn",
"oneflow.tensor",
"oneflow.median"
] | [((658, 690), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (688, 690), True, 'import oneflow as flow\n'), ((1431, 1446), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1444, 1446), False, 'import unittest\n'), ((804, 823), 'oneflow.tensor', 'flow.tensor', (['(2, 2)'], {}), '((2, 2))\n', (815, 823), True, 'import oneflow as flow\n'), ((1140, 1159), 'oneflow.randn', 'flow.randn', (['(2)', '(0)', '(2)'], {}), '(2, 0, 2)\n', (1150, 1159), True, 'import oneflow as flow\n'), ((896, 913), 'oneflow.median', 'flow.median', (['x', '(1)'], {}), '(x, 1)\n', (907, 913), True, 'import oneflow as flow\n'), ((1232, 1249), 'oneflow.median', 'flow.median', (['x', '(1)'], {}), '(x, 1)\n', (1243, 1249), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import GenArgList
# Shared lazy-graph configuration: every job below computes in float32.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
def compare_with_tensorflow(test_case, device_type, value, shape, rtol=1e-5, atol=1e-5):
    """Compare ``flow.constant`` against ``tf.constant`` for one case.

    Builds a lazy oneflow job that creates a constant blob on the given
    device, runs it twice, and asserts the two runs agree with each other
    and with the TensorFlow constant within ``rtol``/``atol``.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    @flow.global_function(function_config=func_config)
    def ConstantJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.constant(value, dtype=flow.float, shape=shape)
            # NOTE(review): y and z are built but never returned -- presumably
            # here to exercise pruning of unused ops in the graph; confirm.
            y = flow.math.relu(x)
            z = flow.math.relu(y)
            return x
    numpy0 = ConstantJob().get().numpy()
    of_out = ConstantJob().get()
    test_case.assertTrue(np.allclose(of_out.numpy(), numpy0, rtol=rtol, atol=atol))
    tf_out = tf.constant(value, dtype=float, shape=shape)
    test_case.assertTrue(
        np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
    )
def test_constant(test_case):
    """Sweep device/value/shape combinations through the comparison helper."""
    arg_dict = OrderedDict(
        device_type=["gpu", "cpu"],
        value=[6, 6.66],
        shape=[(2, 3), (3, 3, 3)],
    )
    for arg in GenArgList(arg_dict):
        compare_with_tensorflow(test_case, *arg)
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.constant",
"oneflow.scope.placement",
"oneflow.math.relu",
"oneflow.FunctionConfig"
] | [((751, 772), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (770, 772), True, 'import oneflow as flow\n'), ((951, 979), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (977, 979), True, 'import oneflow as flow\n'), ((986, 1035), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1006, 1035), True, 'import oneflow as flow\n'), ((1443, 1487), 'tensorflow.constant', 'tf.constant', (['value'], {'dtype': 'float', 'shape': 'shape'}), '(value, dtype=float, shape=shape)\n', (1454, 1487), True, 'import tensorflow as tf\n'), ((1641, 1654), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1652, 1654), False, 'from collections import OrderedDict\n'), ((1793, 1813), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (1803, 1813), False, 'from test_util import GenArgList\n'), ((1072, 1112), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1092, 1112), True, 'import oneflow as flow\n'), ((1130, 1181), 'oneflow.constant', 'flow.constant', (['value'], {'dtype': 'flow.float', 'shape': 'shape'}), '(value, dtype=flow.float, shape=shape)\n', (1143, 1181), True, 'import oneflow as flow\n'), ((1198, 1215), 'oneflow.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (1212, 1215), True, 'import oneflow as flow\n'), ((1232, 1249), 'oneflow.math.relu', 'flow.math.relu', (['y'], {}), '(y)\n', (1246, 1249), True, 'import oneflow as flow\n')] |
import cflow
import numpy as np
import oneflow.data as data
# Flatten the per-file MNIST arrays into single train/test tensors.
(x_train, y_train), (x_test, y_test) = data.load_mnist()
x_train, x_test = np.concatenate(x_train, axis=0), np.concatenate(x_test, axis=0)
y_train = np.concatenate(y_train, axis=0)[..., None]
y_test = np.concatenate(y_test, axis=0)[..., None]
# Renamed from ``data``: the old name shadowed the ``oneflow.data`` module
# imported above, making ``data.load_mnist`` unusable past this point.
tensor_data = cflow.cv.TensorData(x_train, y_train, x_test, y_test)
m = cflow.cv.CarefreePipeline(
    "clf",
    dict(
        in_channels=1,
        num_classes=10,
        img_size=28,
        latent_dim=128,
        encoder1d="lenet",
    ),
    fixed_epoch=5,
    loss_name="cross_entropy",
    metric_names=["acc", "auc"],
    tqdm_settings={"use_tqdm": True, "use_step_tqdm": True},
)
m.fit(tensor_data, cuda=0)
| [
"oneflow.data.load_mnist"
] | [((102, 119), 'oneflow.data.load_mnist', 'data.load_mnist', ([], {}), '()\n', (117, 119), True, 'import oneflow.data as data\n'), ((314, 367), 'cflow.cv.TensorData', 'cflow.cv.TensorData', (['x_train', 'y_train', 'x_test', 'y_test'], {}), '(x_train, y_train, x_test, y_test)\n', (333, 367), False, 'import cflow\n'), ((138, 169), 'numpy.concatenate', 'np.concatenate', (['x_train'], {'axis': '(0)'}), '(x_train, axis=0)\n', (152, 169), True, 'import numpy as np\n'), ((171, 201), 'numpy.concatenate', 'np.concatenate', (['x_test'], {'axis': '(0)'}), '(x_test, axis=0)\n', (185, 201), True, 'import numpy as np\n'), ((212, 243), 'numpy.concatenate', 'np.concatenate', (['y_train'], {'axis': '(0)'}), '(y_train, axis=0)\n', (226, 243), True, 'import numpy as np\n'), ((264, 294), 'numpy.concatenate', 'np.concatenate', (['y_test'], {'axis': '(0)'}), '(y_test, axis=0)\n', (278, 294), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("nn.init.uniform_")
def uniform_(tensor, a=0.0, b=1.0):
    """In-place init: delegate to ``tensor.uniform_(a, b)``."""
    # TODO(jianhao): add with torch.no_grad() when autograd is ready
    tensor.uniform_(a, b)
@oneflow_export("nn.init.normal_")
def normal_(tensor, mean=0.0, std=1.0):
    """In-place init: delegate to ``tensor.normal_(mean, std)``."""
    tensor.normal_(mean, std)
@oneflow_export("nn.init.constant_")
def constant_(tensor, val):
    """In-place init: fill every element of *tensor* with ``val``."""
    tensor.fill_(val)
@oneflow_export("nn.init.ones_")
def ones_(tensor):
    """In-place init: fill *tensor* with the scalar 1."""
    fill_value = 1
    tensor.fill_(fill_value)
@oneflow_export("nn.init.zeros_")
def zeros_(tensor):
    """In-place init: fill *tensor* with the scalar 0."""
    fill_value = 0
    tensor.fill_(fill_value)
| [
"oneflow.python.oneflow_export.oneflow_export"
] | [((650, 684), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.init.uniform_"""'], {}), "('nn.init.uniform_')\n", (664, 684), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((819, 852), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.init.normal_"""'], {}), "('nn.init.normal_')\n", (833, 852), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((926, 961), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.init.constant_"""'], {}), "('nn.init.constant_')\n", (940, 961), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1015, 1046), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.init.ones_"""'], {}), "('nn.init.ones_')\n", (1029, 1046), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1089, 1121), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.init.zeros_"""'], {}), "('nn.init.zeros_')\n", (1103, 1121), False, 'from oneflow.python.oneflow_export import oneflow_export\n')] |
import argparse
import time
import numpy as np
from PIL import Image
import oneflow as flow
from skimage.metrics import peak_signal_noise_ratio
from skimage.metrics import structural_similarity
from models.of_model import Generator
# ---------------------------------------------------------------------------
# Single-image super-resolution inference script.
# Loads a trained Generator checkpoint, upscales one low-resolution image,
# saves the result, and optionally reports PSNR/SSIM against an HR reference.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description="Test Single Image")
parser.add_argument(
    "--upscale_factor", default=4, type=int, help="super resolution upscale factor"
)
parser.add_argument(
    "--test_mode",
    default="GPU",
    type=str,
    choices=["GPU", "CPU"],
    help="using GPU or CPU",
)
parser.add_argument(
    "--image_path",
    default="data/Set14/LR_bicubic/X4/monarchx4.png",
    type=str,
    help="test low resolution image path",
)
parser.add_argument(
    "--hr_path",
    default="data/Set14/HR/monarch.png",
    type=str,
    help="test low resolution image path",
)
parser.add_argument(
    "--save_image",
    default="data/Set14/SR/X4/monarchx4-oneflow.png",
    type=str,
    help="super resolution image path",
)
parser.add_argument(
    "--model_path",
    default="netG_epoch_4_99",
    type=str,
    help="generator model epoch name",
)
opt = parser.parse_args()
UPSCALE_FACTOR = opt.upscale_factor
TEST_MODE = opt.test_mode == "GPU"  # True -> run inference on CUDA
IMAGE_NAME = opt.image_path
SAVE_IMAGE = opt.save_image
MODEL_NAME = opt.model_path
# Build the generator in eval mode and load the trained weights.
model = Generator(UPSCALE_FACTOR).eval()
if TEST_MODE:
    model.to("cuda")
model.load_state_dict(flow.load(MODEL_NAME))
image0 = Image.open(IMAGE_NAME)
# Preprocess: HWC uint8 PIL image -> NCHW float tensor scaled to [0, 1].
image = image0.copy().convert("RGB")
tensor = np.ascontiguousarray(image).astype("float32")
tensor = tensor / 255
tensor = flow.Tensor(tensor)
tensor1 = tensor.unsqueeze(0).permute(0, 3, 1, 2)
if TEST_MODE:
    image = tensor1.to("cuda")
else:
    # BUG FIX: the CPU path previously passed the raw PIL image to the model;
    # it must use the preprocessed NCHW tensor just like the GPU path.
    image = tensor1
start = time.process_time()
with flow.no_grad():
    out = model(image)
elapsed = time.process_time() - start
print("cost" + str(elapsed) + "s")
# Postprocess: [0, 1] NCHW tensor -> HWC uint8 numpy image.
out_a = out[0].data.to("cpu") * 255
out_b = out_a.squeeze(0).permute(1, 2, 0)
_img = out_b.numpy().astype(np.uint8)
if opt.hr_path != "":
    # Compare against the high-resolution reference when one is supplied.
    HR_NAME = opt.hr_path
    image_hr = np.array(Image.open(HR_NAME))
    psnr = peak_signal_noise_ratio(image_hr, _img)
    ssim = structural_similarity(image_hr, _img, multichannel=True)
    print("psnr:{},ssim:{}".format(psnr, ssim))
out_img = Image.fromarray(_img)
out_img.save(SAVE_IMAGE, quality=95)
| [
"oneflow.no_grad",
"oneflow.load",
"oneflow.Tensor"
] | [((245, 301), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Single Image"""'}), "(description='Test Single Image')\n", (268, 301), False, 'import argparse\n'), ((1445, 1467), 'PIL.Image.open', 'Image.open', (['IMAGE_NAME'], {}), '(IMAGE_NAME)\n', (1455, 1467), False, 'from PIL import Image\n'), ((1601, 1620), 'oneflow.Tensor', 'flow.Tensor', (['tensor'], {}), '(tensor)\n', (1612, 1620), True, 'import oneflow as flow\n'), ((1726, 1745), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1743, 1745), False, 'import time\n'), ((2262, 2283), 'PIL.Image.fromarray', 'Image.fromarray', (['_img'], {}), '(_img)\n', (2277, 2283), False, 'from PIL import Image\n'), ((1411, 1432), 'oneflow.load', 'flow.load', (['MODEL_NAME'], {}), '(MODEL_NAME)\n', (1420, 1432), True, 'import oneflow as flow\n'), ((1751, 1765), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (1763, 1765), True, 'import oneflow as flow\n'), ((1800, 1819), 'time.process_time', 'time.process_time', ([], {}), '()\n', (1817, 1819), False, 'import time\n'), ((2095, 2134), 'skimage.metrics.peak_signal_noise_ratio', 'peak_signal_noise_ratio', (['image_hr', '_img'], {}), '(image_hr, _img)\n', (2118, 2134), False, 'from skimage.metrics import peak_signal_noise_ratio\n'), ((2146, 2202), 'skimage.metrics.structural_similarity', 'structural_similarity', (['image_hr', '_img'], {'multichannel': '(True)'}), '(image_hr, _img, multichannel=True)\n', (2167, 2202), False, 'from skimage.metrics import structural_similarity\n'), ((1321, 1346), 'models.of_model.Generator', 'Generator', (['UPSCALE_FACTOR'], {}), '(UPSCALE_FACTOR)\n', (1330, 1346), False, 'from models.of_model import Generator\n'), ((1524, 1551), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['image'], {}), '(image)\n', (1544, 1551), True, 'import numpy as np\n'), ((2063, 2082), 'PIL.Image.open', 'Image.open', (['HR_NAME'], {}), '(HR_NAME)\n', (2073, 2082), False, 'from PIL import Image\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.argsort,
"""
argsort() -> Tensor
This operator sorts the input Tensor at specified dim and returns the indices of the sorted Tensor.
Args:
input (oneflow.Tensor): the input Tensor.
dim (int, optional): the dimension to be sorted. Defaults to the last dim (-1).
descending (bool, optional): controls the sorting order (ascending or descending).
Returns:
oneflow.Tensor: The indices of the sorted Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = np.array([[10, 2, 9, 3, 7],
... [1, 9, 4, 3, 2]]).astype("float32")
>>> input = flow.Tensor(x)
>>> output = flow.argsort(input)
>>> output
tensor([[1, 3, 4, 2, 0],
[0, 4, 3, 2, 1]], dtype=oneflow.int32)
>>> output = flow.argsort(input, descending=True)
>>> output
tensor([[0, 2, 4, 3, 1],
[1, 2, 3, 4, 0]], dtype=oneflow.int32)
>>> output = flow.argsort(input, dim=0)
>>> output
tensor([[1, 0, 1, 0, 1],
[0, 1, 0, 1, 0]], dtype=oneflow.int32)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1872), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.argsort', '"""\n argsort() -> Tensor\n This operator sorts the input Tensor at specified dim and returns the indices of the sorted Tensor.\n\n Args:\n input (oneflow.Tensor): the input Tensor.\n dim (int, optional): the dimension to be sorted. Defaults to the last dim (-1).\n descending (bool, optional): controls the sorting order (ascending or descending).\n\n Returns:\n oneflow.Tensor: The indices of the sorted Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n >>> x = np.array([[10, 2, 9, 3, 7],\n ... [1, 9, 4, 3, 2]]).astype("float32")\n >>> input = flow.Tensor(x)\n >>> output = flow.argsort(input)\n >>> output\n tensor([[1, 3, 4, 2, 0],\n [0, 4, 3, 2, 1]], dtype=oneflow.int32)\n >>> output = flow.argsort(input, descending=True)\n >>> output\n tensor([[0, 2, 4, 3, 1],\n [1, 2, 3, 4, 0]], dtype=oneflow.int32)\n >>> output = flow.argsort(input, dim=0)\n >>> output\n tensor([[1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0]], dtype=oneflow.int32)\n\n """'], {}), '(oneflow.argsort,\n """\n argsort() -> Tensor\n This operator sorts the input Tensor at specified dim and returns the indices of the sorted Tensor.\n\n Args:\n input (oneflow.Tensor): the input Tensor.\n dim (int, optional): the dimension to be sorted. Defaults to the last dim (-1).\n descending (bool, optional): controls the sorting order (ascending or descending).\n\n Returns:\n oneflow.Tensor: The indices of the sorted Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n >>> x = np.array([[10, 2, 9, 3, 7],\n ... 
[1, 9, 4, 3, 2]]).astype("float32")\n >>> input = flow.Tensor(x)\n >>> output = flow.argsort(input)\n >>> output\n tensor([[1, 3, 4, 2, 0],\n [0, 4, 3, 2, 1]], dtype=oneflow.int32)\n >>> output = flow.argsort(input, descending=True)\n >>> output\n tensor([[0, 2, 4, 3, 1],\n [1, 2, 3, 4, 0]], dtype=oneflow.int32)\n >>> output = flow.argsort(input, dim=0)\n >>> output\n tensor([[1, 0, 1, 0, 1],\n [0, 1, 0, 1, 0]], dtype=oneflow.int32)\n\n """\n )\n', (670, 1872), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
from test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
def _test_0_dim_tensor(test_case, device):
    """A 0-dim tensor has one element, zero dimensions, and supports scalar arithmetic."""
    zero_dim = flow.tensor(np.array(9.999), device=device)
    test_case.assertEqual(zero_dim.numel(), 1)
    test_case.assertEqual(zero_dim.ndimension(), 0)
    # Elementwise ops between two 0-dim tensors stay scalar.
    lhs = flow.tensor(np.array(2), dtype=flow.float32, device=device)
    rhs = flow.tensor(np.array(3), dtype=flow.float32, device=device)
    prod = lhs * rhs
    total = lhs + rhs
    test_case.assertEqual(prod.numpy(), 6.0)
    test_case.assertEqual(total.numpy(), 5.0)
def _test_scalar_mul(test_case, device):
    """flow.ones(...).sum() matches numpy for shapes [2]*d, d = 0..4 (d=0 is 0-dim)."""
    for ndim in range(5):
        shape = [2] * ndim
        expected = np.ones(shape).sum()
        actual = flow.ones(shape, device=device).sum().numpy()
        test_case.assertEqual(expected, actual)
def _test_slice(test_case, device):
    """Integer indexing into a 1-D tensor yields a 0-dim scalar tensor."""
    vec = flow.tensor(np.arange(10), device=device)
    for idx in range(vec.numel()):
        element = vec[idx]
        test_case.assertEqual(idx, element.numpy())
        test_case.assertEqual(element.numel(), 1)
        test_case.assertEqual(element.ndimension(), 0)
def _test_slice_backward(test_case, device):
    """Backward through scalar indexing and through a slice produces the right gradients."""
    # Scalar indexing: gradients accumulate across iterations (no zero_grad),
    # so after iteration i the expected gradient is 1 at positions 0..i.
    expected_grad = np.zeros(10)
    t = flow.tensor(np.arange(10).astype(np.float32), device=device, requires_grad=True)
    for idx in range(t.numel()):
        t[idx].sum().backward()
        expected_grad[idx] = 1
        test_case.assertTrue(np.allclose(t.grad.numpy(), expected_grad, 1e-04, 1e-04))
    # Slice indexing: every element except index 0 receives gradient 1.
    t2 = flow.tensor(
        np.arange(100).astype(np.float32), device=device, requires_grad=True
    )
    t2[1:100].sum().backward()
    expected_grad2 = np.ones(100)
    expected_grad2[0] = 0
    test_case.assertTrue(np.allclose(t2.grad.numpy(), expected_grad2, 1e-04, 1e-04))
def _test_slice_scalar_graph(test_case, device):
    """Eager vs. nn.Graph execution must agree for a module that indexes a Parameter."""
    x = flow.tensor(3.0, device=device)

    class MyModule(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = flow.nn.Parameter(
                flow.tensor([1.0, 2.0, 3.0, 4.0], device=device)
            )

        def forward(self, x):
            # Scalar indexing into the parameter — the behavior under test.
            return x * self.weight[3]

    my_module = MyModule()
    # Eager reference result.
    of_eager_out = my_module(x)

    class ScalarGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = my_module

        def build(self, x):
            return self.m(x)

    # Lazy (graph-compiled) result with the same module instance.
    scalar_g = ScalarGraph()
    of_lazy_out = scalar_g(x)
    test_case.assertTrue(np.array_equal(of_lazy_out.numpy(), of_eager_out.numpy()))
def _test_slice_scalar_train_graph(test_case, device):
    """Three SGD training steps must produce identical losses in eager and graph mode."""

    class MyModule(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.weight = flow.nn.Parameter(
                flow.tensor([1.0, 2.0, 3.0, 4.0], device=device)
            )

        def forward(self, x):
            # Scalar Parameter indexing inside a trained module.
            return x * self.weight[3] + 1.0

    # Eager training loop: manual backward/step/zero_grad per iteration.
    my_module = MyModule()
    of_sgd = flow.optim.SGD(my_module.parameters(), lr=0.001, momentum=0.9)
    eager_out_list = []
    for i in range(3):
        x = flow.tensor(i * 1.0, device=device, requires_grad=False)
        of_eager_out = my_module(x)
        of_eager_out.backward()
        of_sgd.step()
        of_sgd.zero_grad()
        eager_out_list.append(of_eager_out)
    # Fresh module with identical initial weights for the graph run.
    lazy_module = MyModule()

    class ScalarTrainGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = lazy_module
            of_sgd = flow.optim.SGD(lazy_module.parameters(), lr=0.001, momentum=0.9)
            # The graph drives the optimizer itself after build()'s backward().
            self.add_optimizer(of_sgd)

        def build(self, x):
            loss = self.m(x)
            loss.backward()
            return loss

    lazy_out_list = []
    scalar_g = ScalarTrainGraph()
    for i in range(3):
        x = flow.tensor(i * 1.0, device=device)
        of_lazy_out = scalar_g(x)
        lazy_out_list.append(of_lazy_out)
    # Per-iteration losses must match exactly between the two modes.
    for i in range(3):
        test_case.assertTrue(
            np.array_equal(lazy_out_list[i].numpy(), eager_out_list[i].numpy())
        )
@flow.unittest.skip_unless_1n1d()
class TestZeroDimensionTensor(flow.unittest.TestCase):
    """Run every 0-dim tensor check on both CPU and CUDA."""

    def test_0_dim_tensor(test_case):
        arg_dict = OrderedDict(
            [
                (
                    "test_fun",
                    [
                        _test_0_dim_tensor,
                        _test_scalar_mul,
                        _test_slice,
                        _test_slice_backward,
                        _test_slice_scalar_graph,
                        _test_slice_scalar_train_graph,
                    ],
                ),
                ("device", ["cpu", "cuda"]),
            ]
        )
        for case in GenArgList(arg_dict):
            case[0](test_case, *case[1:])
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.ones",
"oneflow.tensor"
] | [((4671, 4703), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4701, 4703), True, 'import oneflow as flow\n'), ((854, 870), 'numpy.array', 'np.array', (['scalar'], {}), '(scalar)\n', (862, 870), True, 'import numpy as np\n'), ((883, 919), 'oneflow.tensor', 'flow.tensor', (['input_np'], {'device': 'device'}), '(input_np, device=device)\n', (894, 919), True, 'import oneflow as flow\n'), ((1832, 1844), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1840, 1844), True, 'import numpy as np\n'), ((2303, 2315), 'numpy.ones', 'np.ones', (['(100)'], {}), '(100)\n', (2310, 2315), True, 'import numpy as np\n'), ((2474, 2505), 'oneflow.tensor', 'flow.tensor', (['(3.0)'], {'device': 'device'}), '(3.0, device=device)\n', (2485, 2505), True, 'import oneflow as flow\n'), ((5235, 5250), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5248, 5250), False, 'import unittest\n'), ((1036, 1047), 'numpy.array', 'np.array', (['(2)'], {}), '(2)\n', (1044, 1047), True, 'import numpy as np\n'), ((1105, 1116), 'numpy.array', 'np.array', (['(3)'], {}), '(3)\n', (1113, 1116), True, 'import numpy as np\n'), ((1528, 1541), 'numpy.arange', 'np.arange', (['(10)'], {}), '(10)\n', (1537, 1541), True, 'import numpy as np\n'), ((3700, 3756), 'oneflow.tensor', 'flow.tensor', (['(i * 1.0)'], {'device': 'device', 'requires_grad': '(False)'}), '(i * 1.0, device=device, requires_grad=False)\n', (3711, 3756), True, 'import oneflow as flow\n'), ((4412, 4447), 'oneflow.tensor', 'flow.tensor', (['(i * 1.0)'], {'device': 'device'}), '(i * 1.0, device=device)\n', (4423, 4447), True, 'import oneflow as flow\n'), ((4816, 4829), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4827, 4829), False, 'from collections import OrderedDict\n'), ((5140, 5160), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5150, 5160), False, 'from test_util import GenArgList\n'), ((1865, 1878), 'numpy.arange', 'np.arange', (['(10)'], {}), 
'(10)\n', (1874, 1878), True, 'import numpy as np\n'), ((2158, 2172), 'numpy.arange', 'np.arange', (['(100)'], {}), '(100)\n', (2167, 2172), True, 'import numpy as np\n'), ((2663, 2711), 'oneflow.tensor', 'flow.tensor', (['[1.0, 2.0, 3.0, 4.0]'], {'device': 'device'}), '([1.0, 2.0, 3.0, 4.0], device=device)\n', (2674, 2711), True, 'import oneflow as flow\n'), ((3399, 3447), 'oneflow.tensor', 'flow.tensor', (['[1.0, 2.0, 3.0, 4.0]'], {'device': 'device'}), '([1.0, 2.0, 3.0, 4.0], device=device)\n', (3410, 3447), True, 'import oneflow as flow\n'), ((1384, 1402), 'numpy.ones', 'np.ones', (['([2] * dim)'], {}), '([2] * dim)\n', (1391, 1402), True, 'import numpy as np\n'), ((1410, 1445), 'oneflow.ones', 'flow.ones', (['([2] * dim)'], {'device': 'device'}), '([2] * dim, device=device)\n', (1419, 1445), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import tempfile
import os
import numpy as np
from oneflow.test_utils.test_util import GenArgDict
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
from oneflow.nn.parameter import Parameter
def compare_with_numpy_adagrad(
    test_case, weight_decay, lr_decay, scale, learning_rate, train_iters,
):
    """Compare flow._C.one_embedding_adagrad_update against a numpy reference.

    An embedding table of shape (num_rows, 2*embedding_size) is stored as
    [model | adagrad_sum] side by side. Random per-iteration gradients, valid
    row counts and skip flags are generated once and replayed through both the
    OneFlow kernel (on CUDA) and a hand-written numpy Adagrad; the final model
    and state halves must agree within tolerance.
    """
    num_rows = 500
    embedding_size = 128
    model_shape = (num_rows, embedding_size)
    # Each row holds the model values followed by the Adagrad accumulator.
    line_size = embedding_size * 2
    # Per-iteration number of valid rows and skip flags, shared by both paths.
    num_valid_seq = np.random.randint(1, num_rows, (train_iters))
    skip_if_seq = [np.random.randint(2) for i in range(train_iters)]
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=model_shape).astype(np.float32))
    init_value = np.random.uniform(size=(num_rows, line_size)).astype(np.float32)
    down_scale_by = 10
    epsilon = 1e-5

    def adagrad_by_oneflow():
        # Run train_iters-1 kernel updates on CUDA; returns the full table.
        unique_embeddings_tensor = flow.tensor(init_value, requires_grad=False).to(
            "cuda"
        )
        lr_tensor = flow.tensor(
            np.array(learning_rate).reshape(1,).astype(np.float32)
        ).to("cuda")
        down_scale_by_tensor = flow.tensor(
            np.array(down_scale_by).astype(np.float32)
        ).to("cuda")

        def train_one_iter(
            num_valid, unique_embeddings, embedding_grad, skip_if, train_step
        ):
            # The kernel itself honors skip_if and lr decay via train_step.
            return flow._C.one_embedding_adagrad_update(
                num_valid,
                unique_embeddings,
                embedding_grad,
                lr_tensor,
                down_scale_by_tensor,
                skip_if,
                train_step,
                scale,
                weight_decay,
                lr_decay,
                epsilon,
            )

        for i in range(1, train_iters):
            num_valid_tensor = flow.tensor(
                np.array(num_valid_seq[i]).reshape(1,).astype(np.int32)
            ).to("cuda")
            grad_tensor = flow.tensor(random_grad_seq[i]).to("cuda")
            skip_if_tensor = flow.tensor(
                np.array(skip_if_seq[i]).reshape(1,).astype(np.int64)
            ).to("cuda")
            step_tensor = flow.tensor(np.array(i).reshape(1,).astype(np.int64)).to(
                "cuda"
            )
            updated_tensor = train_one_iter(
                num_valid_tensor,
                unique_embeddings_tensor,
                grad_tensor,
                skip_if_tensor,
                step_tensor,
            )
            # Only the first num_valid rows are written back.
            unique_embeddings_tensor[0 : num_valid_seq[i]] = updated_tensor[
                0 : num_valid_seq[i]
            ]
        return unique_embeddings_tensor

    def adagrad_by_numpy():
        # Reference implementation over the same sequences; returns (model, state).
        x = init_value[:, 0:embedding_size]
        st = init_value[:, embedding_size:]

        def train_one_iter(iter, num_valid, grad, model, state):
            # Adagrad with gradient scaling, lr decay and decoupled weight decay.
            grad[0:num_valid] = grad[0:num_valid] * (scale / down_scale_by)
            lr = learning_rate / (1 + iter * lr_decay)
            state[0:num_valid] = (
                state[0:num_valid] + grad[0:num_valid] * grad[0:num_valid]
            )
            model[0:num_valid] = (
                model[0:num_valid]
                - lr / (np.sqrt(state[0:num_valid]) + epsilon) * grad[0:num_valid]
                - lr * weight_decay * model[0:num_valid]
            )
            return (model, state)

        for i in range(1, train_iters):
            if skip_if_seq[i] > 0:
                # Mirrors the kernel's skip_if: the whole update is a no-op.
                pass
            else:
                (x, st) = train_one_iter(
                    i, int(num_valid_seq[i]), random_grad_seq[i], x, st
                )
        return x, st

    oneflow_res = adagrad_by_oneflow().numpy()
    of_model = oneflow_res[:, 0:embedding_size]
    of_sum = oneflow_res[:, embedding_size:]
    np_model, np_sum = adagrad_by_numpy()
    test_case.assertTrue(
        np.allclose(of_model.flatten(), np_model.flatten(), rtol=0.001, atol=0.001)
    )
    test_case.assertTrue(
        np.allclose(of_sum.flatten(), np_sum.flatten(), rtol=0.001, atol=0.001)
    )
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestOptimizers(flow.unittest.TestCase):
def test_one_embedding_adagrad(test_case):
arg_dict = OrderedDict()
arg_dict["weight_decay"] = [0, 0.1]
arg_dict["lr_decay"] = [0, 0.1]
arg_dict["scale"] = [1, 0.1]
arg_dict["learning_rate"] = [0.3, 1.5]
arg_dict["train_iters"] = [10]
for arg in GenArgDict(arg_dict):
compare_with_numpy_adagrad(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow._C.one_embedding_adagrad_update",
"oneflow.tensor",
"oneflow.test_utils.test_util.GenArgDict"
] | [((4748, 4780), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4778, 4780), True, 'import oneflow as flow\n'), ((1114, 1157), 'numpy.random.randint', 'np.random.randint', (['(1)', 'num_rows', 'train_iters'], {}), '(1, num_rows, train_iters)\n', (1131, 1157), True, 'import numpy as np\n'), ((4688, 4722), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4697, 4722), False, 'import os\n'), ((5245, 5260), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5258, 5260), False, 'import unittest\n'), ((1179, 1199), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1196, 1199), True, 'import numpy as np\n'), ((4893, 4906), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4904, 4906), False, 'from collections import OrderedDict\n'), ((5133, 5153), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (5143, 5153), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((1393, 1438), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_rows, line_size)'}), '(size=(num_rows, line_size))\n', (1410, 1438), True, 'import numpy as np\n'), ((2023, 2207), 'oneflow._C.one_embedding_adagrad_update', 'flow._C.one_embedding_adagrad_update', (['num_valid', 'unique_embeddings', 'embedding_grad', 'lr_tensor', 'down_scale_by_tensor', 'skip_if', 'train_step', 'scale', 'weight_decay', 'lr_decay', 'epsilon'], {}), '(num_valid, unique_embeddings,\n embedding_grad, lr_tensor, down_scale_by_tensor, skip_if, train_step,\n scale, weight_decay, lr_decay, epsilon)\n', (2059, 2207), True, 'import oneflow as flow\n'), ((1567, 1611), 'oneflow.tensor', 'flow.tensor', (['init_value'], {'requires_grad': '(False)'}), '(init_value, requires_grad=False)\n', (1578, 1611), True, 'import oneflow as flow\n'), ((1319, 1354), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'model_shape'}), '(size=model_shape)\n', 
(1336, 1354), True, 'import numpy as np\n'), ((2599, 2630), 'oneflow.tensor', 'flow.tensor', (['random_grad_seq[i]'], {}), '(random_grad_seq[i])\n', (2610, 2630), True, 'import oneflow as flow\n'), ((1822, 1845), 'numpy.array', 'np.array', (['down_scale_by'], {}), '(down_scale_by)\n', (1830, 1845), True, 'import numpy as np\n'), ((3825, 3852), 'numpy.sqrt', 'np.sqrt', (['state[0:num_valid]'], {}), '(state[0:num_valid])\n', (3832, 3852), True, 'import numpy as np\n'), ((1690, 1713), 'numpy.array', 'np.array', (['learning_rate'], {}), '(learning_rate)\n', (1698, 1713), True, 'import numpy as np\n'), ((2492, 2518), 'numpy.array', 'np.array', (['num_valid_seq[i]'], {}), '(num_valid_seq[i])\n', (2500, 2518), True, 'import numpy as np\n'), ((2700, 2724), 'numpy.array', 'np.array', (['skip_if_seq[i]'], {}), '(skip_if_seq[i])\n', (2708, 2724), True, 'import numpy as np\n'), ((2817, 2828), 'numpy.array', 'np.array', (['i'], {}), '(i)\n', (2825, 2828), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
def _matmul_bias_relu(x, weight, bias, skip_activation):
    """Reference layer: x @ weight^T + bias, optionally followed by ReLU."""
    y = flow._C.matmul(x, weight, transpose_b=True)
    y = flow._C.bias_add(y, bias, axis=1)
    if skip_activation:
        return y
    return flow._C.relu(y)
def _append_layer_params(np_weight, np_bias, dtype, device, fused_params, naive_params):
    """Wrap one layer's numpy weight/bias into independent leaf tensors.

    Each array is converted twice so the fused path and the naive reference
    path own separate tensors (and therefore separate gradients).
    ``fused_params`` and ``naive_params`` are ``(weight_list, bias_list)``
    pairs that the new tensors are appended to.
    """
    for weight_list, bias_list in (fused_params, naive_params):
        weight_list.append(
            flow.tensor(np_weight, dtype=dtype, device=device, requires_grad=True)
        )
        bias_list.append(
            flow.tensor(np_bias, dtype=dtype, device=device, requires_grad=True)
        )


def _test_fused_matmul_bias_add_relu(
    test_case,
    batchsize,
    in_feature,
    hidden_size_list,
    out_feature,
    skip_final_activation,
    dtype,
    device,
):
    """Check flow._C.fused_mlp against an unfused matmul+bias(+relu) chain.

    Builds an MLP with ``len(hidden_size_list)`` hidden layers from shared
    random weights, runs both the fused kernel and the naive reference on the
    same input, then compares outputs, all weight/bias gradients, and the
    input gradient.
    """
    x = np.random.uniform(low=-1, high=1, size=(batchsize, in_feature))
    fused_x = flow.tensor(x, dtype=dtype, device=device, requires_grad=True)
    naive_x = flow.tensor(x, dtype=dtype, device=device, requires_grad=True)
    fused_weight_list = []
    naive_weight_list = []
    fused_bias_list = []
    naive_bias_list = []
    fused_params = (fused_weight_list, fused_bias_list)
    naive_params = (naive_weight_list, naive_bias_list)
    hidden_num = len(hidden_size_list)
    # Hidden layers: layer i maps prev_size -> hidden_size_list[i].
    prev_size = in_feature
    for hidden_size in hidden_size_list:
        np_weight = np.random.uniform(low=-1, high=1, size=(hidden_size, prev_size))
        np_bias = np.random.uniform(low=-1, high=1, size=hidden_size)
        _append_layer_params(np_weight, np_bias, dtype, device, fused_params, naive_params)
        prev_size = hidden_size
    # Output layer: prev_size equals in_feature when there are no hidden layers.
    # (The original code drew an extra, unused random matrix whenever hidden
    # layers were present; that redundant draw is removed here.)
    np_final_weight = np.random.uniform(low=-1, high=1, size=(out_feature, prev_size))
    np_final_bias = np.random.uniform(low=-1, high=1, size=out_feature)
    _append_layer_params(
        np_final_weight, np_final_bias, dtype, device, fused_params, naive_params
    )
    fused_out = flow._C.fused_mlp(
        fused_x,
        fused_weight_list,
        fused_bias_list,
        skip_final_activation=skip_final_activation,
    )
    # Naive reference: ReLU after every layer except (optionally) the last.
    naive_out = naive_x
    for idx in range(hidden_num + 1):
        is_last = idx == hidden_num
        naive_out = _matmul_bias_relu(
            naive_out,
            naive_weight_list[idx],
            naive_bias_list[idx],
            skip_final_activation if is_last else False,
        )
    # One backward pass propagates through both paths at once.
    total_out = fused_out.sum() + naive_out.sum()
    total_out.backward()
    # Outputs must match.
    test_case.assertTrue(
        np.allclose(fused_out.numpy(), naive_out.numpy(), atol=1e-4, rtol=1e-4)
    )
    # Weight and bias gradients must match layer by layer.
    for idx in range(hidden_num + 1):
        test_case.assertTrue(
            np.allclose(
                fused_weight_list[idx].grad.numpy(),
                naive_weight_list[idx].grad.numpy(),
                atol=1e-4,
                rtol=1e-4,
            )
        )
        test_case.assertTrue(
            np.allclose(
                fused_bias_list[idx].grad.numpy(),
                naive_bias_list[idx].grad.numpy(),
                atol=1e-4,
                rtol=1e-4,
            )
        )
    # Input gradients must match.
    test_case.assertTrue(
        np.allclose(fused_x.grad.numpy(), naive_x.grad.numpy(), atol=1e-4, rtol=1e-4)
    )
@flow.unittest.skip_unless_1n1d()
class TestFusedMatmulBiasAddRelu(flow.unittest.TestCase):
    """Sweep the fused MLP kernel over shapes, dtypes, devices and activations."""

    def test_fused_matmul_op(test_case):
        args_dict = OrderedDict(
            [
                ("test_fun", [_test_fused_matmul_bias_add_relu]),
                ("batchsize", [1, 2, 4]),
                ("in_feature", [96, 128]),
                ("hidden_size_list", [[256, 512], [256], [96, 144], []]),
                ("out_feature", [512, 1024, 288, 1]),
                ("skip_final_activation", [True, False]),
                ("dtype", [flow.float32, flow.float64]),
                ("device", ["cuda", "cpu"]),
            ]
        )
        for case in GenArgList(args_dict):
            case[0](test_case, *case[1:])
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow._C.fused_mlp",
"oneflow.tensor",
"oneflow._C.relu",
"oneflow.test_utils.test_util.GenArgList",
"oneflow._C.matmul"
] | [((5474, 5506), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5504, 5506), True, 'import oneflow as flow\n'), ((1143, 1206), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(batchsize, in_feature)'}), '(low=-1, high=1, size=(batchsize, in_feature))\n', (1160, 1206), True, 'import numpy as np\n'), ((1222, 1284), 'oneflow.tensor', 'flow.tensor', (['x'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(x, dtype=dtype, device=device, requires_grad=True)\n', (1233, 1284), True, 'import oneflow as flow\n'), ((1299, 1361), 'oneflow.tensor', 'flow.tensor', (['x'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(x, dtype=dtype, device=device, requires_grad=True)\n', (1310, 1361), True, 'import oneflow as flow\n'), ((3038, 3103), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(out_feature, in_feature)'}), '(low=-1, high=1, size=(out_feature, in_feature))\n', (3055, 3103), True, 'import numpy as np\n'), ((3274, 3325), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'out_feature'}), '(low=-1, high=1, size=out_feature)\n', (3291, 3325), True, 'import numpy as np\n'), ((3822, 3933), 'oneflow._C.fused_mlp', 'flow._C.fused_mlp', (['fused_x', 'fused_weight_list', 'fused_bias_list'], {'skip_final_activation': 'skip_final_activation'}), '(fused_x, fused_weight_list, fused_bias_list,\n skip_final_activation=skip_final_activation)\n', (3839, 3933), True, 'import oneflow as flow\n'), ((6203, 6218), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6216, 6218), False, 'import unittest\n'), ((823, 866), 'oneflow._C.matmul', 'flow._C.matmul', (['x', 'weight'], {'transpose_b': '(True)'}), '(x, weight, transpose_b=True)\n', (837, 866), True, 'import oneflow as flow\n'), ((924, 941), 'oneflow._C.relu', 'flow._C.relu', (['out'], {}), '(out)\n', (936, 941), True, 'import oneflow 
as flow\n'), ((1558, 1631), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(hidden_size_list[0], in_feature)'}), '(low=-1, high=1, size=(hidden_size_list[0], in_feature))\n', (1575, 1631), True, 'import numpy as np\n'), ((1678, 1737), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'hidden_size_list[0]'}), '(low=-1, high=1, size=hidden_size_list[0])\n', (1695, 1737), True, 'import numpy as np\n'), ((2321, 2415), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(hidden_size_list[idx], hidden_size_list[idx - 1])'}), '(low=-1, high=1, size=(hidden_size_list[idx],\n hidden_size_list[idx - 1]))\n', (2338, 2415), True, 'import numpy as np\n'), ((2452, 2513), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': 'hidden_size_list[idx]'}), '(low=-1, high=1, size=hidden_size_list[idx])\n', (2469, 2513), True, 'import numpy as np\n'), ((3155, 3230), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1)', 'high': '(1)', 'size': '(out_feature, hidden_size_list[-1])'}), '(low=-1, high=1, size=(out_feature, hidden_size_list[-1]))\n', (3172, 3230), True, 'import numpy as np\n'), ((3367, 3443), 'oneflow.tensor', 'flow.tensor', (['np_final_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_final_weight, dtype=dtype, device=device, requires_grad=True)\n', (3378, 3443), True, 'import oneflow as flow\n'), ((3486, 3560), 'oneflow.tensor', 'flow.tensor', (['np_final_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_final_bias, dtype=dtype, device=device, requires_grad=True)\n', (3497, 3560), True, 'import oneflow as flow\n'), ((3605, 3681), 'oneflow.tensor', 'flow.tensor', (['np_final_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_final_weight, dtype=dtype, device=device, requires_grad=True)\n', (3616, 3681), True, 'import 
oneflow as flow\n'), ((3724, 3798), 'oneflow.tensor', 'flow.tensor', (['np_final_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_final_bias, dtype=dtype, device=device, requires_grad=True)\n', (3735, 3798), True, 'import oneflow as flow\n'), ((5626, 5639), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5637, 5639), False, 'from collections import OrderedDict\n'), ((6107, 6128), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['args_dict'], {}), '(args_dict)\n', (6117, 6128), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((1785, 1861), 'oneflow.tensor', 'flow.tensor', (['np_first_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_first_weight, dtype=dtype, device=device, requires_grad=True)\n', (1796, 1861), True, 'import oneflow as flow\n'), ((1916, 1990), 'oneflow.tensor', 'flow.tensor', (['np_first_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_first_bias, dtype=dtype, device=device, requires_grad=True)\n', (1927, 1990), True, 'import oneflow as flow\n'), ((2047, 2123), 'oneflow.tensor', 'flow.tensor', (['np_first_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_first_weight, dtype=dtype, device=device, requires_grad=True)\n', (2058, 2123), True, 'import oneflow as flow\n'), ((2178, 2252), 'oneflow.tensor', 'flow.tensor', (['np_first_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_first_bias, dtype=dtype, device=device, requires_grad=True)\n', (2189, 2252), True, 'import oneflow as flow\n'), ((2561, 2631), 'oneflow.tensor', 'flow.tensor', (['np_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_weight, dtype=dtype, device=device, requires_grad=True)\n', (2572, 2631), True, 'import oneflow as flow\n'), ((2686, 2754), 'oneflow.tensor', 'flow.tensor', (['np_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': 
'(True)'}), '(np_bias, dtype=dtype, device=device, requires_grad=True)\n', (2697, 2754), True, 'import oneflow as flow\n'), ((2811, 2881), 'oneflow.tensor', 'flow.tensor', (['np_weight'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_weight, dtype=dtype, device=device, requires_grad=True)\n', (2822, 2881), True, 'import oneflow as flow\n'), ((2936, 3004), 'oneflow.tensor', 'flow.tensor', (['np_bias'], {'dtype': 'dtype', 'device': 'device', 'requires_grad': '(True)'}), '(np_bias, dtype=dtype, device=device, requires_grad=True)\n', (2947, 3004), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import oneflow
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
import time
import os
os.environ["ONEFLOW_BOXING_DISABLE_MIDDLE_NODE_AND_CHECK"] = "1"
def _test_nccl_logical_send_recv(test_case, src_nd_sbp, dst_nd_sbp):
# can not process p in dst
if flow.sbp.partial_sum() in dst_nd_sbp:
return
# skip src == dst
if src_nd_sbp == dst_nd_sbp:
return
# in this case, use intra group boxing
if src_nd_sbp[0] == dst_nd_sbp[0]:
return
# in this case, use inter group boxing
if (
src_nd_sbp[1] == dst_nd_sbp[1]
and src_nd_sbp[0] != src_nd_sbp[1]
and dst_nd_sbp[0] != dst_nd_sbp[1]
):
return
# input
placement = flow.placement("cuda", ranks=[[0, 1], [2, 3]])
local_np = np.arange(4 * 4 * 4).reshape(4, 4, 4)
x = flow.tensor(local_np, sbp=src_nd_sbp, placement=placement)
# check eager boxing
eager_out = x.to_global(sbp=dst_nd_sbp, placement=placement)
test_case.assertTrue(np.array_equal(eager_out.numpy(), x.numpy()))
# check graph boxing
flow.boxing.nccl.enable_use_compute_stream(True)
class TestNcclLogicalSendRecvGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
def build(self, x):
y = x.to_global(sbp=dst_nd_sbp, placement=placement)
return y
graph = TestNcclLogicalSendRecvGraph()
# graph.debug()
y = graph(x)
out_np = y.numpy()
in_np = x.numpy()
# if flow.env.get_rank() == 0:
# print("src sbp ", src_nd_sbp, ", dst sbp ", dst_nd_sbp)
# equal = np.array_equal(out_np, in_np)
# if not equal:
# print("in ", in_np)
# print("out ", out_np)
test_case.assertTrue(np.array_equal(out_np, in_np))
def gen_nd_sbp():
sbp_list = [
flow.sbp.partial_sum(),
flow.sbp.broadcast(),
flow.sbp.split(0),
flow.sbp.split(1),
flow.sbp.split(2),
]
nd_sbp_list = []
for sbp0 in sbp_list:
for sbp1 in sbp_list:
nd_sbp_list.append([sbp0, sbp1])
return nd_sbp_list
@flow.unittest.skip_unless_1n4d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestNcclLogicalSendRecv(flow.unittest.TestCase):
def test_nccl_logical_send_recv(test_case):
arg_dict = OrderedDict()
arg_dict["src_nd_sbp"] = gen_nd_sbp()
arg_dict["dst_nd_sbp"] = gen_nd_sbp()
for arg in GenArgList(arg_dict):
_test_nccl_logical_send_recv(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n4d",
"oneflow.boxing.nccl.enable_use_compute_stream",
"oneflow.tensor",
"oneflow.sbp.split",
"oneflow.placement",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.sbp.broadcast",
"oneflow.sbp.partial_sum"
] | [((2870, 2902), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (2900, 2902), True, 'import oneflow as flow\n'), ((1478, 1524), 'oneflow.placement', 'flow.placement', (['"""cuda"""'], {'ranks': '[[0, 1], [2, 3]]'}), "('cuda', ranks=[[0, 1], [2, 3]])\n", (1492, 1524), True, 'import oneflow as flow\n'), ((1586, 1644), 'oneflow.tensor', 'flow.tensor', (['local_np'], {'sbp': 'src_nd_sbp', 'placement': 'placement'}), '(local_np, sbp=src_nd_sbp, placement=placement)\n', (1597, 1644), True, 'import oneflow as flow\n'), ((1837, 1885), 'oneflow.boxing.nccl.enable_use_compute_stream', 'flow.boxing.nccl.enable_use_compute_stream', (['(True)'], {}), '(True)\n', (1879, 1885), True, 'import oneflow as flow\n'), ((2920, 2954), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (2929, 2954), False, 'import os\n'), ((3339, 3354), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3352, 3354), False, 'import unittest\n'), ((1027, 1049), 'oneflow.sbp.partial_sum', 'flow.sbp.partial_sum', ([], {}), '()\n', (1047, 1049), True, 'import oneflow as flow\n'), ((2505, 2534), 'numpy.array_equal', 'np.array_equal', (['out_np', 'in_np'], {}), '(out_np, in_np)\n', (2519, 2534), True, 'import numpy as np\n'), ((2581, 2603), 'oneflow.sbp.partial_sum', 'flow.sbp.partial_sum', ([], {}), '()\n', (2601, 2603), True, 'import oneflow as flow\n'), ((2613, 2633), 'oneflow.sbp.broadcast', 'flow.sbp.broadcast', ([], {}), '()\n', (2631, 2633), True, 'import oneflow as flow\n'), ((2643, 2660), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2657, 2660), True, 'import oneflow as flow\n'), ((2670, 2687), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (2684, 2687), True, 'import oneflow as flow\n'), ((2697, 2714), 'oneflow.sbp.split', 'flow.sbp.split', (['(2)'], {}), '(2)\n', (2711, 2714), True, 'import oneflow as flow\n'), ((3101, 3114), 'collections.OrderedDict', 'OrderedDict', ([], {}), 
'()\n', (3112, 3114), False, 'from collections import OrderedDict\n'), ((3226, 3246), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3236, 3246), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((1540, 1560), 'numpy.arange', 'np.arange', (['(4 * 4 * 4)'], {}), '(4 * 4 * 4)\n', (1549, 1560), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.is_floating_point,
r"""Returns True if the data type of input is a floating point data type i.e., one of flow.float64, flow.float32, flow.float16.
Args:
input (Tensor): the input tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([1, 2, 3, 4, 5], dtype=flow.int)
>>> output = flow.is_floating_point(input)
>>> output
False
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1136), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.is_floating_point', '"""Returns True if the data type of input is a floating point data type i.e., one of flow.float64, flow.float32, flow.float16.\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.tensor([1, 2, 3, 4, 5], dtype=flow.int)\n >>> output = flow.is_floating_point(input)\n >>> output\n False\n """'], {}), '(oneflow.is_floating_point,\n """Returns True if the data type of input is a floating point data type i.e., one of flow.float64, flow.float32, flow.float16.\n\n Args:\n input (Tensor): the input tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> input = flow.tensor([1, 2, 3, 4, 5], dtype=flow.int)\n >>> output = flow.is_floating_point(input)\n >>> output\n False\n """\n )\n', (670, 1136), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""OneFlow implementation of Mixup and Cutmix
Modified from https://github.com/rwightman/pytorch-image-models/blob/master/timm/data/mixup.py
"""
import numpy as np
import oneflow as flow
def one_hot(x, num_classes, on_value=1.0, off_value=0.0, device="cuda"):
    """Build a (N, num_classes) matrix filled with `off_value`, scattering
    `on_value` into each row at the column given by the label in `x`."""
    labels = x.long().view(-1, 1)
    base = flow.full((labels.size()[0], num_classes), off_value, device=device)
    # TODO: switch to tensor.scatter method
    return flow.scatter(base, dim=1, index=labels, src=on_value)
def mixup_target(target, num_classes, lam=1.0, smoothing=0.0, device="cuda"):
    """Return the lam-weighted mix of label-smoothed one-hot targets for a
    batch and its index-flipped counterpart."""
    off_value = smoothing / num_classes
    on_value = 1.0 - smoothing + off_value
    direct = one_hot(
        target, num_classes, on_value=on_value, off_value=off_value, device=device
    )
    flipped = one_hot(
        target.flip(0),
        num_classes,
        on_value=on_value,
        off_value=off_value,
        device=device,
    )
    return direct * lam + flipped * (1.0 - lam)
def rand_bbox(img_shape, lam, margin=0.0, count=None):
    """ Standard CutMix bounding-box
    Draws a random square box whose side is sqrt(1 - lam) of each image
    dimension; an optional margin (as a fraction of the box size) keeps the
    box centre away from the image border.
    Args:
        img_shape (tuple): Image shape as tuple
        lam (float): Cutmix lambda value
        margin (float): Percentage of bbox dimension to enforce as margin (reduce amount of box outside image)
        count (int): Number of bbox to generate
    """
    cut_ratio = np.sqrt(1 - lam)
    height, width = img_shape[-2:]
    cut_h = int(height * cut_ratio)
    cut_w = int(width * cut_ratio)
    pad_y = int(margin * cut_h)
    pad_x = int(margin * cut_w)
    # Draw the box centre first, then clip each half-extent to the image.
    center_y = np.random.randint(pad_y, height - pad_y, size=count)
    center_x = np.random.randint(pad_x, width - pad_x, size=count)
    yl = np.clip(center_y - cut_h // 2, 0, height)
    yh = np.clip(center_y + cut_h // 2, 0, height)
    xl = np.clip(center_x - cut_w // 2, 0, width)
    xh = np.clip(center_x + cut_w // 2, 0, width)
    return yl, yh, xl, xh
def rand_bbox_minmax(img_shape, minmax, count=None):
    """ Min-Max CutMix bounding-box
    Draws a random rectangle whose height and width are each sampled
    independently between min and max fractions of the image dimensions
    (Darknet-style cutmix). Typical minmax defaults are around .2-.3 for
    min and .8-.9 for max.
    Args:
        img_shape (tuple): Image shape as tuple
        minmax (tuple or list): Min and max bbox ratios (as percent of image size)
        count (int): Number of bbox to generate
    """
    assert len(minmax) == 2
    height, width = img_shape[-2:]
    lo, hi = minmax
    cut_h = np.random.randint(int(height * lo), int(height * hi), size=count)
    cut_w = np.random.randint(int(width * lo), int(width * hi), size=count)
    # Top-left corner is drawn so the box always fits inside the image.
    top = np.random.randint(0, height - cut_h, size=count)
    left = np.random.randint(0, width - cut_w, size=count)
    return top, top + cut_h, left, left + cut_w
def cutmix_bbox_and_lam(
    img_shape, lam, ratio_minmax=None, correct_lam=True, count=None
):
    """ Generate a cutmix bbox and optionally correct lam to the true box area.
    Uses the min/max sampler when `ratio_minmax` is given, otherwise the
    standard lambda-driven sampler. Because boxes may be clipped at the image
    border, lam is recomputed from the realized box area when requested.
    """
    if ratio_minmax is None:
        yl, yu, xl, xu = rand_bbox(img_shape, lam, count=count)
    else:
        yl, yu, xl, xu = rand_bbox_minmax(img_shape, ratio_minmax, count=count)
    if correct_lam or ratio_minmax is not None:
        box_area = (yu - yl) * (xu - xl)
        lam = 1.0 - box_area / float(img_shape[-2] * img_shape[-1])
    return (yl, yu, xl, xu), lam
class Mixup:
    """ Mixup/Cutmix that applies different params to each element or whole batch
    Args:
        mixup_alpha (float): Mixup alpha value, mixup is active if > 0
        cutmix_alpha (float): Cutmix alpha value, cutmix is active if > 0
        cutmix_minmax (List[float]): Cutmix min/max image ratio, cutmix is active and uses this vs alpha if not None
        prob (float): Probability of applying mixup or cutmix per batch or element
        switch_prob (float): Probability of switching to cutmix instead of mixup when both are active
        mode (str): How to apply mixup/cutmix params (per 'batch', 'pair' (pair of elements), 'elem' (element)
        correct_lam (bool): Apply lambda correction when cutmix bbox clipped by image borders
        label_smoothing (float): Apply label smoothing to the mixed target tensor
        num_classes (int): Number of classes for target
    """

    def __init__(
        self,
        mixup_alpha=1.0,
        cutmix_alpha=0.0,
        cutmix_minmax=None,
        prob=1.0,
        switch_prob=0.5,
        mode="batch",
        correct_lam=True,
        label_smoothing=0.1,
        num_classes=1000,
    ):
        self.mixup_alpha = mixup_alpha
        self.cutmix_alpha = cutmix_alpha
        self.cutmix_minmax = cutmix_minmax
        if self.cutmix_minmax is not None:
            assert len(self.cutmix_minmax) == 2
            # force cutmix alpha == 1.0 when minmax active to keep logic simple & safe
            self.cutmix_alpha = 1.0
        self.mix_prob = prob
        self.switch_prob = switch_prob
        self.label_smoothing = label_smoothing
        self.num_classes = num_classes
        self.mode = mode
        # correct lambda based on clipped area for cutmix
        self.correct_lam = correct_lam
        # set to False to disable mixing (intended to be set by train loop)
        self.mixup_enabled = True

    def _params_per_elem(self, batch_size):
        """Sample per-element (lam, use_cutmix) parameters for 'elem'/'pair' modes.

        Returns:
            lam (np.ndarray, float32): per-sample mixing coefficients (1.0 = no mix).
            use_cutmix (np.ndarray, bool): per-sample cutmix-vs-mixup flags.
        """
        lam = np.ones(batch_size, dtype=np.float32)
        # FIX: np.bool was deprecated and removed in NumPy >= 1.24; use builtin bool.
        use_cutmix = np.zeros(batch_size, dtype=bool)
        if self.mixup_enabled:
            if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0:
                use_cutmix = np.random.rand(batch_size) < self.switch_prob
                lam_mix = np.where(
                    use_cutmix,
                    np.random.beta(
                        self.cutmix_alpha, self.cutmix_alpha, size=batch_size
                    ),
                    np.random.beta(self.mixup_alpha, self.mixup_alpha, size=batch_size),
                )
            elif self.mixup_alpha > 0.0:
                lam_mix = np.random.beta(
                    self.mixup_alpha, self.mixup_alpha, size=batch_size
                )
            elif self.cutmix_alpha > 0.0:
                use_cutmix = np.ones(batch_size, dtype=bool)
                lam_mix = np.random.beta(
                    self.cutmix_alpha, self.cutmix_alpha, size=batch_size
                )
            else:
                assert (
                    False
                ), "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            # Mix each sample only with probability mix_prob; otherwise lam stays 1.
            lam = np.where(
                np.random.rand(batch_size) < self.mix_prob,
                lam_mix.astype(np.float32),
                lam,
            )
        return lam, use_cutmix

    def _params_per_batch(self):
        """Sample a single (lam, use_cutmix) pair for 'batch' mode."""
        lam = 1.0
        use_cutmix = False
        if self.mixup_enabled and np.random.rand() < self.mix_prob:
            if self.mixup_alpha > 0.0 and self.cutmix_alpha > 0.0:
                use_cutmix = np.random.rand() < self.switch_prob
                lam_mix = (
                    np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
                    if use_cutmix
                    else np.random.beta(self.mixup_alpha, self.mixup_alpha)
                )
            elif self.mixup_alpha > 0.0:
                lam_mix = np.random.beta(self.mixup_alpha, self.mixup_alpha)
            elif self.cutmix_alpha > 0.0:
                use_cutmix = True
                lam_mix = np.random.beta(self.cutmix_alpha, self.cutmix_alpha)
            else:
                assert (
                    False
                ), "One of mixup_alpha > 0., cutmix_alpha > 0., cutmix_minmax not None should be true."
            lam = float(lam_mix)
        return lam, use_cutmix

    def _mix_elem(self, x):
        """Mix each sample in place with its mirror (index batch_size - i - 1)."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size)
        x_orig = x.clone()
        for i in range(batch_size):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape,
                        lam,
                        ratio_minmax=self.cutmix_minmax,
                        correct_lam=self.correct_lam,
                    )
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    lam = flow.tensor(lam, device=x.device, dtype=x.dtype)
                    # FIX: mix with the mirrored sample x_orig[j]; the original
                    # mixed x[i] with its own copy x_orig[i], which is a no-op
                    # (cf. the upstream timm implementation this file cites).
                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
        return flow.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_pair(self, x):
        """Mix mirrored pairs (i, batch_size - i - 1) symmetrically in place."""
        batch_size = len(x)
        lam_batch, use_cutmix = self._params_per_elem(batch_size // 2)
        x_orig = x.clone()
        for i in range(batch_size // 2):
            j = batch_size - i - 1
            lam = lam_batch[i]
            if lam != 1:
                if use_cutmix[i]:
                    (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                        x[i].shape,
                        lam,
                        ratio_minmax=self.cutmix_minmax,
                        correct_lam=self.correct_lam,
                    )
                    x[i][:, yl:yh, xl:xh] = x_orig[j][:, yl:yh, xl:xh]
                    x[j][:, yl:yh, xl:xh] = x_orig[i][:, yl:yh, xl:xh]
                    lam_batch[i] = lam
                else:
                    # TODO: support tensor * numpy
                    lam = flow.tensor(lam, device=x.device, dtype=x.dtype)
                    x[i] = x[i] * lam + x_orig[j] * (1 - lam)
                    x[j] = x[j] * lam + x_orig[i] * (1 - lam)
        lam_batch = np.concatenate((lam_batch, lam_batch[::-1]))
        return flow.tensor(lam_batch, device=x.device, dtype=x.dtype).unsqueeze(1)

    def _mix_batch(self, x):
        """Mix the whole batch with its flipped copy in place; returns scalar lam."""
        lam, use_cutmix = self._params_per_batch()
        if lam == 1.0:
            return 1.0
        if use_cutmix:
            (yl, yh, xl, xh), lam = cutmix_bbox_and_lam(
                x.shape,
                lam,
                ratio_minmax=self.cutmix_minmax,
                correct_lam=self.correct_lam,
            )
            x[:, :, yl:yh, xl:xh] = x.flip(0)[:, :, yl:yh, xl:xh]
        else:
            x_flipped = x.flip(0).mul(1.0 - lam)
            x.mul_(lam).add_(x_flipped)
        return lam

    def __call__(self, x, target):
        """Mix the input batch in place and return (x, soft_target).

        Args:
            x: batch of images (modified in place).
            target: integer class labels for the batch.
        """
        assert len(x) % 2 == 0, "Batch size should be even when using this"
        if self.mode == "elem":
            lam = self._mix_elem(x)
        elif self.mode == "pair":
            lam = self._mix_pair(x)
        else:
            lam = self._mix_batch(x)
        target = mixup_target(
            target, self.num_classes, lam, self.label_smoothing, x.device
        )
        return x, target
| [
"oneflow.tensor"
] | [((1474, 1490), 'numpy.sqrt', 'np.sqrt', (['(1 - lam)'], {}), '(1 - lam)\n', (1481, 1490), True, 'import numpy as np\n'), ((1658, 1719), 'numpy.random.randint', 'np.random.randint', (['(0 + margin_y)', '(img_h - margin_y)'], {'size': 'count'}), '(0 + margin_y, img_h - margin_y, size=count)\n', (1675, 1719), True, 'import numpy as np\n'), ((1729, 1790), 'numpy.random.randint', 'np.random.randint', (['(0 + margin_x)', '(img_w - margin_x)'], {'size': 'count'}), '(0 + margin_x, img_w - margin_x, size=count)\n', (1746, 1790), True, 'import numpy as np\n'), ((1800, 1834), 'numpy.clip', 'np.clip', (['(cy - cut_h // 2)', '(0)', 'img_h'], {}), '(cy - cut_h // 2, 0, img_h)\n', (1807, 1834), True, 'import numpy as np\n'), ((1844, 1878), 'numpy.clip', 'np.clip', (['(cy + cut_h // 2)', '(0)', 'img_h'], {}), '(cy + cut_h // 2, 0, img_h)\n', (1851, 1878), True, 'import numpy as np\n'), ((1888, 1922), 'numpy.clip', 'np.clip', (['(cx - cut_w // 2)', '(0)', 'img_w'], {}), '(cx - cut_w // 2, 0, img_w)\n', (1895, 1922), True, 'import numpy as np\n'), ((1932, 1966), 'numpy.clip', 'np.clip', (['(cx + cut_w // 2)', '(0)', 'img_w'], {}), '(cx + cut_w // 2, 0, img_w)\n', (1939, 1966), True, 'import numpy as np\n'), ((2806, 2853), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img_h - cut_h)'], {'size': 'count'}), '(0, img_h - cut_h, size=count)\n', (2823, 2853), True, 'import numpy as np\n'), ((2863, 2910), 'numpy.random.randint', 'np.random.randint', (['(0)', '(img_w - cut_w)'], {'size': 'count'}), '(0, img_w - cut_w, size=count)\n', (2880, 2910), True, 'import numpy as np\n'), ((5557, 5594), 'numpy.ones', 'np.ones', (['batch_size'], {'dtype': 'np.float32'}), '(batch_size, dtype=np.float32)\n', (5564, 5594), True, 'import numpy as np\n'), ((5616, 5651), 'numpy.zeros', 'np.zeros', (['batch_size'], {'dtype': 'np.bool'}), '(batch_size, dtype=np.bool)\n', (5624, 5651), True, 'import numpy as np\n'), ((9922, 9966), 'numpy.concatenate', 'np.concatenate', (['(lam_batch, 
lam_batch[::-1])'], {}), '((lam_batch, lam_batch[::-1]))\n', (9936, 9966), True, 'import numpy as np\n'), ((7034, 7050), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7048, 7050), True, 'import numpy as np\n'), ((8797, 8851), 'oneflow.tensor', 'flow.tensor', (['lam_batch'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(lam_batch, device=x.device, dtype=x.dtype)\n', (8808, 8851), True, 'import oneflow as flow\n'), ((9982, 10036), 'oneflow.tensor', 'flow.tensor', (['lam_batch'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(lam_batch, device=x.device, dtype=x.dtype)\n', (9993, 10036), True, 'import oneflow as flow\n'), ((5779, 5805), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (5793, 5805), True, 'import numpy as np\n'), ((5913, 5982), 'numpy.random.beta', 'np.random.beta', (['self.cutmix_alpha', 'self.cutmix_alpha'], {'size': 'batch_size'}), '(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n', (5927, 5982), True, 'import numpy as np\n'), ((6050, 6117), 'numpy.random.beta', 'np.random.beta', (['self.mixup_alpha', 'self.mixup_alpha'], {'size': 'batch_size'}), '(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n', (6064, 6117), True, 'import numpy as np\n'), ((6204, 6271), 'numpy.random.beta', 'np.random.beta', (['self.mixup_alpha', 'self.mixup_alpha'], {'size': 'batch_size'}), '(self.mixup_alpha, self.mixup_alpha, size=batch_size)\n', (6218, 6271), True, 'import numpy as np\n'), ((6767, 6793), 'numpy.random.rand', 'np.random.rand', (['batch_size'], {}), '(batch_size)\n', (6781, 6793), True, 'import numpy as np\n'), ((7164, 7180), 'numpy.random.rand', 'np.random.rand', ([], {}), '()\n', (7178, 7180), True, 'import numpy as np\n'), ((7248, 7300), 'numpy.random.beta', 'np.random.beta', (['self.cutmix_alpha', 'self.cutmix_alpha'], {}), '(self.cutmix_alpha, self.cutmix_alpha)\n', (7262, 7300), True, 'import numpy as np\n'), ((7360, 7410), 'numpy.random.beta', 'np.random.beta', (['self.mixup_alpha', 
'self.mixup_alpha'], {}), '(self.mixup_alpha, self.mixup_alpha)\n', (7374, 7410), True, 'import numpy as np\n'), ((7496, 7546), 'numpy.random.beta', 'np.random.beta', (['self.mixup_alpha', 'self.mixup_alpha'], {}), '(self.mixup_alpha, self.mixup_alpha)\n', (7510, 7546), True, 'import numpy as np\n'), ((8671, 8719), 'oneflow.tensor', 'flow.tensor', (['lam'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(lam, device=x.device, dtype=x.dtype)\n', (8682, 8719), True, 'import oneflow as flow\n'), ((9729, 9777), 'oneflow.tensor', 'flow.tensor', (['lam'], {'device': 'x.device', 'dtype': 'x.dtype'}), '(lam, device=x.device, dtype=x.dtype)\n', (9740, 9777), True, 'import oneflow as flow\n'), ((6381, 6415), 'numpy.ones', 'np.ones', (['batch_size'], {'dtype': 'np.bool'}), '(batch_size, dtype=np.bool)\n', (6388, 6415), True, 'import numpy as np\n'), ((6442, 6511), 'numpy.random.beta', 'np.random.beta', (['self.cutmix_alpha', 'self.cutmix_alpha'], {'size': 'batch_size'}), '(self.cutmix_alpha, self.cutmix_alpha, size=batch_size)\n', (6456, 6511), True, 'import numpy as np\n'), ((7649, 7701), 'numpy.random.beta', 'np.random.beta', (['self.cutmix_alpha', 'self.cutmix_alpha'], {}), '(self.cutmix_alpha, self.cutmix_alpha)\n', (7663, 7701), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
@register_tensor_op("unsqueeze")
def unsqueeze_op(input, dim):
"""Returns a new tensor with a dimension of size one inserted at the
specified position.
The returned tensor shares the same underlying data with this tensor.
A :attr:`dim` value within the range `[-input.ndimension() - 1, input.ndimension() + 1)`
can be used. Negative :attr:`dim` will correspond to :meth:`unsqueeze`
applied at :attr:`dim` = ``dim + input.ndimension() + 1``.
Args:
input (Tensor): the input tensor.
dim (int): the index at which to insert the singleton dimension
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.Tensor(np.random.rand(2, 3, 4))
>>> y = x.unsqueeze(2)
>>> y.shape
flow.Size([2, 3, 1, 4])
"""
assert (
-(1 + input.ndimension()) <= dim <= input.ndimension()
), "dim should within the range [-input.ndimension() - 1, input.ndimension() + 1)"
if dim < 0:
dim = 1 + input.ndimension() + dim
return flow.F.expand_dims(input, dim)
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.F.expand_dims",
"oneflow.framework.tensor.register_tensor_op"
] | [((672, 703), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""unsqueeze"""'], {}), "('unsqueeze')\n", (690, 703), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((1764, 1794), 'oneflow.F.expand_dims', 'flow.F.expand_dims', (['input', 'dim'], {}), '(input, dim)\n', (1782, 1794), True, 'import oneflow as flow\n'), ((1848, 1884), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (1863, 1884), False, 'import doctest\n')] |
import numpy as np
import oneflow.experimental as flow
from .base import Distribution
# Public API of this module.
__all__ = [
    'Normal',
]
class Normal(Distribution):
    """Univariate Normal distribution.

    Parameterized via keyword arguments: ``mean`` plus exactly one of ``std``
    or ``logstd``. Scalar (int/float) ``std``/``logstd`` values are promoted
    to tensors with ``requires_grad=True``.
    """

    def __init__(self,
                 dtype=flow.float32,
                 param_dtype=flow.float32,
                 is_continues=True,
                 is_reparameterized=True,
                 group_ndims=0,
                 **kwargs):
        super(Normal, self).__init__(dtype,
                                     param_dtype,
                                     is_continues,
                                     is_reparameterized,
                                     group_ndims=group_ndims,
                                     **kwargs)
        try:
            # Prefer 'std' when supplied; scalars become trainable tensors.
            self._std = flow.cast(flow.Tensor([kwargs['std']], requires_grad=True), dtype=self.dtype) \
                if type(kwargs['std']) in [type(1.), type(1)] else kwargs['std']
            self._logstd = flow.log(self._std)
        except KeyError:
            # FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
            # and real oneflow errors. Only a missing 'std' key should fall
            # through to the 'logstd' parameterization.
            self._logstd = flow.cast(flow.Tensor([kwargs['logstd']], requires_grad=True), self.dtype) \
                if type(kwargs['logstd']) in [type(1.), type(1)] else kwargs['logstd']
            self._std = flow.exp(self._logstd)
        self._mean = kwargs['mean']

    @property
    def mean(self):
        """The mean of the Normal distribution."""
        return self._mean

    @property
    def logstd(self):
        """The log standard deviation of the Normal distribution."""
        try:
            return self._logstd
        except AttributeError:
            # FIX: narrowed from a bare `except:`. Lazily derive logstd from
            # std the first time it is requested.
            self._logstd = flow.log(self._std)
        return self._logstd

    @property
    def std(self):
        """The standard deviation of the Normal distribution."""
        return self._std

    def _sample(self, n_samples=1, **kwargs):
        """Draw samples via the reparameterization trick (mean + std * eps).

        When n_samples > 1, mean/std are tiled along a new leading axis; the
        non-reparameterized path detaches mean/std so no gradient flows.
        """
        if n_samples > 1:
            _shape = [n_samples]
            _shape = _shape + list(self._mean.shape)
            _len = len(self._std.shape)
            _std = flow.tile(self._std, reps=(n_samples, *_len*[1]))
            _mean = flow.tile(self._mean, reps=(n_samples, *_len*[1]))
        else:
            _shape = self._mean.shape
            # `+ 0.` makes fresh tensors so the originals are not aliased.
            _std = self._std + 0.
            _mean = self._mean + 0.
        if self.is_reparameterized:
            d = "cuda" if _mean.is_cuda else "cpu"
            epsilon = flow.Tensor(np.random.normal(size=tuple(list(_shape))), device=flow.device(d))
            sample_ = _mean + _std * epsilon
        else:
            _std = _std.detach()
            _mean = _mean.detach()
            d = "cuda" if _mean.is_cuda else "cpu"
            epsilon = flow.Tensor(np.random.normal(size=tuple(list(_shape))), device=flow.device(d))
            sample_ = _mean + _std * epsilon
        # Cache so _log_prob() can be called without an explicit sample.
        self.sample_cache = sample_
        if n_samples > 1:
            assert(sample_.shape[0] == n_samples)
        return sample_

    def _log_prob(self, sample=None):
        """Log density of `sample` (defaults to the cached sample) under N(mean, std)."""
        if sample is None:
            sample = self.sample_cache
        if sample.is_cuda:
            self._std = self._std.to('cuda')
            self._mean = self._mean.to('cuda')
        if len(sample.shape) > len(self._mean.shape):
            # Sample carries an extra leading n_samples axis; tile params to match.
            n_samples = sample.shape[0]
            _len = len(self._std.shape)
            _std = flow.tile(self._std, reps=(n_samples, *_len*[1]))
            _mean = flow.tile(self._mean, reps=(n_samples, *_len*[1]))
        else:
            _std = self._std
            _mean = self._mean
        ## Log Prob
        if not self.is_reparameterized:
            _mean = _mean.detach()
            _std = _std.detach()
        logstd = flow.log(_std)
        c = -0.5 * np.log(2 * np.pi)
        precision = flow.exp(-2 * logstd)
        return c - logstd - 0.5 * precision * flow.square(sample - _mean)
| [
"oneflow.experimental.log",
"oneflow.experimental.tile",
"oneflow.experimental.Tensor",
"oneflow.experimental.exp",
"oneflow.experimental.device",
"oneflow.experimental.square"
] | [((3512, 3526), 'oneflow.experimental.log', 'flow.log', (['_std'], {}), '(_std)\n', (3520, 3526), True, 'import oneflow.experimental as flow\n'), ((3584, 3605), 'oneflow.experimental.exp', 'flow.exp', (['(-2 * logstd)'], {}), '(-2 * logstd)\n', (3592, 3605), True, 'import oneflow.experimental as flow\n'), ((885, 904), 'oneflow.experimental.log', 'flow.log', (['self._std'], {}), '(self._std)\n', (893, 904), True, 'import oneflow.experimental as flow\n'), ((1897, 1950), 'oneflow.experimental.tile', 'flow.tile', (['self._std'], {'reps': '(n_samples, *(_len * [1]))'}), '(self._std, reps=(n_samples, *(_len * [1])))\n', (1906, 1950), True, 'import oneflow.experimental as flow\n'), ((1967, 2021), 'oneflow.experimental.tile', 'flow.tile', (['self._mean'], {'reps': '(n_samples, *(_len * [1]))'}), '(self._mean, reps=(n_samples, *(_len * [1])))\n', (1976, 2021), True, 'import oneflow.experimental as flow\n'), ((3169, 3222), 'oneflow.experimental.tile', 'flow.tile', (['self._std'], {'reps': '(n_samples, *(_len * [1]))'}), '(self._std, reps=(n_samples, *(_len * [1])))\n', (3178, 3222), True, 'import oneflow.experimental as flow\n'), ((3240, 3294), 'oneflow.experimental.tile', 'flow.tile', (['self._mean'], {'reps': '(n_samples, *(_len * [1]))'}), '(self._mean, reps=(n_samples, *(_len * [1])))\n', (3249, 3294), True, 'import oneflow.experimental as flow\n'), ((3546, 3563), 'numpy.log', 'np.log', (['(2 * np.pi)'], {}), '(2 * np.pi)\n', (3552, 3563), True, 'import numpy as np\n'), ((1136, 1158), 'oneflow.experimental.exp', 'flow.exp', (['self._logstd'], {}), '(self._logstd)\n', (1144, 1158), True, 'import oneflow.experimental as flow\n'), ((1502, 1521), 'oneflow.experimental.log', 'flow.log', (['self._std'], {}), '(self._std)\n', (1510, 1521), True, 'import oneflow.experimental as flow\n'), ((3656, 3683), 'oneflow.experimental.square', 'flow.square', (['(sample - _mean)'], {}), '(sample - _mean)\n', (3667, 3683), True, 'import oneflow.experimental as flow\n'), ((707, 755), 
'oneflow.experimental.Tensor', 'flow.Tensor', (["[kwargs['std']]"], {'requires_grad': '(True)'}), "([kwargs['std']], requires_grad=True)\n", (718, 755), True, 'import oneflow.experimental as flow\n'), ((2313, 2327), 'oneflow.experimental.device', 'flow.device', (['d'], {}), '(d)\n', (2324, 2327), True, 'import oneflow.experimental as flow\n'), ((2593, 2607), 'oneflow.experimental.device', 'flow.device', (['d'], {}), '(d)\n', (2604, 2607), True, 'import oneflow.experimental as flow\n'), ((958, 1009), 'oneflow.experimental.Tensor', 'flow.Tensor', (["[kwargs['logstd']]"], {'requires_grad': '(True)'}), "([kwargs['logstd']], requires_grad=True)\n", (969, 1009), True, 'import oneflow.experimental as flow\n')] |
from __future__ import print_function, absolute_import
import os
import gc
import sys
import time
import h5py
import scipy
import datetime
import argparse
import os.path as osp
import oneflow as flow
import oneflow.nn as nn
import shutil
import oneflow.typing as tp
from typing import Tuple
import oneflow.math as math
import numpy as np
import models
import models.getresnet as getresnet
import transforms.spatial_transforms as ST
import transforms.temporal_transforms as TT
import tools.data_manager as data_manager
from tools.video_loader import VideoDataset
from tools.losses import TestTripletLoss as TripletLoss
from tools.losses import _CrossEntropyLoss as _CrossEntropyLoss
from tools.utils import AverageMeter, Logger
from tools.eval_metrics import evaluate
from tools.samplers import RandomIdentitySampler
# Command-line configuration for the evaluation run.
parser = argparse.ArgumentParser(description='Test AP3D using all frames')
# Datasets
parser.add_argument('--root', type=str, default='/content/mars/')
parser.add_argument('-d', '--dataset', type=str, default='mars')
parser.add_argument('--height', type=int, default=256)
parser.add_argument('--width', type=int, default=128)
# Augment
parser.add_argument("--model_load_dir", type=str, default='/content/resnet_v15_of_best_model_val_top1_77318', required=False,
                    help="model load directory")
parser.add_argument('--seq_len', type=int, default=4,
                    help="number of images to sample in a tracklet")
parser.add_argument('--sample_stride', type=int, default=8,
                    help="stride of images to sample in a tracklet")
# Optimization options
parser.add_argument('--max_epoch', default=240, type=int)
parser.add_argument('--start_epoch', default=0, type=int)
parser.add_argument('--train_batch', default=32, type=int)
parser.add_argument('--test_batch', default=32, type=int)
parser.add_argument('--lr', default=0.0001, type=float)
parser.add_argument('--stepsize', default=[14100, 28200, 42300], nargs='+', type=int,
                    help="stepsize to decay learning rate")
parser.add_argument('--gamma', default=0.1, type=float,
                    help="learning rate decay")
parser.add_argument('--weight_decay', default=5e-04, type=float)
parser.add_argument('--margin', type=float, default=0.3,
                    help="margin for triplet loss")
parser.add_argument('--distance', type=str, default='cosine',
                    help="euclidean or cosine")
parser.add_argument('--num_instances', type=int, default=4,
                    help="number of instances per identity")
# Architecture
parser.add_argument('-a', '--arch', type=str, default='ap3dres50',
                    help="ap3dres50, ap3dnlres50")
# Miscs
parser.add_argument('--eval_step', type=int, default=10)
parser.add_argument('--start_eval', type=int, default=0,
                    help="start to evaluate after specific epoch")
parser.add_argument('--save_dir', type=str, default='log-mars-ap3d')
parser.add_argument('--gpu', default='0', type=str,
                    help='gpu device ids for CUDA_VISIBLE_DEVICES')
args = parser.parse_args()
# Static-graph input placeholders. Clips are laid out as
# (batch, channels=3, seq_len, height, width).
test_image = tp.Numpy.Placeholder((args.test_batch , 3, args.seq_len, args.height,args.width))
# NOTE(review): input_pid is declared but never used in the visible part of
# this evaluation script — confirm whether it can be removed.
input_pid = tp.Numpy.Placeholder((args.train_batch,))
func_config = flow.FunctionConfig()
# Restrict which GPUs oneflow may use before any job is built.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
func_config.default_data_type(flow.float)
dataset = data_manager.init_dataset(name=args.dataset, root=args.root)
@flow.global_function(function_config=func_config)
def gallery_job(
    image:test_image
)->tp.Numpy:
    """Static-graph job: extract batch-normalized clip features for gallery clips."""
    net = models.init_model(
        name=args.arch,
        num_classes=dataset.num_gallery_pids,
        training=False,
        resnetblock=getresnet,
    )
    clip_feat = net.build_network(image)
    # Average over axis 1 (presumably the frame/temporal axis — confirm
    # against build_network's output layout).
    pooled = math.reduce_mean(clip_feat, 1)
    normalized = flow.layers.batch_normalization(
        inputs=pooled,
        axis=1,
        momentum=0.997,
        epsilon=1.001e-5,
        center=True,
        scale=True,
        trainable=False,
        name="gallery_feature_bn",
    )
    return normalized
@flow.global_function(function_config=func_config)
def query_job(image: test_image) -> tp.Numpy:
    """Extract batch-normalized, temporally averaged features for query clips."""
    net = models.init_model(
        name=args.arch,
        num_classes=dataset.num_query_pids,
        training=False,
        resnetblock=getresnet,
    )
    # Average per-frame features over the temporal axis.
    clip_feat = math.reduce_mean(net.build_network(image), 1)
    return flow.layers.batch_normalization(
        inputs=clip_feat,
        axis=1,
        momentum=0.997,
        epsilon=1.001e-5,
        center=True,
        scale=True,
        trainable=False,
        name="query_feature_bn",
    )
def getDataSets(dataset):
    """Build query and gallery VideoDatasets sharing the same test-time transforms."""
    spatial = ST.Compose([
        ST.Scale((args.height, args.width), interpolation=3),
        ST.ToNumpy(),
        ST.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ])
    temporal = TT.TemporalBeginCrop()

    def make(tracklets):
        # Both splits use identical preprocessing.
        return VideoDataset(
            tracklets,
            spatial_transform=spatial,
            temporal_transform=temporal,
        )

    return make(dataset.query), make(dataset.gallery)
def addmm(mat, mat1, mat2, beta=1, alpha=1):
    """NumPy analogue of ``torch.addmm``: return ``beta * mat + alpha * (mat1 @ mat2)``.

    Args:
        mat: matrix added to the scaled product.
        mat1, mat2: operands of the matrix product.
        beta: scale for ``mat``.
        alpha: scale for ``mat1 @ mat2``.

    Returns:
        ``beta * mat + alpha * np.matmul(mat1, mat2)``.
    """
    # BUG FIX: the original computed np.matmul(mat, mat2), ignoring mat1
    # entirely; with the caller's shapes ((1, n) @ (d, n)) that does not even
    # multiply.  torch.addmm semantics use mat1 @ mat2.
    return beta * mat + alpha * np.matmul(mat1, mat2)
def main():
    """Restore the trained model from disk and evaluate it on the test split."""
    # Mirror stdout into a log file under the save directory.
    sys.stdout = Logger(osp.join(args.save_dir, 'log_test.txt'))
    print("==========\nArgs:{}\n==========".format(args))
    assert os.path.isdir(args.model_load_dir)
    print("Restoring model from {}.".format(args.model_load_dir))
    checkpoint=flow.train.CheckPoint()
    checkpoint.load(args.model_load_dir)
    queryset,galleryset=getDataSets(dataset)
    print("==> Test")
    # rank1 is computed but not used further; test() also prints the metrics.
    rank1=test(queryset,galleryset,dataset)
def test(queryset, galleryset, dataset,ranks=[1, 5, 10, 20]):
    """Extract query/gallery features, build a distance matrix, report CMC/mAP.

    Args:
        queryset, galleryset: datasets providing __getbatch__ over tracklets.
        dataset: descriptor with .query / .gallery tracklet lists.
        ranks: CMC ranks of interest.  NOTE: mutable default, but it is never
            mutated here so it is harmless (a tuple would still be safer).

    Returns:
        Top-1 CMC accuracy (cmc[0]).
    """
    since=time.time()
    qf, q_pids, q_camids = [], [], []
    batch_size=args.test_batch
    query_img, query_id, query_cam_id = map(list, zip(*dataset.query))
    indicies=np.arange(len(query_id))
    for i in range(len(indicies) // batch_size):
        try:
            test_batch = queryset.__getbatch__(indicies[i * batch_size:(i + 1) * batch_size])
        # NOTE(review): bare except hides all failures and re-reads the last
        # full batch instead, so some clips may be scored twice — confirm
        # this tail-batch behavior is intended.
        except:
            test_batch = queryset.__getbatch__(indicies[-batch_size:])
        feat=query_job(test_batch[0])
        qf.append(feat)
        q_pids.extend(test_batch[1].astype(np.float32))
        q_camids.extend(test_batch[2])
    qf=np.concatenate(qf,0)
    q_pids=np.asarray(q_pids)
    q_camids=np.asarray(q_camids)
    print("Extracted features for query set, obtained {} matrix".format(qf.shape))
    gf, g_pids, g_camids = [], [], []
    gallery_img, gallery_id, gallery_cam_id = map(list, zip(*dataset.gallery))
    indicies=np.arange(len(gallery_id))
    for i in range(len(indicies) // batch_size):
        try:
            gallery_batch = galleryset.__getbatch__(indicies[i * batch_size:(i + 1) * batch_size])
        except:
            gallery_batch = galleryset.__getbatch__(indicies[-batch_size:])
        # NOTE(review): gallery frames go through query_job; gallery_job above
        # is never called.  The feature path is presumably identical, but
        # confirm this is intentional.
        feat=query_job(gallery_batch[0])
        gf.append(feat)
        g_pids.extend(gallery_batch[1].astype(np.float32))
        g_camids.extend(gallery_batch[2])
    gf=np.concatenate(gf,0)
    g_pids = np.asarray(g_pids)
    g_camids = np.asarray(g_camids)
    # For MARS, the gallery additionally includes all query clips.
    if args.dataset == 'mars':
        gf=np.concatenate((qf,gf),0)
        g_pids = np.append(q_pids, g_pids)
        g_camids = np.append(q_camids, g_camids)
    print("Extracted features for gallery set, obtained {} matrix".format(gf.shape))
    time_elapsed = time.time() - since
    print('Extracting features complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print("Computing distance matrix")
    m, n = qf.shape[0], gf.shape[0]
    distmat = np.zeros((m,n))
    if args.distance== 'euclidean':
        # Squared Euclidean distance: ||q||^2 + ||g||^2 - 2*q.g, assembled
        # row by row via addmm (alpha=-2, beta=1).
        distmat1=np.power(qf,2)
        distmat2=np.power(gf,2)
        distmat1=np.sum(distmat1,axis=1,keepdims=True)
        distmat2=np.sum(distmat2,axis=1,keepdims=True)
        distmat1=np.broadcast_to(distmat1,(m,n))
        distmat2=np.broadcast_to(distmat2,(n,m))
        distmat2=np.transpose(distmat2)
        distmat=distmat2+distmat1
        tempgf=np.transpose(gf)
        for i in range(m):
            distmat[i:i+1]=addmm(
                distmat[i:i+1],qf[i:i+1],tempgf,1,-2
            )
    else:
        # Cosine distance: L2-normalize both sides, then negate the dot product.
        q_norm=np.linalg.norm(qf,ord=2,axis=1,keepdims=True)
        g_norm=np.linalg.norm(gf,ord=2,axis=1,keepdims=True)
        q_norm=np.broadcast_to(q_norm,qf.shape)
        g_norm=np.broadcast_to(g_norm,gf.shape)
        gf=np.divide(gf,g_norm)
        qf=np.divide(qf,q_norm)
        tempgf=np.transpose(gf)
        for i in range(m):
            distmat[i] = - np.matmul(qf[i:i+1],tempgf)
    print("Computing CMC and mAP")
    cmc, mAP = evaluate(distmat, q_pids, g_pids, q_camids, g_camids)
    print("Results ----------")
    print('top1:{:.1%} top5:{:.1%} top10:{:.1%} mAP:{:.1%}'.format(cmc[0],cmc[4],cmc[9],mAP))
    print("------------------")
    return cmc[0]
# Script entry point.
if __name__ == '__main__':
    main()
| [
"oneflow.train.CheckPoint",
"oneflow.math.reduce_mean",
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.layers.batch_normalization",
"oneflow.FunctionConfig"
] | [((825, 890), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test AP3D using all frames"""'}), "(description='Test AP3D using all frames')\n", (848, 890), False, 'import argparse\n'), ((3129, 3215), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(args.test_batch, 3, args.seq_len, args.height, args.width)'], {}), '((args.test_batch, 3, args.seq_len, args.height, args.\n width))\n', (3149, 3215), True, 'import oneflow.typing as tp\n'), ((3223, 3264), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(args.train_batch,)'], {}), '((args.train_batch,))\n', (3243, 3264), True, 'import oneflow.typing as tp\n'), ((3281, 3302), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3300, 3302), True, 'import oneflow as flow\n'), ((3401, 3461), 'tools.data_manager.init_dataset', 'data_manager.init_dataset', ([], {'name': 'args.dataset', 'root': 'args.root'}), '(name=args.dataset, root=args.root)\n', (3426, 3461), True, 'import tools.data_manager as data_manager\n'), ((3465, 3514), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3485, 3514), True, 'import oneflow as flow\n'), ((4283, 4332), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4303, 4332), True, 'import oneflow as flow\n'), ((3579, 3693), 'models.init_model', 'models.init_model', ([], {'name': 'args.arch', 'num_classes': 'dataset.num_gallery_pids', 'training': '(False)', 'resnetblock': 'getresnet'}), '(name=args.arch, num_classes=dataset.num_gallery_pids,\n training=False, resnetblock=getresnet)\n', (3596, 3693), False, 'import models\n'), ((3733, 3758), 'oneflow.math.reduce_mean', 'math.reduce_mean', (['feat', '(1)'], {}), '(feat, 1)\n', (3749, 3758), True, 'import oneflow.math as math\n'), ((3767, 3932), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', 
([], {'inputs': 'feat', 'axis': '(1)', 'momentum': '(0.997)', 'epsilon': '(1.001e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': '(False)', 'name': '"""gallery_feature_bn"""'}), "(inputs=feat, axis=1, momentum=0.997,\n epsilon=1.001e-05, center=True, scale=True, trainable=False, name=\n 'gallery_feature_bn')\n", (3798, 3932), True, 'import oneflow as flow\n'), ((4395, 4507), 'models.init_model', 'models.init_model', ([], {'name': 'args.arch', 'num_classes': 'dataset.num_query_pids', 'training': '(False)', 'resnetblock': 'getresnet'}), '(name=args.arch, num_classes=dataset.num_query_pids,\n training=False, resnetblock=getresnet)\n', (4412, 4507), False, 'import models\n'), ((4547, 4572), 'oneflow.math.reduce_mean', 'math.reduce_mean', (['feat', '(1)'], {}), '(feat, 1)\n', (4563, 4572), True, 'import oneflow.math as math\n'), ((4581, 4745), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'feat', 'momentum': '(0.997)', 'epsilon': '(1.001e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': '(False)', 'axis': '(1)', 'name': '"""query_feature_bn"""'}), "(inputs=feat, momentum=0.997, epsilon=\n 1.001e-05, center=True, scale=True, trainable=False, axis=1, name=\n 'query_feature_bn')\n", (4612, 4745), True, 'import oneflow as flow\n'), ((5389, 5411), 'transforms.temporal_transforms.TemporalBeginCrop', 'TT.TemporalBeginCrop', ([], {}), '()\n', (5409, 5411), True, 'import transforms.temporal_transforms as TT\n'), ((5427, 5544), 'tools.video_loader.VideoDataset', 'VideoDataset', (['dataset.query'], {'spatial_transform': 'spatial_transform_test', 'temporal_transform': 'temporal_transform_test'}), '(dataset.query, spatial_transform=spatial_transform_test,\n temporal_transform=temporal_transform_test)\n', (5439, 5544), False, 'from tools.video_loader import VideoDataset\n'), ((5558, 5677), 'tools.video_loader.VideoDataset', 'VideoDataset', (['dataset.gallery'], {'spatial_transform': 'spatial_transform_test', 
'temporal_transform': 'temporal_transform_test'}), '(dataset.gallery, spatial_transform=spatial_transform_test,\n temporal_transform=temporal_transform_test)\n', (5570, 5677), False, 'from tools.video_loader import VideoDataset\n'), ((5760, 5780), 'numpy.matmul', 'np.matmul', (['mat', 'mat2'], {}), '(mat, mat2)\n', (5769, 5780), True, 'import numpy as np\n'), ((5975, 6009), 'os.path.isdir', 'os.path.isdir', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (5988, 6009), False, 'import os\n'), ((6091, 6114), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (6112, 6114), True, 'import oneflow as flow\n'), ((6348, 6359), 'time.time', 'time.time', ([], {}), '()\n', (6357, 6359), False, 'import time\n'), ((6945, 6966), 'numpy.concatenate', 'np.concatenate', (['qf', '(0)'], {}), '(qf, 0)\n', (6959, 6966), True, 'import numpy as np\n'), ((6977, 6995), 'numpy.asarray', 'np.asarray', (['q_pids'], {}), '(q_pids)\n', (6987, 6995), True, 'import numpy as np\n'), ((7009, 7029), 'numpy.asarray', 'np.asarray', (['q_camids'], {}), '(q_camids)\n', (7019, 7029), True, 'import numpy as np\n'), ((7701, 7722), 'numpy.concatenate', 'np.concatenate', (['gf', '(0)'], {}), '(gf, 0)\n', (7715, 7722), True, 'import numpy as np\n'), ((7735, 7753), 'numpy.asarray', 'np.asarray', (['g_pids'], {}), '(g_pids)\n', (7745, 7753), True, 'import numpy as np\n'), ((7769, 7789), 'numpy.asarray', 'np.asarray', (['g_camids'], {}), '(g_camids)\n', (7779, 7789), True, 'import numpy as np\n'), ((8272, 8288), 'numpy.zeros', 'np.zeros', (['(m, n)'], {}), '((m, n))\n', (8280, 8288), True, 'import numpy as np\n'), ((9287, 9340), 'tools.eval_metrics.evaluate', 'evaluate', (['distmat', 'q_pids', 'g_pids', 'q_camids', 'g_camids'], {}), '(distmat, q_pids, g_pids, q_camids, g_camids)\n', (9295, 9340), False, 'from tools.eval_metrics import evaluate\n'), ((5865, 5904), 'os.path.join', 'osp.join', (['args.save_dir', '"""log_test.txt"""'], {}), "(args.save_dir, 'log_test.txt')\n", (5873, 
5904), True, 'import os.path as osp\n'), ((7832, 7859), 'numpy.concatenate', 'np.concatenate', (['(qf, gf)', '(0)'], {}), '((qf, gf), 0)\n', (7846, 7859), True, 'import numpy as np\n'), ((7875, 7900), 'numpy.append', 'np.append', (['q_pids', 'g_pids'], {}), '(q_pids, g_pids)\n', (7884, 7900), True, 'import numpy as np\n'), ((7920, 7949), 'numpy.append', 'np.append', (['q_camids', 'g_camids'], {}), '(q_camids, g_camids)\n', (7929, 7949), True, 'import numpy as np\n'), ((8055, 8066), 'time.time', 'time.time', ([], {}), '()\n', (8064, 8066), False, 'import time\n'), ((8341, 8356), 'numpy.power', 'np.power', (['qf', '(2)'], {}), '(qf, 2)\n', (8349, 8356), True, 'import numpy as np\n'), ((8373, 8388), 'numpy.power', 'np.power', (['gf', '(2)'], {}), '(gf, 2)\n', (8381, 8388), True, 'import numpy as np\n'), ((8405, 8444), 'numpy.sum', 'np.sum', (['distmat1'], {'axis': '(1)', 'keepdims': '(True)'}), '(distmat1, axis=1, keepdims=True)\n', (8411, 8444), True, 'import numpy as np\n'), ((8460, 8499), 'numpy.sum', 'np.sum', (['distmat2'], {'axis': '(1)', 'keepdims': '(True)'}), '(distmat2, axis=1, keepdims=True)\n', (8466, 8499), True, 'import numpy as np\n'), ((8515, 8548), 'numpy.broadcast_to', 'np.broadcast_to', (['distmat1', '(m, n)'], {}), '(distmat1, (m, n))\n', (8530, 8548), True, 'import numpy as np\n'), ((8564, 8597), 'numpy.broadcast_to', 'np.broadcast_to', (['distmat2', '(n, m)'], {}), '(distmat2, (n, m))\n', (8579, 8597), True, 'import numpy as np\n'), ((8613, 8635), 'numpy.transpose', 'np.transpose', (['distmat2'], {}), '(distmat2)\n', (8625, 8635), True, 'import numpy as np\n'), ((8685, 8701), 'numpy.transpose', 'np.transpose', (['gf'], {}), '(gf)\n', (8697, 8701), True, 'import numpy as np\n'), ((8856, 8904), 'numpy.linalg.norm', 'np.linalg.norm', (['qf'], {'ord': '(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(qf, ord=2, axis=1, keepdims=True)\n', (8870, 8904), True, 'import numpy as np\n'), ((8917, 8965), 'numpy.linalg.norm', 'np.linalg.norm', (['gf'], {'ord': 
'(2)', 'axis': '(1)', 'keepdims': '(True)'}), '(gf, ord=2, axis=1, keepdims=True)\n', (8931, 8965), True, 'import numpy as np\n'), ((8978, 9011), 'numpy.broadcast_to', 'np.broadcast_to', (['q_norm', 'qf.shape'], {}), '(q_norm, qf.shape)\n', (8993, 9011), True, 'import numpy as np\n'), ((9026, 9059), 'numpy.broadcast_to', 'np.broadcast_to', (['g_norm', 'gf.shape'], {}), '(g_norm, gf.shape)\n', (9041, 9059), True, 'import numpy as np\n'), ((9070, 9091), 'numpy.divide', 'np.divide', (['gf', 'g_norm'], {}), '(gf, g_norm)\n', (9079, 9091), True, 'import numpy as np\n'), ((9102, 9123), 'numpy.divide', 'np.divide', (['qf', 'q_norm'], {}), '(qf, q_norm)\n', (9111, 9123), True, 'import numpy as np\n'), ((9138, 9154), 'numpy.transpose', 'np.transpose', (['gf'], {}), '(gf)\n', (9150, 9154), True, 'import numpy as np\n'), ((5185, 5237), 'transforms.spatial_transforms.Scale', 'ST.Scale', (['(args.height, args.width)'], {'interpolation': '(3)'}), '((args.height, args.width), interpolation=3)\n', (5193, 5237), True, 'import transforms.spatial_transforms as ST\n'), ((5255, 5267), 'transforms.spatial_transforms.ToNumpy', 'ST.ToNumpy', ([], {}), '()\n', (5265, 5267), True, 'import transforms.spatial_transforms as ST\n'), ((5285, 5343), 'transforms.spatial_transforms.Normalize', 'ST.Normalize', (['[0.485, 0.456, 0.406]', '[0.229, 0.224, 0.225]'], {}), '([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n', (5297, 5343), True, 'import transforms.spatial_transforms as ST\n'), ((9209, 9239), 'numpy.matmul', 'np.matmul', (['qf[i:i + 1]', 'tempgf'], {}), '(qf[i:i + 1], tempgf)\n', (9218, 9239), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# RUN: python3 %s | FileCheck %s
# CHECK-NOT: oneflow.bias_add
import unittest
import numpy as np
import os
os.environ["ONEFLOW_MLIR_ENABLE_ROUND_TRIP"] = "1"
import oneflow as flow
import oneflow.unittest
import oneflow.sysconfig
def do_bias_add_dropout_graph(test_case, with_cuda, prob):
    """Check that graph (lazy) execution of bias_add + dropout matches eager mode.

    Args:
        test_case: the unittest.TestCase running the assertions.
        with_cuda: if True, move tensors and the dropout module to GPU.
        prob: dropout probability (the caller passes 1.0, which makes the
            dropout output deterministic so exact comparison is possible).
    """
    x = flow.randn(2, 3, 4, 5)
    bias = flow.randn(5)
    dropout = flow.nn.Dropout(p=prob)
    if with_cuda:
        x = x.cuda()
        bias = bias.to("cuda")
        dropout.to("cuda")
    # Eager-mode reference result.
    eager_res = dropout(flow._C.bias_add(x, bias, axis=3))
    class GraphToRun(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.dropout = dropout
        def build(self, x, bias):
            return self.dropout(flow._C.bias_add(x, bias, axis=3))
    graph_to_run = GraphToRun()
    lazy_res = graph_to_run(x, bias)
    # Lazy and eager paths must agree exactly.
    test_case.assertTrue(np.array_equal(eager_res.numpy(), lazy_res.numpy()))
# Runs only in a single-node/single-device setup with a CUDA build.
@flow.unittest.skip_unless_1n1d()
@unittest.skipUnless(oneflow.sysconfig.with_cuda(), "needs -DBUILD_CUDA=ON")
class TestBiasAddDropout(oneflow.unittest.TestCase):
    def test_bias_add_dropout_graph(test_case):
        # prob=1.0 keeps dropout deterministic for exact comparison.
        do_bias_add_dropout_graph(test_case, True, 1.0)
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.randn",
"oneflow.nn.Dropout",
"oneflow._C.bias_add"
] | [((1522, 1554), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1552, 1554), True, 'import oneflow as flow\n'), ((894, 916), 'oneflow.randn', 'flow.randn', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (904, 916), True, 'import oneflow as flow\n'), ((928, 941), 'oneflow.randn', 'flow.randn', (['(5)'], {}), '(5)\n', (938, 941), True, 'import oneflow as flow\n'), ((956, 979), 'oneflow.nn.Dropout', 'flow.nn.Dropout', ([], {'p': 'prob'}), '(p=prob)\n', (971, 979), True, 'import oneflow as flow\n'), ((1822, 1837), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1835, 1837), False, 'import unittest\n'), ((1102, 1135), 'oneflow._C.bias_add', 'flow._C.bias_add', (['x', 'bias'], {'axis': '(3)'}), '(x, bias, axis=3)\n', (1118, 1135), True, 'import oneflow as flow\n'), ((1336, 1369), 'oneflow._C.bias_add', 'flow._C.bias_add', (['x', 'bias'], {'axis': '(3)'}), '(x, bias, axis=3)\n', (1352, 1369), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
def TestListDataTypeAndListShapeAndListStringAttr(
    input, out_shapes, out_types, string_list
):
    """Invoke the custom test op exercising list-typed user-op attributes.

    Args:
        input: blob fed to the op's single "in" input.
        out_shapes: list of output shapes (list-shape attribute).
        out_types: list of oneflow dtypes (list-data-type attribute).
        string_list: list of strings (list-string attribute).

    Returns:
        The op's three output blobs.
    """
    assert isinstance(out_shapes, list)
    assert isinstance(out_types, list)
    return (
        flow.user_op_builder("TestListDataTypeAndListShapeAndListStringAttr")
        .Op("TestListDataTypeAndListShapeAndListStringAttr")
        .Input("in", [input])
        .Output("out", 3)
        .Attr("out_shapes", out_shapes)
        .Attr("out_types", out_types)
        .Attr("string_list", string_list)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
def RunTest(out_shapes, out_types):
    """Run the list-attr test op on random input and verify output shapes/dtypes.

    Args:
        out_shapes: expected shapes of the three outputs.
        out_types: type names (strings) mapped to oneflow/numpy dtypes.
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    @flow.global_function(function_config=func_config)
    def TestListDataTypeAndListShapeAndListStringAttrJob(
        input: oft.Numpy.Placeholder((10, 10), dtype=flow.float)
    ):
        return TestListDataTypeAndListShapeAndListStringAttr(
            input,
            out_shapes,
            [type_name_to_flow_type[data_type] for data_type in out_types],
            ["string1", "string2", "string3"],
        )
    # NOTE: `input` shadows the builtin of the same name (kept for byte-compat).
    input = np.random.random_sample((10, 10)).astype(np.float32)
    outputs = [
        x.numpy() for x in TestListDataTypeAndListShapeAndListStringAttrJob(input).get()
    ]
    # Each output must carry the requested shape and dtype.
    for i in range(len(outputs)):
        assert outputs[i].shape == out_shapes[i]
        assert outputs[i].dtype == type_name_to_np_type[out_types[i]]
def gen_arg_list():
    """Produce the (out_shapes, out_types) combinations to test."""
    # "float16" and "char" are deliberately excluded: ForeignOutputKernel is
    # known to be buggy for those dtypes (see original TODO).
    arg_dict = OrderedDict(
        out_shapes=[[(4, 4), (6, 6), (8, 8)]],
        out_types=[["float32", "double", "int8"], ["int32", "int64", "uint8"]],
    )
    return GenArgList(arg_dict)
# Runs only in a single-node/single-device setup.
@flow.unittest.skip_unless_1n1d()
class Test_TestListDataTypeAndListShapeAndListStringAttr(flow.unittest.TestCase):
    def test_data_type_attr(test_case):
        # Exercise every (out_shapes, out_types) combination.
        for arg in gen_arg_list():
            RunTest(*arg)
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.user_op_builder",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.single_client.typing.Numpy.Placeholder",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_c... | [((2692, 2724), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2722, 2724), True, 'from oneflow.compatible import single_client as flow\n'), ((1480, 1508), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1506, 1508), True, 'from oneflow.compatible import single_client as flow\n'), ((1527, 1548), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1546, 1548), True, 'from oneflow.compatible import single_client as flow\n'), ((1601, 1650), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1621, 1650), True, 'from oneflow.compatible import single_client as flow\n'), ((2386, 2399), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2397, 2399), False, 'from collections import OrderedDict\n'), ((2668, 2688), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2678, 2688), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((2941, 2956), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2954, 2956), False, 'import unittest\n'), ((1724, 1773), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10, 10)'], {'dtype': 'flow.float'}), '((10, 10), dtype=flow.float)\n', (1745, 1773), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2032, 2065), 'numpy.random.random_sample', 'np.random.random_sample', (['(10, 10)'], {}), '((10, 10))\n', (2055, 2065), True, 'import numpy as np\n'), ((1056, 1125), 'oneflow.compatible.single_client.user_op_builder', 'flow.user_op_builder', (['"""TestListDataTypeAndListShapeAndListStringAttr"""'], {}), "('TestListDataTypeAndListShapeAndListStringAttr')\n", (1076, 1125), True, 'from oneflow.compatible import 
single_client as flow\n')] |
import numpy as np
import argparse
import cv2
import os
import oneflow as flow
import oneflow.typing as tp
import networks
def load_image2ndarray(image_path, resize):
    """Load an image as a normalized NCHW float32 batch of one.

    Returns:
        (array of shape (1, 3, resize, resize) scaled to [-1, 1],
         original height, original width).
    """
    bgr = cv2.imread(image_path)
    orig_h, orig_w, _ = bgr.shape
    # Resize to the network input size and switch BGR -> RGB.
    rgb = cv2.cvtColor(
        cv2.resize(bgr, (resize, resize), interpolation=cv2.INTER_CUBIC),
        cv2.COLOR_BGR2RGB,
    )
    chw = np.transpose(rgb, (2, 0, 1)).astype(np.float32)
    chw = (chw / 255.0 - 0.5) / 0.5
    batch = np.expand_dims(chw, axis=0)
    return np.ascontiguousarray(batch, 'float32'), orig_h, orig_w
def ndarray2image(im):
    """Convert a normalized (1, 3, H, W) array back to a BGR uint8 image."""
    hwc = np.transpose(np.squeeze(im), (1, 2, 0))
    # Undo the [-1, 1] normalization back to [0, 255].
    scaled = (hwc + 1) / 2.0 * 255.0
    bgr = cv2.cvtColor(np.float32(scaled), cv2.COLOR_RGB2BGR)
    return bgr.astype(np.uint8)
def get_test_config():
    """Return an inference FunctionConfig: float32 default dtype, consistent view."""
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    func_config.default_logical_view(flow.scope.consistent_view())
    return func_config
def main(args):
    """Run the trained CycleGAN generator over every image in args.input_images.

    Writes concatenated (input | translated) images to args.output_images.
    """
    # Choose the generator by translation direction.
    netG_name = "netG_A" if args.direction == "A2B" else "netG_B"
    @flow.global_function("predict", get_test_config())
    def TestGenerator(
        real: tp.Numpy.Placeholder((1, 3, args.network_input_size, args.network_input_size), dtype = flow.float32)) -> tp.Numpy:
        with flow.scope.placement("gpu", "0:0-0"):
            fake = networks.define_G(real, netG_name, ngf = 64, n_blocks = 9, trainable = False, reuse = True)
        return fake
    check_point = flow.train.CheckPoint()
    assert args.checkpoint_load_dir != ""
    check_point.load(args.checkpoint_load_dir)
    in_images = os.listdir(args.input_images)
    for i in in_images:
        input_image, org_height, org_width = load_image2ndarray("%s/%s" % (args.input_images, i), args.network_input_size)
        output = ndarray2image(TestGenerator(input_image))
        # Restore the generator output to the original resolution.
        output = cv2.resize(output, (org_width, org_height), interpolation = cv2.INTER_CUBIC)
        # NOTE(review): concatenation along axis=1 (width) requires the resized
        # output height to equal the network-input-sized preview height, i.e.
        # org_height == args.network_input_size — confirm input images satisfy this.
        output = np.concatenate((ndarray2image(input_image), output), axis = 1)
        cv2.imwrite("%s/%s" % (args.output_images, i), output)
def get_parser(parser=None):
    """Build (or extend) the argument parser for CycleGAN testing.

    Args:
        parser: an existing argparse.ArgumentParser to register the flags on;
            a new one is created when None.  (The original implementation
            accepted this parameter but silently ignored it.)

    Returns:
        The argparse.ArgumentParser with the test flags registered.
    """
    if parser is None:
        parser = argparse.ArgumentParser("flags for test CycleGan")
    parser.add_argument("--checkpoint_load_dir", type=str, default="",
                        help="load previous saved checkpoint from.")
    parser.add_argument("--input_images", type=str, default="", help="")
    parser.add_argument("--output_images", type=str, default="", help="")
    parser.add_argument("--network_input_size", type=int, default=256, help="")
    parser.add_argument("--direction", type=str, default="A2B",
                        help="'A2B' or 'B2A' .Transform image from domain A to domain B or reverse.")
    return parser
# Script entry point: parse CLI flags and run inference.
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)
| [
"oneflow.train.CheckPoint",
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.placement",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((179, 201), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (189, 201), False, 'import cv2\n'), ((250, 313), 'cv2.resize', 'cv2.resize', (['im', '(resize, resize)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(im, (resize, resize), interpolation=cv2.INTER_CUBIC)\n', (260, 313), False, 'import cv2\n'), ((325, 360), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (337, 360), False, 'import cv2\n'), ((370, 397), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (382, 397), True, 'import numpy as np\n'), ((462, 488), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (476, 488), True, 'import numpy as np\n'), ((584, 598), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (594, 598), True, 'import numpy as np\n'), ((786, 807), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (805, 807), True, 'import oneflow as flow\n'), ((1440, 1463), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (1461, 1463), True, 'import oneflow as flow\n'), ((1570, 1599), 'os.listdir', 'os.listdir', (['args.input_images'], {}), '(args.input_images)\n', (1580, 1599), False, 'import os\n'), ((2089, 2139), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for test CycleGan"""'], {}), "('flags for test CycleGan')\n", (2112, 2139), False, 'import argparse\n'), ((500, 535), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (520, 535), True, 'import numpy as np\n'), ((678, 692), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (688, 692), True, 'import numpy as np\n'), ((893, 921), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (919, 921), True, 'import oneflow as flow\n'), ((1824, 1898), 'cv2.resize', 'cv2.resize', (['output', '(org_width, org_height)'], {'interpolation': 'cv2.INTER_CUBIC'}), '(output, (org_width, 
org_height), interpolation=cv2.INTER_CUBIC)\n', (1834, 1898), False, 'import cv2\n'), ((1989, 2043), 'cv2.imwrite', 'cv2.imwrite', (["('%s/%s' % (args.output_images, i))", 'output'], {}), "('%s/%s' % (args.output_images, i), output)\n", (2000, 2043), False, 'import cv2\n'), ((1124, 1227), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, args.network_input_size, args.network_input_size)'], {'dtype': 'flow.float32'}), '((1, 3, args.network_input_size, args.\n network_input_size), dtype=flow.float32)\n', (1144, 1227), True, 'import oneflow.typing as tp\n'), ((1252, 1288), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-0"""'], {}), "('gpu', '0:0-0')\n", (1272, 1288), True, 'import oneflow as flow\n'), ((1309, 1396), 'networks.define_G', 'networks.define_G', (['real', 'netG_name'], {'ngf': '(64)', 'n_blocks': '(9)', 'trainable': '(False)', 'reuse': '(True)'}), '(real, netG_name, ngf=64, n_blocks=9, trainable=False,\n reuse=True)\n', (1326, 1396), False, 'import networks\n'), ((609, 636), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (621, 636), True, 'import numpy as np\n')] |
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function(type="train")
def train_job(
maskedimg: tp.Numpy.Placeholder((6, 3, 256, 256), dtype=flow.float),
masks: tp.Numpy.Placeholder((6, 3, 256, 256), dtype=flow.float),
images: tp.Numpy.Placeholder((6, 3, 256, 256), dtype=flow.float),
) -> tp.Numpy:
init = RFRNetModel()
ret, mmask, fake, comp = init.buildnet(maskedimg, masks, images)
loss = init.get_g_loss(ret, mmask, fake, comp)
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [2e-4])
# Set Adam optimizer
flow.optimizer.Adam(lr_scheduler, do_bias_correction=False).minimize(loss)
return loss
# Smoke-test the training job with one batch of random data.
x = np.random.randn(6, 3, 256, 256).astype(np.float32)
y = np.random.randn(6, 3, 256, 256).astype(np.float32)
z = np.random.randn(6, 3, 256, 256).astype(np.float32)
kk = train_job(x,y,z)
print(kk.shape)
print(kk) | [
"oneflow.global_function",
"oneflow.optimizer.Adam",
"oneflow.typing.Numpy.Placeholder",
"oneflow.optimizer.PiecewiseConstantScheduler"
] | [((77, 111), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""'}), "(type='train')\n", (97, 111), True, 'import oneflow as flow\n'), ((551, 606), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0002]'], {}), '([], [0.0002])\n', (592, 606), True, 'import oneflow as flow\n'), ((152, 208), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(6, 3, 256, 256)'], {'dtype': 'flow.float'}), '((6, 3, 256, 256), dtype=flow.float)\n', (172, 208), True, 'import oneflow.typing as tp\n'), ((230, 286), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(6, 3, 256, 256)'], {'dtype': 'flow.float'}), '((6, 3, 256, 256), dtype=flow.float)\n', (250, 286), True, 'import oneflow.typing as tp\n'), ((309, 365), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(6, 3, 256, 256)'], {'dtype': 'flow.float'}), '((6, 3, 256, 256), dtype=flow.float)\n', (329, 365), True, 'import oneflow.typing as tp\n'), ((737, 768), 'numpy.random.randn', 'np.random.randn', (['(6)', '(3)', '(256)', '(256)'], {}), '(6, 3, 256, 256)\n', (752, 768), True, 'import numpy as np\n'), ((793, 824), 'numpy.random.randn', 'np.random.randn', (['(6)', '(3)', '(256)', '(256)'], {}), '(6, 3, 256, 256)\n', (808, 824), True, 'import numpy as np\n'), ((849, 880), 'numpy.random.randn', 'np.random.randn', (['(6)', '(3)', '(256)', '(256)'], {}), '(6, 3, 256, 256)\n', (864, 880), True, 'import numpy as np\n'), ((636, 695), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', (['lr_scheduler'], {'do_bias_correction': '(False)'}), '(lr_scheduler, do_bias_correction=False)\n', (655, 695), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import oneflow
import oneflow.experimental as flow
import oneflow.python.framework.session_context as session_ctx
from oneflow.python.framework.multi_client_session import MultiClientSession
class TestMultiClientSession(unittest.TestCase):
    """Exercise the lifecycle of the default multi-client session."""

    def _checked_default_session(self):
        """Assert multi-client mode, then fetch and type-check the default session."""
        self.assertTrue(flow.distributed.is_multi_client())
        session = session_ctx.GetDefaultSession()
        self.assertTrue(isinstance(session, MultiClientSession))
        return session

    def test_case1(self):
        # Initializing the default session must move it into the INITED state.
        session = self._checked_default_session()
        session.TryInit()
        self.assertEqual(session.status, session.Status.INITED)

    def test_case2(self):
        print("test_case2")
        # Full lifecycle: init then close, checking the status after each step.
        session = self._checked_default_session()
        session.TryInit()
        self.assertEqual(session.status, session.Status.INITED)
        session.TryClose()
        self.assertEqual(session.status, session.Status.CLOSED)
# Run the session tests when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.python.framework.session_context.GetDefaultSession",
"oneflow.experimental.distributed.is_multi_client"
] | [((1795, 1810), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1808, 1810), False, 'import unittest\n'), ((1065, 1096), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1094, 1096), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((1503, 1534), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1532, 1534), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((939, 973), 'oneflow.experimental.distributed.is_multi_client', 'flow.distributed.is_multi_client', ([], {}), '()\n', (971, 973), True, 'import oneflow.experimental as flow\n'), ((1451, 1485), 'oneflow.experimental.distributed.is_multi_client', 'flow.distributed.is_multi_client', ([], {}), '()\n', (1483, 1485), True, 'import oneflow.experimental as flow\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import oneflow as flow
# from core.resnet_module import make_bottleneck_layer, make_basic_layer
from core.resnet_module import *
import datetime
import os
# time = datetime.datetime.now().strftime('_%f')
time = datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S_%f')
def _conv2d_layer(name,
                  input,
                  filters,
                  kernel_size,
                  strides=1,
                  padding="VALID",
                  groups_num=1,
                  data_format="NHWC",
                  # dilation_rate=1,
                  # activation='Relu',
                  use_bias=True,
                  # use_bn=True,
                  weight_initializer=flow.glorot_uniform_initializer(),
                  bias_initializer=flow.zeros_initializer(),
                  trainable=True,
                  groups=1,
                  ):
    """2D convolution wrapper used by every layer of this HRNet graph.

    Args:
        name: unique layer name (callers append the module-level timestamp).
        input: feature map in the layout given by ``data_format`` (NHWC here).
        filters: number of output channels.
        kernel_size: square kernel size.
        strides: convolution stride.
        padding: "VALID" or "SAME".
        use_bias: whether to add a learnable bias term.
        trainable: whether the variables are trainable.
        groups: grouped-convolution count forwarded to ``conv2d``.

    NOTE(review): ``groups_num``, ``weight_initializer`` and
    ``bias_initializer`` are accepted but never used — the body hardcodes
    ``flow.xavier_normal_initializer()`` / ``flow.zeros_initializer()`` and
    the separate ``groups`` argument. Confirm whether callers expect the
    passed-in initializers to take effect before changing this.
    """
    return flow.layers.conv2d(
        input, filters, kernel_size, strides, padding,
        data_format=data_format, dilation_rate=1, groups=groups,
        activation=None, use_bias=use_bias,
        kernel_initializer=flow.xavier_normal_initializer(),
        bias_initializer=flow.zeros_initializer(),
        # kernel_regularizer=flow.variance_scaling_initializer(2.0, mode="fan_out", distribution="random_normal", data_format="NHWC"),
        # bias_regularizer=flow.zeros_initializer(),
        trainable=trainable, name=name)
def _batch_norm(inputs, momentum, epsilon, name, training=True):
    """Channel-last batch normalization with learnable scale and offset."""
    bn_kwargs = dict(
        inputs=inputs,
        axis=-1,  # NHWC layout: normalize over the last (channel) axis
        momentum=momentum,
        epsilon=epsilon,
        center=True,
        scale=True,
        moving_mean_initializer=flow.zeros_initializer(),
        moving_variance_initializer=flow.ones_initializer(),
        trainable=True,
        training=training,
        name=name,
    )
    return flow.layers.batch_normalization(**bn_kwargs)
def bottleneck_block(inputs, filters_num, strides=1, downsample=False,
                     name='bottleneck', training=True):
    """Residual bottleneck: 1x1 reduce -> 3x3 -> 1x1 expand, plus a skip path."""
    expansion = 4
    reduced = filters_num // expansion
    shortcut = inputs

    out = _conv2d_layer(f'{name}_conv1' + str(time), inputs, reduced, 1, padding="SAME", use_bias=False)
    out = _batch_norm(out, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1' + str(time), training=training)
    out = flow.nn.relu(out, name=f'{name}_relu1')

    out = _conv2d_layer(f'{name}_conv2' + str(time), out, reduced, 3, strides, padding="SAME", use_bias=False)
    out = _batch_norm(out, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2' + str(time), training=training)
    out = flow.nn.relu(out, name=f'{name}_relu2')

    out = _conv2d_layer(f'{name}_conv3' + str(time), out, filters_num, 1, 1, use_bias=False)
    out = _batch_norm(out, momentum=0.1, epsilon=1e-5, name=f'{name}_bn3' + str(time), training=training)

    if downsample:
        # Project the shortcut so its shape matches the main path.
        shortcut = _conv2d_layer(f'{name}_down_conv' + str(time), inputs, filters_num, 1, strides, use_bias=False)
        shortcut = _batch_norm(shortcut, momentum=0.1, epsilon=1e-5, name=f'{name}_down_bn' + str(time), training=training)

    return flow.nn.relu(flow.math.add_n([out, shortcut], name=f'{name}_res'), name=f'{name}_out')
def transion_layer1(inputs, filters=[32, 64], name='stage1_transition', training=None):
    """Split the stage-1 feature map into two branches (full and half resolution)."""
    # Branch 1: stride-1 conv keeps the resolution (bias enabled on this path).
    branch_hi = _conv2d_layer(f'{name}_conv1' + str(time), inputs, filters[0], 3, 1, padding="SAME")
    branch_hi = _batch_norm(branch_hi, momentum=0.1, epsilon=1e-5, training=training, name=f'{name}_bn1' + str(time))
    branch_hi = flow.nn.relu(branch_hi, name=f'{name}_relu1')
    # Branch 2: stride-2 conv halves the spatial resolution.
    branch_lo = _conv2d_layer(f'{name}_conv2' + str(time), inputs, filters[1], 3, 2, padding="SAME", use_bias=False)
    branch_lo = _batch_norm(branch_lo, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2' + str(time), training=training)
    branch_lo = flow.nn.relu(branch_lo, name=f'{name}_relu2')
    return [branch_hi, branch_lo]
def make_branch(inputs, filters, name='branch', training=None):
    """Stack four stride-1 basic residual blocks on one resolution branch."""
    out = inputs
    for idx in range(1, 5):
        out = basic_block(out, filters, downsample=False,
                          name=f'{name}_basic{idx}', training=training)
    return out
def basic_block(inputs, filters, strides=1, training=True, downsample=False, name='basic'):
    """Residual basic block: two 3x3 convolutions plus an identity/projection skip.

    Args:
        inputs: input feature map (NHWC).
        filters: number of output channels.
        strides: stride of the first convolution.
        training: whether batch-norm layers run in training mode.
        downsample: project the shortcut with a 1x1 conv to match shape/stride.
        name: unique prefix for all layer names in this block.

    Returns:
        The block output, ``relu(main_path + shortcut)``.
    """
    expansion = 1
    residual = inputs
    x = _conv2d_layer(f'{name}_conv1' + str(time), inputs, filters // expansion, 3, strides, padding="SAME")
    x = _batch_norm(x, momentum=0.1, epsilon=1e-5, training=training, name=f'{name}_bn1' + str(time))
    x = flow.nn.relu(x)
    x = _conv2d_layer(f'{name}_conv2' + str(time), x, filters // expansion, 3, 1, padding="SAME")
    x = _batch_norm(x, momentum=0.1, epsilon=1e-5, training=training, name=f'{name}_bn2' + str(time))
    if downsample:
        residual = _conv2d_layer(f'{name}_down_conv' + str(time), inputs, filters, 1, strides,)
        # BUGFIX: propagate `training` to the shortcut batch-norm as well; it
        # previously fell back to training=True even at eval time, unlike
        # every other _batch_norm call in this file.
        residual = _batch_norm(residual, momentum=0.1, epsilon=1e-5,
                               name=f'{name}_down_bn' + str(time), training=training)
    output = flow.nn.relu(flow.math.add(x, residual, name=f'{name}_res'), name=f'{name}_out')
    return output
def fuse_layer1(inputs, filters=[32, 64], name='stage2_fuse', training=None):
    """Cross-resolution fusion between the two stage-2 branches.

    NOTE(review): the low-resolution branch is fused from the *already fused*
    high-resolution output (sequential fusion, not parallel) — this preserves
    the existing wiring; confirm it matches the intended HRNet design.
    """
    hi, lo = inputs
    # High-res output: upsample the low-res features and add them in.
    up = _conv2d_layer(f'{name}_conv_2_1' + str(time), lo, filters[0], 1, 1,
                       use_bias=False)
    up = _batch_norm(up, momentum=0.1, epsilon=1e-5, name=f'{name}_bn_2_1' + str(time), training=training)
    up = flow.layers.upsample_2d(x=up, size=(2, 2), data_format="NHWC", name=f'{name}_up_2_1')
    fused_hi = flow.nn.relu(flow.math.add_n([hi, up], name=f'{name}_add1'), name=f'{name}_branch1_out')
    # Low-res output: downsample the fused high-res output and add it in.
    down = _conv2d_layer(f'{name}_conv1_2' + str(time), fused_hi, filters[1], 3, 2, padding="SAME",
                         use_bias=False)
    down = _batch_norm(down, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_2' + str(time), training=training)
    fused_lo = flow.nn.relu(flow.math.add_n([down, lo], name=f'{name}_add2'), name=f'{name}_branch2_out')
    return [fused_hi, fused_lo]
def transition_layer2(inputs, filters, name='stage2_transition', training=None):
    """Grow two branches into three: keep both resolutions, add a half-res branch."""
    def _conv_bn_relu(x, idx, n_filters, stride):
        # Shared conv -> batch-norm -> relu step for this transition.
        y = _conv2d_layer(f'{name}_conv{idx}' + str(time), x, n_filters, 3, stride,
                          padding="SAME", use_bias=False)
        y = _batch_norm(y, momentum=0.1, epsilon=1e-5,
                        name=f'{name}_bn{idx}' + str(time), training=training)
        return flow.nn.relu(y, name=f'{name}_relu{idx}')

    branch1, branch2 = inputs
    out1 = _conv_bn_relu(branch1, 1, filters[0], 1)  # full resolution
    out2 = _conv_bn_relu(branch2, 2, filters[1], 1)  # half resolution
    out3 = _conv_bn_relu(branch2, 3, filters[2], 2)  # new quarter-resolution branch
    return [out1, out2, out3]
def fuse_layer2(inputs, filters=[32, 64, 128], name='stage3_fuse', training=None):
    """Cross-resolution fusion of the three stage-3 branches.

    Each output branch is the ReLU of the sum of all three branches brought
    to its resolution: 1x1 conv + upsample to go up, strided 3x3 convs to go
    down.

    NOTE(review): branches 2 and 3 are computed from the already-fused ``x1``
    (rebound before reuse) — sequential rather than parallel fusion; confirm
    this matches the intended HRNet wiring before changing it.

    Args:
        inputs: list of three NHWC feature maps, highest resolution first.
        filters: channel count of each branch.
        name: unique prefix for layer names.
        training: forwarded to every batch-norm layer.

    Returns:
        List of three fused feature maps at the same resolutions as inputs.
    """
    x1, x2, x3 = inputs
    # Branch 1: upsample branches 2 (x2) and 3 (x4) to full resolution and sum.
    x11 = x1
    x21 = _conv2d_layer(f'{name}_conv2_1'+ str(time), x2, filters[0], 1, 1,
                         use_bias=False)
    x21 = _batch_norm(x21, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2_1' + str(time), training=training)
    x21 = flow.layers.upsample_2d(x=x21, size=(2, 2), data_format="NHWC", name=f'{name}_up2_1')
    x31 = _conv2d_layer(f'{name}_conv3_1'+ str(time),x3, filters[0], 1, 1,
                         use_bias=False)
    x31 = _batch_norm(x31, momentum=0.1, epsilon=1e-5, name=f'{name}_bn3_1' + str(time), training=training)
    x31 = flow.layers.upsample_2d(x=x31, size=(4,4), data_format="NHWC", name=f'{name}_up3_1')
    x1 = flow.nn.relu(flow.math.add_n([x11, x21, x31],name=f'{name}_add1'), name=f'{name}_branch1_out')
    # Branch 2: downsample (fused) branch 1, upsample branch 3, sum with x2.
    x22 = x2
    x12 = _conv2d_layer(f'{name}_conv1_2'+ str(time),x1, filters[1], 3, 2, padding="SAME",
                         use_bias=False)
    x12 = _batch_norm(x12, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_2' + str(time), training=training)
    x32 = _conv2d_layer(f'{name}_conv3_2'+ str(time), x3, filters[1], 1, 1,
                         use_bias=False)
    x32 = _batch_norm(x32, momentum=0.1, epsilon=1e-5, name=f'{name}_bn3_2' + str(time), training=training)
    x32 = flow.layers.upsample_2d(x=x32, size=(2, 2), data_format="NHWC", name=f'{name}_up3_2')
    x2 = flow.nn.relu(flow.math.add_n([x12, x22, x32], name=f'{name}_add2'), name=f'{name}_branch2_out')
    # Branch 3: downsample branch 1 twice and branch 2 once, sum with x3.
    x33 = x3
    x13 = _conv2d_layer(f'{name}_conv1_3_1'+ str(time), x1, filters[0], 3, 2, padding="SAME",
                         use_bias=False)
    x13 = _batch_norm(x13, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_3_1' + str(time), training=training)
    x13 = flow.nn.relu(x13,name=f'{name}_relu1_3_1')
    x13 = _conv2d_layer(f'{name}_conv1_3_2'+ str(time), x13, filters[2], 3, 2, padding="SAME",
                         use_bias=False)
    x13 = _batch_norm(x13, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_3_2' + str(time), training=training)
    x23 = _conv2d_layer(f'{name}_conv2_3'+ str(time), x2, filters[2], 3, 2, padding="SAME",
                         use_bias=False)
    x23 = _batch_norm(x23, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2_3' + str(time), training=training)
    x3 = flow.nn.relu(flow.math.add_n([x13, x23, x33],name=f'{name}_add3'), name=f'{name}_branch3_out')
    return [x1, x2, x3]
def transition_layer3(inputs, filters, name='stage3_transition', training=None):
    """Grow three branches into four; the new branch halves the lowest resolution."""
    b1, b2, b3 = inputs
    sources = (b1, b2, b3, b3)  # outputs 3 and 4 are both derived from b3
    steps = (1, 1, 1, 2)        # only the new fourth branch downsamples
    outputs = []
    for idx, (src, stride) in enumerate(zip(sources, steps), start=1):
        y = _conv2d_layer(f'{name}_conv{idx}' + str(time), src, filters[idx - 1],
                          kernel_size=3, strides=stride, padding="SAME",
                          use_bias=False)
        y = _batch_norm(y, momentum=0.1, epsilon=1e-5,
                        name=f'{name}_bn_{idx}' + str(time), training=training)
        outputs.append(flow.nn.relu(y, name=f'{name}_relu_{idx}'))
    return outputs
def fuse_layer3(inputs, filters=[32, 64, 128, 256], name='stage4_fuse', training=None):
    """Cross-resolution fusion of the four stage-4 branches.

    Each output branch sums all four inputs brought to its resolution
    (1x1 conv + upsample to go up, chains of strided 3x3 convs to go down),
    then applies ReLU.

    NOTE(review): as in fuse_layer2, lower branches reuse the already-fused
    ``x1``/``x2`` (sequential fusion). The batch-norm names are inconsistent
    (``bn21``, ``bn31``, ``bn23``, ``bn423`` vs the ``bn*_*`` pattern) — do
    not normalize them, as the names identify variables in saved checkpoints.

    Args:
        inputs: list of four NHWC feature maps, highest resolution first.
        filters: channel count of each branch.
        name: unique prefix for layer names.
        training: forwarded to every batch-norm layer.

    Returns:
        List of four fused feature maps at the same resolutions as inputs.
    """
    x1, x2, x3, x4 = inputs
    # Branch 1: upsample branches 2-4 by 2x/4x/8x and sum with x1.
    x11 = x1
    x21 = _conv2d_layer(f'{name}_conv2_1'+ str(time), x2, filters[0], 1, 1,
                         use_bias=False)
    x21 = _batch_norm(x21, momentum=0.1, epsilon=1e-5, name=f'{name}_bn21' + str(time), training=training)
    x21 = flow.layers.upsample_2d(x=x21, size=(2, 2), data_format="NHWC", name=f'{name}_up2_1')
    x31 = _conv2d_layer(f'{name}_conv3_1'+ str(time), x3, filters[0], 1, 1,
                         use_bias=False)
    x31 = _batch_norm(x31, momentum=0.1, epsilon=1e-5, name=f'{name}_bn31' + str(time), training=training)
    x31 = flow.layers.upsample_2d(x=x31, size=(4,4), data_format="NHWC", name=f'{name}_up3_1')
    x41 = _conv2d_layer(f'{name}_conv4_1'+ str(time), x4, filters[0], 1, 1,
                         use_bias=False)
    x41 = _batch_norm(x41, momentum=0.1, epsilon=1e-5, name=f'{name}_bn4_1' + str(time), training=training)
    x41 = flow.layers.upsample_2d(x=x41, size=(8, 8), data_format="NHWC", name=f'{name}_up4_1')
    x1 = flow.nn.relu(flow.math.add_n([x11, x21, x31, x41],name=f'{name}_add1'), name=f'{name}_branch1_out')
    # Branch 2: downsample (fused) branch 1, upsample branches 3-4, sum with x2.
    x22 = x2
    x12 = _conv2d_layer(f'{name}_conv1_2'+ str(time), x1, filters[1], 3, 2, padding="SAME",
                         use_bias=False)
    x12 = _batch_norm(x12, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_2' + str(time), training=training)
    x32 = _conv2d_layer(f'{name}_conv3_2'+ str(time), x3, filters[1], 1, 1,
                         use_bias=False)
    x32 = _batch_norm(x32, momentum=0.1, epsilon=1e-5, name=f'{name}_bn3_2' + str(time), training=training)
    x32 = flow.layers.upsample_2d(x=x32, size=(2,2), data_format="NHWC", name=f'{name}_up3_2')
    x42 = _conv2d_layer(f'{name}_conv4_2'+ str(time), x4, filters[1], 1, 1,
                         use_bias=False)
    x42 = _batch_norm(x42, momentum=0.1, epsilon=1e-5, name=f'{name}_bn4_2' + str(time), training=training)
    x42 = flow.layers.upsample_2d(x=x42, size=(4,4), data_format="NHWC", name=f'{name}_up4_2')
    x2 = flow.nn.relu(flow.math.add_n([x12, x22, x32, x42],name=f'{name}_add2'), name=f'{name}_branch2_out')
    # Branch 3: downsample branches 1 (twice) and 2, upsample branch 4, sum with x3.
    x33 = x3
    x13 = _conv2d_layer(f'{name}_conv1_3_1'+ str(time), x1, filters[0], 3, 2, padding="SAME",
                         use_bias=False)
    x13 = _batch_norm(x13, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_3_1' + str(time), training=training)
    x13 = flow.nn.relu(x13,name=f'{name}_relu1_3_1')
    x13 = _conv2d_layer(f'{name}_conv1_3_2'+ str(time), x13, filters[2], 3, 2, padding="SAME",
                         use_bias=False)
    x13 = _batch_norm(x13, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_3_2' + str(time), training=training)
    x23 = _conv2d_layer(f'{name}_conv2_3'+ str(time), x2, filters[2], 3, 2, padding="SAME",
                         use_bias=False)
    x23 = _batch_norm(x23, momentum=0.1, epsilon=1e-5, name=f'{name}_bn23' + str(time), training=training)
    x43 = _conv2d_layer(f'{name}_conv4_3'+ str(time), x4, filters[2], 1, 1,
                         use_bias=False)
    x43 = _batch_norm(x43, momentum=0.1, epsilon=1e-5, name=f'{name}_bn423' + str(time), training=training)
    x43 = flow.layers.upsample_2d(x=x43, size=(2,2), data_format="NHWC", name=f'{name}_up4_3')
    x3 = flow.nn.relu(flow.math.add_n([x13, x23, x33, x43],name=f'{name}_add3'), name=f'{name}_branch3_out')
    # Branch 4: downsample branches 1 (x3), 2 (x2) and 3 (x1), sum with x4.
    x44 = x4
    x14 = _conv2d_layer(f'{name}_conv1_4_1'+ str(time), x1, filters[0], 3, 2, padding="SAME",
                         use_bias=False)
    x14 = _batch_norm(x14, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_4_1' + str(time), training=training)
    x14 = flow.nn.relu(x14,name=f'{name}_relu1_4_1')
    x14 = _conv2d_layer(f'{name}_conv1_4_2' + str(time), x14, filters[0], 3, 2, padding="SAME",
                         use_bias=False)
    x14 = _batch_norm(x14, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_4_2' + str(time), training=training)
    x14 = flow.nn.relu(x14, name=f'{name}_relu1_4_2')
    x14 = _conv2d_layer(f'{name}_conv1_4_3' + str(time), x14, filters[3], 3, 2, padding="SAME",
                         use_bias=False)
    x14 = _batch_norm(x14, momentum=0.1, epsilon=1e-5, name=f'{name}_bn1_4_3' + str(time), training=training)
    x24 = _conv2d_layer(f'{name}_conv2_4_1'+ str(time),x2, filters[1], 3, 2, padding="SAME",
                         use_bias=False)
    x24 = _batch_norm(x24, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2_4_1' + str(time), training=training)
    x24 = flow.nn.relu(x24, name=f'{name}_relu2_4_1')
    x24 = _conv2d_layer(f'{name}_conv2_4_2' + str(time),x24, filters[3], 3, 2, padding="SAME",
                         use_bias=False)
    x24 = _batch_norm(x24, momentum=0.1, epsilon=1e-5, name=f'{name}_bn2_4_2' + str(time), training=training)
    x34 = _conv2d_layer(f'{name}_conv3_4'+ str(time),x3, filters[3], 3, 2, padding="SAME",
                         use_bias=False)
    x34 = _batch_norm(x34, momentum=0.1, epsilon=1e-5, name=f'{name}_bn3_4' + str(time), training=training)
    x4 = flow.nn.relu(flow.math.add_n([x14, x24, x34, x44],name=f'{name}_add4'), name=f'{name}_branch4_out')
    return [x1, x2, x3, x4]
def fuse_layer4(inputs, filters=32, name='final_fuse', training=None):
    """Upsample every branch to the highest resolution and concatenate them."""
    merged = [inputs[0]]  # highest-resolution branch passes through untouched
    # Branches 2..4 sit at 1/2, 1/4 and 1/8 resolution; bring each to full size.
    for branch_idx, scale in ((2, 2), (3, 4), (4, 8)):
        feat = inputs[branch_idx - 1]
        feat = _conv2d_layer(f'{name}_conv{branch_idx}_1' + str(time), feat, filters, 1, 1,
                             use_bias=False)
        feat = _batch_norm(feat, momentum=0.1, epsilon=1e-5,
                           name=f'{name}_bn{branch_idx}_1' + str(time), training=training)
        feat = flow.layers.upsample_2d(x=feat, size=(scale, scale), data_format="NHWC",
                                       name=f'{name}_up{branch_idx}_1')
        merged.append(feat)
    return flow.concat(inputs=merged, axis=-1, name=f'{name}_out')
def HRNet(img_input,
          training=None
          ):
    """Build the four-stage HRNet graph ending in a 17-channel prediction map.

    Args:
        img_input: NHWC input image tensor.
        training: forwarded to every batch-norm layer in the network.

    Returns:
        The 1x1-conv prediction tensor with 17 output channels.
    """
    # STAGE 1: stem downsamples by 4x, then four bottleneck blocks.
    x = _conv2d_layer('stage1_stem_conv1'+ str(time), img_input, 64, 3, 2, padding="SAME", use_bias=False)
    x = _batch_norm(x, momentum=0.1, epsilon=1e-5, name='stage1_stem_bn1'+str(time), training=training)
    x = flow.nn.relu(x, name='stage1_stem_relu1')
    x = _conv2d_layer('stage1_stem_conv2'+ str(time), x, 64, 3, 2, padding="SAME", use_bias=False)
    x = _batch_norm(x, momentum=0.1, epsilon=1e-5, name='stage1_stem__bn2'+ str(time), training=training)
    x = flow.nn.relu(x, name='stage1_stem__relu2')
    x = bottleneck_block(x, 256, downsample=True, name='stage1_bottleneck1', training=training)
    x = bottleneck_block(x, 256, downsample=False, name='stage1_bottleneck2', training=training)
    x = bottleneck_block(x, 256, downsample=False, name='stage1_bottleneck3', training=training)
    x = bottleneck_block(x, 256, downsample=False, name='stage1_bottleneck4', training=training)
    x1, x2 = transion_layer1(x, filters=[32, 64], name='stage1_transition', training=training)
    # STAGE 2: two branches, one fusion, then grow to three branches.
    x1 = make_branch(x1, 32, name='stage2_branch1', training=training)
    x2 = make_branch(x2, 64, name='stage2_branch2', training=training)
    x1, x2 = fuse_layer1([x1, x2], filters=[32, 64], name='stage2_fuse', training=training)
    x1, x2, x3 = transition_layer2([x1, x2], filters=[32, 64, 128],
                                   name='stage2_transition', training=training)
    # STAGE 3: four rounds of branch refinement + fusion, then grow to four branches.
    for i in range(4):
        x1 = make_branch(x1, 32, name=f'stage3_{i + 1}_branch1', training=training)
        x2 = make_branch(x2, 64, name=f'stage3_{i + 1}_branch2', training=training)
        x3 = make_branch(x3, 128, name=f'stage3_{i + 1}_branch3', training=training)
        x1, x2, x3 = fuse_layer2([x1, x2, x3], filters=[32, 64, 128],
                                 name=f'stage3_{i + 1}_fuse', training=training)
    x1, x2, x3, x4 = transition_layer3([x1, x2, x3], filters=[32, 64, 128, 256],
                                       name='stage3_transition', training=training)
    # STAGE 4: three rounds; the last round fuses all branches into one map.
    for i in range(3):
        x1 = make_branch(x1, 32, name=f'stage4_{i + 1}_branch1', training=training)
        x2 = make_branch(x2, 64, name=f'stage4_{i + 1}_branch2', training=training)
        x3 = make_branch(x3, 128, name=f'stage4_{i + 1}_branch3', training=training)
        x4 = make_branch(x4, 256, name=f'stage4_{i + 1}_branch4', training=training)
        if i != 2:
            x1, x2, x3, x4 = fuse_layer3([x1, x2, x3, x4],
                                          filters=[32, 64, 128, 256],
                                          name=f'stage4_{i + 1}_fuse', training=training)
        else:
            x = fuse_layer4([x1, x2, x3, x4], 32, name=f'stage4_{i + 1}_fuse', training=training)
    # FIX: removed leftover `print(x.shape)` debug output that polluted stdout
    # on every graph construction.
    x = _conv2d_layer('predictions', x, 17, 1, 1)
    return x
| [
"oneflow.math.add_n",
"oneflow.layers.upsample_2d",
"oneflow.xavier_normal_initializer",
"oneflow.zeros_initializer",
"oneflow.glorot_uniform_initializer",
"oneflow.concat",
"oneflow.nn.relu",
"oneflow.ones_initializer",
"oneflow.math.add"
] | [((680, 713), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {}), '()\n', (711, 713), True, 'import oneflow as flow\n'), ((740, 764), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (762, 764), True, 'import oneflow as flow\n'), ((2475, 2512), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {'name': 'f"""{name}_relu1"""'}), "(x, name=f'{name}_relu1')\n", (2487, 2512), True, 'import oneflow as flow\n'), ((2743, 2780), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {'name': 'f"""{name}_relu2"""'}), "(x, name=f'{name}_relu2')\n", (2755, 2780), True, 'import oneflow as flow\n'), ((3643, 3681), 'oneflow.nn.relu', 'flow.nn.relu', (['x1'], {'name': 'f"""{name}_relu1"""'}), "(x1, name=f'{name}_relu1')\n", (3655, 3681), True, 'import oneflow as flow\n'), ((3906, 3944), 'oneflow.nn.relu', 'flow.nn.relu', (['x2'], {'name': 'f"""{name}_relu2"""'}), "(x2, name=f'{name}_relu2')\n", (3918, 3944), True, 'import oneflow as flow\n'), ((4764, 4779), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (4776, 4779), True, 'import oneflow as flow\n'), ((5662, 5753), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x21', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up_2_1"""'}), "(x=x21, size=(2, 2), data_format='NHWC', name=\n f'{name}_up_2_1')\n", (5685, 5753), True, 'import oneflow as flow\n'), ((6569, 6607), 'oneflow.nn.relu', 'flow.nn.relu', (['x1'], {'name': 'f"""{name}_relu1"""'}), "(x1, name=f'{name}_relu1')\n", (6581, 6607), True, 'import oneflow as flow\n'), ((6857, 6896), 'oneflow.nn.relu', 'flow.nn.relu', (['x21'], {'name': 'f"""{name}_relu2"""'}), "(x21, name=f'{name}_relu2')\n", (6869, 6896), True, 'import oneflow as flow\n'), ((7146, 7185), 'oneflow.nn.relu', 'flow.nn.relu', (['x22'], {'name': 'f"""{name}_relu3"""'}), "(x22, name=f'{name}_relu3')\n", (7158, 7185), True, 'import oneflow as flow\n'), ((7586, 7676), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], 
{'x': 'x21', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up2_1"""'}), "(x=x21, size=(2, 2), data_format='NHWC', name=\n f'{name}_up2_1')\n", (7609, 7676), True, 'import oneflow as flow\n'), ((7908, 7998), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x31', 'size': '(4, 4)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up3_1"""'}), "(x=x31, size=(4, 4), data_format='NHWC', name=\n f'{name}_up3_1')\n", (7931, 7998), True, 'import oneflow as flow\n'), ((8607, 8697), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x32', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up3_2"""'}), "(x=x32, size=(2, 2), data_format='NHWC', name=\n f'{name}_up3_2')\n", (8630, 8697), True, 'import oneflow as flow\n'), ((9086, 9129), 'oneflow.nn.relu', 'flow.nn.relu', (['x13'], {'name': 'f"""{name}_relu1_3_1"""'}), "(x13, name=f'{name}_relu1_3_1')\n", (9098, 9129), True, 'import oneflow as flow\n'), ((10221, 10260), 'oneflow.nn.relu', 'flow.nn.relu', (['x1'], {'name': 'f"""{name}_relu_1"""'}), "(x1, name=f'{name}_relu_1')\n", (10233, 10260), True, 'import oneflow as flow\n'), ((10572, 10611), 'oneflow.nn.relu', 'flow.nn.relu', (['x2'], {'name': 'f"""{name}_relu_2"""'}), "(x2, name=f'{name}_relu_2')\n", (10584, 10611), True, 'import oneflow as flow\n'), ((10927, 10967), 'oneflow.nn.relu', 'flow.nn.relu', (['x31'], {'name': 'f"""{name}_relu_3"""'}), "(x31, name=f'{name}_relu_3')\n", (10939, 10967), True, 'import oneflow as flow\n'), ((11285, 11325), 'oneflow.nn.relu', 'flow.nn.relu', (['x32'], {'name': 'f"""{name}_relu_4"""'}), "(x32, name=f'{name}_relu_4')\n", (11297, 11325), True, 'import oneflow as flow\n'), ((11762, 11852), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x21', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up2_1"""'}), "(x=x21, size=(2, 2), data_format='NHWC', name=\n f'{name}_up2_1')\n", (11785, 11852), True, 'import oneflow as flow\n'), 
((12086, 12176), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x31', 'size': '(4, 4)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up3_1"""'}), "(x=x31, size=(4, 4), data_format='NHWC', name=\n f'{name}_up3_1')\n", (12109, 12176), True, 'import oneflow as flow\n'), ((12409, 12499), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x41', 'size': '(8, 8)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up4_1"""'}), "(x=x41, size=(8, 8), data_format='NHWC', name=\n f'{name}_up4_1')\n", (12432, 12499), True, 'import oneflow as flow\n'), ((13116, 13206), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x32', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up3_2"""'}), "(x=x32, size=(2, 2), data_format='NHWC', name=\n f'{name}_up3_2')\n", (13139, 13206), True, 'import oneflow as flow\n'), ((13442, 13532), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x42', 'size': '(4, 4)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up4_2"""'}), "(x=x42, size=(4, 4), data_format='NHWC', name=\n f'{name}_up4_2')\n", (13465, 13532), True, 'import oneflow as flow\n'), ((13924, 13967), 'oneflow.nn.relu', 'flow.nn.relu', (['x13'], {'name': 'f"""{name}_relu1_3_1"""'}), "(x13, name=f'{name}_relu1_3_1')\n", (13936, 13967), True, 'import oneflow as flow\n'), ((14698, 14788), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x43', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up4_3"""'}), "(x=x43, size=(2, 2), data_format='NHWC', name=\n f'{name}_up4_3')\n", (14721, 14788), True, 'import oneflow as flow\n'), ((15180, 15223), 'oneflow.nn.relu', 'flow.nn.relu', (['x14'], {'name': 'f"""{name}_relu1_4_1"""'}), "(x14, name=f'{name}_relu1_4_1')\n", (15192, 15223), True, 'import oneflow as flow\n'), ((15479, 15522), 'oneflow.nn.relu', 'flow.nn.relu', (['x14'], {'name': 'f"""{name}_relu1_4_2"""'}), "(x14, name=f'{name}_relu1_4_2')\n", (15491, 15522), 
True, 'import oneflow as flow\n'), ((16027, 16070), 'oneflow.nn.relu', 'flow.nn.relu', (['x24'], {'name': 'f"""{name}_relu2_4_1"""'}), "(x24, name=f'{name}_relu2_4_1')\n", (16039, 16070), True, 'import oneflow as flow\n'), ((17047, 17137), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x21', 'size': '(2, 2)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up2_1"""'}), "(x=x21, size=(2, 2), data_format='NHWC', name=\n f'{name}_up2_1')\n", (17070, 17137), True, 'import oneflow as flow\n'), ((17366, 17456), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x31', 'size': '(4, 4)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up3_1"""'}), "(x=x31, size=(4, 4), data_format='NHWC', name=\n f'{name}_up3_1')\n", (17389, 17456), True, 'import oneflow as flow\n'), ((17685, 17775), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'x41', 'size': '(8, 8)', 'data_format': '"""NHWC"""', 'name': 'f"""{name}_up4_1"""'}), "(x=x41, size=(8, 8), data_format='NHWC', name=\n f'{name}_up4_1')\n", (17708, 17775), True, 'import oneflow as flow\n'), ((17779, 17848), 'oneflow.concat', 'flow.concat', ([], {'inputs': '[x11, x21, x31, x41]', 'axis': '(-1)', 'name': 'f"""{name}_out"""'}), "(inputs=[x11, x21, x31, x41], axis=-1, name=f'{name}_out')\n", (17790, 17848), True, 'import oneflow as flow\n'), ((18155, 18196), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {'name': '"""stage1_stem_relu1"""'}), "(x, name='stage1_stem_relu1')\n", (18167, 18196), True, 'import oneflow as flow\n'), ((18415, 18457), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {'name': '"""stage1_stem__relu2"""'}), "(x, name='stage1_stem__relu2')\n", (18427, 18457), True, 'import oneflow as flow\n'), ((322, 345), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (343, 345), False, 'import datetime\n'), ((3256, 3306), 'oneflow.math.add_n', 'flow.math.add_n', (['[x, residual]'], {'name': 'f"""{name}_res"""'}), "([x, residual], name=f'{name}_res')\n", 
(3271, 3306), True, 'import oneflow as flow\n'), ((5222, 5268), 'oneflow.math.add', 'flow.math.add', (['x', 'residual'], {'name': 'f"""{name}_res"""'}), "(x, residual, name=f'{name}_res')\n", (5235, 5268), True, 'import oneflow as flow\n'), ((5768, 5816), 'oneflow.math.add_n', 'flow.math.add_n', (['[x11, x21]'], {'name': 'f"""{name}_add1"""'}), "([x11, x21], name=f'{name}_add1')\n", (5783, 5816), True, 'import oneflow as flow\n'), ((6124, 6172), 'oneflow.math.add_n', 'flow.math.add_n', (['[x12, x22]'], {'name': 'f"""{name}_add2"""'}), "([x12, x22], name=f'{name}_add2')\n", (6139, 6172), True, 'import oneflow as flow\n'), ((8016, 8069), 'oneflow.math.add_n', 'flow.math.add_n', (['[x11, x21, x31]'], {'name': 'f"""{name}_add1"""'}), "([x11, x21, x31], name=f'{name}_add1')\n", (8031, 8069), True, 'import oneflow as flow\n'), ((8716, 8769), 'oneflow.math.add_n', 'flow.math.add_n', (['[x12, x22, x32]'], {'name': 'f"""{name}_add2"""'}), "([x12, x22, x32], name=f'{name}_add2')\n", (8731, 8769), True, 'import oneflow as flow\n'), ((9644, 9697), 'oneflow.math.add_n', 'flow.math.add_n', (['[x13, x23, x33]'], {'name': 'f"""{name}_add3"""'}), "([x13, x23, x33], name=f'{name}_add3')\n", (9659, 9697), True, 'import oneflow as flow\n'), ((12518, 12576), 'oneflow.math.add_n', 'flow.math.add_n', (['[x11, x21, x31, x41]'], {'name': 'f"""{name}_add1"""'}), "([x11, x21, x31, x41], name=f'{name}_add1')\n", (12533, 12576), True, 'import oneflow as flow\n'), ((13550, 13608), 'oneflow.math.add_n', 'flow.math.add_n', (['[x12, x22, x32, x42]'], {'name': 'f"""{name}_add2"""'}), "([x12, x22, x32, x42], name=f'{name}_add2')\n", (13565, 13608), True, 'import oneflow as flow\n'), ((14806, 14864), 'oneflow.math.add_n', 'flow.math.add_n', (['[x13, x23, x33, x43]'], {'name': 'f"""{name}_add3"""'}), "([x13, x23, x33, x43], name=f'{name}_add3')\n", (14821, 14864), True, 'import oneflow as flow\n'), ((16583, 16641), 'oneflow.math.add_n', 'flow.math.add_n', (['[x14, x24, x34, x44]'], {'name': 
'f"""{name}_add4"""'}), "([x14, x24, x34, x44], name=f'{name}_add4')\n", (16598, 16641), True, 'import oneflow as flow\n'), ((1078, 1110), 'oneflow.xavier_normal_initializer', 'flow.xavier_normal_initializer', ([], {}), '()\n', (1108, 1110), True, 'import oneflow as flow\n'), ((1145, 1169), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1167, 1169), True, 'import oneflow as flow\n'), ((1916, 1940), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1938, 1940), True, 'import oneflow as flow\n'), ((1978, 2001), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (1999, 2001), True, 'import oneflow as flow\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from libai.inference.text_generation import TextGenerationPipeline
from libai.utils import distributed as dist
from libai.utils.file_utils import get_data_from_cache
# BERT-base-chinese vocabulary used by the tokenizer, plus its MD5 checksum
# for validating the download cache.
VOCAB_URL = "https://oneflow-static.oss-cn-beijing.aliyuncs.com/ci-files/dataset/libai/bert_dataset/bert-base-chinese-vocab.txt"  # noqa
VOCAB_MD5 = "65ac8a72466e859cd3c6b279ed8e532a"
class TestTextGenerationPipeline(flow.unittest.TestCase):
    """Check that T5 text generation is identical with and without the KV cache
    under several parallelism layouts."""

    def setUp(self) -> None:
        self.texts = ["cat ", "you ", "dog ", "dragon ", "牛 ", "羊 "]
        cache_dir = os.path.join(os.getenv("ONEFLOW_TEST_CACHE_DIR", "./data_test"), "bert_data")
        # prepare tokenizer
        if dist.get_local_rank() == 0:
            # download tokenizer vocab on main process of each node
            get_data_from_cache(VOCAB_URL, cache_dir, md5=VOCAB_MD5)

    def _check_generation(self, data_parallel, tensor_parallel, pipeline_parallel):
        """Shared body for all parallelism layouts: build the pipeline and
        assert cached and uncached generation agree on random prompts.

        Previously this logic was duplicated verbatim in all three tests.
        """
        self.pipeline = TextGenerationPipeline(
            "configs/t5_pp_pretrain.py", data_parallel, tensor_parallel, pipeline_parallel
        )
        for _ in range(5):
            # Build a random prompt from the fixture words.
            indices = list(np.random.randint(0, 5, 10))
            text = "".join([self.texts[i] for i in indices])
            dict1 = self.pipeline(
                text, use_cache=False, max_generate_length=15, return_type="new_text"
            )
            dict2 = self.pipeline(
                text, use_cache=True, max_generate_length=15, return_type="new_text"
            )
            if dist.is_main_process():
                assert dict1["generated_text"] == dict2["generated_text"]

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_tensor_parallel(self):
        self._check_generation(1, 4, 1)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_pipeline_parallel(self):
        self._check_generation(1, 1, 4)

    @unittest.skipIf(not flow.cuda.is_available(), "only test gpu cases")
    @flow.unittest.skip_unless_1n4d()
    def test_pipeline_with_tensor_pipeline_parallel(self):
        self._check_generation(1, 2, 2)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n4d",
"oneflow.cuda.is_available"
] | [((1607, 1639), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (1637, 1639), True, 'import oneflow as flow\n'), ((2376, 2408), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (2406, 2408), True, 'import oneflow as flow\n'), ((3147, 3179), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (3177, 3179), True, 'import oneflow as flow\n'), ((3878, 3893), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3891, 3893), False, 'import unittest\n'), ((1714, 1774), 'libai.inference.text_generation.TextGenerationPipeline', 'TextGenerationPipeline', (['"""configs/t5_pp_pretrain.py"""', '(1)', '(4)', '(1)'], {}), "('configs/t5_pp_pretrain.py', 1, 4, 1)\n", (1736, 1774), False, 'from libai.inference.text_generation import TextGenerationPipeline\n'), ((2485, 2545), 'libai.inference.text_generation.TextGenerationPipeline', 'TextGenerationPipeline', (['"""configs/t5_pp_pretrain.py"""', '(1)', '(1)', '(4)'], {}), "('configs/t5_pp_pretrain.py', 1, 1, 4)\n", (2507, 2545), False, 'from libai.inference.text_generation import TextGenerationPipeline\n'), ((3263, 3323), 'libai.inference.text_generation.TextGenerationPipeline', 'TextGenerationPipeline', (['"""configs/t5_pp_pretrain.py"""', '(1)', '(2)', '(2)'], {}), "('configs/t5_pp_pretrain.py', 1, 2, 2)\n", (3285, 3323), False, 'from libai.inference.text_generation import TextGenerationPipeline\n'), ((1258, 1308), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CACHE_DIR"""', '"""./data_test"""'], {}), "('ONEFLOW_TEST_CACHE_DIR', './data_test')\n", (1267, 1308), False, 'import os\n'), ((1362, 1383), 'libai.utils.distributed.get_local_rank', 'dist.get_local_rank', ([], {}), '()\n', (1381, 1383), True, 'from libai.utils import distributed as dist\n'), ((1470, 1526), 'libai.utils.file_utils.get_data_from_cache', 'get_data_from_cache', (['VOCAB_URL', 'cache_dir'], {'md5': 'VOCAB_MD5'}), '(VOCAB_URL, cache_dir, 
md5=VOCAB_MD5)\n', (1489, 1526), False, 'from libai.utils.file_utils import get_data_from_cache\n'), ((2198, 2220), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (2218, 2220), True, 'from libai.utils import distributed as dist\n'), ((1553, 1577), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (1575, 1577), True, 'import oneflow as flow\n'), ((2969, 2991), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (2989, 2991), True, 'from libai.utils import distributed as dist\n'), ((2322, 2346), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (2344, 2346), True, 'import oneflow as flow\n'), ((3747, 3769), 'libai.utils.distributed.is_main_process', 'dist.is_main_process', ([], {}), '()\n', (3767, 3769), True, 'from libai.utils import distributed as dist\n'), ((3093, 3117), 'oneflow.cuda.is_available', 'flow.cuda.is_available', ([], {}), '()\n', (3115, 3117), True, 'import oneflow as flow\n'), ((1827, 1854), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (1844, 1854), True, 'import numpy as np\n'), ((2598, 2625), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (2615, 2625), True, 'import numpy as np\n'), ((3376, 3403), 'numpy.random.randint', 'np.random.randint', (['(0)', '(5)', '(10)'], {}), '(0, 5, 10)\n', (3393, 3403), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
def _test_logical_slice_assign(test_case, placement, sbp):
    """Eager check: slice-assign on a global tensor matches numpy, incl. autograd.

    A random (4, 4) tensor has its first two columns overwritten with 3; the
    forward values and the gradient masking of the assigned region are verified.
    """
    input = random_tensor(2, 4, 4, requires_grad=True).oneflow
    x_numpy = input.detach().cpu().numpy()
    x = (input + 0).to_global(
        placement=placement, sbp=sbp
    )  # add 0 to change to non-leaf tensor
    x[:, :2] = 3
    # forward
    x_numpy[:, :2] = 3
    test_case.assertTrue(x.sbp == sbp)
    test_case.assertTrue(np.array_equal(x.numpy(), x_numpy))
    # backward
    x.sum().backward()
    # Overwritten elements receive no gradient, so those entries must be zero.
    input_grad_np = np.ones((4, 4))
    input_grad_np[:, :2] = 0
    test_case.assertTrue(np.array_equal(input.grad.numpy(), input_grad_np))
def _test_graph_logical_slice_assign(test_case, placement, sbp):
    """Graph-mode check: slice-assign inside nn.Graph, forward and gradient.

    The gradient is read back from the `input_grad` parameter after one SGD
    step with lr=1.0 and momentum=0.0, so -param equals the accumulated grad.
    """
    x = random_tensor(2, 4, 4, requires_grad=True).oneflow
    x_numpy = x.detach().cpu().numpy()

    class LogicalSliceAssignWithGrad(flow.nn.Module):
        def __init__(self):
            super().__init__()
            # Zero-initialized parameter added to the input purely to capture
            # the gradient flowing through the slice-assign.
            self.input_grad = flow.nn.Parameter(flow.zeros(4, 4))

        def forward(self, input):
            x = input + self.input_grad
            x = x.to_global(placement, sbp)
            x[:, :2] = 3
            return x

    logical_slice_assign_with_grad = LogicalSliceAssignWithGrad().to_global(
        placement, [flow.sbp.broadcast,] * len(sbp)
    )
    of_sgd = flow.optim.SGD(
        logical_slice_assign_with_grad.parameters(), lr=1.0, momentum=0.0
    )

    class LogicalSliceAssignTrainGraph(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.module = logical_slice_assign_with_grad
            self.add_optimizer(of_sgd)

        def build(self, x):
            out = self.module(x)
            z = out.sum()
            z.backward()
            return out

    graph = LogicalSliceAssignTrainGraph()
    input = x.to_global(placement=placement, sbp=sbp)
    y = graph(input)
    test_case.assertTrue(y.sbp == sbp)
    # output
    x_numpy[:, :2] = 3
    test_case.assertTrue(np.array_equal(y.numpy(), x_numpy))
    # input_grad: with lr=1.0 one SGD step makes -param equal to the gradient.
    x_grad_np = np.ones((4, 4))
    x_grad_np[:, :2] = 0
    test_case.assertTrue(
        np.array_equal(-graph.module.input_grad.origin.numpy(), x_grad_np)
    )
class TestGlobalLogicalSliceAssign(flow.unittest.TestCase):
    """Run eager and graph slice-assign over every global placement/sbp combination."""

    @globaltest
    def test_logical_slice_assign(test_case):
        for placement in all_placement():
            for sbp in all_sbp(placement, max_dim=2):
                # Single-rank placements are skipped: nothing global to check.
                if placement.ranks.size != 1:
                    _test_logical_slice_assign(test_case, placement, sbp)
                    _test_graph_logical_slice_assign(test_case, placement, sbp)
# Allow running this test file directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.zeros"
] | [((1221, 1236), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (1228, 1236), True, 'import numpy as np\n'), ((2736, 2751), 'numpy.ones', 'np.ones', (['(4, 4)'], {}), '((4, 4))\n', (2743, 2751), True, 'import numpy as np\n'), ((3358, 3373), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3371, 3373), False, 'import unittest\n'), ((1669, 1685), 'oneflow.zeros', 'flow.zeros', (['(4)', '(4)'], {}), '(4, 4)\n', (1679, 1685), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from functools import reduce
from typing import Iterable, List, Optional, Sequence, Union, Tuple
from oneflow.python.oneflow_export import oneflow_export
import numpy as np
import operator
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow_api
@oneflow_export("gather")
def gather(
    params: oneflow_api.BlobDesc,
    indices: oneflow_api.BlobDesc,
    validate_indices: Optional[oneflow_api.BlobDesc] = None,
    axis: Optional[int] = None,
    batch_dims: int = 0,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""This operator gathers slices from params `axis` according to indices.

    Args:
        params: A `Blob`. The blob from which to gather values. Must be at least rank `axis + 1`.
        indices: A `Blob`. Index blob. Must be in range [0, params.shape[axis]).
        validate_indices: Unused; kept for API compatibility.
        axis: A `int`. The axis in params to gather indices from. Defaults to the first dimension.
            Supports negative indexes.
        batch_dims: An optional `int`. Defaults to 0.
        name: A name for the operation (optional).

    Returns:
        A blob. Has the same type as params.

    For example:

    Example 1:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def gather_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32),
                       indice: tp.Numpy.Placeholder(shape=(2, ), dtype=flow.int32)
        ) -> tp.Numpy:
            gather_blob = flow.gather(params=x,
                                     indices=indice,
                                     axis=1)
            return gather_blob

        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]]).astype(np.float32)
        indice = np.array([0, 2]).astype(np.int32)
        out = gather_Job(x, indice)

        # out [[1. 3.]
        #      [4. 6.]
        #      [7. 9.]]

    Example 2:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def gather_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32),
                       indice: tp.Numpy.Placeholder(shape=(2, ), dtype=flow.int32)
        ) -> tp.Numpy:
            gather_blob = flow.gather(params=x,
                                     indices=indice,
                                     axis=0)
            return gather_blob

        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]]).astype(np.float32)
        indice = np.array([0, 2]).astype(np.int32)
        out = gather_Job(x, indice)

        # out [[1. 2. 3.]
        #      [7. 8. 9.]]
    """
    params_ndims = len(params.shape)
    if axis is None:
        axis = batch_dims
    elif axis < 0:
        # Normalize a negative axis; only negative values are range-checked.
        # NOTE(review): a positive axis >= params_ndims is not validated here
        # and would flow through unchecked — confirm whether the op validates it.
        origin_axis = axis
        axis += params_ndims
        assert axis >= 0 and axis < params_ndims, ValueError(
            "Expected axis to between [%d, %d). But received: %d "
            % (-params_ndims, params_ndims, origin_axis)
        )
    if batch_dims > 0:
        # Batched gather is only implemented for axis == batch_dims.
        if axis == batch_dims:
            return (
                flow.user_op_builder(
                    name if name is not None else id_util.UniqueStr("BatchGather_")
                )
                .Op("batch_gather")
                .Input("in", [params])
                .Input("indices", [indices])
                .Output("out")
                .Build()
                .InferAndTryRun()
                .RemoteBlobList()[0]
            )
        elif axis > batch_dims:
            raise NotImplementedError
        else:
            raise AttributeError
    else:
        # Plain gather along `axis`.
        return (
            flow.user_op_builder(
                name if name is not None else id_util.UniqueStr("Gather_")
            )
            .Op("gather")
            .Input("in", [params])
            .Input("indices", [indices])
            .Output("out")
            .Attr("axis", int(axis))
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
@oneflow_export("flatten")
def flatten(
    input: oneflow_api.BlobDesc,
    start_dim: int = 0,
    end_dim: int = -1,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Collapse the contiguous dimension range [start_dim, end_dim] of `input` into one.

    Args:
        input: The Blob to flatten.
        start_dim: First dimension of the range to merge. Defaults to 0.
        end_dim: Last dimension of the range to merge; negative indexes are
            allowed. Defaults to -1 (the last dimension).
        name: Optional operator name; auto-generated when omitted.

    Returns:
        A `Blob` with the selected dimensions merged, same type as `input`.
        E.g. flattening a (4, 4, 3, 2) blob with start_dim=1, end_dim=-1
        yields shape (4, 24).
    """
    if name is None:
        name = id_util.UniqueStr("Flatten_")
    # Assemble the "flatten" user op step by step instead of one long chain.
    builder = flow.user_op_builder(name).Op("flatten")
    builder = builder.Input("in", [input]).Output("out")
    builder = builder.Attr("start_dim", start_dim).Attr("end_dim", end_dim)
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def infer_shape(x, shape):
    """Resolve a single -1 entry in `shape` from the element count of `x`.

    Args:
        x: A blob/array-like exposing a `.shape` tuple.
        shape: Target shape list; at most one entry may be -1.

    Returns:
        `shape` (mutated in place, as before) with any -1 replaced by the
        inferred dimension size.

    Raises:
        AssertionError: If the element counts are incompatible.
    """
    dim_index_need_infer = shape.index(-1) if shape.count(-1) == 1 else None
    in_elem_cnt = reduce(operator.mul, x.shape, 1)
    out_elem_cnt = reduce(operator.mul, shape, 1)
    if dim_index_need_infer is not None:
        # out_elem_cnt carries the -1 factor, so compare against its magnitude.
        # Use exact integer division instead of float "/" so very large element
        # counts (> 2**53) do not lose precision.
        known_elem_cnt = abs(out_elem_cnt)
        assert in_elem_cnt % known_elem_cnt == 0
        shape[dim_index_need_infer] = in_elem_cnt // known_elem_cnt
    else:
        assert in_elem_cnt == out_elem_cnt
    return shape
@oneflow_export("reshape")
def reshape(
    x: oneflow_api.BlobDesc, shape: Sequence[int], name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""This operator reshapes a Blob.

    If the Blob is dynamic, it will call `flow.dynamic_reshape` automatically.
    We can set one dimension in `shape` as `-1`, the operator will infer the complete shape.

    Args:
        x: A `Blob`.
        shape: Shape of the output blob.
        name: A name for the operation (optional).

    Returns:
        A `Blob`, has the same type as `x`.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def reshape_Job(x: tp.Numpy.Placeholder(shape=(4, 4), dtype=flow.float32)
        ) -> tp.Numpy:
            reshape_blob = flow.reshape(x,
                                         shape=[2, 2, 2, -1])
            return reshape_blob

        x = np.array([[1, 2, 3, 4],
                      [5, 6, 7, 8],
                      [9, 10, 11, 12],
                      [13, 14, 15, 16]]).astype(np.float32)
        out = reshape_Job(x)

        # out.shape (2, 2, 2, 2)
    """
    x = flow.cast_to_current_logical_view(x)
    # Copy so downstream mutation (infer_shape fills in -1) stays local.
    assert isinstance(shape, tuple) or isinstance(shape, list)
    shape = list(shape)
    assert all(dim == -1 or dim > 0 for dim in shape)
    assert shape.count(-1) <= 1
    if not x.is_dynamic:
        # Static shape: run the "reshape" user op with the fully inferred shape.
        if name is None:
            name = id_util.UniqueStr("Reshape_")
        return (
            flow.user_op_builder(name)
            .Op("reshape")
            .Input("in", [x])
            .Output("out")
            .Attr("shape", infer_shape(x, shape))
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
    else:
        # Dynamic shape: fall back to the legacy dynamic_reshape op_conf, which
        # resolves the -1 entry at runtime.
        op_conf = op_conf_util.OperatorConf()
        setattr(
            op_conf,
            "name",
            name if name is not None else id_util.UniqueStr("DynamicReshape_"),
        )
        setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
        op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
        setattr(op_conf.dynamic_reshape_conf, "out", "out")
        interpret_util.Forward(op_conf)
        # Hand back a remote blob pointing at the op's "out" output.
        lbi = logical_blob_id_util.LogicalBlobId()
        lbi.op_name = op_conf.name
        lbi.blob_name = "out"
        return remote_blob_util.RemoteBlob(lbi)
@oneflow_export("reshape_like")
def reshape_like(
    x: oneflow_api.BlobDesc, like: oneflow_api.BlobDesc, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Reshape Blob `x` so its shape matches that of Blob `like`.

    Args:
        x (oneflow_api.BlobDesc): The Blob to reshape.
        like (oneflow_api.BlobDesc): A Blob whose shape is the target shape.
        name (Optional[str], optional): Operator name; auto-generated when None.

    Returns:
        oneflow_api.BlobDesc: `x` with the shape of `like`.
    """
    op_name = id_util.UniqueStr("ReshapeLike_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("reshape_like")
    builder = builder.Input("in", [x]).Input("like", [like]).Output("out")
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("dynamic_reshape")
def dynamic_reshape(
    x: oneflow_api.BlobDesc, shape: Sequence[int], name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """This operator reshapes a dynamic blob.

    Args:
        x (oneflow_api.BlobDesc): The input Blob.
        shape (Sequence[int]): The output shape.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def dynamic_reshape_Job(x: tp.Numpy.Placeholder(shape=(1, 3, 64, 64), dtype=flow.float32)
        ) -> tp.Numpy:
            reshape_out1 = flow.dynamic_reshape(x, (-1, 64))
            variable1 = flow.get_variable(
                "var1",
                shape=(64, 32),
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            matmul_tensor = flow.matmul(reshape_out1, variable1)
            reshape_out2 = flow.dynamic_reshape(matmul_tensor, (-1, 8, 4))
            return reshape_out2

        x = np.random.rand(1, 3, 64, 64).astype(np.float32)
        out = dynamic_reshape_Job(x)

        # out.shape (192, 8, 4)
    """
    assert isinstance(shape, tuple) or isinstance(shape, list)
    shape = list(shape)
    # Build a legacy dynamic_reshape op_conf; the shape (incl. any -1) is
    # resolved at runtime rather than at graph-build time.
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("DynamicReshape_"),
    )
    setattr(op_conf.dynamic_reshape_conf, "in", x.unique_name)
    op_conf.dynamic_reshape_conf.shape.dim.extend(list(shape))
    setattr(op_conf.dynamic_reshape_conf, "out", "out")
    interpret_util.Forward(op_conf)
    # Hand back a remote blob pointing at the op's "out" output.
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
@oneflow_export("transpose")
def transpose(
    a: oneflow_api.BlobDesc,
    perm: Sequence[int] = None,
    conjugate: bool = False,
    batch_axis_non_change: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Permute the dimensions of `a` according to `perm`.

    Args:
        a (oneflow_api.BlobDesc): The Blob to transpose.
        perm (Sequence[int], optional): Permutation of the dimension indices.
            Despite the default, a list or tuple must be supplied.
        conjugate (bool, optional): Unsupported; must remain False.
        batch_axis_non_change (bool, optional): Deprecated; has no effect.
        name (Optional[str], optional): Operator name; auto-generated when None.

    Raises:
        NotImplementedError: If `conjugate` is True.

    Returns:
        oneflow_api.BlobDesc: The transposed Blob, e.g. a (1, 2, 3) blob with
        perm=[2, 0, 1] becomes (3, 1, 2).
    """
    assert isinstance(perm, (tuple, list))
    if name is None:
        name = id_util.UniqueStr("Transpose_")
    if conjugate:
        raise NotImplementedError
    builder = (
        flow.user_op_builder(name)
        .Op("transpose")
        .Input("input", [a])
        .Output("output")
        .Attr("perm", perm)
    )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("slice")
def slice(
    x: oneflow_api.BlobDesc,
    begin: Sequence[int],
    size: Sequence[int],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Extracts a slice from a tensor.

    Args:
        x: A `Blob`.
        begin: A list or a tuple, indicate each dimension slice begin, whose length must be equal
            to x's number of dimensions, the first element of begin must be set to None.
            (Because the internal op of OneFlow does not support 0-dimension slice at present.)
        size: A list or a tuple, indicate each dimension slice size, whose length must be equal
            to x's number of dimensions, the first element of begin must be set to None.
        name: A name for the operation (optional).

    Returns:
        oneflow_api.BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def slice_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32)
        ) -> tp.Numpy:
            slice_blob = flow.slice(x,
                                   begin=[None, 0],
                                   size=[None, 2])
            return slice_blob

        x = np.array([[1, 2, 3],
                      [4, 5, 6],
                      [7, 8, 9]]).astype(np.float32)
        out = slice_Job(x)

        # out [[1. 2.]
        #      [4. 5.]
        #      [7. 8.]]
    """
    ndim = len(x.shape)
    # Validate begin/size: both must be per-dimension sequences of int-or-None.
    if not isinstance(begin, (list, tuple)) or len(begin) != ndim:
        raise ValueError(
            "begin must be a list/tuple with the same length as input tensor's number of dimensions"
        )
    if not all(isinstance(b, int) or b is None for b in begin):
        raise ValueError("element of begin must be a int or None")
    if not isinstance(size, (list, tuple)) or len(size) != ndim:
        raise ValueError(
            "size must be a list/tuple with the same length as input tensor's number of dimensions."
        )
    if not all(isinstance(s, int) or s is None for s in size):
        raise ValueError("element of size must be a int or None")
    # Translate (begin, size) into per-dimension (start, stop, step) tuples
    # and delegate to slice_v2.
    slice_tup_list = []
    for b, s, dim_size in zip(begin, size, x.shape):
        start, stop, step = (None, None, 1)
        if b is not None:
            if b < -dim_size or b >= dim_size:
                raise ValueError("element of begin is out of range")
            start = b
        if s is not None:
            if s == -1:
                # size == -1 means "to the end of this dimension".
                stop = dim_size
            else:
                if s <= 0 or s > dim_size:
                    raise ValueError("element of size is invalid")
                # NOTE(review): `b + s` raises TypeError when b is None here,
                # and looks wrong for negative b (e.g. b=-2, s=2 yields stop=0,
                # an empty slice) — confirm intended semantics for those cases.
                if b + s < dim_size:
                    stop = b + s
        slice_tup_list.append((start, stop, step))
    return slice_v2(x, slice_tup_list, name=name)
def _check_slice_tup_list(slice_tup_list, shape):
ndim = len(shape)
if not isinstance(slice_tup_list, (list, tuple)) or len(slice_tup_list) > ndim:
raise ValueError(
"slice_tup_list must be a list or tuple with length "
"less than or equal to number of dimensions of input tensor"
)
# if length of slice_tup_list is less than number of dimensions of x, fill it to length of ndims reduce 1
if len(slice_tup_list) < ndim:
slice_tup_list += type(slice_tup_list)(
[(None, None, None)] * (ndim - len(slice_tup_list))
)
start_list = []
stop_list = []
step_list = []
for slice_tup, dim_size in zip(slice_tup_list, shape):
if not isinstance(slice_tup, (tuple, list)) or len(slice_tup) != 3:
raise ValueError(
"element of slice_tup_list must be a list or tuple with form (start, stop, step)"
)
if not all(isinstance(idx, int) or idx is None for idx in slice_tup):
raise ValueError("element of slice tuple must int or None")
(start, stop, step) = slice_tup
if step is None:
step = 1
if step == 0:
raise ValueError("slice step can't be 0")
if start is None:
start = 0 if step > 0 else np.iinfo(np.int64).max
elif start < -dim_size or start >= dim_size:
raise ValueError("slice start must be in range [-size, size)")
if stop is None:
stop = np.iinfo(np.int64).max if step > 0 else np.iinfo(np.int64).min
elif stop < -dim_size - 1 or stop > dim_size:
raise ValueError("slice start must be in range [-size-1, size]")
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
return start_list, stop_list, step_list
@oneflow_export("slice_v2")
def slice_v2(
    x: oneflow_api.BlobDesc,
    slice_tup_list: Sequence[Tuple[int, int, int]],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Extract a slice of `x` described by per-dimension (start, stop, step) tuples.

    Args:
        x: A `Blob`.
        slice_tup_list: One (start, stop, step) tuple per dimension; trailing
            dimensions may be omitted and default to a full slice.
        name: Optional operator name; auto-generated when falsy.

    Returns:
        oneflow_api.BlobDesc: The sliced Blob.

    Note: The internal op does not support 0-dimension slices, so use `None`
    for any dimension that should be kept whole.
    """
    name = name or id_util.UniqueStr("Slice_")
    if not isinstance(name, str):
        raise ValueError("name must be a string")
    start, stop, step = _check_slice_tup_list(slice_tup_list, x.shape)
    builder = flow.user_op_builder(name).Op("slice")
    builder = builder.Input("x", [x]).Output("y")
    builder = builder.Attr("start", start).Attr("stop", stop).Attr("step", step)
    return builder.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("slice_update")
def api_slice_update(
    x: oneflow_api.BlobDesc,
    update: oneflow_api.BlobDesc,
    slice_tup_list: Sequence[Tuple[int, int, int]],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Update a slice of tensor `x` with the contents of `update`.

    Args:
        x: A `Blob`, whose slice will be updated.
        update: A `Blob`, indicate the update content.
        slice_tup_list: One (start, stop, step) tuple per dimension.
        name: Optional operator name; auto-generated when None.

    Returns:
        oneflow_api.BlobDesc: `x` with the selected region replaced.
    """
    if name is None:
        name = id_util.UniqueStr("SliceUpdate_")
    if not isinstance(name, str):
        raise ValueError("name must be a string")
    start, stop, step = _check_slice_tup_list(slice_tup_list, x.shape)
    builder = flow.user_op_builder(name).Op("slice_update")
    builder = builder.Input("x", [x]).Input("update", [update]).Output("y")
    builder = builder.Attr("start", start).Attr("stop", stop).Attr("step", step)
    return builder.Build().InferAndTryRun().SoleOutputBlob()
# Get slice attrs for slice_assign and logical_slice
# Note the step in slice_tup_list must be greater than 0
# as slice_assign and logical_slice only support step > 0
def _GetSliceAttrs(slice_tup_list, input_shape):
ndim = len(input_shape)
if not (isinstance(slice_tup_list, (list, tuple)) and len(slice_tup_list) <= ndim):
raise ValueError(
"slice_tup_list must be a list or tuple with length "
"less than or equal to number of dimensions of input tensor"
)
# Right extends slice_tup_list with [None, None, None] if len(slice_tup_list) < len(input_shape)
if len(slice_tup_list) < ndim:
slice_tup_list += type(slice_tup_list)(
[(None, None, None)] * (ndim - len(slice_tup_list))
)
start_list = []
stop_list = []
step_list = []
for slice_tup, dim_size in zip(slice_tup_list, input_shape):
if not (isinstance(slice_tup, (tuple, list)) and len(slice_tup) == 3):
raise ValueError(
"element of slice_tup_list must be a list or tuple with form (start, stop, step)"
)
if not all(isinstance(idx, int) or idx is None for idx in slice_tup):
raise ValueError("element of slice tuple must int or None")
(start, stop, step) = slice_tup
if step is None:
step = 1
if step <= 0:
raise ValueError("slice_assign/logical_slice step must be greater than 0")
if start is None:
start = 0
elif start < -dim_size or start >= dim_size:
raise ValueError(
"slice_assign/logical_slice start must be in range [-size, size)"
)
elif start < 0:
start += dim_size
if stop is None:
stop = dim_size
elif stop < -dim_size or stop > dim_size:
raise ValueError(
"slice_assign/logical_slice start must be in range [-size, size]"
)
elif stop < 0:
stop += dim_size
start_list.append(start)
stop_list.append(stop)
step_list.append(step)
return start_list, stop_list, step_list
@oneflow_export("experimental.logical_slice")
def logical_slice(
    x: oneflow_api.BlobDesc,
    slice_tup_list: Sequence[Tuple[int, int, int]],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Slice `x` with positive-step (start, stop, step) tuples via the logical_slice op."""
    if name is None:
        name = id_util.UniqueStr("LogicalSlice_")
    if not isinstance(name, str):
        raise ValueError("name must be a string")
    start_list, stop_list, step_list = _GetSliceAttrs(slice_tup_list, x.shape)
    builder = flow.user_op_builder(name).Op("logical_slice")
    builder = builder.Input("x", [x]).Output("y")
    builder = (
        builder.Attr("start", start_list)
        .Attr("stop", stop_list)
        .Attr("step", step_list)
    )
    return builder.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("experimental.logical_slice_assign")
def logical_slice_assign(
    x: oneflow_api.BlobDesc,
    value: oneflow_api.BlobDesc,
    slice_tup_list: Sequence[Tuple[int, int, int]],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Assign `value` into the region of `x` described by positive-step slice tuples.

    `x` is wired to the op's "ref" input, which suggests in-place update
    semantics; no output blob is returned.
    """
    if name is None:
        name = id_util.UniqueStr("LogicalSliceAssign_")
    if not isinstance(name, str):
        raise ValueError("name must be a string")
    start_list, stop_list, step_list = _GetSliceAttrs(slice_tup_list, x.shape)
    builder = flow.user_op_builder(name).Op("logical_slice_assign")
    builder = builder.Input("ref", [x]).Input("value", [value])
    builder = (
        builder.Attr("start", start_list)
        .Attr("stop", stop_list)
        .Attr("step", step_list)
    )
    return builder.Build().InferAndTryRun()
@oneflow_export("reverse")
def reverse(
    input: oneflow_api.BlobDesc,
    axis: Union[int, Sequence[int]],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Reverse the elements of `input` along the given axis or axes.

    Implemented as a negative-step slice over each requested axis.

    Args:
        input (oneflow_api.BlobDesc): The Blob to reverse.
        axis (Union[int, Sequence[int]]): Axis or axes to flip; negative
            indexes are allowed.
        name (Optional[str], optional): Operator name; auto-generated when None.

    Raises:
        ValueError: If `name` is not a string, `axis` has a bad type, or an
            axis is out of range.

    Returns:
        oneflow_api.BlobDesc: The reversed Blob; e.g. reversing a 3x3 matrix
        along axis 0 flips its rows.
    """
    if name is None:
        name = id_util.UniqueStr("Reverse_")
    if not isinstance(name, str):
        raise ValueError("name must be a string")
    axes = [axis] if isinstance(axis, int) else axis
    if not isinstance(axes, (tuple, list)) or not all(isinstance(a, int) for a in axes):
        raise ValueError("axis must be a int or a list/tuple of int")
    ndim = len(input.shape)
    # Full slice everywhere, then flip the requested axes with step -1.
    slice_tup_list = [(None, None, None)] * ndim
    for a in axes:
        if a < 0:
            a += ndim
        if not 0 <= a < ndim:
            raise ValueError("axis is out of range")
        slice_tup_list[a] = (None, None, -1)
    return slice_v2(input, slice_tup_list, name)
@oneflow_export("concat")
def concat(
    inputs: Optional[Sequence[oneflow_api.BlobDesc]] = None,
    axis: int = 0,
    max_dim_size: Optional[int] = None,
    name: Optional[str] = None,
    values: Optional[Sequence[oneflow_api.BlobDesc]] = None,
) -> oneflow_api.BlobDesc:
    r"""Concatenate two or more `Blob` s at specified axis.

    Analogous to `numpy.concatenate <https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html>`_

    Args:
        inputs: a `list` of `Blob`
        axis: a `int`. `0` by default
        max_dim_size: hint of max dimension size along the given axis
        name: name of this operator. `None` by default
        values: deprecated param, use inputs instead

    Returns:
        A `Blob`

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def concat_Job() -> tp.Numpy:
            constant_blob_1 = flow.constant(value=1.5,
                                            shape=(1, 3, 3, 4),
                                            dtype=flow.float,
                                            name="blob1")
            constant_blob_2 = flow.constant(value=2.5,
                                            shape=(1, 3, 3, 4),
                                            dtype=flow.float,
                                            name="blob2")
            return flow.concat(inputs=[constant_blob_1, constant_blob_2],
                               axis=3)

        out = concat_Job()

        # out.shape (1, 3, 3, 8)
    """
    # backward compatible with values param name
    if values is not None:
        assert inputs is None
        inputs = values
    assert isinstance(inputs, (list, tuple))
    # A single input needs no concat op at all.
    if len(inputs) == 1:
        return inputs[0]
    assert len(inputs) >= 2
    if axis < 0:
        axis += len(inputs[0].shape)
    assert axis >= 0 and axis < len(
        inputs[0].shape
    ), "axis must be in range [0, num_axes of inputs)"
    first_input_shape = inputs[0].shape
    # Sum sizes along the concat axis, tracking static and dynamic inputs
    # separately; all other axes must match the first input exactly.
    static_dim_size = 0
    dynamic_dim_size = 0
    for input in inputs:
        assert len(input.shape) == len(first_input_shape)
        for i in range(len(input.shape)):
            if i == axis:
                if input.is_dynamic:
                    dynamic_dim_size += input.shape[i]
                else:
                    static_dim_size += input.shape[i]
            else:
                assert input.shape[i] == first_input_shape[i]
    # Without an explicit hint, the worst case is every input at full size.
    if max_dim_size is None:
        max_dim_size = static_dim_size + dynamic_dim_size
    else:
        assert (
            max_dim_size >= static_dim_size
        ), "max diemension size {} is too small to hold concatenated static dimension size {} along the given axis".format(
            max_dim_size, static_dim_size
        )
    if name is None:
        name = id_util.UniqueStr("Concat_")
    op = (
        flow.user_op_builder(name)
        .Op("concat")
        .Input("in", inputs)
        .Output("out")
        .Attr("axis", axis)
        .Attr("max_dim_size", max_dim_size)
        .Build()
    )
    return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("gather_nd")
def gather_nd(
    params: oneflow_api.BlobDesc,
    indices: oneflow_api.BlobDesc,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator is a high-dimensional extension of `gather`: `indices` is a
    K-dimensional tensor whose entries index into `params`, each one selecting a
    slice of it:
    .. math::
        output[(i_0,i_1,...,i_{K-2})] = param[indices(i_{0},i_{1},...,i_{K-2})]
    Args:
        params (oneflow_api.BlobDesc): The input Blob.
        indices (oneflow_api.BlobDesc): The slice indices.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def gather_nd_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32),
                        indice: tp.Numpy.Placeholder(shape=(2, 1), dtype=flow.int32)
        ) -> tp.Numpy:
            return flow.gather_nd(params=x, indices=indice)
        x = np.array([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9]]).astype(np.float32)
        indice = np.array([[0], [2]]).astype(np.int32)
        out = gather_nd_Job(x, indice)
        # out [[1. 2. 3.]
        #      [7. 8. 9.]]
    """
    op_name = id_util.UniqueStr("GatherNd_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("gather_nd")
    builder = builder.Input("params", [params]).Input("indices", [indices])
    op = builder.Output("out").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("scatter_nd")
def scatter_nd(
    indices: oneflow_api.BlobDesc,
    updates: oneflow_api.BlobDesc,
    shape: Sequence[int],
    name: Optional[str] = None,
):
    """This operator builds a new zero-initialized Blob of the given `shape` and
    writes the elements of `updates` into it at the positions given by `indices`.
    Args:
        indices (oneflow_api.BlobDesc): The indice of `updates`. Its type should be `flow.int`.
        updates (oneflow_api.BlobDesc): The update Blob.
        shape (Sequence[int]): The constant tensor shape, the constant tensor elements are all zero.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def scatter_nd_Job(indice: tp.Numpy.Placeholder(shape=(3, 1), dtype=flow.int32),
                        update: tp.Numpy.Placeholder(shape=(3, ), dtype=flow.float32),
        ) -> tp.Numpy:
            return flow.scatter_nd(indices=indice, updates=update, shape=[8])
        indice_array = np.array([[1], [6], [4]]).astype(np.int32)
        update_array = np.array([10.2, 5.1, 12.7]).astype(np.float32)
        out = scatter_nd_Job(indice_array, update_array)
        # [ 0.  10.2  0.   0.  12.7  0.   5.1  0. ]
    """
    op_name = id_util.UniqueStr("ScatterNd_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("scatter_nd")
    builder = builder.Input("indices", [indices]).Input("updates", [updates])
    op = builder.Attr("shape", shape).Output("out").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("tensor_scatter_nd_update")
def tensor_scatter_nd_update(
    params: oneflow_api.BlobDesc,
    indices: oneflow_api.BlobDesc,
    updates: oneflow_api.BlobDesc,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator writes the elements of `updates` into a copy of the Blob
    `params` at the positions given by `indices`.
    Args:
        params (oneflow_api.BlobDesc): The input Blob.
        indices (oneflow_api.BlobDesc): The indice of `updates`. Its type should be `flow.int32`.
        updates (oneflow_api.BlobDesc): The update Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def tensor_scatter_nd_Job(x: tp.Numpy.Placeholder(shape=(5, 3), dtype=flow.float32),
                                indice: tp.Numpy.Placeholder(shape=(3, 1), dtype=flow.int32),
                                update: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32),
        ) -> tp.Numpy:
            return flow.tensor_scatter_nd_update(params=x, indices=indice, updates=update)
        x = np.ones(shape=(5, 3)).astype(np.float32)
        indice_array = np.array([[0], [4], [2]]).astype(np.int32)
        update_array = np.array([[1, 1, 1],
                                [2, 2, 2],
                                [3, 3, 3]]).astype(np.float32)
        out = tensor_scatter_nd_Job(x, indice_array, update_array)
        # rows 0, 4 and 2 of x are replaced by the rows of update_array
    """
    op_name = id_util.UniqueStr("TensorScatterNdUpdate_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("tensor_scatter_nd_update")
    builder = builder.Input("params", [params]).Input("updates", [updates])
    op = builder.Input("indices", [indices]).Output("out").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("tensor_scatter_nd_add")
def tensor_scatter_nd_add(
    params: oneflow_api.BlobDesc,
    indices: oneflow_api.BlobDesc,
    updates: oneflow_api.BlobDesc,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator adds the elements of `updates` into a copy of the Blob
    `params` at the positions given by `indices`.
    Args:
        params (oneflow_api.BlobDesc): The input Blob.
        indices (oneflow_api.BlobDesc): The indice of `updates`. Its type should be `flow.int32`.
        updates (oneflow_api.BlobDesc): The update Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def tensor_scatter_nd_add_Job(x: tp.Numpy.Placeholder(shape=(5, 3), dtype=flow.float32),
                                    indice: tp.Numpy.Placeholder(shape=(3, 1), dtype=flow.int32),
                                    update: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.float32),
        ) -> tp.Numpy:
            return flow.tensor_scatter_nd_add(params=x, indices=indice, updates=update)
        x = np.ones(shape=(5, 3)).astype(np.float32)
        indice_array = np.array([[0], [4], [2]]).astype(np.int32)
        update_array = np.array([[1, 1, 1],
                                [2, 2, 2],
                                [3, 3, 3]]).astype(np.float32)
        out = tensor_scatter_nd_add_Job(x, indice_array, update_array)
        # rows of update_array are added to rows 0, 4 and 2 of x
    """
    op_name = id_util.UniqueStr("TensorScatterNdAdd_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("tensor_scatter_nd_add")
    builder = builder.Input("params", [params]).Input("updates", [updates])
    op = builder.Input("indices", [indices]).Output("out").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("argwhere")
def argwhere(
    condition: oneflow_api.BlobDesc,
    dtype: Optional[flow.dtype] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator finds the indices of input Blob `condition` elements that are
    non-zero. Each row of the output is the coordinate of one non-zero element.
    Args:
        condition (oneflow_api.BlobDesc): The input Blob.
        dtype (Optional[flow.dtype], optional): The data type of output. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob. Its type is `ListNumpy`.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def argwhere_Job(x: tp.Numpy.Placeholder(shape=(2, 3), dtype=flow.float32),
        ) -> tp.ListNumpy:
            return flow.argwhere(x)
        x = np.array([[0, 1, 0],
                    [2, 0, 2]]).astype(np.float32)
        out = argwhere_Job(x)
        # out [array([[0, 1],
        #             [1, 0],
        #             [1, 2]], dtype=int32)]
    """
    op_name = id_util.UniqueStr("ArgWhere_") if name is None else name
    out_dtype = flow.int32 if dtype is None else dtype
    builder = flow.user_op_builder(op_name).Op("argwhere")
    builder = builder.Input("input", [condition]).Attr("dtype", out_dtype)
    builder = builder.Output("output").Output("output_size")
    indices, num_found = builder.Build().InferAndTryRun().RemoteBlobList()
    # trim the statically-sized output down to the actual number of hits
    return sync_dynamic_resize(indices, num_found)
@oneflow_export("nonzero")
def nonzero(
    a: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """This operator finds the indices of input Blob `a` elements that are
    non-zero, returned transposed so each row lists one axis of coordinates.
    Args:
        a (oneflow_api.BlobDesc): The input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    """
    if name is not None:
        argwhere_name = name + "_ArgWhere"
        transpose_name = name + "_Transpose"
    else:
        argwhere_name = id_util.UniqueStr("Nonzero_ArgWhere_")
        transpose_name = id_util.UniqueStr("Nonzero_Transpose_")
    coords = argwhere(a, name=argwhere_name)
    return transpose(coords, perm=(1, 0), name=transpose_name)
@oneflow_export("where")
def where(
    condition: oneflow_api.BlobDesc,
    x: Optional[oneflow_api.BlobDesc] = None,
    y: Optional[oneflow_api.BlobDesc] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator selects elements based on `condition`.
    If `x` and `y` are both None, this operator is equal to `oneflow.argwhere`.
    If both are given, each output element is taken from `x` where the
    corresponding condition element is non-zero, and from `y` otherwise.
    Args:
        condition (oneflow_api.BlobDesc): The input Blob.
        x (Optional[oneflow_api.BlobDesc], optional): A Blob. Defaults to None.
        y (Optional[oneflow_api.BlobDesc], optional): A Blob. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Raises:
        ValueError: It is not supported when exactly one of x or y is non-None
    Returns:
        oneflow_api.BlobDesc: The result Blob. Its type is `ListNumpy`.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def where_Job(condition: tp.Numpy.Placeholder(shape=(5, ), dtype=flow.int32),
                    x: tp.Numpy.Placeholder(shape=(5, ), dtype=flow.float32),
                    y: tp.Numpy.Placeholder(shape=(5, ), dtype=flow.float32),
        ) -> tp.ListNumpy:
            return flow.where(condition=condition, x=x, y=y)
        condition = np.array([3, 0, 1, 0, 1]).astype(np.int32)
        x = np.array([10, 20, 30, 40, 50]).astype(np.float32)
        y = np.array([100, 200, 300, 400, 500]).astype(np.float32)
        out = where_Job(condition, x, y)
        # out [array([ 10., 200.,  30., 400.,  50.], dtype=float32)]
    """
    if x is None and y is None:
        # degenerate form: behave exactly like argwhere
        return argwhere(condition, name=name)
    if x is None or y is None:
        raise ValueError("it is not supported when exactly one of x or y is non-None")
    if name is None:
        name = id_util.UniqueStr("Where_")
    if x.shape == condition.shape and y.shape == condition.shape:
        broadcast_cond, broadcast_x, broadcast_y = condition, x, y
    else:
        # shapes differ; broadcast all three operands to a compatible shape
        broadcast_cond = flow.broadcast_to_compatible_with(condition, [x, y])
        broadcast_x = flow.broadcast_to_compatible_with(x, [condition, y])
        broadcast_y = flow.broadcast_to_compatible_with(y, [condition, x])
    builder = flow.user_op_builder(name).Op("where")
    builder = builder.Input("condition", [broadcast_cond])
    builder = builder.Input("x", [broadcast_x]).Input("y", [broadcast_y])
    op = builder.Output("out").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("elem_cnt")
def elem_cnt(
    inputs: oneflow_api.BlobDesc,
    dtype: Optional[flow.dtype] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator returns the amount of elements in input Blob.
    Args:
        inputs (oneflow_api.BlobDesc): The input Blob.
        dtype (Optional[flow.dtype], optional): The data type. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob. Its type is `ListNumpy`.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def elem_cnt_Job(x: tp.Numpy.Placeholder(shape=(5, ), dtype=flow.float32),
        ) -> tp.ListNumpy:
            return flow.elem_cnt(inputs=x, dtype=flow.int32)
        x = np.array([10, 20, -30, 40, 50]).astype(np.float32)
        out = elem_cnt_Job(x)
        # [array([5], dtype=int32)]
    """
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name if name is not None else id_util.UniqueStr("ElemCnt_")
    op_conf.shape_elem_cnt_conf.x = inputs.unique_name
    # count over all axes (no exclusion)
    op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    if dtype is not None:
        op_conf.shape_elem_cnt_conf.data_type = oneflow_api.deprecated.GetProtoDtype4OfDtype(
            dtype
        )
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(out_lbi)
@oneflow_export("sync_dynamic_resize")
def sync_dynamic_resize(
    inputs: oneflow_api.BlobDesc,
    size: oneflow_api.BlobDesc,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator resizes the leading axis of `inputs` to the runtime value
    held in `size`.
    Args:
        inputs (oneflow_api.BlobDesc): The input Blob.
        size (oneflow_api.BlobDesc): The size of new Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob. Its type is `ListNumpy`.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def sync_dynamic_resize_Job(x: tp.Numpy.Placeholder(shape=(4, 3), dtype=flow.float32),
                                    size: tp.Numpy.Placeholder(shape=(1, ), dtype=flow.int32),
        ) -> tp.ListNumpy:
            return flow.sync_dynamic_resize(inputs=x, size=size)
        x = np.array([[1, 2, 3],
                    [4, 5, 6],
                    [7, 8, 9],
                    [10, 11, 12]]).astype(np.float32)
        size = np.array([2]).astype(np.int32)
        out = sync_dynamic_resize_Job(x, size)
        # out [array([[1., 2., 3.],
        #             [4., 5., 6.]], dtype=float32)]
    """
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = (
        name if name is not None else id_util.UniqueStr("SyncDynamicResize_")
    )
    conf = op_conf.sync_dynamic_resize_conf
    # "in" is a Python keyword, so attribute assignment is not possible here
    setattr(conf, "in", inputs.unique_name)
    conf.size = size.unique_name
    conf.axis = 0
    conf.out = "out"
    conf.eager = flow.eager_execution_enabled()
    interpret_util.Forward(op_conf)
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(out_lbi)
@oneflow_export("stack")
def stack(
    inputs: Sequence[oneflow_api.BlobDesc], axis: int = 0, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator stacks the multiple Blobs on the specified axis.
    Args:
        inputs (Sequence[oneflow_api.BlobDesc]): A list of input Blob.
        axis (int): The stack axis.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def stack_job(x: tp.Numpy.Placeholder(shape=(2, 4, 6)),
                    y: tp.Numpy.Placeholder(shape=(2, 4, 6)))->tp.Numpy:
            return flow.stack([x, y], axis=2)
        x = np.ones(shape=(2, 4, 6), dtype=np.float32)
        y = np.ones(shape=(2, 4, 6), dtype=np.float32)
        out = stack_job(x, y)
        # output.shape (2, 4, 2, 6)
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    """
    if name is None:
        name = id_util.UniqueStr("Stack_")
    expanded = list(inputs)
    base_shape = expanded[0].shape
    num_dims = len(base_shape)
    # negative axes count from the end of the (rank + 1)-dim output shape
    if axis < 0:
        axis = axis + num_dims + 1
    assert (axis >= 0) and (axis <= num_dims)
    # every input must share the first input's shape; insert the new axis
    # into each one, then concatenate along it
    for idx, blob in enumerate(expanded):
        assert (
            base_shape == blob.shape
        ), "Each tensor should have the same shape ! Found a tensor instance shape is: {}".format(
            blob.shape
        )
        expanded[idx] = flow.expand_dims(
            blob, axis=axis, name=name + "expand_dims_{}".format(idx)
        )
    return flow.concat(expanded, axis=axis, name=name + "concat")
@oneflow_export("random.generate_random_batch_permutation_indices")
def generate_random_batch_permutation_indices(
    value: oneflow_api.BlobDesc, seed: Optional[int] = None, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator generates a random permutation of indices in batch axis.
    Args:
        value (oneflow_api.BlobDesc): The input Blob.
        seed (Optional[int], optional): The random seed. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob. Its type is `ListNumpy`.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def random_indice_Job(x: tp.Numpy.Placeholder(shape=(4, 3), dtype=flow.int32),
        ) -> tp.ListNumpy:
            return flow.random.generate_random_batch_permutation_indices(value=x)
        x = np.ones(shape=(4, 3)).astype(np.int32)
        out = random_indice_Job(x)
        # out [array([3, 0, 2, 1], dtype=int32)]
    """
    import random
    if name is None:
        op_name = id_util.UniqueStr(value.op_name + "_random_batch_permutation_indices")
    else:
        op_name = name
    op = (
        flow.user_op_builder(op_name)
        .Op("generate_random_batch_permutation_indices")
        .Input("x", [value])
        .Output("y")
    )
    if seed is None:
        # no user seed: draw one from Python's RNG over the full int64 range
        op.Attr("seed", random.randint(-(2 ** 63) + 1, 2 ** 63 - 1))
    else:
        op.Attr("seed", seed)
        # an explicit seed only makes sense with a stable user-provided name
        assert name is not None
    return op.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("random.shuffle")
def shuffle(
    value: oneflow_api.BlobDesc, seed: Optional[int] = None, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator shuffles the elements of the input Blob along its batch axis.
    Args:
        value (oneflow_api.BlobDesc): The input Blob.
        seed (Optional[int], optional): The random seed. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def shuffle_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.int32),
        ) -> tp.Numpy:
            return flow.random.shuffle(x)
        x = np.array([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]]).astype(np.int32)
        out = shuffle_Job(x)
        # out [[3 3 3]
        #      [1 1 1]
        #      [2 2 2]]
    """
    # shuffle == gather the rows of `value` by a random batch permutation
    permuted_indices = generate_random_batch_permutation_indices(value, seed)
    return flow.gather(value, permuted_indices)
@oneflow_export("identity")
def identity(
    x: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""This operator returns a `Blob` that has identical content and data type to input `Blob`.
    Analogous to `tf.identity <https://www.tensorflow.org/api_docs/python/tf/identity>`_
    Args:
        x (oneflow_api.BlobDesc): The input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def identity_Job(x: tp.Numpy.Placeholder(shape=(3, 3), dtype=flow.int32),
        ) -> tp.Numpy:
            return flow.identity(x)
        x = np.array([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]]).astype(np.int32)
        out = identity_Job(x)
        # out equals x
    """
    op_name = id_util.UniqueStr("Identity_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("identity")
    op = builder.Input("in", [x]).Output("out").Build()
    return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("identity_n")
def identity_n(
    inputs: Sequence[oneflow_api.BlobDesc], name: Optional[str] = None
) -> List[oneflow_api.BlobDesc]:
    """This operator is similar to `oneflow.identity`. The difference is that the input and output
    of `identity_n` is `List`.
    Args:
        inputs (Iterable[oneflow_api.BlobDesc]): A List of input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        List[oneflow_api.BlobDesc]: A list of result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        from typing import List
        @flow.global_function()
        def identity_Job(x: tp.Numpy.Placeholder(shape=(1, 3), dtype=flow.int32),
                        y: tp.Numpy.Placeholder(shape=(1, 3), dtype=flow.int32),
                        z: tp.Numpy.Placeholder(shape=(1, 3), dtype=flow.int32)
        ) -> List[tp.Numpy]:
            return flow.identity_n([x, y, z])
        x = np.array([[1, 1, 1]]).astype(np.int32)
        y = np.array([[2, 2, 2]]).astype(np.int32)
        z = np.array([[3, 3, 3]]).astype(np.int32)
        out = identity_Job(x, y, z)
        # out[0] [[1, 1, 1]]
        # out[1] [[2, 2, 2]]
        # out[2] [[3, 3, 3]]
    """
    op_name = id_util.UniqueStr("IdentityN_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("tuple_identity")
    # one output blob per input blob
    op = builder.Input("in", inputs).Output("out", len(inputs)).Build()
    return op.InferAndTryRun().RemoteBlobList()
@oneflow_export("cast_to_static_shape")
def cast_to_static_shape(
    x: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""This operator returns a `Blob` that has identical content and data type to input `Blob`, and whose shape is converted from dynamic to static
    Args:
        x (oneflow_api.BlobDesc): The input Blob which has dynamic shape.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob which is identical to input blob but has static shape.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def cast_to_static_shape_func(
            x: tp.ListNumpy.Placeholder(shape=(3, 3), dtype=flow.float32),
        ) -> tp.Numpy:
            return flow.cast_to_static_shape(x)
        x = np.array([[1, 1, 1],
                    [2, 2, 2],
                    [3, 3, 3]]).astype(np.float32)
        out = cast_to_static_shape_func(x)
        # out equals x
    """
    # already static: nothing to do
    if not x.is_dynamic:
        return x
    op_name = id_util.UniqueStr("CastToStaticShape_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("cast_to_static_shape")
    op = builder.Input("input", [x]).Output("output").Build()
    return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("squeeze")
def squeeze(
    input: oneflow_api.BlobDesc,
    axis: Optional[Sequence[int]] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator removes the specified dimensions of size 1 from the input Blob.
    If `axis` is not specified, all dimensions of size 1 are removed.
    The amount of element in return value is the same as Blob `input`.
    Args:
        input (oneflow_api.BlobDesc): The input Blob.
        axis (Optional[Sequence[int]], optional): The axis. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def squeeze_Job(x: tp.Numpy.Placeholder(shape=(1, 1, 1, 3), dtype=flow.int32),
        ) -> tp.Numpy:
            return flow.squeeze(x, axis=[1, 2])
        x = np.array([[[[1, 1, 1]]]]).astype(np.int32)
        out = squeeze_Job(x)
        # out.shape (1, 3)
    """
    if axis is None:
        # squeeze every size-1 dimension
        axis = [i for i, dim in enumerate(input.shape) if dim == 1]
    else:
        assert isinstance(axis, (list, tuple))
        num_axes = len(input.shape)
        for a in axis:
            assert -num_axes <= a < num_axes
    op_name = id_util.UniqueStr("Squeeze_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("squeeze")
    builder = builder.Input("in", [input]).Output("out")
    op = builder.Attr("axes", list(axis)).Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("expand_dims")
def expand_dims(
    input: oneflow_api.BlobDesc, axis: int, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """This operator inserts a dimension of size 1 at the specified axis of the input Blob.
    The amount of element in return value is the same as Blob `input`.
    Args:
        input (oneflow_api.BlobDesc): The input Blob.
        axis (int): The specified dimension index.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def expand_dim_Job(x: tp.Numpy.Placeholder(shape=(1, 3, 3), dtype=flow.int32),
        ) -> tp.Numpy:
            return flow.expand_dims(input=x, axis=2)
        x = np.ones(shape=(1, 3, 3)).astype(np.int32)
        out = expand_dim_Job(x)
        # out.shape (1, 3, 1, 3)
    """
    num_axes = len(input.shape)
    # axis addresses a slot in the (rank + 1)-dim output, so both ends extend by one
    assert -(num_axes + 1) <= axis <= num_axes
    op_name = id_util.UniqueStr("ExpandDims_") if name is None else name
    builder = flow.user_op_builder(op_name).Op("expand_dims")
    builder = builder.Input("in", [input]).Output("out")
    op = builder.Attr("axis", axis).Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("broadcast_like")
def broadcast_like(
    x: oneflow_api.BlobDesc,
    like: oneflow_api.BlobDesc,
    broadcast_axes: Optional[Sequence[int]] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator broadcast the input Blob `x` on the specified axis to match the shape of `like`.
    Args:
        x (oneflow_api.BlobDesc): The input Blob.
        like (oneflow_api.BlobDesc): A Blob.
        broadcast_axes (Optional[Sequence[int]], optional): The broadcast axis. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Raises:
        ValueError: The length of broadcast_axes must be greater than 0 and less than or equal to number of axes of like shape.
    Returns:
        oneflow_api.BlobDesc: The result Blob.
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def broadcast_like_Job(x: tp.Numpy.Placeholder(shape=(3, 1), dtype=flow.float32)
        ) -> tp.Numpy:
            like_tensor = flow.constant(value=1.0,
                                        dtype=flow.float32,
                                        shape=(3, 3))
            return flow.broadcast_like(x=x,
                                    like=like_tensor,
                                    broadcast_axes=(1, ))
        x = np.array([[1], [1], [1]]).astype(np.float32)
        out = broadcast_like_Job(x)
        # out [[[1 1 1]
        #       [1 1 1]
        #       [1 1 1]]]
        # out.shape (3, 3)
    """
    if name is None:
        name = id_util.UniqueStr("BroadcastLike_")
    if broadcast_axes is None:
        # default: broadcast over every axis of `like`
        broadcast_axes = list(range(len(like.shape)))
    assert isinstance(broadcast_axes, (list, tuple))
    if not 0 < len(broadcast_axes) <= len(like.shape):
        raise ValueError(
            "The length of broadcast_axes must be greater than 0 and less than or equal to number of axes of like shape"
        )
    builder = flow.user_op_builder(name).Op("broadcast_like")
    builder = builder.Input("x", [x]).Input("like", [like])
    builder = builder.Attr("broadcast_axes", broadcast_axes)
    op = builder.Output("y").Build()
    return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("masked_fill")
def masked_fill(
    x: oneflow_api.BlobDesc,
    mask: oneflow_api.BlobDesc,
    value: Union[float, int],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Fill a blob with a given value according to the given mask.
    Args:
        x (oneflow_api.BlobDesc): Input Blob.
        mask (oneflow_api.BlobDesc): Composed with 0 and 1, the input blob 'x' will be
            filled with the given value where the mask is 1.
        value (Union[float, int]): The value to use for filling the input blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Attention:
        x and mask must be broadcastable to each other.
        mask must be int type (int8/int32/int64).
    Returns:
        oneflow_api.BlobDesc: The value-filled Blob
    For example:
    .. code-block:: python
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp
        @flow.global_function()
        def masked_fill_Job(x: tp.Numpy.Placeholder((4, ), mask: tp.Numpy.Placeholder((4, ),
                            dtype = flow.int8))->tp.Numpy:
            return flow.masked_fill(x, mask, value=5)
        x = np.array([1, 2, 3, 4], dtype=np.float32)
        mask = np.array([1, 0, 0, 1], dtype=np.int8)
        out = masked_fill_Job(x, mask)
        # output [5 2 3 5]
    """
    if name is None:
        name = id_util.UniqueStr("MaskedFill_")
    # build a blob shaped like x holding the fill value, then select it wherever
    # the mask is set and keep x elsewhere
    fill_blob = flow.constant_like(like=x, value=value, name=name + "_ConstantLike")
    return flow.where(condition=mask, x=fill_blob, y=x, name=name + "_Where")
@oneflow_export("dim_gather")
def dim_gather(
input: oneflow_api.BlobDesc,
dim: int,
index: oneflow_api.BlobDesc,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
r""" This operator gathers elements from `input` according to `index` along with the axis `dim`.
Take a 3-D blob as example, the output is specified by:
.. code-block:: python
output[i][j][k] = input[index[i][j][k]][j][k] # if dim == 0
output[i][j][k] = input[i][index[i][j][k]][k] # if dim == 1
output[i][j][k] = input[i][j][index[i][j][k]] # if dim == 2
The shape of `input` and `index` should be the same except in the `dim` dimension.
That is, if `input` is a n-dimension blob with shape :math:`(x_0, x_1, \dots, x_{i-1}, x_i, x_{i+1}, \dots, x_n)`,
and `dim = i`, then `index` must be a n-dimension blob with shape :math:`(x_0, x_1, \dots, x_{i-1}, k, x_{i+1}, \dots, x_n)`
where :math:`k \geq 1`.
The return Blob `output` will have the same shape with `index`.
Args:
input (oneflow_api.BlobDesc): The input blob
dim (int): The axis along which to index
index (oneflow_api.BlobDesc): The index blob of elements to gather
name (Optional[str], optional): The name of the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: The elements gathered from `input` will be returned as the output Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def dim_gather_Job(input: tp.Numpy.Placeholder((2, 2), dtype=flow.float64),
index:tp.Numpy.Placeholder((2, 2), dtype=flow.int32))->tp.Numpy:
return flow.dim_gather(input, 1, index)
input = np.array([[1, 2], [3, 4]]).astype(np.float64)
index = np.array([[1, 0], [0, 1]]).astype(np.int32)
out = dim_gather_Job(input, index)
# output
# [[2. 1.]
# [3. 4.]]
"""
if len(input.shape) != len(index.shape):
raise ValueError("Dimensions of input and index should equal")
for i in range(0, len(input.shape)):
if dim == i:
continue
else:
if input.shape[i] != index.shape[i]:
raise ValueError(
"Dimensions of input and index should be same except at dim"
)
if dim >= len(index.shape):
raise ValueError(
"Value of dim is out of range(dim should be less than len(index.shape))"
)
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("DimGather_")
)
.Op("dim_gather")
.Input("input", [input])
.Input("index", [index])
.Output("output")
.Attr("dim", int(dim))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("amp_white_identity")
def amp_white_identity(
x: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
if name is None:
name = id_util.UniqueStr("AmpWhiteIdentity_")
op = (
flow.user_op_builder(name)
.Op("amp_white_identity")
.Input("in", [x])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("zeros")
def zeros(
shape: Sequence[int],
dtype: Optional[flow.dtype] = None,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""This operator creates a Tensor filled with the scalar value `0`.
Args:
shape (Sequence[int]): The shape of the Tensor.
dtype (Optional[flow.dtype], optional): The data type. Defaults to None.
name (Optional[str], optional): The name for the operator. Defaults to None.
Returns:
oneflow_api.BlobDesc: The result Tensor filled with value `0`
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
@flow.global_function()
def zeros_job() -> tp.Numpy:
return flow.zeros(shape=(2, 3), dtype=flow.float32)
out = zeros_job()
# output: [[0. 0. 0.]
# [0. 0. 0.]]
"""
if name is None:
name = id_util.UniqueStr("Zeros_")
if dtype is None:
dtype = flow.float32
return flow.constant(value=0.0, shape=shape, dtype=dtype, name=name + "constant")
@oneflow_export("ones")
def ones(
shape: Sequence[int],
dtype: Optional[flow.dtype] = None,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""This operator creates a Tensor filled with the scalar value `1`.
Args:
shape (Sequence[int]): The shape of the Tensor.
dtype (Optional[flow.dtype], optional): The data type. Defaults to None.
name (Optional[str], optional): The name for the operator. Defaults to None.
Returns:
oneflow_api.BlobDesc: The result Blob filled with value `1`
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
@flow.global_function()
def ones_job() -> tp.Numpy:
return flow.ones(shape=(2, 3), dtype=flow.float32)
out = ones_job()
# output: [[1. 1. 1.]
# [1. 1. 1.]]
"""
if name is None:
name = id_util.UniqueStr("Ones_")
if dtype is None:
dtype = flow.float32
return flow.constant(value=1.0, shape=shape, dtype=dtype, name=name + "constant")
| [
"oneflow.python.framework.interpret_util.Forward",
"oneflow.constant",
"oneflow.concat",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.eager_execution_enabled",
"oneflow.constant_like",
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.where",
"onefl... | [((1174, 1198), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""gather"""'], {}), "('gather')\n", (1188, 1198), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5016, 5041), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""flatten"""'], {}), "('flatten')\n", (5030, 5041), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6793, 6818), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""reshape"""'], {}), "('reshape')\n", (6807, 6818), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9190, 9220), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""reshape_like"""'], {}), "('reshape_like')\n", (9204, 9220), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((10816, 10849), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""dynamic_reshape"""'], {}), "('dynamic_reshape')\n", (10830, 10849), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12836, 12863), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""transpose"""'], {}), "('transpose')\n", (12850, 12863), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14636, 14659), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""slice"""'], {}), "('slice')\n", (14650, 14659), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((19353, 19379), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""slice_v2"""'], {}), "('slice_v2')\n", (19367, 19379), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((21403, 21433), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""slice_update"""'], {}), "('slice_update')\n", (21417, 21433), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((24653, 24697), 'oneflow.python.oneflow_export.oneflow_export', 
'oneflow_export', (['"""experimental.logical_slice"""'], {}), "('experimental.logical_slice')\n", (24667, 24697), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((25390, 25441), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""experimental.logical_slice_assign"""'], {}), "('experimental.logical_slice_assign')\n", (25404, 25441), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((26184, 26209), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""reverse"""'], {}), "('reverse')\n", (26198, 26209), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((28189, 28213), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""concat"""'], {}), "('concat')\n", (28203, 28213), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((31391, 31418), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""gather_nd"""'], {}), "('gather_nd')\n", (31405, 31418), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((34772, 34800), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""scatter_nd"""'], {}), "('scatter_nd')\n", (34786, 34800), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((37635, 37677), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_scatter_nd_update"""'], {}), "('tensor_scatter_nd_update')\n", (37649, 37677), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((40046, 40085), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_scatter_nd_add"""'], {}), "('tensor_scatter_nd_add')\n", (40060, 40085), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((42435, 42461), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""argwhere"""'], {}), "('argwhere')\n", (42449, 42461), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), 
((44161, 44186), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nonzero"""'], {}), "('nonzero')\n", (44175, 44186), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((44949, 44972), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""where"""'], {}), "('where')\n", (44963, 44972), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((48437, 48463), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""elem_cnt"""'], {}), "('elem_cnt')\n", (48451, 48463), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((50160, 50197), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""sync_dynamic_resize"""'], {}), "('sync_dynamic_resize')\n", (50174, 50197), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((52284, 52307), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""stack"""'], {}), "('stack')\n", (52298, 52307), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((54245, 54311), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random.generate_random_batch_permutation_indices"""'], {}), "('random.generate_random_batch_permutation_indices')\n", (54259, 54311), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((56023, 56055), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random.shuffle"""'], {}), "('random.shuffle')\n", (56037, 56055), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((57162, 57188), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""identity"""'], {}), "('identity')\n", (57176, 57188), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((58446, 58474), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""identity_n"""'], {}), "('identity_n')\n", (58460, 58474), False, 'from oneflow.python.oneflow_export 
import oneflow_export\n'), ((60079, 60117), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""cast_to_static_shape"""'], {}), "('cast_to_static_shape')\n", (60093, 60117), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((61596, 61621), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""squeeze"""'], {}), "('squeeze')\n", (61610, 61621), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((63831, 63860), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""expand_dims"""'], {}), "('expand_dims')\n", (63845, 63860), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((65417, 65449), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""broadcast_like"""'], {}), "('broadcast_like')\n", (65431, 65449), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((68519, 68548), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""masked_fill"""'], {}), "('masked_fill')\n", (68533, 68548), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((70145, 70173), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""dim_gather"""'], {}), "('dim_gather')\n", (70159, 70173), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((73090, 73126), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""amp_white_identity"""'], {}), "('amp_white_identity')\n", (73104, 73126), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((73512, 73535), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""zeros"""'], {}), "('zeros')\n", (73526, 73535), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((74615, 74637), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""ones"""'], {}), "('ones')\n", (74629, 74637), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), 
((6472, 6504), 'functools.reduce', 'reduce', (['operator.mul', 'x.shape', '(1)'], {}), '(operator.mul, x.shape, 1)\n', (6478, 6504), False, 'from functools import reduce\n'), ((6524, 6554), 'functools.reduce', 'reduce', (['operator.mul', 'shape', '(1)'], {}), '(operator.mul, shape, 1)\n', (6530, 6554), False, 'from functools import reduce\n'), ((7992, 8028), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['x'], {}), '(x)\n', (8025, 8028), True, 'import oneflow as flow\n'), ((12311, 12338), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (12336, 12338), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((12653, 12684), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (12675, 12684), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((12695, 12731), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (12729, 12731), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((12800, 12832), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (12827, 12832), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((49497, 49524), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (49522, 49524), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((49941, 49972), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (49963, 49972), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((49987, 50023), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (50021, 50023), True, 'import 
oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((50120, 50156), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['out_lbi'], {}), '(out_lbi)\n', (50147, 50156), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((51552, 51579), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (51577, 51579), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((52063, 52094), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (52085, 52094), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((52109, 52145), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (52143, 52145), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((52244, 52280), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['out_lbi'], {}), '(out_lbi)\n', (52271, 52280), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((54189, 54241), 'oneflow.concat', 'flow.concat', (['inputs'], {'axis': 'axis', 'name': "(name + 'concat')"}), "(inputs, axis=axis, name=name + 'concat')\n", (54200, 54241), True, 'import oneflow as flow\n'), ((69992, 70060), 'oneflow.constant_like', 'flow.constant_like', ([], {'like': 'x', 'value': 'value', 'name': "(name + '_ConstantLike')"}), "(like=x, value=value, name=name + '_ConstantLike')\n", (70010, 70060), True, 'import oneflow as flow\n'), ((70072, 70141), 'oneflow.where', 'flow.where', ([], {'condition': 'mask', 'x': 'value_like_x', 'y': 'x', 'name': "(name + '_Where')"}), "(condition=mask, x=value_like_x, y=x, name=name + '_Where')\n", (70082, 70141), True, 'import oneflow as flow\n'), ((74537, 74611), 'oneflow.constant', 'flow.constant', ([], {'value': '(0.0)', 'shape': 'shape', 'dtype': 'dtype', 
'name': "(name + 'constant')"}), "(value=0.0, shape=shape, dtype=dtype, name=name + 'constant')\n", (74550, 74611), True, 'import oneflow as flow\n'), ((75631, 75705), 'oneflow.constant', 'flow.constant', ([], {'value': '(1.0)', 'shape': 'shape', 'dtype': 'dtype', 'name': "(name + 'constant')"}), "(value=1.0, shape=shape, dtype=dtype, name=name + 'constant')\n", (75644, 75705), True, 'import oneflow as flow\n'), ((6044, 6073), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Flatten_"""'], {}), "('Flatten_')\n", (6061, 6073), True, 'import oneflow.python.framework.id_util as id_util\n'), ((8613, 8640), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (8638, 8640), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((8991, 9022), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (9013, 9022), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((9037, 9073), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (9071, 9073), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((9154, 9186), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (9181, 9186), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((10545, 10578), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ReshapeLike_"""'], {}), "('ReshapeLike_')\n", (10562, 10578), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14313, 14344), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Transpose_"""'], {}), "('Transpose_')\n", (14330, 14344), True, 'import oneflow.python.framework.id_util as id_util\n'), ((20945, 20972), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', 
(['"""Slice_"""'], {}), "('Slice_')\n", (20962, 20972), True, 'import oneflow.python.framework.id_util as id_util\n'), ((21978, 22011), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SliceUpdate_"""'], {}), "('SliceUpdate_')\n", (21995, 22011), True, 'import oneflow.python.framework.id_util as id_util\n'), ((24869, 24903), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""LogicalSlice_"""'], {}), "('LogicalSlice_')\n", (24886, 24903), True, 'import oneflow.python.framework.id_util as id_util\n'), ((25653, 25693), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""LogicalSliceAssign_"""'], {}), "('LogicalSliceAssign_')\n", (25670, 25693), True, 'import oneflow.python.framework.id_util as id_util\n'), ((27526, 27555), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Reverse_"""'], {}), "('Reverse_')\n", (27543, 27555), True, 'import oneflow.python.framework.id_util as id_util\n'), ((31095, 31123), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Concat_"""'], {}), "('Concat_')\n", (31112, 31123), True, 'import oneflow.python.framework.id_util as id_util\n'), ((34498, 34528), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""GatherNd_"""'], {}), "('GatherNd_')\n", (34515, 34528), True, 'import oneflow.python.framework.id_util as id_util\n'), ((37327, 37358), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScatterNd_"""'], {}), "('ScatterNd_')\n", (37344, 37358), True, 'import oneflow.python.framework.id_util as id_util\n'), ((39707, 39750), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorScatterNdUpdate_"""'], {}), "('TensorScatterNdUpdate_')\n", (39724, 39750), True, 'import oneflow.python.framework.id_util as id_util\n'), ((42102, 42142), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorScatterNdAdd_"""'], {}), "('TensorScatterNdAdd_')\n", 
(42119, 42142), True, 'import oneflow.python.framework.id_util as id_util\n'), ((43744, 43774), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ArgWhere_"""'], {}), "('ArgWhere_')\n", (43761, 43774), True, 'import oneflow.python.framework.id_util as id_util\n'), ((44637, 44675), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Nonzero_ArgWhere_"""'], {}), "('Nonzero_ArgWhere_')\n", (44654, 44675), True, 'import oneflow.python.framework.id_util as id_util\n'), ((44700, 44739), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Nonzero_Transpose_"""'], {}), "('Nonzero_Transpose_')\n", (44717, 44739), True, 'import oneflow.python.framework.id_util as id_util\n'), ((49823, 49874), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['dtype'], {}), '(dtype)\n', (49867, 49874), False, 'import oneflow_api\n'), ((52027, 52057), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (52055, 52057), True, 'import oneflow as flow\n'), ((53372, 53399), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Stack_"""'], {}), "('Stack_')\n", (53389, 53399), True, 'import oneflow.python.framework.id_util as id_util\n'), ((58257, 58287), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Identity_"""'], {}), "('Identity_')\n", (58274, 58287), True, 'import oneflow.python.framework.id_util as id_util\n'), ((61344, 61383), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""CastToStaticShape_"""'], {}), "('CastToStaticShape_')\n", (61361, 61383), True, 'import oneflow.python.framework.id_util as id_util\n'), ((67835, 67870), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BroadcastLike_"""'], {}), "('BroadcastLike_')\n", (67852, 67870), True, 'import oneflow.python.framework.id_util as id_util\n'), ((69940, 69972), 
'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaskedFill_"""'], {}), "('MaskedFill_')\n", (69957, 69972), True, 'import oneflow.python.framework.id_util as id_util\n'), ((73270, 73308), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""AmpWhiteIdentity_"""'], {}), "('AmpWhiteIdentity_')\n", (73287, 73308), True, 'import oneflow.python.framework.id_util as id_util\n'), ((74445, 74472), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Zeros_"""'], {}), "('Zeros_')\n", (74462, 74472), True, 'import oneflow.python.framework.id_util as id_util\n'), ((75540, 75566), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Ones_"""'], {}), "('Ones_')\n", (75557, 75566), True, 'import oneflow.python.framework.id_util as id_util\n'), ((8271, 8300), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Reshape_"""'], {}), "('Reshape_')\n", (8288, 8300), True, 'import oneflow.python.framework.id_util as id_util\n'), ((12423, 12459), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""DynamicReshape_"""'], {}), "('DynamicReshape_')\n", (12440, 12459), True, 'import oneflow.python.framework.id_util as id_util\n'), ((49593, 49622), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ElemCnt_"""'], {}), "('ElemCnt_')\n", (49610, 49622), True, 'import oneflow.python.framework.id_util as id_util\n'), ((51664, 51703), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SyncDynamicResize_"""'], {}), "('SyncDynamicResize_')\n", (51681, 51703), True, 'import oneflow.python.framework.id_util as id_util\n'), ((55916, 55957), 'random.randint', 'random.randint', (['(-2 ** 63 + 1)', '(2 ** 63 - 1)'], {}), '(-2 ** 63 + 1, 2 ** 63 - 1)\n', (55930, 55957), False, 'import random\n'), ((8741, 8777), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""DynamicReshape_"""'], {}), "('DynamicReshape_')\n", 
(8758, 8777), True, 'import oneflow.python.framework.id_util as id_util\n'), ((47559, 47586), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Where_"""'], {}), "('Where_')\n", (47576, 47586), True, 'import oneflow.python.framework.id_util as id_util\n'), ((47796, 47848), 'oneflow.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['condition', '[x, y]'], {}), '(condition, [x, y])\n', (47829, 47848), True, 'import oneflow as flow\n'), ((47875, 47927), 'oneflow.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['x', '[condition, y]'], {}), '(x, [condition, y])\n', (47908, 47927), True, 'import oneflow as flow\n'), ((47954, 48006), 'oneflow.broadcast_to_compatible_with', 'flow.broadcast_to_compatible_with', (['y', '[condition, x]'], {}), '(y, [condition, x])\n', (47987, 48006), True, 'import oneflow as flow\n'), ((18819, 18837), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (18827, 18837), True, 'import numpy as np\n'), ((19015, 19033), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (19023, 19033), True, 'import numpy as np\n'), ((19055, 19073), 'numpy.iinfo', 'np.iinfo', (['np.int64'], {}), '(np.int64)\n', (19063, 19073), True, 'import numpy as np\n'), ((55601, 55671), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (["(value.op_name + '_random_batch_permutation_indices')"], {}), "(value.op_name + '_random_batch_permutation_indices')\n", (55618, 55671), True, 'import oneflow.python.framework.id_util as id_util\n'), ((58308, 58334), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (58328, 58334), True, 'import oneflow as flow\n'), ((61404, 61430), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (61424, 61430), True, 'import oneflow as flow\n'), ((73328, 73354), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (73348, 73354), True, 'import oneflow as flow\n'), 
((34548, 34574), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (34568, 34574), True, 'import oneflow as flow\n'), ((31144, 31170), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (31164, 31170), True, 'import oneflow as flow\n'), ((37378, 37404), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (37398, 37404), True, 'import oneflow as flow\n'), ((39770, 39796), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (39790, 39796), True, 'import oneflow as flow\n'), ((42162, 42188), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (42182, 42188), True, 'import oneflow as flow\n'), ((43845, 43871), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (43865, 43871), True, 'import oneflow as flow\n'), ((68263, 68289), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (68283, 68289), True, 'import oneflow as flow\n'), ((21149, 21175), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (21169, 21175), True, 'import oneflow as flow\n'), ((25113, 25139), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (25133, 25139), True, 'import oneflow as flow\n'), ((25903, 25929), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (25923, 25929), True, 'import oneflow as flow\n'), ((59864, 59895), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""IdentityN_"""'], {}), "('IdentityN_')\n", (59881, 59895), True, 'import oneflow.python.framework.id_util as id_util\n'), ((10600, 10626), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (10620, 10626), True, 'import oneflow as flow\n'), ((14420, 14446), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (14440, 14446), True, 'import oneflow as flow\n'), ((8330, 8356), 
'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (8350, 8356), True, 'import oneflow as flow\n'), ((22189, 22215), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (22209, 22215), True, 'import oneflow as flow\n'), ((6095, 6121), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (6115, 6121), True, 'import oneflow as flow\n'), ((63600, 63629), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Squeeze_"""'], {}), "('Squeeze_')\n", (63617, 63629), True, 'import oneflow.python.framework.id_util as id_util\n'), ((65185, 65217), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ExpandDims_"""'], {}), "('ExpandDims_')\n", (65202, 65217), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4173, 4206), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BatchGather_"""'], {}), "('BatchGather_')\n", (4190, 4206), True, 'import oneflow.python.framework.id_util as id_util\n'), ((48036, 48062), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (48056, 48062), True, 'import oneflow as flow\n'), ((72818, 72849), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""DimGather_"""'], {}), "('DimGather_')\n", (72835, 72849), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4710, 4738), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Gather_"""'], {}), "('Gather_')\n", (4727, 4738), True, 'import oneflow.python.framework.id_util as id_util\n')] |
import random
import numpy as np
import oneflow
import oneflow as flow
import oneflow.nn as nn
import oneflow.F as F
class MlpBlock(nn.Module):
    """Two-layer feed-forward block: Linear -> GELU -> Linear.

    Projects from ``hidden_dim`` up to ``mlp_dim`` and back, preserving the
    input's last-dimension size.
    """

    def __init__(self, hidden_dim, mlp_dim):
        super().__init__()
        layers = [
            nn.Linear(hidden_dim, mlp_dim),
            nn.GELU(),
            nn.Linear(mlp_dim, hidden_dim),
        ]
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        return self.mlp(x)
class MixerBlock(nn.Module):
    """One mixer layer: token mixing then channel mixing, each with a residual.

    Token mixing transposes the (token, channel) axes so its MLP operates
    across tokens; channel mixing applies its MLP along the channel axis.
    """

    def __init__(self, num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim):
        super().__init__()
        self.token_layernorm = nn.LayerNorm(hidden_dim)
        self.token_mix = MlpBlock(num_tokens, tokens_mlp_dim)
        self.channel_layernorm = nn.LayerNorm(hidden_dim)
        self.channel_mix = MlpBlock(hidden_dim, channels_mlp_dim)

    def forward(self, x):
        # Token mixing: normalize, swap axes 1/2, MLP over tokens, swap back.
        tokens = self.token_layernorm(x).transpose(1, 2)
        x = x + self.token_mix(tokens).transpose(1, 2)
        # Channel mixing on the re-normalized activations.
        x = x + self.channel_mix(self.channel_layernorm(x))
        return x
class MlpMixer(nn.Module):
    """MLP-Mixer image classifier.

    Embeds the image into non-overlapping patches with a strided convolution,
    runs ``num_blocks`` MixerBlocks over the patch tokens, then classifies
    from the token-averaged representation.

    Args:
        num_classes: Size of the final classification layer.
        num_blocks: Number of stacked MixerBlocks.
        patch_size: Side length of each square patch; must divide image_size.
        hidden_dim: Channel width of the patch embedding and mixer blocks.
        tokens_mlp_dim: Hidden width of the token-mixing MLPs.
        channels_mlp_dim: Hidden width of the channel-mixing MLPs.
        image_size: Input image side length (assumes square RGB input).

    Raises:
        ValueError: If ``image_size`` is not divisible by ``patch_size``.
    """

    def __init__(self, num_classes, num_blocks, patch_size, hidden_dim, tokens_mlp_dim, channels_mlp_dim, image_size=224):
        super().__init__()
        # Raise instead of assert so the check survives `python -O`.
        if image_size % patch_size != 0:
            raise ValueError('image size must be divisible by the patch size')
        num_tokens = (image_size // patch_size) ** 2
        # Non-overlapping patch embedding: stride equals kernel size.
        self.patch_embedding = nn.Conv2d(3, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False)
        self.mlp = nn.Sequential(*[MixerBlock(num_tokens, hidden_dim, tokens_mlp_dim, channels_mlp_dim) for _ in range(num_blocks)])
        self.layernorm = nn.LayerNorm(hidden_dim)
        self.fc = nn.Linear(hidden_dim, num_classes)

    def forward(self, x):
        x = self.patch_embedding(x)       # -> (B, hidden_dim, H/p, W/p)
        x = x.flatten(2).transpose(1, 2)  # -> (B, num_tokens, hidden_dim)
        x = self.mlp(x)
        x = self.layernorm(x)
        x = x.mean(dim=1)                 # average over tokens
        x = self.fc(x)
        return x
if __name__ == "__main__":
    # Smoke test: push one dummy image through a Mixer-B/16-sized model and
    # print the logits shape (expected: (1, 1000)).
    dummy_input = flow.ones((1, 3, 224, 224))
    mixer = MlpMixer(
        num_classes=1000,
        num_blocks=12,
        patch_size=16,
        hidden_dim=768,
        tokens_mlp_dim=384,
        channels_mlp_dim=3072,
        image_size=224,
    )
    print(mixer(dummy_input).shape)
"oneflow.nn.Linear",
"oneflow.ones",
"oneflow.nn.LayerNorm",
"oneflow.nn.Conv2d",
"oneflow.nn.GELU"
] | [((2001, 2028), 'oneflow.ones', 'flow.ones', (['(1, 3, 224, 224)'], {}), '((1, 3, 224, 224))\n', (2010, 2028), True, 'import oneflow as flow\n'), ((602, 626), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {}), '(hidden_dim)\n', (614, 626), True, 'import oneflow.nn as nn\n'), ((722, 746), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {}), '(hidden_dim)\n', (734, 746), True, 'import oneflow.nn as nn\n'), ((1418, 1497), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['(3)', 'hidden_dim'], {'kernel_size': 'patch_size', 'stride': 'patch_size', 'bias': '(False)'}), '(3, hidden_dim, kernel_size=patch_size, stride=patch_size, bias=False)\n', (1427, 1497), True, 'import oneflow.nn as nn\n'), ((1656, 1680), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['hidden_dim'], {}), '(hidden_dim)\n', (1668, 1680), True, 'import oneflow.nn as nn\n'), ((1699, 1733), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_dim', 'num_classes'], {}), '(hidden_dim, num_classes)\n', (1708, 1733), True, 'import oneflow.nn as nn\n'), ((265, 295), 'oneflow.nn.Linear', 'nn.Linear', (['hidden_dim', 'mlp_dim'], {}), '(hidden_dim, mlp_dim)\n', (274, 295), True, 'import oneflow.nn as nn\n'), ((309, 318), 'oneflow.nn.GELU', 'nn.GELU', ([], {}), '()\n', (316, 318), True, 'import oneflow.nn as nn\n'), ((332, 362), 'oneflow.nn.Linear', 'nn.Linear', (['mlp_dim', 'hidden_dim'], {}), '(mlp_dim, hidden_dim)\n', (341, 362), True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import Iterator, Optional, Set, Union, List
import oneflow._C
import oneflow._oneflow_internal
import oneflow.framework.graph_build_util as graph_build_util
from oneflow.env import get_rank
from oneflow.framework.tensor import Tensor, TensorTuple
from oneflow.nn.module import Module
from oneflow.nn.parameter import Parameter
from oneflow.nn.graph.util import add_indent, seq_to_func_return
class BlockType:
    """String tags naming what kind of object a Block wraps.

    Kept as plain strings (not an Enum) because Block code and its callers
    compare and print these values as strings.
    """

    NONE = "NONE"
    MODULE = "MODULE"
    PARAMETER = "PARAMETER"
    BUFFER = "BUFFER"
class Block(object):
    """Proxy wrapping a ``Module``, ``Parameter`` or buffer ``Tensor`` inside nn.Graph.

    A MODULE Block recursively wraps the module's children, parameters and
    buffers in child Blocks, intercepts ``__call__``/``forward`` to run them
    under a graph-build scope, and records input/output reprs for debug
    printing. PARAMETER/BUFFER Blocks additionally hold a lazily built
    counterpart of their eager tensor (``lazy_origin``), created on first
    access during graph building.
    """

    def __init__(
        self,
        prefix: str = "",
        name: str = "",
        value: Union[Module, Parameter, Tensor] = None,
    ):
        # A Block must wrap a raw object, never another Block.
        assert not isinstance(value, Block)
        self._name = name
        self._name_prefix = prefix
        self._type = BlockType.NONE
        self._origin = value
        self.config = BlockConfig()
        self._scope = None
        self._prev_scope = None
        # Debug printing is off by default; see debug() for the level knobs.
        self._debug = False
        self._debug_min_s_level = 2
        self._debug_max_v_level = 0
        if isinstance(value, Module):
            self._type = BlockType.MODULE
            self._is_executing_forward = False
            self._modules = OrderedDict()
            self._parameters = OrderedDict()
            self._buffers = OrderedDict()
            # Recursively mirror the module tree: direct children, then this
            # module's own (non-recursive) parameters and buffers.
            for (n, m) in list(value.named_children()):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, m))
            for (n, p) in list(value.named_parameters("", False)):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, p))
            for (n, b) in list(value.named_buffers("", False)):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, b))
            self._args_repr = []
            self._outs_repr = []
        elif isinstance(value, Parameter):
            self._type = BlockType.PARAMETER
            self._lazy_origin = None
            self._lazy_origin_builder = None
        elif isinstance(value, Tensor):
            self._type = BlockType.BUFFER
            self._lazy_origin = None
            self._lazy_origin_builder = None
        else:
            raise NotImplementedError()

    @property
    def name(self):
        # Short name of this block (attribute name in the parent).
        return self._name

    @property
    def name_prefix(self):
        # Dotted path of all ancestors, e.g. "model.layer1.".
        return self._name_prefix

    @property
    def type(self):
        # One of the BlockType string tags.
        return self._type

    @property
    def origin(self):
        # The eager Module/Parameter/Tensor this Block wraps.
        return self._origin

    @property
    def lazy_origin(self):
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin"
        return self._lazy_origin

    def lazy_origin_builder(self):
        """Return the callable that creates the lazy tensor (set by the graph)."""
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin_builder"
        return self._lazy_origin_builder

    def set_lazy_origin_builder(self, fn=None):
        """Register the callable used to build the lazy tensor on first access."""
        assert (
            self._type == BlockType.PARAMETER or self._type == BlockType.BUFFER
        ), "Only Parameter or Buffer Block has lazy_origin_builder"
        self._lazy_origin_builder = fn

    @property
    def prev_scope(self):
        # Captured once: the scope that was current before this block's own
        # scope was created; used to restore state in scope_context().
        if self._prev_scope is None:
            self._prev_scope = oneflow._oneflow_internal.GetCurrentScope()
        return self._prev_scope

    @property
    def scope(self):
        # Created lazily so prev_scope is captured at the right moment.
        if self._scope is None:
            self._scope = graph_build_util.make_new_block_scope(self.prev_scope, self)
        return self._scope

    def debug(
        self,
        mode: bool = True,
        v_level: int = 0,
        ranks: Optional[Union[int, List[int]]] = None,
    ) -> None:
        """Enable/disable debug printing on this block and all its children.

        Args:
            mode: turn debug printing on (True) or off (False).
            v_level: max verbosity level to print (higher = more output).
            ranks: rank(s) on which to enable; None means rank 0 only,
                -1 in the list means all ranks.
        """
        assert isinstance(mode, bool)
        if ranks is None:
            rank_list = [0]
        elif isinstance(ranks, int):
            rank_list = [ranks]
        elif isinstance(ranks, list):
            rank_list = ranks
        else:
            raise ValueError("ranks must be int or List[int].")
        my_rank = get_rank()
        if -1 in rank_list or my_rank in rank_list:
            self._debug = mode
            if self._debug:
                self._debug_min_s_level = 0
                self._debug_max_v_level = v_level
            if self._type == BlockType.MODULE:
                # Propagate the same settings down the block tree.
                def _set_child(d):
                    for (_, n) in d.items():
                        n.debug(mode, v_level, ranks)
                _set_child(self._modules)
                _set_child(self._parameters)
                _set_child(self._buffers)

    def scope_context(self):
        # Context manager that switches to this block's scope and restores
        # the previous one on exit.
        return graph_build_util.BlockScopeContext(self.prev_scope, self.scope)

    def __call__(self, *args):
        """Invoke the wrapped module, logging input/output reprs for debug."""
        assert self._type == BlockType.MODULE
        self._print(0, 1, self._shallow_repr())
        for idx, arg in enumerate(args):
            meta_repr_str = (
                arg._meta_repr() if isinstance(arg, Tensor) else str(type(arg))
            )
            in_str = (
                "(INPUT:_"
                + self.name_prefix
                + self.name
                + "-input_"
                + str(idx)
                + ":"
                + meta_repr_str
                + ")"
            )
            if not isinstance(arg, Tensor):
                in_str = "[WARNING]" + in_str
            self._args_repr.append(in_str)
            self._print(0, 1, in_str)

            def _print_state(d):
                for (_, n) in d.items():
                    self._print(0, 1, n._shallow_repr())

            _print_state(self._parameters)
            _print_state(self._buffers)
        # Dispatch through the origin class so Block.forward (below) runs.
        result = self._origin.__class__.__call__(self, *args)
        outputs = ()
        if not (type(result) is tuple or type(result) is list):
            outputs = (result,)
        else:
            outputs = result
        for idx, out in enumerate(outputs):
            out_repr = out._meta_repr() if isinstance(out, Tensor) else str(type(out))
            out_str = (
                "(OUTPUT:_"
                + self.name_prefix
                + self.name
                + "-output_"
                + str(idx)
                + ":"
                + out_repr
                + ")"
            )
            if not isinstance(out, Tensor):
                out_str = "[WARNING]" + out_str
            self._outs_repr.append(out_str)
            self._print(0, 1, out_str)
        return result

    def __iter__(self) -> Iterator["Block"]:
        assert self._type == BlockType.MODULE
        return iter(self._modules.values())

    def forward(self, *args):
        """Run the origin module's forward inside this block's scope."""
        assert self._type == BlockType.MODULE
        self._is_executing_forward = True
        args = self._pre_forward_mapping_out_scope(*args)
        with self.scope_context():
            result = self._origin.__class__.forward(self, *args)
        result = self._post_forward_mapping_out_scope(result)
        result = seq_to_func_return(result)
        self._is_executing_forward = False
        return result

    def _pre_forward_mapping_out_scope(self, *args):
        # Insert identity op when doing activation checkpointing or pipeline execution.
        # Identity op outside activation checkpointing scope will be the endpoint of an activation checkpointing segment.
        # Identity op as the first op of a pipeline stage will make backward op depends on the identity op within the stage,
        # otherwise the backward op may depends the op in former stage which will make graph creates unnessary buffers.
        if self.config.activation_checkpointing or (
            self.config.stage_id is not None and self.config.stage_id >= 0
        ):

            def insert_identity(t):
                assert isinstance(t, Tensor)
                return oneflow._C.identity(t)

            args = self._mapping_io("input", insert_identity, "insert_identity", *args,)
        return args

    def _post_forward_mapping_out_scope(self, *args):
        # Insert identity op when doing activation checkpointing or pipeline execution.
        if self.config.activation_checkpointing or (
            self.config.stage_id is not None and self.config.stage_id >= 0
        ):

            def insert_identity(t):
                assert isinstance(t, Tensor)
                return oneflow._C.identity(t)

            args = self._mapping_io(
                "output", insert_identity, "insert_identity", *args,
            )
        return args

    def modules(self, memo: Optional[Set["Block"]] = None) -> Iterator["Block"]:
        """Yield this block and all descendant MODULE blocks, de-duplicated."""
        assert self._type == BlockType.MODULE
        if memo is None:
            memo = set()
        if self not in memo:
            memo.add(self)
            yield self
            for (name, module) in self._modules.items():
                if module is None:
                    continue
                for m in module.modules(memo):
                    yield m

    def _mapping_io(self, io_type, func, func_desc, *args):
        """Apply ``func`` to every Tensor in ``args`` (including Tensors inside
        top-level lists), leaving non-Tensor items untouched; returns a tuple
        mirroring the input structure. Used to splice identity ops around
        module inputs/outputs.
        """
        assert isinstance(func_desc, str)
        assert io_type in ("input", "output")
        mapped_args = []

        def mapping_tensor(item):
            assert isinstance(item, Tensor)
            return func(item)

        for idx, arg in enumerate(args):
            if isinstance(arg, list):
                seq_args = list()
                for i in range(len(arg)):
                    is_tensor, name, repr_str = self._io_tensor_check_and_gen(
                        arg[i], io_type, idx, i
                    )
                    if is_tensor:
                        seq_args.append(mapping_tensor(arg[i]))
                        self._print(
                            0,
                            1,
                            f"{repr_str} is a Tensor, {func_desc} transformation has been done.",
                        )
                    else:
                        self._print(
                            0,
                            0,
                            f"{repr_str} is not a Tensor, {func_desc} transformation will be ignored.",
                        )
                        seq_args.append(arg[i])
                mapped_args.append(seq_args)
            elif isinstance(arg, Tensor):
                is_tensor, name, repr_str = self._io_tensor_check_and_gen(
                    arg, io_type, idx
                )
                assert is_tensor
                mapped_args.append(mapping_tensor(arg))
                self._print(
                    0,
                    1,
                    f"{repr_str} is a Tensor, {func_desc} transformation has been done.",
                )
            else:
                is_tensor, name, repr_str = self._io_tensor_check_and_gen(
                    arg, io_type, idx
                )
                assert not is_tensor
                mapped_args.append(arg)
                self._print(
                    0,
                    0,
                    f"{repr_str} is not a Tensor or a list of Tensor, {func_desc} transformation will be ignored.",
                )
        return tuple(mapped_args)

    def _io_tensor_check_and_gen(self, item, io_type, idx, second_idx=None):
        """Return (is_tensor, generated io name, repr string) for one io item."""
        assert io_type in ("input", "output")
        name = (
            "_"
            + self.name_prefix
            + self.name
            + "-"
            + io_type
            + "_"
            + str(idx)
            + ("" if second_idx is None else "_" + str(second_idx))
        )
        if isinstance(item, Tensor):
            repr_str = (
                "(" + io_type.upper() + ":" + name + ":" + item._meta_repr() + ")"
            )
            return True, name, repr_str
        else:
            repr_str = (
                "[WARNING]("
                + io_type.upper()
                + ":"
                + name
                + ":"
                + str(type(item))
                + ")"
            )
            return False, name, repr_str

    def _members(self, get_members_fn, recurse=True) -> Iterator["Block"]:
        # Generic de-duplicated traversal over per-module member dicts.
        assert self._type == BlockType.MODULE
        memo = set()
        modules = self.modules() if recurse else [self]
        for module in modules:
            members = get_members_fn(module)
            for (k, v) in members:
                if v is None or v in memo:
                    continue
                memo.add(v)
                yield v

    def parameters(self, recurse: bool = True) -> Iterator["Block"]:
        """Yield parameter Blocks of this module (and children if recurse)."""
        assert self._type == BlockType.MODULE
        gen = self._members(lambda module: module._parameters.items(), recurse=recurse)
        for elem in gen:
            yield elem

    def buffers(self, recurse: bool = True) -> Iterator["Block"]:
        """Yield buffer Blocks of this module (and children if recurse)."""
        assert self._type == BlockType.MODULE
        gen = self._members(lambda module: module._buffers.items(), recurse=recurse)
        for elem in gen:
            yield elem

    def __setattr__(self, name: str, value=None) -> None:
        # Block values are routed into the matching typed dict; everything
        # else is a plain attribute. Duplicate names are rejected.
        if value is None or not isinstance(value, Block):
            self.__dict__[name] = value
        else:
            dicts_or_sets = (
                self.__dict__,
                self._modules,
                self._parameters,
                self._buffers,
            )
            for d in dicts_or_sets:
                if name in d:
                    raise AttributeError(
                        "'{}' object has duplicated attribute named '{}'".format(
                            self._name, name
                        )
                    )
            if value.type == BlockType.MODULE:
                self._modules[name] = value
            elif value.type == BlockType.PARAMETER:
                self._parameters[name] = value
            elif value.type == BlockType.BUFFER:
                self._buffers[name] = value
            else:
                raise AttributeError(
                    "'{}' object are not allowed to set attribute named '{}'".format(
                        type(self).__name__, name
                    )
                )

    def __getattr__(self, name: str):
        # Lookup order: own dict, child modules, parameters, buffers, the
        # origin module's attributes, then the origin class's methods (bound
        # to this Block so nested calls stay inside the Block machinery).
        if name in self.__dict__:
            return self.__dict__[name]
        if self._type == BlockType.MODULE:
            # support get module
            if "_modules" in self.__dict__:
                modules = self.__dict__["_modules"]
                if name in modules:
                    return modules[name]
            # support get parameter
            p_state = self._get_in_states(name, "_parameters")
            if p_state is not None:
                return p_state
            # support get buffer
            b_state = self._get_in_states(name, "_buffers")
            if b_state is not None:
                return b_state
            # support get normal attr
            if name in self._origin.__dict__:
                return self._origin.__dict__[name]
            # support get function
            if hasattr(self._origin, name):
                return partial(getattr(self._origin.__class__, name), self)
        raise AttributeError(
            "'{}' '{}' object '{}' in nn.Graph has no attribute '{}'".format(
                self._type, type(self).__name__, self._name_prefix + self.name, name
            )
        )

    def _get_in_states(self, name, states_name):
        """Resolve a parameter/buffer child ``name`` from dict ``states_name``.

        In lazy (graph-build) mode this triggers the one-time creation of the
        lazy tensor under the child's scope; in eager mode during build() it
        returns the eager origin; outside build() it returns the Block itself.
        """
        if states_name not in self.__dict__:
            return None
        _states = self.__dict__[states_name]
        if name not in _states:
            return None
        _s_block = _states[name]
        if graph_build_util.lazy_mode.is_enabled():
            # lazy
            if _s_block._lazy_origin is None:
                assert _s_block._lazy_origin_builder is not None, (
                    repr(_s_block) + " has no lazy Tensor creation function."
                )
                assert self._is_executing_forward, (
                    repr(_s_block)
                    + "'s first get must happened in it's nn.Module.forward() to generate the right scope."
                )
                with _s_block.scope_context():
                    _s_block._lazy_origin = _s_block._lazy_origin_builder()
            return _s_block._lazy_origin
        elif (
            not graph_build_util.lazy_mode.is_enabled()
        ) and self._is_executing_forward:
            # eager and inside nn.Graph.build()
            return _s_block.origin
        else:
            # outside nn.Graph.build()
            return _s_block

    def __repr__(self):
        """Multi-line repr: config, inputs, parameters, buffers, children, outputs."""
        lines = None
        if self._type == BlockType.MODULE:
            child_lines = []
            if not self.config._is_null:
                child_lines.append(add_indent(repr(self.config), 2))
            if len(self._args_repr) > 0:
                for in_str in self._args_repr:
                    input_str = add_indent(in_str, 2)
                    child_lines.append(input_str)

            def _append_child(d):
                for (_, n) in d.items():
                    n_str = repr(n)
                    n_str = add_indent(n_str, 2)
                    child_lines.append(n_str)

            _append_child(self._parameters)
            _append_child(self._buffers)
            _append_child(self._modules)
            if len(self._outs_repr) > 0:
                for out_str in self._outs_repr:
                    output_str = add_indent(out_str, 2)
                    child_lines.append(output_str)
            if len(child_lines) > 0:
                lines = child_lines
        main_str = self._shallow_repr() + ": ("
        if lines is not None:
            main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str

    def _print(self, s_level=2, v_level=0, msg: str = ""):
        r"""Do print according to info level.
        """
        assert isinstance(s_level, int)
        assert isinstance(v_level, int)
        assert isinstance(msg, str)
        if s_level >= self._debug_min_s_level:
            if (s_level > 0) or (s_level == 0 and v_level <= self._debug_max_v_level):
                print(msg)

    def _shallow_repr(self):
        # One-line repr: "(TYPE:full.name:origin meta)".
        shallow_repr = (
            "("
            + self._type
            + ":"
            + self._name_prefix
            + self._name
            + ":"
            + (
                self._origin._shallow_repr()
                if self._type == BlockType.MODULE
                else (self._origin._meta_repr())
            )
            + ")"
        )
        return shallow_repr
class BlockConfig(object):
    r"""Per-Block configuration knobs used by nn.Graph.

    Tracks the pipeline stage id and the activation-checkpointing flag.
    ``_is_null`` stays True until any option is explicitly set, which lets
    Block.__repr__ skip printing untouched configs.
    """

    def __init__(self):
        self._is_null = True
        self._stage_id = None
        self._activation_checkpointing = None

    @property
    def stage_id(self):
        r"""Get stage id of Block in pipeline parallelism.
        """
        return self._stage_id

    @stage_id.setter
    def stage_id(self, value: int = None):
        r"""Set stage id of Block in pipeline parallelism.
        Set different module's stage id to hint the graph preparing right num of buffers in pipeline.
        """
        self._is_null = False
        self._stage_id = value

    @property
    def activation_checkpointing(self):
        r"""Get whether do activation checkpointing in this Block.
        """
        return self._activation_checkpointing

    @activation_checkpointing.setter
    def activation_checkpointing(self, value: bool = False):
        r"""Set whether do activation checkpointing in this Block.
        """
        self._is_null = False
        self._activation_checkpointing = value

    def __repr__(self):
        # Only options that were explicitly set are rendered.
        stage_part = (
            ("stage_id=" + str(self.stage_id) + ", ")
            if self.stage_id is not None
            else ""
        )
        ckpt_part = (
            (
                "activation_checkpointing="
                + str(self.activation_checkpointing)
                + ", "
            )
            if self.activation_checkpointing is not None
            else ""
        )
        return (
            "("
            + "CONFIG"
            + ":config:"
            + self.__class__.__name__
            + "("
            + stage_part
            + ckpt_part
            + "))"
        )
| [
"oneflow.framework.graph_build_util.lazy_mode.is_enabled",
"oneflow.nn.graph.util.seq_to_func_return",
"oneflow.nn.graph.util.add_indent",
"oneflow.framework.graph_build_util.BlockScopeContext",
"oneflow.framework.graph_build_util.make_new_block_scope",
"oneflow.env.get_rank"
] | [((4697, 4707), 'oneflow.env.get_rank', 'get_rank', ([], {}), '()\n', (4705, 4707), False, 'from oneflow.env import get_rank\n'), ((5270, 5333), 'oneflow.framework.graph_build_util.BlockScopeContext', 'graph_build_util.BlockScopeContext', (['self.prev_scope', 'self.scope'], {}), '(self.prev_scope, self.scope)\n', (5304, 5333), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((7575, 7601), 'oneflow.nn.graph.util.seq_to_func_return', 'seq_to_func_return', (['result'], {}), '(result)\n', (7593, 7601), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return\n'), ((16112, 16151), 'oneflow.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (16149, 16151), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((1858, 1871), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1869, 1871), False, 'from collections import OrderedDict\n'), ((1903, 1916), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1914, 1916), False, 'from collections import OrderedDict\n'), ((1945, 1958), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1956, 1958), False, 'from collections import OrderedDict\n'), ((4129, 4189), 'oneflow.framework.graph_build_util.make_new_block_scope', 'graph_build_util.make_new_block_scope', (['self.prev_scope', 'self'], {}), '(self.prev_scope, self)\n', (4166, 4189), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((16792, 16831), 'oneflow.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (16829, 16831), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((17386, 17407), 'oneflow.nn.graph.util.add_indent', 'add_indent', (['in_str', '(2)'], {}), '(in_str, 2)\n', (17396, 17407), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return\n'), ((17598, 17618), 'oneflow.nn.graph.util.add_indent', 
'add_indent', (['n_str', '(2)'], {}), '(n_str, 2)\n', (17608, 17618), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return\n'), ((17915, 17937), 'oneflow.nn.graph.util.add_indent', 'add_indent', (['out_str', '(2)'], {}), '(out_str, 2)\n', (17925, 17937), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from audioop import reverse
from collections import namedtuple
from typing import List, Tuple, Union, Iterable, Optional
import warnings
import oneflow as flow
from oneflow.framework.tensor import Tensor
# The implementation of rnn util is modified from: https://github.com/pytorch/pytorch/blob/master/torch/nn/utils/rnn.py
def bind(optional, fn):
    """Apply ``fn`` to ``optional`` unless it is None (maybe-map helper)."""
    return None if optional is None else fn(optional)
def invert_permutation(permutation: Optional[Tensor]) -> Optional[Tensor]:
    """Return the inverse of an index permutation tensor (None passes through).

    Builds ``inverse`` such that ``inverse[permutation[i]] == i`` by
    scattering positions 0..n-1 into a zeroed tensor at the permuted indices.
    """
    if permutation is None:
        return None
    positions = flow.arange(
        0, permutation.numel(), device=permutation.device, dtype=flow.int32
    )
    return flow.scatter(flow.zeros_like(permutation), 0, permutation, positions)
class PackedSequence(object):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.PackedSequence.html.
    Holds the data and list of :attr:`batch_sizes` of a packed sequence.
    All RNN modules accept packed sequences as inputs.
    Note:
        Instances of this class should never be created manually. They are meant
        to be instantiated by functions like :func:`pack_padded_sequence`.
        Batch sizes represent the number elements at each sequence step in
        the batch, not the varying sequence lengths passed to
        :func:`pack_padded_sequence`.  For instance, given data ``abc`` and ``x``
        the :class:`PackedSequence` would contain data ``axbc`` with
        ``batch_sizes=[2,1,1]``.
    Attributes:
        data (Tensor): Tensor containing packed sequence
        batch_sizes (Tensor): Tensor of integers holding
            information about the batch size at each sequence step
        sorted_indices (Tensor, optional): Tensor of integers holding how this
            :class:`PackedSequence` is constructed from sequences.
        unsorted_indices (Tensor, optional): Tensor of integers holding how this
            to recover the original sequences with correct order.
    .. note::
        :attr:`data` can be on arbitrary device and of arbitrary dtype.
        :attr:`sorted_indices` and :attr:`unsorted_indices` must be ``oneflow.int64``
        tensors on the same device as :attr:`data`.
        However, :attr:`batch_sizes` should always be a CPU ``oneflow.int64`` tensor.
        This invariant is maintained throughout :class:`PackedSequence` class,
        and all functions that construct a `:class:PackedSequence` in PyTorch
        (i.e., they only pass in tensors conforming to this constraint).
    """

    def __init__(
        self,
        data: Tensor,
        batch_sizes: Optional[Tensor] = None,
        sorted_indices: Optional[Tensor] = None,
        unsorted_indices: Optional[Tensor] = None,
    ):
        self.sorted_indices = sorted_indices
        # BUG FIX: previously ``self.unsorted_indices`` was only assigned when
        # the argument was None, so constructing a PackedSequence with an
        # explicit ``unsorted_indices`` (e.g. from ``to()``/``pin_memory()``)
        # left the attribute unset and later accesses raised AttributeError.
        if unsorted_indices is None:
            unsorted_indices = invert_permutation(sorted_indices)
        self.unsorted_indices = unsorted_indices
        if batch_sizes is not None:
            # batch_sizes must live on CPU (class invariant, see docstring).
            if batch_sizes.device.type != "cpu":
                raise ValueError(
                    "batch_sizes should always be on CPU. "
                    "Instances of PackedSequence should never be created manually. "
                    "They should be instantiated by functions like pack_sequence "
                    "and pack_padded_sequences in nn.rnn_utils "
                )
            self.data = data
            self.batch_sizes = batch_sizes
        else:
            # Internal construction path: ``data`` carries (data, batch_sizes).
            assert isinstance(data, (list, tuple)) and len(data) == 2
            self.data = data[0]
            self.batch_sizes = data[1]

    def pin_memory(self):
        """Return a copy with all tensors moved to pinned (page-locked) memory."""
        return PackedSequence(
            self.data.pin_memory(),
            self.batch_sizes,
            bind(self.sorted_indices, lambda t: t.pin_memory()),
            bind(self.unsorted_indices, lambda t: t.pin_memory()),
        )

    def cuda(self, *args, **kwargs):
        """Return a copy on a CUDA device (no-op copy if already there)."""
        # Probe where an empty tensor with the same dtype/device would land.
        ex = flow.tensor((), dtype=self.data.dtype, device=self.data.device).to(
            *args, **kwargs
        )
        if ex.is_cuda:
            return self.to(*args, **kwargs)
        return self.to(*args, device="cuda", **kwargs)

    def cpu(self, *args, **kwargs):
        """Return a copy on the CPU (no-op copy if already there)."""
        ex = flow.tensor((), dtype=self.data.dtype, device=self.data.device).to(
            *args, **kwargs
        )
        if ex.device.type == "cpu":
            return self.to(*args, **kwargs)
        return self.to(*args, device="cpu", **kwargs)

    # --- dtype conversion shortcuts, all delegating to to() -----------------
    def double(self):
        return self.to(dtype=flow.double)

    def float(self):
        return self.to(dtype=flow.float)

    def half(self):
        return self.to(dtype=flow.half)

    def long(self):
        return self.to(dtype=flow.long)

    def int(self):
        return self.to(dtype=flow.int)

    def short(self):
        return self.to(dtype=flow.short)

    def char(self):
        return self.to(dtype=flow.int8)

    def byte(self):
        return self.to(dtype=flow.uint8)

    def to(self, *args, **kwargs):
        """Performs dtype and/or device conversion on `self.data`.
        It has similar signature as :meth:`oneflow.Tensor.to`, except optional
        arguments like `non_blocking` and `copy` should be passed as kwargs,
        not args, or they will not apply to the index tensors.
        .. note::
            If the ``self.data`` Tensor already has the correct :class:`oneflow.dtype`
            and :class:`oneflow.device`, then ``self`` is returned.
            Otherwise, returns a copy with the desired configuration.
        """
        data = self.data.to(*args, **kwargs)
        if data is self.data:
            return self
        else:
            # Index tensors follow data's device but keep their own dtype.
            kwargs = {
                k: v
                for k, v in filter(
                    lambda t: t[0] != "device" and t[0] != "dtype", kwargs.items()
                )
            }
            sorted_indices = bind(
                self.sorted_indices, lambda t: t.to(data.device, **kwargs)
            )
            unsorted_indices = bind(
                self.unsorted_indices, lambda t: t.to(data.device, **kwargs)
            )
            return PackedSequence(
                data, self.batch_sizes, sorted_indices, unsorted_indices
            )

    @property
    def is_cuda(self):
        r"""Returns true if `self.data` stored on a gpu"""
        return self.data.is_cuda

    def is_pinned(self):
        r"""Returns true if `self.data` stored on in pinned memory"""
        return self.data.is_pinned()
def pack_padded_sequence(
    input: Tensor,
    lengths: Tensor,
    batch_first: bool = False,
    enforce_sorted: bool = True,
) -> PackedSequence:
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pack_padded_sequence.html.
    Packs a Tensor containing padded sequences of variable length.
    :attr:`input` can be of size ``T x B x *`` where `T` is the length of the
    longest sequence (equal to ``lengths[0]``), ``B`` is the batch size, and
    ``*`` is any number of dimensions (including 0). If ``batch_first`` is
    ``True``, ``B x T x *`` :attr:`input` is expected.
    For unsorted sequences, use `enforce_sorted = False`. If :attr:`enforce_sorted` is
    ``True``, the sequences should be sorted by length in a decreasing order, i.e.
    ``input[:,0]`` should be the longest sequence, and ``input[:,B-1]`` the shortest
    one. `enforce_sorted = True` is only necessary for ONNX export.
    Note:
        This function accepts any input that has at least two dimensions. You
        can apply it to pack the labels, and use the output of the RNN with
        them to compute the loss directly. A Tensor can be retrieved from
        a :class:`PackedSequence` object by accessing its ``.data`` attribute.
    Args:
        input (Tensor): padded batch of variable length sequences.
        lengths (Tensor or list(int)): list of sequence lengths of each batch
            element (must be on the CPU if provided as a tensor).
        batch_first (bool, optional): if ``True``, the input is expected in ``B x T x *``
            format.
        enforce_sorted (bool, optional): if ``True``, the input is expected to
            contain sequences sorted by length in a decreasing order. If
            ``False``, the input will get sorted unconditionally. Default: ``True``.
    Returns:
        a :class:`PackedSequence` object
    """
    lengths = flow.as_tensor(lengths, dtype=flow.int64)
    # Unsorted input is not supported yet; the sorting branch below is kept in
    # place for when this restriction is lifted.
    assert (
        enforce_sorted
    ), "Only support enforce_sorted == True for now. Please sort the input by length in a decreasing order."
    if enforce_sorted:
        sorted_indices = None
    else:
        # Sort batch elements by length (descending) and remember the order
        # so pad_packed_sequence can restore it via unsorted_indices.
        lengths, sorted_indices = flow.sort(lengths, descending=True)
        sorted_indices = sorted_indices.to(input.device)
        batch_dim = 0 if batch_first else 1
        input = input.index_select(batch_dim, sorted_indices)
    data, batch_sizes = flow._C.pack_padded_sequence(input, lengths, batch_first)
    return PackedSequence(data, batch_sizes, sorted_indices, None)
def pad_packed_sequence(
    sequence: PackedSequence,
    batch_first: bool = False,
    padding_value: float = 0.0,
    total_length: Optional[int] = None,
) -> Tuple[Tensor, Tensor]:
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pad_packed_sequence.html.
    Pads a packed batch of variable length sequences.
    It is an inverse operation to :func:`pack_padded_sequence`.
    The returned Tensor's data will be of size ``T x B x *``, where `T` is the length
    of the longest sequence and `B` is the batch size. If ``batch_first`` is True,
    the data will be transposed into ``B x T x *`` format.
    .. note::
        :attr:`total_length` is useful to implement the
        ``pack sequence -> recurrent network -> unpack sequence`` pattern in a
        :class:`~oneflow.nn.Module` wrapped in :class:`~oneflow.nn.DataParallel`.
        See :ref:`this FAQ section <pack-rnn-unpack-with-data-parallelism>` for
        details.
    Args:
        sequence (PackedSequence): batch to pad
        batch_first (bool, optional): if ``True``, the output will be in ``B x T x *``
            format.
        padding_value (float, optional): values for padded elements.
        total_length (int, optional): if not ``None``, the output will be padded to
            have length :attr:`total_length`. This method will throw :class:`ValueError`
            if :attr:`total_length` is less than the max sequence length in
            :attr:`sequence`.
    Returns:
        Tuple of Tensor containing the padded sequence, and a Tensor
        containing the list of lengths of each sequence in the batch.
        Batch elements will be re-ordered as they were ordered originally when
        the batch was passed to ``pack_padded_sequence`` or ``pack_sequence``.
    For example:
    .. code-block:: python
        >>> from oneflow.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
        >>> import oneflow as flow
        >>> seq = flow.tensor([[4,5,6], [1,2,0], [3,0,0]])
        >>> lens = [3, 2, 1]
        >>> packed = pack_padded_sequence(seq, lens, batch_first=True, enforce_sorted=True)
        >>> packed.data
        tensor([4, 1, 3, 5, 2, 6], dtype=oneflow.int64)
        >>> packed.batch_sizes
        tensor([3, 2, 1], dtype=oneflow.int64)
        >>> seq_unpacked, lens_unpacked = pad_packed_sequence(packed, batch_first=True)
        >>> seq_unpacked
        tensor([[4, 5, 6],
                [1, 2, 0],
                [3, 0, 0]], dtype=oneflow.int64)
        >>> lens_unpacked
        tensor([3., 2., 1.], dtype=oneflow.float32)
    """
    max_seq_length = sequence.batch_sizes.shape[0]
    if total_length is not None:
        if total_length < max_seq_length:
            raise ValueError(
                "Expected total_length to be at least the length "
                "of the longest sequence in input, but got "
                "total_length={} and max sequence length being {}".format(
                    total_length, max_seq_length
                )
            )
    else:
        total_length = max_seq_length
    batch_sizes_t = sequence.batch_sizes.contiguous()
    assert (
        len(batch_sizes_t.shape) == 1
        and batch_sizes_t.device.type == "cpu"
        and batch_sizes_t.dtype == flow.int64
    ), f"'sequence.batch_sizes' should be a 1D CPU int64 tensor, but got {len(batch_sizes_t.shape)} D {batch_sizes_t.device.type} {batch_sizes_t.dtype} tensor"
    batch_sizes = batch_sizes_t.numpy()
    max_batch_size = int(batch_sizes[0])
    max_real_seq_length = batch_sizes_t.shape[0]
    max_seq_length = max_real_seq_length
    if total_length > 0:
        assert (
            total_length >= max_seq_length
        ), f"Expected total_length to be at least the length of the longest sequence in input, but got total_length={total_length} and max sequence length being {max_seq_length}"
        max_seq_length = total_length
    # output shape: [max_seq_length, max_batch_size, *sequence.data.shape[1:]]
    output_size = [max_seq_length, max_batch_size] + list(sequence.data.shape[1:])
    padded_output = flow.full(
        output_size,
        padding_value,
        dtype=sequence.data.dtype,
        device=sequence.data.device,
        requires_grad=sequence.data.requires_grad,
    )
    # Reusable view-shape buffer; dims 0 and 1 are overwritten per segment.
    tmp_view_size = list(output_size)  # == [-1, -1, *sequence.data.size()[1:]]
    lengths = flow.empty(max_batch_size)
    data_offset = 0
    prev_batch_size = max_batch_size
    prev_i = 0
    lengths_idx = max_batch_size - 1
    # Walk the packed data segment by segment: a new segment starts whenever
    # the per-step batch size drops (a sequence ended). The sentinel step at
    # i == max_real_seq_length (batch_size 0) flushes the final segment.
    for i in range(max_real_seq_length + 1):
        batch_size = batch_sizes[i] if i != max_real_seq_length else 0
        if batch_size != prev_batch_size:
            l = prev_batch_size * (i - prev_i)
            tmp_view_size[0] = i - prev_i
            tmp_view_size[1] = prev_batch_size
            padded_output[prev_i:i, 0:prev_batch_size] = sequence.data[
                data_offset : data_offset + l
            ].view(tmp_view_size)
            data_offset += l
            prev_i = i
        dec = prev_batch_size - batch_size
        if dec > 0:
            # Each drop of one in batch size means one sequence of length i.
            for _ in range(dec):
                lengths[lengths_idx] = i
                lengths_idx = lengths_idx - 1
        prev_batch_size = batch_size
    if batch_first:
        # BUG FIX: permute_dims was a tuple, so .append() raised
        # AttributeError for inputs with more than 2 dims; build a list.
        permute_dims = [1, 0] + list(range(2, padded_output.ndim))
        padded_output = padded_output.permute(permute_dims)
    unsorted_indices = sequence.unsorted_indices
    if unsorted_indices is not None:
        # Restore the caller's original batch order.
        batch_dim = 0 if batch_first else 1
        return (
            padded_output.index_select(batch_dim, unsorted_indices),
            lengths[unsorted_indices],
        )
    return padded_output, lengths
def pad_sequence(
    sequences: Union[Tensor, List[Tensor]],
    batch_first: bool = False,
    padding_value: float = 0.0,
) -> Tensor:
    """Pad a list of variable-length Tensors with ``padding_value``.

    The interface is consistent with PyTorch:
    https://pytorch.org/docs/stable/generated/torch.nn.utils.rnn.pad_sequence.html.

    ``pad_sequence`` stacks the given Tensors along a new dimension and pads
    them all to the length of the longest one: inputs of size ``L x *`` become
    a single output of size ``T x B x *`` (or ``B x T x *`` when
    ``batch_first`` is True), where `B` is the number of sequences, `T` is the
    longest length, and `*` is any number of trailing dimensions.

    Note:
        All Tensors in ``sequences`` are assumed to share trailing dimensions
        and dtype; both are taken from ``sequences[0]``.

    Args:
        sequences (list[Tensor]): list of variable length sequences.
        batch_first (bool, optional): output will be in ``B x T x *`` if True, or in
            ``T x B x *`` otherwise. Default: False.
        padding_value (float, optional): value for padded elements. Default: 0.

    Returns:
        Tensor of size ``T x B x *`` if :attr:`batch_first` is ``False``.
        Tensor of size ``B x T x *`` otherwise

    For example:

    .. code-block:: python

        >>> from oneflow.nn.utils.rnn import pad_sequence
        >>> import oneflow as flow
        >>> a = flow.ones(25, 300)
        >>> b = flow.ones(22, 300)
        >>> c = flow.ones(15, 300)
        >>> out = pad_sequence([a, b, c])
        >>> out.size()
        oneflow.Size([25, 3, 300])

    """
    # A single stacked Tensor is treated as a batch along dim 0.
    if isinstance(sequences, Tensor):
        sequences = sequences.unbind(0)
    num_seqs = len(sequences)
    trailing_dims = list(sequences[0].shape[1:])
    # Longest sequence determines the padded length T.
    seq_lens = sorted((seq.shape[0] for seq in sequences), reverse=True)
    longest = seq_lens[0]
    leading = [num_seqs, longest] if batch_first else [longest, num_seqs]
    result = flow.full(
        leading + trailing_dims,
        padding_value,
        dtype=sequences[0].dtype,
        device=sequences[0].device,
        requires_grad=sequences[0].requires_grad,
    )
    # Copy each sequence into its slot; index notation avoids creating
    # duplicate references to the source tensors.
    for idx, seq in enumerate(sequences):
        n = seq.shape[0]
        if batch_first:
            result[idx, 0:n] = seq
        else:
            result[0:n, idx] = seq
    return result
def unpad_sequence(
    padded_sequences: Tensor, lengths: Tensor, batch_first: bool = False,
) -> List[Tensor]:
    """Unpad a padded Tensor back into a list of variable-length Tensors.

    ``unpad_sequence`` unstacks a padded Tensor into a list of variable length
    Tensors, keeping only the leading ``lengths[i]`` steps of each row.

    Args:
        padded_sequences (Tensor): padded sequences.
        lengths (Tensor): length of original (unpadded) sequences.
        batch_first (bool, optional): whether batch dimension first or not. Default: False.

    Returns:
        a list of :class:`Tensor` objects

    For example:

    .. code-block:: python

        >>> from oneflow.nn.utils.rnn import pad_sequence, unpad_sequence
        >>> import oneflow as flow
        >>> import numpy as np
        >>> a = flow.ones(25, 300)
        >>> b = flow.ones(22, 300)
        >>> c = flow.ones(15, 300)
        >>> sequences = [a, b, c]
        >>> padded_sequences = pad_sequence(sequences)
        >>> lengths = flow.as_tensor([v.size(0) for v in sequences])
        >>> unpadded_sequences = unpad_sequence(padded_sequences, lengths)
        >>> np.allclose(sequences[0].numpy(), unpadded_sequences[0].numpy())
        True
        >>> np.allclose(sequences[1].numpy(), unpadded_sequences[1].numpy())
        True
        >>> np.allclose(sequences[2].numpy(), unpadded_sequences[2].numpy())
        True

    """
    if not batch_first:
        # Bring the batch dimension to the front.
        # NOTE(review): the fixed (1, 0, 2) permute assumes 3-D input —
        # confirm behavior for other trailing ranks.
        padded_sequences = padded_sequences.permute((1, 0, 2))
    padded_len = padded_sequences.shape[1]
    positions = flow.arange(padded_len)
    # Boolean mask keeps the first `length` time steps of each row.
    return [
        row[positions < length]
        for row, length in zip(padded_sequences, lengths)
    ]
def pack_sequence(
    sequences: List[Tensor], enforce_sorted: bool = True
) -> PackedSequence:
    """Pack a list of variable-length Tensors.

    Equivalent to calling ``pad_sequence`` followed by
    ``pack_padded_sequence``. ``sequences`` should be a list of Tensors of
    size ``L x *``, where `L` is the length of a sequence and `*` is any
    number of trailing dimensions, including zero.

    For unsorted sequences, use `enforce_sorted = False`. If ``enforce_sorted``
    is ``True``, the sequences should be sorted in the order of decreasing
    length. ``enforce_sorted = True`` is only necessary for ONNX export.

    Args:
        sequences (list[Tensor]): A list of sequences of decreasing length.
        enforce_sorted (bool, optional): if ``True``, checks that the input
            contains sequences sorted by length in a decreasing order. If
            ``False``, this condition is not checked. Default: ``True``.

    Returns:
        a :class:`PackedSequence` object

    For example:

    .. code-block:: python

        >>> from oneflow.nn.utils.rnn import pack_sequence
        >>> import oneflow as flow
        >>> a = flow.tensor([1,2,3])
        >>> b = flow.tensor([4,5])
        >>> c = flow.tensor([6])
        >>> packed = pack_sequence([a, b, c])
        >>> packed.data
        tensor([1, 4, 6, 2, 5, 3], dtype=oneflow.int64)
        >>> packed.batch_sizes
        tensor([3, 2, 1], dtype=oneflow.int64)

    """
    seq_lens = [seq.size(0) for seq in sequences]
    padded = pad_sequence(sequences)
    return pack_padded_sequence(
        padded, flow.as_tensor(seq_lens), enforce_sorted=enforce_sorted
    )
def unpack_sequence(packed_sequences: PackedSequence) -> List[Tensor]:
    """Unpack a PackedSequence into a list of variable-length Tensors.

    Args:
        packed_sequences (PackedSequence): A PackedSequence object.

    Returns:
        a list of :class:`Tensor` objects

    For example:

    .. code-block:: python

        >>> from oneflow.nn.utils.rnn import pack_sequence, unpack_sequence
        >>> import oneflow as flow
        >>> a = flow.tensor([1,2,3])
        >>> b = flow.tensor([4,5])
        >>> c = flow.tensor([6])
        >>> sequences = [a, b, c]
        >>> packed_sequences = pack_sequence(sequences)
        >>> packed_sequences.data
        tensor([1, 4, 6, 2, 5, 3], dtype=oneflow.int64)
        >>> packed_sequences.batch_sizes
        tensor([3, 2, 1], dtype=oneflow.int64)
        >>> unpacked_sequences = unpack_sequence(packed_sequences)
        >>> unpacked_sequences
        [tensor([1, 2, 3], dtype=oneflow.int64), tensor([4, 5], dtype=oneflow.int64), tensor([6], dtype=oneflow.int64)]

    """
    # Pad batch-first, then trim each row back to its original length.
    padded, seq_lens = pad_packed_sequence(packed_sequences, batch_first=True)
    return unpad_sequence(padded, seq_lens, batch_first=True)
# Execute the doctest examples embedded in this module's docstrings when the
# module is run directly; raise on the first failing example.
if __name__ == "__main__":
    import doctest

    doctest.testmod(raise_on_error=True)
| [
"oneflow.tensor",
"oneflow._C.pack_padded_sequence",
"oneflow.as_tensor",
"oneflow.full",
"oneflow.zeros_like",
"oneflow.empty",
"oneflow.sort",
"oneflow.arange"
] | [((9118, 9159), 'oneflow.as_tensor', 'flow.as_tensor', (['lengths'], {'dtype': 'flow.int64'}), '(lengths, dtype=flow.int64)\n', (9132, 9159), True, 'import oneflow as flow\n'), ((9634, 9691), 'oneflow._C.pack_padded_sequence', 'flow._C.pack_padded_sequence', (['input', 'lengths', 'batch_first'], {}), '(input, lengths, batch_first)\n', (9662, 9691), True, 'import oneflow as flow\n'), ((14013, 14154), 'oneflow.full', 'flow.full', (['output_size', 'padding_value'], {'dtype': 'sequence.data.dtype', 'device': 'sequence.data.device', 'requires_grad': 'sequence.data.requires_grad'}), '(output_size, padding_value, dtype=sequence.data.dtype, device=\n sequence.data.device, requires_grad=sequence.data.requires_grad)\n', (14022, 14154), True, 'import oneflow as flow\n'), ((14368, 14394), 'oneflow.empty', 'flow.empty', (['max_batch_size'], {}), '(max_batch_size)\n', (14378, 14394), True, 'import oneflow as flow\n'), ((18145, 18280), 'oneflow.full', 'flow.full', (['out_dims', 'padding_value'], {'dtype': 'sequences[0].dtype', 'device': 'sequences[0].device', 'requires_grad': 'sequences[0].requires_grad'}), '(out_dims, padding_value, dtype=sequences[0].dtype, device=\n sequences[0].device, requires_grad=sequences[0].requires_grad)\n', (18154, 18280), True, 'import oneflow as flow\n'), ((20162, 20185), 'oneflow.arange', 'flow.arange', (['max_length'], {}), '(max_length)\n', (20173, 20185), True, 'import oneflow as flow\n'), ((23375, 23411), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (23390, 23411), False, 'import doctest\n'), ((1169, 1197), 'oneflow.zeros_like', 'flow.zeros_like', (['permutation'], {}), '(permutation)\n', (1184, 1197), True, 'import oneflow as flow\n'), ((9411, 9446), 'oneflow.sort', 'flow.sort', (['lengths'], {'descending': '(True)'}), '(lengths, descending=True)\n', (9420, 9446), True, 'import oneflow as flow\n'), ((4601, 4664), 'oneflow.tensor', 'flow.tensor', (['()'], {'dtype': 'self.data.dtype', 
'device': 'self.data.device'}), '((), dtype=self.data.dtype, device=self.data.device)\n', (4612, 4664), True, 'import oneflow as flow\n'), ((4880, 4943), 'oneflow.tensor', 'flow.tensor', (['()'], {'dtype': 'self.data.dtype', 'device': 'self.data.device'}), '((), dtype=self.data.dtype, device=self.data.device)\n', (4891, 4943), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
import oneflow.python.framework.id_util as id_util
from typing import Optional, Sequence
class Squeeze(Module):
    """Module wrapper around the builtin ``squeeze`` op.

    Drops size-1 dimensions of the input; ``dim`` selects which axes to
    remove (all removable axes when ``None``).
    """

    def __init__(self, dim: Optional[Sequence[int]] = None) -> None:
        super().__init__()
        # Build the op once at construction time and reuse it on every call.
        builder = flow.builtin_op("squeeze").Input("in").Output("out")
        self._op = builder.Attr("axes", dim).Build()

    def forward(self, x):
        # The builtin op returns a tuple; the squeezed tensor is its only entry.
        return self._op(x)[0]
@oneflow_export("squeeze")
@register_tensor_op("squeeze")
@experimental_api
def squeeze_op(input, dim: Optional[Sequence[int]] = None):
    """This operator removes the specified dimension which size is 1 of the input Tensor.
    If the `dim` is not specified, this operator will remove all the dimensions which size is 1 of the input Tensor.

    The amount of element in return value is the same as Tensor `input`.

    Args:
        input (oneflow.Tensor): The input Tensor.
        dim (Optional[Sequence[int]]): The dim. Defaults to None.

    Returns:
        Tensor: The result Tensor.

    For example:

    .. code-block:: python

        >>> import oneflow.experimental as flow
        >>> import numpy as np
        >>> flow.enable_eager_execution()
        >>> input = flow.Tensor(np.array([[[[1, 1, 1]]]]).astype(np.int32))
        >>> out = flow.squeeze(input, dim=[1, 2]).numpy().shape
        >>> print(out)
        (1, 3)

    """
    # Accept a bare int for convenience. `isinstance` is the idiomatic check
    # and, unlike the previous `type(dim) == int`, also covers int subclasses.
    if isinstance(dim, int):
        dim = [dim]
    return Squeeze(dim=dim)(input)
# Run the doctest example in `squeeze_op`'s docstring when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod(raise_on_error=True)
| [
"oneflow.builtin_op",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.tensor.register_tensor_op"
] | [((1240, 1265), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""squeeze"""'], {}), "('squeeze')\n", (1254, 1265), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1267, 1296), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""squeeze"""'], {}), "('squeeze')\n", (1285, 1296), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((2320, 2356), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2335, 2356), False, 'import doctest\n'), ((1039, 1065), 'oneflow.builtin_op', 'flow.builtin_op', (['"""squeeze"""'], {}), "('squeeze')\n", (1054, 1065), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow._oneflow_internal
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.framework import id_util as id_util
from oneflow.compatible.single_client.framework import remote_blob as remote_blob_util
def categorical_ordinal_encode(
    table: oneflow._oneflow_internal.BlobDesc,
    size: oneflow._oneflow_internal.BlobDesc,
    input_tensor: oneflow._oneflow_internal.BlobDesc,
    hash_precomputed: bool = True,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Encode a categorical ordinal Blob via a maintained hash table,
    converting each discrete input value into a continuous integer ID.

    Args:
        table (oneflow._oneflow_internal.BlobDesc): The hash table; typically assigned as a variable.
        size (oneflow._oneflow_internal.BlobDesc): The size of the hash table.
        input_tensor (oneflow._oneflow_internal.BlobDesc): The input Blob.
        hash_precomputed (bool, optional): Only the ``True`` mode is currently supported;
            the internal hash value is not recomputed. Defaults to True.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def categorical_ordinal_encode_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int32)
        ) -> tp.Numpy:
            dtype = x.dtype
            with flow.scope.namespace("categorical_ordinal_encode"):
                table = flow.get_variable(
                    name="Table",
                    shape=(16,),
                    dtype=dtype,
                    initializer=flow.constant_initializer(0, dtype=dtype),
                    trainable=False,
                    reuse=False,
                )
                size = flow.get_variable(
                    name="Size",
                    shape=(1,),
                    dtype=dtype,
                    initializer=flow.constant_initializer(0, dtype=dtype),
                    trainable=False,
                    reuse=False,
                )
                return flow.categorical_ordinal_encode(
                    table=table, size=size, input_tensor=x, name="Encode",
                )

        x = np.array([[7, 0, 2],
                      [1, 7, 2],
                      [0, 1, 7]]).astype(np.int32)
        out = categorical_ordinal_encode_Job(x)

        # out [[1 0 2]
        #      [3 1 2]
        #      [0 3 1]]

    """
    # Only the precomputed-hash mode is implemented by the underlying op.
    assert hash_precomputed is True
    op_name = name or id_util.UniqueStr("CategoricalOrdinalEncode_")
    builder = (
        flow.user_op_builder(op_name)
        .Op("CategoricalOrdinalEncode")
        .Input("in", [input_tensor])
        .Input("table", [table])
        .Input("size", [size])
        .Output("out")
        .Attr("hash_precomputed", hash_precomputed)
    )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def categorical_ordinal_encoder(
    input_tensor: oneflow._oneflow_internal.BlobDesc,
    capacity: int,
    hash_precomputed: bool = True,
    name: str = "CategoricalOrdinalEncoder",
) -> oneflow._oneflow_internal.BlobDesc:
    """Convenience layer wrapping ``categorical_ordinal_encode``: it creates
    the hash-table and size state variables and then delegates to it.

    Args:
        input_tensor (oneflow._oneflow_internal.BlobDesc): The input Blob.
        capacity (int): The capacity of the hash table.
        hash_precomputed (bool, optional): Only the ``True`` mode is currently supported;
            the internal hash value is not recomputed. Defaults to True.
        name (str, optional): The name for the operation. Defaults to "CategoricalOrdinalEncoder".

    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.

    For example:

    .. code-block:: python

        import oneflow.compatible.single_client as flow
        import numpy as np
        import oneflow.compatible.single_client.typing as tp

        @flow.global_function()
        def categorical_ordinal_encoder_Job(x: tp.Numpy.Placeholder((3, 3), dtype=flow.int32)
        ) -> tp.Numpy:
            return flow.layers.categorical_ordinal_encoder(x, 16)

        x = np.array([[7, 0, 2],
                      [1, 7, 2],
                      [0, 1, 7]]).astype(np.int32)
        out = categorical_ordinal_encoder_Job(x)

        # out [[1 0 2]
        #      [3 1 2]
        #      [0 3 1]]

    """
    assert hash_precomputed is True
    dtype = input_tensor.dtype

    def _state_variable(var_name, shape):
        # Non-trainable, zero-initialized state blob for the encoder.
        return flow.get_variable(
            name=var_name,
            shape=shape,
            dtype=dtype,
            initializer=flow.constant_initializer(0, dtype=dtype),
            trainable=False,
            reuse=False,
        )

    with flow.scope.namespace(name):
        table = _state_variable("Table", (capacity * 2,))
        size = _state_variable("Size", (1,))
        return categorical_ordinal_encode(
            table=table, size=size, input_tensor=input_tensor, name="Encode"
        )
| [
"oneflow.compatible.single_client.scope.namespace",
"oneflow.compatible.single_client.constant_initializer",
"oneflow.compatible.single_client.framework.id_util.UniqueStr"
] | [((5398, 5424), 'oneflow.compatible.single_client.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (5418, 5424), True, 'from oneflow.compatible import single_client as flow\n'), ((5571, 5612), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {'dtype': 'dtype'}), '(0, dtype=dtype)\n', (5596, 5612), True, 'from oneflow.compatible import single_client as flow\n'), ((5810, 5851), 'oneflow.compatible.single_client.constant_initializer', 'flow.constant_initializer', (['(0)'], {'dtype': 'dtype'}), '(0, dtype=dtype)\n', (5835, 5851), True, 'from oneflow.compatible import single_client as flow\n'), ((3407, 3453), 'oneflow.compatible.single_client.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""CategoricalOrdinalEncode_"""'], {}), "('CategoricalOrdinalEncode_')\n", (3424, 3453), True, 'from oneflow.compatible.single_client.framework import id_util as id_util\n')] |
import numpy as np
import os
import argparse
import json
import time
import shutil
import oneflow as flow
import oneflow.nn as nn
from utils import pad_sequences, load_imdb_data
from model import TransformerEncoderModel
# Command-line hyper-parameters for training a Transformer encoder on IMDB.
parser = argparse.ArgumentParser()
parser.add_argument('--batch_size', type=int, default=32)
parser.add_argument('--n_epochs', type=int, default=15)
parser.add_argument('--lr', type=float, default=1e-4)
parser.add_argument('--sequence_len', type=int, default=128)  # src_len: tokens per padded review
parser.add_argument('--vocab_sz', type=int, default=50000)  # emb_sz: embedding-table rows
parser.add_argument('--d_model', type=int, default=512)
parser.add_argument('--dropout', type=float, default=0.1)
parser.add_argument('--n_head', type=int, default=8)
parser.add_argument('--n_encoder_layers', type=int, default=6)
# NOTE(review): n_decoder_layers is parsed but the model below is encoder-only
# (never passed to TransformerEncoderModel) — confirm whether it is still needed.
parser.add_argument('--n_decoder_layers', type=int, default=6)
parser.add_argument('--dim_feedforward', type=int, default=1024)
parser.add_argument('--imdb_path', type=str, default='../../imdb')
parser.add_argument('--load_dir', type=str, default='.')  # '.' means "train from scratch"
parser.add_argument('--save_dir', type=str, default='./best_model')
args = parser.parse_args()
args.n_classes = 2  # tgt_len: binary sentiment classification
def shuffle_batch(data, label, batch_size):
    """Shuffle `data`/`label` jointly and cut them into full mini-batches.

    Samples beyond the last full batch are dropped. Returns a pair of CUDA
    int64 tensors of shape (num_batches, batch_size, ...).
    """
    order = np.random.permutation(len(data))
    data, label = data[order], label[order]
    num_batches = len(data) // batch_size

    def _to_batches(arr):
        # Stack consecutive slices of `batch_size` rows into one array.
        return np.array(
            [arr[b * batch_size:(b + 1) * batch_size] for b in range(num_batches)],
            dtype=np.int32,
        )

    xs = _to_batches(data)
    ys = _to_batches(label)
    return (
        flow.tensor(xs, dtype=flow.int64).to("cuda"),
        flow.tensor(ys, dtype=flow.int64).to("cuda"),
    )
def prepare_data():
    """Load the IMDB split, shift word ids to make room for specials, and
    post-pad both splits to ``args.sequence_len`` with the <PAD> id."""
    print("Preparing data...")
    (train_data, train_labels), (test_data,
                                 test_labels) = load_imdb_data(args.imdb_path)
    vocab_path = os.path.join(args.imdb_path, 'word_index.json')
    with open(vocab_path) as f:
        raw_index = json.load(f)
    # Shift every id up by two so that 0/1 are free for the special tokens.
    word_index = {word: idx + 2 for word, idx in raw_index.items()}
    word_index["<PAD>"] = 0
    word_index["<UNK>"] = 1
    pad_id = word_index["<PAD>"]
    train_data = pad_sequences(
        train_data, value=pad_id, padding='post', maxlen=args.sequence_len)
    test_data = pad_sequences(
        test_data, value=pad_id, padding='post', maxlen=args.sequence_len)
    return train_data, train_labels, test_data, test_labels
def acc(labels, logits, g):
    """Accumulate batch accuracy counters into ``g``.

    ``g["total"]`` grows by the batch size, ``g["correct"]`` by the number of
    argmax predictions that match ``labels``.
    """
    truth = labels.numpy()
    predicted = np.argmax(logits.numpy(), axis=1)
    g["total"] += labels.shape[0]
    g["correct"] += np.sum(predicted == truth)
def train():
    """Train the Transformer encoder on IMDB and keep the best checkpoint.

    Runs ``args.n_epochs`` epochs; after each epoch the model is evaluated on
    the test split and, when accuracy improves, ``args.save_dir`` is recreated
    and the current ``state_dict`` is saved there.
    """
    train_data, train_labels, test_data, test_labels = prepare_data()
    best_accuracy = 0.0
    best_epoch = 0
    print("Setting model...")
    model = TransformerEncoderModel(emb_sz=args.vocab_sz, n_classes=args.n_classes, d_model=args.d_model, nhead=args.n_head,
                                    num_encoder_layers=args.n_encoder_layers, dim_feedforward=args.dim_feedforward, dropout=args.dropout, batch_first=True)
    criterion = nn.CrossEntropyLoss()
    model.to("cuda")
    criterion.to("cuda")
    of_adam = flow.optim.Adam(model.parameters(), lr=args.lr)
    # '.' is the sentinel meaning "no checkpoint to resume from".
    if args.load_dir != '.':
        model.load_state_dict(flow.load(args.load_dir))
    print("Starting training...")
    training_time = 0
    for epoch in range(1, args.n_epochs + 1):
        print("[Epoch:{}]".format(epoch))
        model.train()
        # Reshuffle the training set into fresh mini-batches every epoch.
        data, label = shuffle_batch(train_data, train_labels, args.batch_size)
        s_t = time.time()
        epoch_loss = 0
        for i, (texts, labels) in enumerate(zip(data, label)):
            output = model(texts)
            loss = criterion(output, labels)
            loss.backward()
            of_adam.step()
            of_adam.zero_grad()
            epoch_loss += loss.numpy()
            # Log every 50 batches and on the final batch of the epoch.
            if i % 50 == 0 or i == data.shape[0] - 1:
                print("{0:d}/{1:d}, loss:{2:.4f}".format(i +
                                                         1, data.shape[0], loss.numpy()))
        epoch_loss /= data.shape[0]
        e_t = time.time() - s_t
        training_time += e_t
        print("Epoch:{0:d} training time:{1:.2f}s, loss:{2:.4f}".format(
            epoch, e_t, epoch_loss))
        # Evaluate on the (shuffled, batched) test split.
        model.eval()
        data, label = shuffle_batch(test_data, test_labels, args.batch_size)
        g = {"correct": 0, "total": 0}
        for i, (texts, labels) in enumerate(zip(data, label)):
            logits = model(texts)
            acc(labels, logits, g)
        accuracy = g["correct"] * 100 / g["total"]
        print("[Epoch:{0:d} ] accuracy: {1:.1f}%".format(epoch, accuracy))
        if accuracy > best_accuracy:
            best_accuracy = accuracy
            best_epoch = epoch
            # Recreate save_dir from scratch so only the best model remains.
            if not os.path.exists(args.save_dir):
                os.mkdir(args.save_dir)
            else:
                shutil.rmtree(args.save_dir)
                assert not os.path.exists(args.save_dir)
                os.mkdir(args.save_dir)
            print("Epoch:{} save best model.".format(best_epoch))
            flow.save(model.state_dict(), args.save_dir)
    print("Epoch:{} get best accuracy:{}, average training time:{}s".format(
        best_epoch, best_accuracy, training_time / args.n_epochs))
# Entry point: run the full training loop when executed as a script.
if __name__ == '__main__':
    train()
| [
"oneflow.tensor",
"oneflow.nn.CrossEntropyLoss",
"oneflow.load"
] | [((232, 257), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (255, 257), False, 'import argparse\n'), ((1889, 1919), 'utils.load_imdb_data', 'load_imdb_data', (['args.imdb_path'], {}), '(args.imdb_path)\n', (1903, 1919), False, 'from utils import pad_sequences, load_imdb_data\n'), ((2159, 2258), 'utils.pad_sequences', 'pad_sequences', (['train_data'], {'value': "word_index['<PAD>']", 'padding': '"""post"""', 'maxlen': 'args.sequence_len'}), "(train_data, value=word_index['<PAD>'], padding='post', maxlen\n =args.sequence_len)\n", (2172, 2258), False, 'from utils import pad_sequences, load_imdb_data\n'), ((2279, 2377), 'utils.pad_sequences', 'pad_sequences', (['test_data'], {'value': "word_index['<PAD>']", 'padding': '"""post"""', 'maxlen': 'args.sequence_len'}), "(test_data, value=word_index['<PAD>'], padding='post', maxlen=\n args.sequence_len)\n", (2292, 2377), False, 'from utils import pad_sequences, load_imdb_data\n'), ((2816, 3062), 'model.TransformerEncoderModel', 'TransformerEncoderModel', ([], {'emb_sz': 'args.vocab_sz', 'n_classes': 'args.n_classes', 'd_model': 'args.d_model', 'nhead': 'args.n_head', 'num_encoder_layers': 'args.n_encoder_layers', 'dim_feedforward': 'args.dim_feedforward', 'dropout': 'args.dropout', 'batch_first': '(True)'}), '(emb_sz=args.vocab_sz, n_classes=args.n_classes,\n d_model=args.d_model, nhead=args.n_head, num_encoder_layers=args.\n n_encoder_layers, dim_feedforward=args.dim_feedforward, dropout=args.\n dropout, batch_first=True)\n', (2839, 3062), False, 'from model import TransformerEncoderModel\n'), ((3101, 3122), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3120, 3122), True, 'import oneflow.nn as nn\n'), ((2011, 2023), 'json.load', 'json.load', (['f'], {}), '(f)\n', (2020, 2023), False, 'import json\n'), ((3577, 3588), 'time.time', 'time.time', ([], {}), '()\n', (3586, 3588), False, 'import time\n'), ((1935, 1982), 'os.path.join', 'os.path.join', (['args.imdb_path', 
'"""word_index.json"""'], {}), "(args.imdb_path, 'word_index.json')\n", (1947, 1982), False, 'import os\n'), ((3291, 3315), 'oneflow.load', 'flow.load', (['args.load_dir'], {}), '(args.load_dir)\n', (3300, 3315), True, 'import oneflow as flow\n'), ((4100, 4111), 'time.time', 'time.time', ([], {}), '()\n', (4109, 4111), False, 'import time\n'), ((1642, 1680), 'oneflow.tensor', 'flow.tensor', (['x_batch'], {'dtype': 'flow.int64'}), '(x_batch, dtype=flow.int64)\n', (1653, 1680), True, 'import oneflow as flow\n'), ((1693, 1731), 'oneflow.tensor', 'flow.tensor', (['y_batch'], {'dtype': 'flow.int64'}), '(y_batch, dtype=flow.int64)\n', (1704, 1731), True, 'import oneflow as flow\n'), ((4779, 4808), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (4793, 4808), False, 'import os\n'), ((4826, 4849), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (4834, 4849), False, 'import os\n'), ((4884, 4912), 'shutil.rmtree', 'shutil.rmtree', (['args.save_dir'], {}), '(args.save_dir)\n', (4897, 4912), False, 'import shutil\n'), ((4986, 5009), 'os.mkdir', 'os.mkdir', (['args.save_dir'], {}), '(args.save_dir)\n', (4994, 5009), False, 'import os\n'), ((4940, 4969), 'os.path.exists', 'os.path.exists', (['args.save_dir'], {}), '(args.save_dir)\n', (4954, 4969), False, 'import os\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
from collections import OrderedDict
import oneflow as flow
import oneflow.unittest
from test_util import GenArgList
import torch
def _test_expand_new_dims_broadcast(test_case, device):
    """Compare consistent expand (adding new leading dims, broadcast sbp)
    against the PyTorch reference, forward and backward."""
    shape = (1, 4, 1, 1)
    target = [2, 1, 2, 4, 2, 1]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor, broadcast sbp over two ranks.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    test_case.assertTrue(
        np.array_equal(of_out.to_local().numpy(), ref_out.detach().cpu().numpy())
    )
    test_case.assertTrue(
        np.array_equal(of_in.grad.to_local().numpy(), ref_in.grad.cpu().numpy())
    )
def _test_expand_same_dim_broadcast(test_case, device):
    """Consistent expand with equal source/target rank under broadcast sbp,
    checked against the PyTorch reference."""
    shape = (4, 1, 2, 1)
    target = [4, 1, 2, 1]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor, broadcast sbp over two ranks.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    test_case.assertTrue(
        np.array_equal(of_out.to_local().numpy(), ref_out.detach().cpu().numpy())
    )
    test_case.assertTrue(
        np.array_equal(of_in.grad.to_local().numpy(), ref_in.grad.cpu().numpy())
    )
def _test_expand_same_dim_negative_broadcast(test_case, device):
    """Consistent expand with a -1 (keep-size) target entry under broadcast
    sbp, checked against the PyTorch reference."""
    shape = (2, 1, 4, 1)
    target = [2, -1, 4, 4]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor, broadcast sbp over two ranks.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    test_case.assertTrue(
        np.array_equal(of_out.to_local().numpy(), ref_out.detach().cpu().numpy())
    )
    test_case.assertTrue(
        np.array_equal(of_in.grad.to_local().numpy(), ref_in.grad.cpu().numpy())
    )
def _test_expand_new_dims_split(test_case, device):
    """Consistent expand (new leading dims) with the input re-laid-out as
    split(0); rank 0 checks its local shard against the torch reference."""
    shape = (4, 1, 2, 1)
    target = [2, 1, 4, 1, 2, 1]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor: broadcast first, then split along dim 0.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_in = of_in.to_consistent(sbp=flow.sbp.split(0))
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    # Rank 0 holds the first half of the split dimension.
    test_case.assertTrue(
        np.array_equal(
            of_out.to_local().numpy(),
            ref_out.detach().cpu().numpy()[:, :, 0:2, :, :, :],
        )
    )
    test_case.assertTrue(
        np.array_equal(
            of_in.grad.to_local().numpy(),
            ref_in.grad.cpu().numpy()[0:2, :, :, :],
        )
    )
def _test_expand_same_dim_split(test_case, device):
    """Consistent expand (same rank) with the input re-laid-out as split(0);
    rank 0 checks its local shard against the torch reference."""
    shape = (4, 1, 2, 1)
    target = [4, 1, 2, 4]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor: broadcast first, then split along dim 0.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_in = of_in.to_consistent(sbp=flow.sbp.split(0))
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    # Rank 0 holds the first half of the split dimension.
    test_case.assertTrue(
        np.array_equal(
            of_out.to_local().numpy(),
            ref_out.detach().cpu().numpy()[0:2, :, :, :],
        )
    )
    test_case.assertTrue(
        np.array_equal(
            of_in.grad.to_local().numpy(),
            ref_in.grad.cpu().numpy()[0:2, :, :, :],
        )
    )
def _test_expand_same_dim_negative_split(test_case, device):
    """Consistent expand with a -1 target entry while the input is split(2);
    rank 0 checks its local shard against the torch reference."""
    shape = (2, 1, 4, 1)
    target = [2, -1, 4, 4]
    arr = np.random.random(size=shape).astype(np.float32)
    # PyTorch reference forward + backward.
    ref_in = torch.tensor(arr, requires_grad=True)
    ref_out = ref_in.expand(*target)
    ref_out.sum().backward()
    # OneFlow consistent tensor: broadcast first, then split along dim 2.
    of_in = flow.tensor(arr, dtype=flow.float32, requires_grad=True)
    of_in = of_in.to_consistent(
        placement=flow.placement(device, {0: [0, 1]}), sbp=flow.sbp.broadcast,
    )
    of_in = of_in.to_consistent(sbp=flow.sbp.split(2))
    of_out = of_in.expand(*target)
    of_out.sum().backward()
    if flow.env.get_rank() != 0:
        return
    # Rank 0 holds the first half of the split dimension.
    test_case.assertTrue(
        np.array_equal(
            of_out.to_local().numpy(),
            ref_out.detach().cpu().numpy()[:, :, 0:2, :],
        )
    )
    test_case.assertTrue(
        np.array_equal(
            of_in.grad.to_local().numpy(),
            ref_in.grad.cpu().numpy()[:, :, 0:2, :],
        )
    )
@flow.unittest.skip_unless_1n2d()
class ExpandConsistentTestCase(oneflow.unittest.TestCase):
    """Expand on consistent tensors, exercised over 1 node / 2 devices."""

    def test_expand_broadcast(test_case):
        cases = OrderedDict()
        cases["test_fun"] = [
            _test_expand_new_dims_broadcast,
            _test_expand_same_dim_broadcast,
            _test_expand_same_dim_negative_broadcast,
        ]
        cases["device"] = ["cpu", "cuda"]
        for params in GenArgList(cases):
            test_fn = params[0]
            test_fn(test_case, *params[1:])

    # NOTE(<NAME>): Run with the following command can pass the test locally, but will fail when run in ci.
    # ONEFLOW_TEST_DEVICE_NUM=2 python3 -m oneflow.distributed.launch --nproc_per_node 2 test_consistent_expand_op.py
    @unittest.skipIf(True, "skip for now")
    def test_expand_split(test_case):
        cases = OrderedDict()
        cases["test_fun"] = [
            _test_expand_new_dims_split,
            _test_expand_same_dim_split,
            _test_expand_same_dim_negative_split,
        ]
        cases["device"] = ["cpu", "cuda"]
        for params in GenArgList(cases):
            test_fn = params[0]
            test_fn(test_case, *params[1:])
if __name__ == "__main__":
unittest.main()
| [
"oneflow.tensor",
"oneflow.placement",
"oneflow.sbp.split",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.env.get_rank"
] | [((6841, 6873), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (6871, 6873), True, 'import oneflow as flow\n'), ((976, 1018), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (988, 1018), False, 'import torch\n'), ((1111, 1172), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (1122, 1172), True, 'import oneflow as flow\n'), ((1893, 1935), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (1905, 1935), False, 'import torch\n'), ((2028, 2089), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (2039, 2089), True, 'import oneflow as flow\n'), ((2837, 2879), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (2849, 2879), False, 'import torch\n'), ((2972, 3033), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (2983, 3033), True, 'import oneflow as flow\n'), ((3773, 3815), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (3785, 3815), False, 'import torch\n'), ((3908, 3969), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (3919, 3969), True, 'import oneflow as flow\n'), ((4862, 4904), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (4874, 4904), False, 'import torch\n'), ((4997, 5058), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': 
'(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (5008, 5058), True, 'import oneflow as flow\n'), ((5955, 5997), 'torch.tensor', 'torch.tensor', (['input_nd'], {'requires_grad': '(True)'}), '(input_nd, requires_grad=True)\n', (5967, 5997), False, 'import torch\n'), ((6090, 6151), 'oneflow.tensor', 'flow.tensor', (['input_nd'], {'dtype': 'flow.float32', 'requires_grad': '(True)'}), '(input_nd, dtype=flow.float32, requires_grad=True)\n', (6101, 6151), True, 'import oneflow as flow\n'), ((7553, 7590), 'unittest.skipIf', 'unittest.skipIf', (['(True)', '"""skip for now"""'], {}), "(True, 'skip for now')\n", (7568, 7590), False, 'import unittest\n'), ((7996, 8011), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8009, 8011), False, 'import unittest\n'), ((1375, 1394), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (1392, 1394), True, 'import oneflow as flow\n'), ((2309, 2328), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (2326, 2328), True, 'import oneflow as flow\n'), ((3253, 3272), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (3270, 3272), True, 'import oneflow as flow\n'), ((4250, 4269), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (4267, 4269), True, 'import oneflow as flow\n'), ((5339, 5358), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (5356, 5358), True, 'import oneflow as flow\n'), ((6432, 6451), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (6449, 6451), True, 'import oneflow as flow\n'), ((6994, 7007), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7005, 7007), False, 'from collections import OrderedDict\n'), ((7259, 7279), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7269, 7279), False, 'from test_util import GenArgList\n'), ((7648, 7661), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7659, 7661), False, 'from collections import OrderedDict\n'), ((7901, 7921), 
'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7911, 7921), False, 'from test_util import GenArgList\n'), ((907, 941), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (923, 941), True, 'import numpy as np\n'), ((1230, 1267), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (1244, 1267), True, 'import oneflow as flow\n'), ((1824, 1858), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (1840, 1858), True, 'import numpy as np\n'), ((2147, 2184), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (2161, 2184), True, 'import oneflow as flow\n'), ((2768, 2802), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (2784, 2802), True, 'import numpy as np\n'), ((3091, 3128), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (3105, 3128), True, 'import oneflow as flow\n'), ((3704, 3738), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (3720, 3738), True, 'import numpy as np\n'), ((4027, 4064), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (4041, 4064), True, 'import oneflow as flow\n'), ((4136, 4153), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4150, 4153), True, 'import oneflow as flow\n'), ((4793, 4827), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (4809, 4827), True, 'import numpy as np\n'), ((5116, 5153), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (5130, 5153), True, 'import oneflow as flow\n'), ((5225, 5242), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5239, 5242), True, 'import oneflow as 
flow\n'), ((5886, 5920), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (5902, 5920), True, 'import numpy as np\n'), ((6209, 6246), 'oneflow.placement', 'flow.placement', (['device', '{(0): [0, 1]}'], {}), '(device, {(0): [0, 1]})\n', (6223, 6246), True, 'import oneflow as flow\n'), ((6318, 6335), 'oneflow.sbp.split', 'flow.sbp.split', (['(2)'], {}), '(2)\n', (6332, 6335), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _prelu(input, alpha):
alpha = np.expand_dims(alpha, 0)
alpha = np.expand_dims(alpha, 2)
alpha = np.expand_dims(alpha, 3)
return np.where(input > 0, input, input * alpha)
def _prelu_grad(input, alpha):
return alpha * (input <= 0) + (input > 0)
def _test_prelu(test_case, shape, device):
np_input = np.random.randn(*shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
np_alpha = np.random.randn(1)
prelu = flow.nn.PReLU(init=np_alpha)
if device == "cuda":
prelu.to(flow.device("cuda"))
np_out = _prelu(np_input, np_alpha)
of_out = prelu(input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_prelu_ndims(test_case, shape, device):
np_input = np.random.randn(*shape)
input = flow.Tensor(np_input, dtype=flow.float32, device=flow.device(device))
np_alpha = np.random.randn(shape[1])
prelu = flow.nn.PReLU(init=1.0, num_parameters=shape[1])
prelu_alpha = np.expand_dims(np_alpha, (1, 2))
prelu.weight = flow.nn.Parameter(flow.Tensor(prelu_alpha, dtype=flow.float32))
if device == "cuda":
prelu.to(flow.device("cuda"))
np_out = _prelu(np_input, np_alpha)
of_out = prelu(input)
test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
def _test_prelu_grad(test_case, shape, device):
np_input = np.random.randn(*shape)
input = flow.Tensor(
np_input, dtype=flow.float32, requires_grad=True, device=flow.device(device)
)
np_alpha = 0.2
prelu = flow.nn.PReLU(init=np_alpha)
if device == "cuda":
prelu.to(flow.device("cuda"))
of_out = prelu(input).sum()
of_out.backward()
np_grad = _prelu_grad(np_input, np_alpha)
test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-5, 1e-5))
@flow.unittest.skip_unless_1n1d()
class TestPReLU(flow.unittest.TestCase):
def test_prelu(test_case):
arg_dict = OrderedDict()
arg_dict["shape"] = [(2, 4, 5, 6)]
arg_dict["device"] = ["cpu", "cuda"]
for arg in GenArgList(arg_dict):
_test_prelu(test_case, *arg)
_test_prelu_ndims(test_case, *arg)
_test_prelu_grad(test_case, *arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.experimental.Tensor",
"oneflow.experimental.nn.PReLU",
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow.experimental.device"
] | [((2568, 2600), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2598, 2600), True, 'import oneflow.experimental as flow\n'), ((772, 796), 'numpy.expand_dims', 'np.expand_dims', (['alpha', '(0)'], {}), '(alpha, 0)\n', (786, 796), True, 'import numpy as np\n'), ((809, 833), 'numpy.expand_dims', 'np.expand_dims', (['alpha', '(2)'], {}), '(alpha, 2)\n', (823, 833), True, 'import numpy as np\n'), ((846, 870), 'numpy.expand_dims', 'np.expand_dims', (['alpha', '(3)'], {}), '(alpha, 3)\n', (860, 870), True, 'import numpy as np\n'), ((882, 923), 'numpy.where', 'np.where', (['(input > 0)', 'input', '(input * alpha)'], {}), '(input > 0, input, input * alpha)\n', (890, 923), True, 'import numpy as np\n'), ((1063, 1086), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1078, 1086), True, 'import numpy as np\n'), ((1184, 1202), 'numpy.random.randn', 'np.random.randn', (['(1)'], {}), '(1)\n', (1199, 1202), True, 'import numpy as np\n'), ((1215, 1243), 'oneflow.experimental.nn.PReLU', 'flow.nn.PReLU', ([], {'init': 'np_alpha'}), '(init=np_alpha)\n', (1228, 1243), True, 'import oneflow.experimental as flow\n'), ((1513, 1536), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1528, 1536), True, 'import numpy as np\n'), ((1634, 1659), 'numpy.random.randn', 'np.random.randn', (['shape[1]'], {}), '(shape[1])\n', (1649, 1659), True, 'import numpy as np\n'), ((1672, 1720), 'oneflow.experimental.nn.PReLU', 'flow.nn.PReLU', ([], {'init': '(1.0)', 'num_parameters': 'shape[1]'}), '(init=1.0, num_parameters=shape[1])\n', (1685, 1720), True, 'import oneflow.experimental as flow\n'), ((1739, 1771), 'numpy.expand_dims', 'np.expand_dims', (['np_alpha', '(1, 2)'], {}), '(np_alpha, (1, 2))\n', (1753, 1771), True, 'import numpy as np\n'), ((2123, 2146), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2138, 2146), True, 'import numpy as np\n'), ((2294, 2322), 
'oneflow.experimental.nn.PReLU', 'flow.nn.PReLU', ([], {'init': 'np_alpha'}), '(init=np_alpha)\n', (2307, 2322), True, 'import oneflow.experimental as flow\n'), ((3002, 3017), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3015, 3017), False, 'import unittest\n'), ((1809, 1853), 'oneflow.experimental.Tensor', 'flow.Tensor', (['prelu_alpha'], {'dtype': 'flow.float32'}), '(prelu_alpha, dtype=flow.float32)\n', (1820, 1853), True, 'import oneflow.experimental as flow\n'), ((2692, 2705), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2703, 2705), False, 'from collections import OrderedDict\n'), ((2813, 2833), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2823, 2833), False, 'from test_util import GenArgList\n'), ((1148, 1167), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1159, 1167), True, 'import oneflow.experimental as flow\n'), ((1286, 1305), 'oneflow.experimental.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1297, 1305), True, 'import oneflow.experimental as flow\n'), ((1598, 1617), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1609, 1617), True, 'import oneflow.experimental as flow\n'), ((1897, 1916), 'oneflow.experimental.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1908, 1916), True, 'import oneflow.experimental as flow\n'), ((2237, 2256), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2248, 2256), True, 'import oneflow.experimental as flow\n'), ((2365, 2384), 'oneflow.experimental.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (2376, 2384), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
def add_ofrecord_args(parser):
parser.add_argument("--image_size", type=int, default=224,
required=False, help="image size")
parser.add_argument("--resize_shorter", type=int, default=256,
required=False, help="resize shorter for validation")
parser.add_argument("--train_data_dir", type=str,
default="./data/hustcolor/ofrecord/train", help="train dataset directory")
parser.add_argument("--train_data_part_num", type=int,
default=5, help="train data part num")
parser.add_argument("--val_data_dir", type=str,
default="./data/hustcolor/ofrecord/val", help="val dataset directory")
parser.add_argument("--val_data_part_num", type=int,
default=5, help="val data part num")
return parser
def load_synthetic(args):
total_device_num = args.num_nodes * args.gpu_num_per_node
batch_size = total_device_num * args.batch_size_per_device
label = flow.data.decode_random(
shape=(),
dtype=flow.int32,
batch_size=batch_size,
initializer=flow.zeros_initializer(flow.int32),
)
shape=(args.image_size, args.image_size, 3) if args.channel_last else (3, args.image_size, args.image_size)
image = flow.data.decode_random(
shape=shape, dtype=flow.float, batch_size=batch_size
)
return label, image
def load_imagenet_for_training(args):
total_device_num = args.num_nodes * args.gpu_num_per_node
train_batch_size = total_device_num * args.batch_size_per_device
output_layout="NHWC" if args.channel_last else "NCHW"
color_space = 'RGB'
ofrecord = flow.data.ofrecord_reader(args.train_data_dir,
batch_size=train_batch_size,
data_part_num=args.train_data_part_num,
part_name_suffix_length=5,
random_shuffle=True,
shuffle_after_epoch=True)
label = flow.data.OFRecordRawDecoder(
ofrecord, "labels", shape=(), dtype=flow.int32)
if args.gpu_image_decoder:
encoded = flow.data.OFRecordBytesDecoder(ofrecord, "images")
image = flow.data.ImageDecoderRandomCropResize(encoded, target_width=224, target_height=224, num_workers=3)
else:
image = flow.data.OFRecordImageDecoderRandomCrop(ofrecord, "images", # seed=seed,
color_space=color_space)
rsz = flow.image.Resize(image, target_size=[args.image_size, args.image_size])
image = rsz[0]
rng = flow.random.CoinFlip(batch_size=train_batch_size) # , seed=seed)
normal = flow.image.CropMirrorNormalize(image, mirror_blob=rng,
color_space=color_space, output_layout=output_layout,
mean=args.rgb_mean, std=args.rgb_std, output_dtype=flow.float)
return label, normal
def load_imagenet_for_validation(args):
total_device_num = args.num_nodes * args.gpu_num_per_node
val_batch_size = total_device_num * args.val_batch_size_per_device
output_layout="NHWC" if args.channel_last else "NCHW"
color_space = 'RGB'
ofrecord = flow.data.ofrecord_reader(args.val_data_dir,
batch_size=val_batch_size,
data_part_num=args.val_data_part_num,
part_name_suffix_length=5,
shuffle_after_epoch=False)
image = flow.data.OFRecordImageDecoder(
ofrecord, "images", color_space=color_space)
label = flow.data.OFRecordRawDecoder(
ofrecord, "labels", shape=(), dtype=flow.int32)
rsz = flow.image.Resize(
image, resize_side="shorter",
keep_aspect_ratio=True,
target_size=args.resize_shorter)
normal = flow.image.CropMirrorNormalize(rsz[0], color_space=color_space, output_layout=output_layout,
crop_h=args.image_size, crop_w=args.image_size, crop_pos_y=0.5, crop_pos_x=0.5,
mean=args.rgb_mean, std=args.rgb_std, output_dtype=flow.float)
return label, normal
if __name__ == "__main__":
import os
import config as configs
from util import InitNodes, Metric
from job_function_util import get_val_config
parser = configs.get_parser()
args = parser.parse_args()
configs.print_args(args)
flow.config.gpu_device_num(args.gpu_num_per_node)
#flow.config.enable_debug_mode(True)
@flow.global_function(get_val_config(args))
def IOTest():
if args.train_data_dir:
assert os.path.exists(args.train_data_dir)
print("Loading data from {}".format(args.train_data_dir))
(labels, images) = load_imagenet_for_training(args)
else:
print("Loading synthetic data.")
(labels, images) = load_synthetic(args)
outputs = {"images": images, "labels": labels}
return outputs
total_device_num = args.num_nodes * args.gpu_num_per_node
train_batch_size = total_device_num * args.batch_size_per_device
metric = Metric(desc='io_test', calculate_batches=args.loss_print_every_n_iter,
batch_size=train_batch_size, prediction_key=None)
for i in range(1000):
IOTest().async_get(metric.metric_cb(0, i))
| [
"oneflow.image.Resize",
"oneflow.image.CropMirrorNormalize",
"oneflow.data.OFRecordRawDecoder",
"oneflow.data.OFRecordBytesDecoder",
"oneflow.data.ImageDecoderRandomCropResize",
"oneflow.zeros_initializer",
"oneflow.data.OFRecordImageDecoder",
"oneflow.data.OFRecordImageDecoderRandomCrop",
"oneflow.... | [((1924, 2001), 'oneflow.data.decode_random', 'flow.data.decode_random', ([], {'shape': 'shape', 'dtype': 'flow.float', 'batch_size': 'batch_size'}), '(shape=shape, dtype=flow.float, batch_size=batch_size)\n', (1947, 2001), True, 'import oneflow as flow\n'), ((2310, 2507), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['args.train_data_dir'], {'batch_size': 'train_batch_size', 'data_part_num': 'args.train_data_part_num', 'part_name_suffix_length': '(5)', 'random_shuffle': '(True)', 'shuffle_after_epoch': '(True)'}), '(args.train_data_dir, batch_size=train_batch_size,\n data_part_num=args.train_data_part_num, part_name_suffix_length=5,\n random_shuffle=True, shuffle_after_epoch=True)\n', (2335, 2507), True, 'import oneflow as flow\n'), ((2712, 2788), 'oneflow.data.OFRecordRawDecoder', 'flow.data.OFRecordRawDecoder', (['ofrecord', '"""labels"""'], {'shape': '()', 'dtype': 'flow.int32'}), "(ofrecord, 'labels', shape=(), dtype=flow.int32)\n", (2740, 2788), True, 'import oneflow as flow\n'), ((3317, 3366), 'oneflow.random.CoinFlip', 'flow.random.CoinFlip', ([], {'batch_size': 'train_batch_size'}), '(batch_size=train_batch_size)\n', (3337, 3366), True, 'import oneflow as flow\n'), ((3396, 3577), 'oneflow.image.CropMirrorNormalize', 'flow.image.CropMirrorNormalize', (['image'], {'mirror_blob': 'rng', 'color_space': 'color_space', 'output_layout': 'output_layout', 'mean': 'args.rgb_mean', 'std': 'args.rgb_std', 'output_dtype': 'flow.float'}), '(image, mirror_blob=rng, color_space=\n color_space, output_layout=output_layout, mean=args.rgb_mean, std=args.\n rgb_std, output_dtype=flow.float)\n', (3426, 3577), True, 'import oneflow as flow\n'), ((3954, 4125), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['args.val_data_dir'], {'batch_size': 'val_batch_size', 'data_part_num': 'args.val_data_part_num', 'part_name_suffix_length': '(5)', 'shuffle_after_epoch': '(False)'}), '(args.val_data_dir, batch_size=val_batch_size,\n 
data_part_num=args.val_data_part_num, part_name_suffix_length=5,\n shuffle_after_epoch=False)\n', (3979, 4125), True, 'import oneflow as flow\n'), ((4306, 4381), 'oneflow.data.OFRecordImageDecoder', 'flow.data.OFRecordImageDecoder', (['ofrecord', '"""images"""'], {'color_space': 'color_space'}), "(ofrecord, 'images', color_space=color_space)\n", (4336, 4381), True, 'import oneflow as flow\n'), ((4403, 4479), 'oneflow.data.OFRecordRawDecoder', 'flow.data.OFRecordRawDecoder', (['ofrecord', '"""labels"""'], {'shape': '()', 'dtype': 'flow.int32'}), "(ofrecord, 'labels', shape=(), dtype=flow.int32)\n", (4431, 4479), True, 'import oneflow as flow\n'), ((4500, 4608), 'oneflow.image.Resize', 'flow.image.Resize', (['image'], {'resize_side': '"""shorter"""', 'keep_aspect_ratio': '(True)', 'target_size': 'args.resize_shorter'}), "(image, resize_side='shorter', keep_aspect_ratio=True,\n target_size=args.resize_shorter)\n", (4517, 4608), True, 'import oneflow as flow\n'), ((4644, 4893), 'oneflow.image.CropMirrorNormalize', 'flow.image.CropMirrorNormalize', (['rsz[0]'], {'color_space': 'color_space', 'output_layout': 'output_layout', 'crop_h': 'args.image_size', 'crop_w': 'args.image_size', 'crop_pos_y': '(0.5)', 'crop_pos_x': '(0.5)', 'mean': 'args.rgb_mean', 'std': 'args.rgb_std', 'output_dtype': 'flow.float'}), '(rsz[0], color_space=color_space,\n output_layout=output_layout, crop_h=args.image_size, crop_w=args.\n image_size, crop_pos_y=0.5, crop_pos_x=0.5, mean=args.rgb_mean, std=\n args.rgb_std, output_dtype=flow.float)\n', (4674, 4893), True, 'import oneflow as flow\n'), ((5166, 5186), 'config.get_parser', 'configs.get_parser', ([], {}), '()\n', (5184, 5186), True, 'import config as configs\n'), ((5222, 5246), 'config.print_args', 'configs.print_args', (['args'], {}), '(args)\n', (5240, 5246), True, 'import config as configs\n'), ((5252, 5301), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', 
(5278, 5301), True, 'import oneflow as flow\n'), ((5964, 6088), 'util.Metric', 'Metric', ([], {'desc': '"""io_test"""', 'calculate_batches': 'args.loss_print_every_n_iter', 'batch_size': 'train_batch_size', 'prediction_key': 'None'}), "(desc='io_test', calculate_batches=args.loss_print_every_n_iter,\n batch_size=train_batch_size, prediction_key=None)\n", (5970, 6088), False, 'from util import InitNodes, Metric\n'), ((2847, 2897), 'oneflow.data.OFRecordBytesDecoder', 'flow.data.OFRecordBytesDecoder', (['ofrecord', '"""images"""'], {}), "(ofrecord, 'images')\n", (2877, 2897), True, 'import oneflow as flow\n'), ((2914, 3017), 'oneflow.data.ImageDecoderRandomCropResize', 'flow.data.ImageDecoderRandomCropResize', (['encoded'], {'target_width': '(224)', 'target_height': '(224)', 'num_workers': '(3)'}), '(encoded, target_width=224,\n target_height=224, num_workers=3)\n', (2952, 3017), True, 'import oneflow as flow\n'), ((3040, 3130), 'oneflow.data.OFRecordImageDecoderRandomCrop', 'flow.data.OFRecordImageDecoderRandomCrop', (['ofrecord', '"""images"""'], {'color_space': 'color_space'}), "(ofrecord, 'images', color_space=\n color_space)\n", (3080, 3130), True, 'import oneflow as flow\n'), ((3210, 3282), 'oneflow.image.Resize', 'flow.image.Resize', (['image'], {'target_size': '[args.image_size, args.image_size]'}), '(image, target_size=[args.image_size, args.image_size])\n', (3227, 3282), True, 'import oneflow as flow\n'), ((5369, 5389), 'job_function_util.get_val_config', 'get_val_config', (['args'], {}), '(args)\n', (5383, 5389), False, 'from job_function_util import get_val_config\n'), ((1757, 1791), 'oneflow.zeros_initializer', 'flow.zeros_initializer', (['flow.int32'], {}), '(flow.int32)\n', (1779, 1791), True, 'import oneflow as flow\n'), ((5460, 5495), 'os.path.exists', 'os.path.exists', (['args.train_data_dir'], {}), '(args.train_data_dir)\n', (5474, 5495), False, 'import os\n')] |
import pickle
import oneflow as flow
import Levenshtein as Lev
class Decoder(object):
"""
Basic decoder class from which all other decoders inherit. Implements several
helper functions. Subclasses should implement the decode() method.
Arguments:
labels (string): mapping from integers to characters.
blank_index (int, optional): index for the blank '_' character. Defaults to 0.
space_index (int, optional): index for the space ' ' character. Defaults to 28.
"""
def __init__(self, int_to_char, blank_index=0):
self.int_to_char = int_to_char
self.blank_index = blank_index
def wer(self, real_strings, pred_strings):
"""
Computes the Word Error Rate, defined as the edit distance between the
two provided sentences after tokenizing to words.
Arguments:
s1 (string): space-separated sentence
s2 (string): space-separated sentence
"""
assert len(real_strings) == len(pred_strings)
wer = 0
for i in range(len(real_strings)):
wer += Lev.distance(real_strings[i], pred_strings[i])
return wer / len(real_strings)
class GreedyDecoder(Decoder):
def __init__(self, blank_index=0):
super(GreedyDecoder, self).__init__(blank_index)
def convert_to_strings(
self, sequences, sizes=6, remove_repetitions=True, return_offsets=True
):
"""Given a list of numeric sequences, returns the corresponding strings"""
strings = []
for i in range(sequences.size(0)):
string, string_offsets = self.process_string(
sequences[i], remove_repetitions
)
strings.append(string) # We only return one path
if return_offsets:
if i == 0:
offsets = string_offsets.unsqueeze(0)
else:
offsets = flow.cat((offsets, string_offsets.unsqueeze(0)), 0)
if return_offsets:
return strings, offsets.to("cuda")
else:
return strings
def process_string(self, sequence, remove_repetitions=True):
string = ""
offsets = []
for i in range(sequence.size(0)):
char = self.int_to_char[sequence[i].numpy().item()]
if char != self.int_to_char[self.blank_index]:
if (
remove_repetitions
and i != 0
and char == self.int_to_char[sequence[i - 1].numpy().item()]
):
pass
elif sequence[i].numpy().item() != 1 and len(offsets) < 6:
string = string + char
offsets.append(sequence[i].numpy().item())
elif len(offsets) < 6:
offsets.append(sequence[i].numpy().item())
if len(offsets) < 6:
offsets += [1] * (6 - len(offsets))
return string, flow.tensor(offsets, dtype=flow.int)
def decode(self, ctc_matrix):
top = flow.topk(ctc_matrix, k=1, dim=1)
new_top = top[1][0].detach()
for i in range(1, top[1].size(0)):
cur = top[1][i].detach()
new_top = flow.cat((new_top, cur), 0)
return new_top
| [
"oneflow.cat",
"oneflow.topk",
"oneflow.tensor"
] | [((3059, 3092), 'oneflow.topk', 'flow.topk', (['ctc_matrix'], {'k': '(1)', 'dim': '(1)'}), '(ctc_matrix, k=1, dim=1)\n', (3068, 3092), True, 'import oneflow as flow\n'), ((1103, 1149), 'Levenshtein.distance', 'Lev.distance', (['real_strings[i]', 'pred_strings[i]'], {}), '(real_strings[i], pred_strings[i])\n', (1115, 1149), True, 'import Levenshtein as Lev\n'), ((2973, 3009), 'oneflow.tensor', 'flow.tensor', (['offsets'], {'dtype': 'flow.int'}), '(offsets, dtype=flow.int)\n', (2984, 3009), True, 'import oneflow as flow\n'), ((3233, 3260), 'oneflow.cat', 'flow.cat', (['(new_top, cur)', '(0)'], {}), '((new_top, cur), 0)\n', (3241, 3260), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import random
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
def test_basic_slice(test_case, numpy_x):
x = flow.tensor(numpy_x)
test_case.assertTrue(np.allclose(numpy_x[1], x[1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[-2], x[-2].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0, 1], x[0, 1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[(0, 1)], x[(0, 1)].numpy()))
test_case.assertTrue(np.allclose(numpy_x[((0, 1))], x[((0, 1))].numpy()))
test_case.assertTrue(np.allclose(numpy_x[None], x[None].numpy()))
test_case.assertTrue(np.allclose(numpy_x[True], x[True].numpy()))
test_case.assertTrue(np.allclose(numpy_x[1, None], x[1, None].numpy()))
test_case.assertTrue(np.allclose(numpy_x[1, None, 1], x[1, None, 1].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[1, None, None, 1], x[1, None, None, 1].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[:], x[:].numpy()))
test_case.assertTrue(np.allclose(numpy_x[:1], x[:1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:1], x[0:1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[-2:-1], x[-2:-1].numpy()))
test_case.assertTrue(np.allclose(numpy_x[2:100:200], x[2:100:200].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:2, ...], x[0:2, ...].numpy()))
test_case.assertTrue(np.allclose(numpy_x[0:2, ..., 1], x[0:2, ..., 1].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[0:2, ..., 1, 1], x[0:2, ..., 1, 1].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[0:4:2, ...], x[0:4:2, ...].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[0:2, None, ..., True], x[0:2, None, ..., True].numpy())
)
test_case.assertTrue(
np.allclose(numpy_x[None, ..., 0:4:2, True], x[None, ..., 0:4:2, True].numpy())
)
test_case.assertTrue(np.allclose(numpy_x[False, ...], x[False, ...].numpy()))
test_case.assertTrue(
np.allclose(numpy_x[False, True, ...], x[False, True, ...].numpy())
)
test_case.assertTrue(
np.allclose(numpy_x[True, ..., False, True], x[True, ..., False, True].numpy())
)
test_case.assertTrue(
np.allclose(
numpy_x[True, None, ..., False, True],
x[True, None, ..., False, True].numpy(),
)
)
test_case.assertTrue(
np.allclose(
numpy_x[True, 1, ..., False, True], x[True, 1, ..., False, True].numpy()
)
)
def test_advanced_indexing(test_case, numpy_x):
    """Check advanced (fancy) indexing parity between numpy and oneflow."""
    x = flow.tensor(numpy_x)

    def _check(np_result, of_result):
        # Compare a numpy indexing result against the oneflow one.
        test_case.assertTrue(np.allclose(np_result, of_result.numpy()))

    # integer-list indices of increasing nesting depth
    _check(numpy_x[[0, 1]], x[[0, 1]])
    _check(numpy_x[[0, 1], [1, 0]], x[[0, 1], [1, 0]])
    _check(numpy_x[[[0, 1], [0, 1], [1, 0]]], x[[[0, 1], [0, 1], [1, 0]]])
    _check(numpy_x[[[0], [1]]], x[[[0], [1]]])
    _check(
        numpy_x[[[[0], [1]], [[0], [1]], [0, 1]]],
        x[[[[0], [1]], [[0], [1]], [0, 1]]],
    )
    _check(
        numpy_x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]],
        x[[[[0, 1], [1, 1]], [[0, 0], [1, 1]], [0, 1]]],
    )
    # Tensor index
    _check(
        numpy_x[np.array([0, 1]), np.array([1, 0])],
        x[flow.tensor([0, 1]), flow.tensor([1, 0])],
    )
    _check(
        numpy_x[:, np.array([[0, 1], [1, 1]]), np.array([[1, 0], [1, 1]])],
        x[:, flow.tensor([[0, 1], [1, 1]]), flow.tensor([[1, 0], [1, 1]])],
    )
    # mask tensor index over the two leading axes
    mask = np.random.rand(numpy_x.shape[0], numpy_x.shape[1]).astype(np.float32)
    y = flow.tensor(mask)
    for threshold in (0.5, 0, 1):
        _check(numpy_x[mask > threshold], x[y > threshold])
        _check(numpy_x[mask > threshold, 1], x[y > threshold, 1])
    # mask tensor index covering the full input shape
    mask = np.random.rand(*numpy_x.shape).astype(np.float32)
    y = flow.tensor(mask)
    for threshold in (0.5, 0, 1):
        _check(numpy_x[mask > threshold], x[y > threshold])
def test_combining_indexing(test_case, numpy_x):
    """Check indexing that mixes integer lists with slices/scalars/ellipsis.

    Each entry below is the explicit tuple form of a subscript expression,
    applied identically to the numpy array and the oneflow tensor.
    """
    x = flow.tensor(numpy_x)
    index_cases = (
        ([0, 1], slice(1, 2), [1, 0]),          # lists around a slice
        (slice(None), [0, 1], [1, 0]),          # leading full slice
        (slice(None), [0, 1], 1),               # list plus scalar
        (Ellipsis, [0, 1], 1, [1, 0]),          # ellipsis prefix
    )
    for index in index_cases:
        test_case.assertTrue(np.allclose(numpy_x[index], x[index].numpy()))
def test_mask_setitem(test_case, numpy_x):
    """Check masked assignment (boolean-mask setitem) parity with numpy.

    Mutates ``numpy_x`` in place, mirroring each assignment on the tensor.
    """
    x = flow.tensor(numpy_x)
    # mask tensor index
    mask = np.random.rand(*numpy_x.shape).astype(np.float32)
    y = flow.tensor(mask)

    def _assert_match():
        test_case.assertTrue(np.allclose(numpy_x, x.numpy()))

    # broadcast set: scalar written into every selected position
    x[y > 0.5] = 1.0
    numpy_x[mask > 0.5] = 1.0
    _assert_match()
    # elementwise set: one update value per selected element
    update = np.random.randn((mask > 0.5).sum()).astype(np.float32)
    x[y > 0.5] = flow.tensor(update)
    numpy_x[mask > 0.5] = update
    _assert_match()
    # empty mask: assignment selects nothing but must not raise
    x[y > 1.0] = 1.0
    numpy_x[mask > 1.0] = 1.0
    _assert_match()
@flow.unittest.skip_unless_1n1d()
class TestTensorIndexing(flow.unittest.TestCase):
    """Drive every module-level indexing check over three input shapes."""

    @staticmethod
    def _sample_inputs():
        # Lazily yield the same three float32 arange arrays the tests use.
        for shape in ([3, 4, 5], [3, 4, 5, 6], [8, 9, 10]):
            numel = int(np.prod(shape))
            yield np.arange(0, numel, 1).reshape(shape).astype(np.float32)

    def test_basic_slice(test_case):
        for numpy_x in test_case._sample_inputs():
            test_basic_slice(test_case, numpy_x)

    def test_advanced_indexing(test_case):
        for numpy_x in test_case._sample_inputs():
            test_advanced_indexing(test_case, numpy_x)

    def test_combining_indexing(test_case):
        for numpy_x in test_case._sample_inputs():
            test_combining_indexing(test_case, numpy_x)

    def test_mask_setitem(test_case):
        for numpy_x in test_case._sample_inputs():
            test_mask_setitem(test_case, numpy_x)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor"
] | [((6552, 6584), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6582, 6584), True, 'import oneflow as flow\n'), ((775, 795), 'oneflow.tensor', 'flow.tensor', (['numpy_x'], {}), '(numpy_x)\n', (786, 795), True, 'import oneflow as flow\n'), ((3178, 3198), 'oneflow.tensor', 'flow.tensor', (['numpy_x'], {}), '(numpy_x)\n', (3189, 3198), True, 'import oneflow as flow\n'), ((4531, 4548), 'oneflow.tensor', 'flow.tensor', (['mask'], {}), '(mask)\n', (4542, 4548), True, 'import oneflow as flow\n'), ((5095, 5112), 'oneflow.tensor', 'flow.tensor', (['mask'], {}), '(mask)\n', (5106, 5112), True, 'import oneflow as flow\n'), ((5401, 5421), 'oneflow.tensor', 'flow.tensor', (['numpy_x'], {}), '(numpy_x)\n', (5412, 5421), True, 'import oneflow as flow\n'), ((5906, 5926), 'oneflow.tensor', 'flow.tensor', (['numpy_x'], {}), '(numpy_x)\n', (5917, 5926), True, 'import oneflow as flow\n'), ((6021, 6038), 'oneflow.tensor', 'flow.tensor', (['mask'], {}), '(mask)\n', (6032, 6038), True, 'import oneflow as flow\n'), ((6280, 6299), 'oneflow.tensor', 'flow.tensor', (['update'], {}), '(update)\n', (6291, 6299), True, 'import oneflow as flow\n'), ((8359, 8374), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8372, 8374), False, 'import unittest\n'), ((4453, 4503), 'numpy.random.rand', 'np.random.rand', (['numpy_x.shape[0]', 'numpy_x.shape[1]'], {}), '(numpy_x.shape[0], numpy_x.shape[1])\n', (4467, 4503), True, 'import numpy as np\n'), ((5037, 5067), 'numpy.random.rand', 'np.random.rand', (['*numpy_x.shape'], {}), '(*numpy_x.shape)\n', (5051, 5067), True, 'import numpy as np\n'), ((5963, 5993), 'numpy.random.rand', 'np.random.rand', (['*numpy_x.shape'], {}), '(*numpy_x.shape)\n', (5977, 5993), True, 'import numpy as np\n'), ((4067, 4083), 'numpy.array', 'np.array', (['[0, 1]'], {}), '([0, 1])\n', (4075, 4083), True, 'import numpy as np\n'), ((4085, 4101), 'numpy.array', 'np.array', (['[1, 0]'], {}), '([1, 0])\n', (4093, 4101), True, 'import 
numpy as np\n'), ((4255, 4281), 'numpy.array', 'np.array', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (4263, 4281), True, 'import numpy as np\n'), ((4283, 4309), 'numpy.array', 'np.array', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (4291, 4309), True, 'import numpy as np\n'), ((6690, 6709), 'numpy.arange', 'np.arange', (['(0)', '(60)', '(1)'], {}), '(0, 60, 1)\n', (6699, 6709), True, 'import numpy as np\n'), ((6812, 6832), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(1)'], {}), '(0, 360, 1)\n', (6821, 6832), True, 'import numpy as np\n'), ((6938, 6958), 'numpy.arange', 'np.arange', (['(0)', '(720)', '(1)'], {}), '(0, 720, 1)\n', (6947, 6958), True, 'import numpy as np\n'), ((7105, 7124), 'numpy.arange', 'np.arange', (['(0)', '(60)', '(1)'], {}), '(0, 60, 1)\n', (7114, 7124), True, 'import numpy as np\n'), ((7233, 7253), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(1)'], {}), '(0, 360, 1)\n', (7242, 7253), True, 'import numpy as np\n'), ((7365, 7385), 'numpy.arange', 'np.arange', (['(0)', '(720)', '(1)'], {}), '(0, 720, 1)\n', (7374, 7385), True, 'import numpy as np\n'), ((7539, 7558), 'numpy.arange', 'np.arange', (['(0)', '(60)', '(1)'], {}), '(0, 60, 1)\n', (7548, 7558), True, 'import numpy as np\n'), ((7668, 7688), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(1)'], {}), '(0, 360, 1)\n', (7677, 7688), True, 'import numpy as np\n'), ((7801, 7821), 'numpy.arange', 'np.arange', (['(0)', '(720)', '(1)'], {}), '(0, 720, 1)\n', (7810, 7821), True, 'import numpy as np\n'), ((7970, 7989), 'numpy.arange', 'np.arange', (['(0)', '(60)', '(1)'], {}), '(0, 60, 1)\n', (7979, 7989), True, 'import numpy as np\n'), ((8093, 8113), 'numpy.arange', 'np.arange', (['(0)', '(360)', '(1)'], {}), '(0, 360, 1)\n', (8102, 8113), True, 'import numpy as np\n'), ((8220, 8240), 'numpy.arange', 'np.arange', (['(0)', '(720)', '(1)'], {}), '(0, 720, 1)\n', (8229, 8240), True, 'import numpy as np\n'), ((4118, 4137), 'oneflow.tensor', 'flow.tensor', (['[0, 1]'], {}), 
'([0, 1])\n', (4129, 4137), True, 'import oneflow as flow\n'), ((4139, 4158), 'oneflow.tensor', 'flow.tensor', (['[1, 0]'], {}), '([1, 0])\n', (4150, 4158), True, 'import oneflow as flow\n'), ((4329, 4358), 'oneflow.tensor', 'flow.tensor', (['[[0, 1], [1, 1]]'], {}), '([[0, 1], [1, 1]])\n', (4340, 4358), True, 'import oneflow as flow\n'), ((4360, 4389), 'oneflow.tensor', 'flow.tensor', (['[[1, 0], [1, 1]]'], {}), '([[1, 0], [1, 1]])\n', (4371, 4389), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import typing
import oneflow as flow
import oneflow._oneflow_internal
import oneflow.python.framework.id_util as id_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("nn.fused_self_attention_query_mul_key_and_value")
def api_fused_self_attention_query_mul_key_and_value(
    x: oneflow._oneflow_internal.BlobDesc,
    head_size: int,
    alpha: float = 1.0,
    name: typing.Optional[str] = None,
) -> typing.Tuple[
    oneflow._oneflow_internal.BlobDesc, oneflow._oneflow_internal.BlobDesc
]:
    """Build the fused ``query*key`` and ``value`` self-attention op.

    Args:
        x: the hidden-states input blob.
        head_size: attention head size, forwarded as the op's "head_size" attr.
        alpha: scale factor forwarded as the op's "alpha" attr. Defaults to 1.0.
        name: optional op name; a unique one is generated when ``None``.

    Returns:
        A pair of blobs ``(query_mul_key, value)`` — the op's two outputs.
    """
    if name is None:
        name = id_util.UniqueStr("FusedSelfAttentionQueryMulKeyAndValue_")
    op = (
        flow.user_op_builder(name)
        .Op("fused_self_attention_query_mul_key_and_value")
        .Input("hidden_states", [x])
        .Attr("head_size", int(head_size))
        .Attr("alpha", float(alpha))
        .Output("query_mul_key")
        .Output("value")
        .Build()
    )
    # The op has two outputs; RemoteBlobList returns them in declaration order.
    qmk, v = op.InferAndTryRun().RemoteBlobList()
    return qmk, v
| [
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((773, 838), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.fused_self_attention_query_mul_key_and_value"""'], {}), "('nn.fused_self_attention_query_mul_key_and_value')\n", (787, 838), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1097, 1156), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""FusedSelfAttentionQueryMulKeyAndValue_"""'], {}), "('FusedSelfAttentionQueryMulKeyAndValue_')\n", (1114, 1156), True, 'import oneflow.python.framework.id_util as id_util\n'), ((1177, 1203), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1197, 1203), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
from oneflow.python.oneflow_export import experimental_api, oneflow_export
from .lr_scheduler import LrScheduler
@oneflow_export("optim.lr_scheduler.CosineAnnealingLR")
@experimental_api
class CosineAnnealingLR(LrScheduler):
    r"""Cosine-annealing learning-rate scheduler.

    While ``last_step < steps`` the learning rate follows a half cosine wave:

    .. math::

        & cos\_decay = 0.5*(1+cos(\pi*\frac{current\_step}{steps}))

        & decay\_factor = (1-\alpha)*cos\_decay+\alpha

        & learning\_rate = base\_learning\_rate*decay\_factor

    Once ``steps`` is reached, the rate stays at
    :math:`base\_learning\_rate * \alpha`.

    Proposed in `SGDR: Stochastic Gradient Descent with Warm Restarts`_;
    only the cosine-annealing part of SGDR is implemented, not the restarts.

    Args:
        optimizer(Optimizer): Wrapped optimizer.
        steps (int): The decay steps in the scheduler; must be positive.
        alpha (float, optional): The learning rate scale factor (:math:`\alpha`). (default: 0.0)
        last_step (int, optional): The index of last step. (default: -1)
        verbose (bool, optional): If ``True``, prints a message to stdout for each update. (default: ``False``)

    For example:

    .. code-block:: python

        import oneflow.experimental as flow

        ...
        cosine_annealing_lr = flow.optim.lr_scheduler.CosineAnnealingLR(optimizer, steps=100, alpha=0.0)
        for epoch in range(num_epoch):
            train(...)
            cosine_annealing_lr.step()

    .. _SGDR\: Stochastic Gradient Descent with Warm Restarts:
        https://arxiv.org/abs/1608.03983
    """

    def __init__(
        self, optimizer, steps: int, alpha: float = 0.0, last_step=-1, verbose=False
    ):
        assert steps > 0, f"steps must greater than zero, but got {steps}"
        self.steps = steps
        self.alpha = alpha
        super().__init__(optimizer, last_step, verbose)

    def get_lr(self):
        # Past the annealing horizon the rate is pinned at base_lr * alpha.
        if self.last_step >= self.steps:
            return [base_lr * self.alpha for base_lr in self.base_lrs]
        # Half-cosine decay from 1 down toward alpha over `steps` steps.
        cos_decay = 0.5 * (1 + math.cos(math.pi * self.last_step / self.steps))
        decay_factor = (1 - self.alpha) * cos_decay + self.alpha
        return [base_lr * decay_factor for base_lr in self.base_lrs]
| [
"oneflow.python.oneflow_export.oneflow_export"
] | [((720, 774), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optim.lr_scheduler.CosineAnnealingLR"""'], {}), "('optim.lr_scheduler.CosineAnnealingLR')\n", (734, 774), False, 'from oneflow.python.oneflow_export import experimental_api, oneflow_export\n'), ((2727, 2774), 'math.cos', 'math.cos', (['(math.pi * self.last_step / self.steps)'], {}), '(math.pi * self.last_step / self.steps)\n', (2735, 2774), False, 'import math\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
from oneflow import nn
import os
import numpy as np
import oneflow.unittest
class TestModule(nn.Module):
    """Module that funnels its input through every pair of 2-D SBP layouts.

    For each combination of four 1-D SBP signatures, the input is re-laid-out
    twice — (sbp1, sbp2) then (sbp3, sbp4) — exercising every
    (2, 2) -> (2, 2) boxing path.
    """

    def forward(self, x):
        # All 1-D SBP signatures usable on a rank-4 tensor.
        sbp_1ds = [
            flow.sbp.broadcast,
            flow.sbp.partial_sum,
            flow.sbp.split(0),
            flow.sbp.split(1),
            flow.sbp.split(2),
            flow.sbp.split(3),
        ]
        # Fix: removed the unused local `y = x` the original assigned.
        for sbp1 in sbp_1ds:
            for sbp2 in sbp_1ds:
                for sbp3 in sbp_1ds:
                    for sbp4 in sbp_1ds:
                        # (2, 2) -> (2, 2)
                        x = x.to_global(sbp=[sbp1, sbp2])
                        x = x.to_global(sbp=[sbp3, sbp4])
        return x
class _TestGraph(nn.Graph):
    """Thin nn.Graph wrapper that simply runs the wrapped module."""

    def __init__(self, model):
        super().__init__()
        self.model = model

    def build(self, x):
        return self.model(x)
@flow.unittest.skip_unless_1n4d()
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestLazyAllSbpCombinationTesting(flow.unittest.TestCase):
    """Run the all-combination 2-D boxing module lazily on a 2x2 cuda mesh."""

    def test_lazy_boxing_2d_all_combination(test_case):
        # Ranks 0..3 arranged as a 2x2 device mesh.
        placement = flow.placement(type="cuda", ranks=np.arange(4).reshape(2, 2))
        x = flow.ones(
            4,
            4,
            4,
            4,
            sbp=[flow.sbp.broadcast, flow.sbp.broadcast],
            placement=placement,
        )
        graph = _TestGraph(TestModule())
        graph(x)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.sbp.split",
"oneflow.unittest.skip_unless_1n4d"
] | [((1504, 1536), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (1534, 1536), True, 'import oneflow as flow\n'), ((1554, 1588), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1563, 1588), False, 'import os\n'), ((2123, 2138), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2136, 2138), False, 'import unittest\n'), ((863, 880), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (877, 880), True, 'import oneflow as flow\n'), ((894, 911), 'oneflow.sbp.split', 'flow.sbp.split', (['(1)'], {}), '(1)\n', (908, 911), True, 'import oneflow as flow\n'), ((925, 942), 'oneflow.sbp.split', 'flow.sbp.split', (['(2)'], {}), '(2)\n', (939, 942), True, 'import oneflow as flow\n'), ((956, 973), 'oneflow.sbp.split', 'flow.sbp.split', (['(3)'], {}), '(3)\n', (970, 973), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import oneflow as flow
import oneflow.unittest
def _test_linear_train_graph_with_zero(test_case, zero_stage=1):
    """Train a small consistent Linear under the given ZeRO stage (1/2/3).

    Runs one AMP training iteration of a broadcast-placed Linear on a 2-GPU
    placement, asserts the graph pass rewrite changed the parameters' SBP
    from broadcast to split(0), then runs an eval graph on the same
    (now split) parameters.
    """

    def train_with_graph(iter_num=1):
        # Two-GPU placement; B = broadcast, S0 = split along dim 0.
        P = flow.placement("cuda", {0: [0, 1]})
        B = flow.sbp.broadcast
        S0 = flow.sbp.split(0)
        linear = flow.nn.Linear(8, 4)
        linear = linear.to_consistent(placement=P, sbp=B)
        # Deterministic parameter init so runs are comparable.
        flow.nn.init.constant_(linear.weight, 2.068758)
        flow.nn.init.constant_(linear.bias, 0.23)
        of_sgd = flow.optim.SGD(linear.parameters(), lr=0.001, momentum=0.9)
        grad_scaler = flow.amp.StaticGradScaler(200)
        # Input batch is split(0) across the two ranks.
        x = flow.randint(1, 100, (4, 8), dtype=flow.float32, placement=P, sbp=S0)

        class LinearTrainGraphWithZeRO(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.linear = linear
                self.add_optimizer(of_sgd)
                self.config.enable_amp(True)
                self.set_grad_scaler(grad_scaler)
                if zero_stage == 1:
                    print("zero stage 1 optimization")
                    # min_size_after_split=1 forces even tiny params to be split.
                    self.config.set_zero_redundancy_optimizer_mode("distributed_split")
                    self.config.set_zero_redundancy_optimizer_min_size_after_split(1)
                if zero_stage == 2:
                    # Stage 2 additionally runs boxing on the compute stream.
                    self.config.set_zero_redundancy_optimizer_mode("distributed_split")
                    self.config.set_zero_redundancy_optimizer_min_size_after_split(1)
                    flow.boxing.nccl.enable_use_compute_stream(True)
                if zero_stage == 3:
                    print("zero stage 3 optimization")
                    # Stage 3 also disables group boxing by dst parallel.
                    self.config.set_zero_redundancy_optimizer_mode("distributed_split")
                    self.config.set_zero_redundancy_optimizer_min_size_after_split(1)
                    flow.boxing.nccl.enable_use_compute_stream(True)
                    flow.boxing.nccl.disable_group_boxing_by_dst_parallel(True)

            def build(self, x):
                out = self.linear(x)
                loss = out.sum()
                loss.backward()
                return out

        class LinearEvalGraphWithZeRO(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.linear = linear
                self.config.enable_amp(True)

            def build(self, x):
                out = self.linear(x)
                return out

        linear_t_g = LinearTrainGraphWithZeRO()
        linear_e_g = LinearEvalGraphWithZeRO()

        def one_train_iter():
            out = linear_t_g(x)

        def one_eval_iter():
            out = linear_e_g(x)

        for i in range(iter_num):
            one_train_iter()

        # After pass rewrite in training graph, parameters' sbp has been
        # changed from flow.sbp.broadcast to flow.sbp.split(0)
        test_case.assertEqual(linear.weight.sbp[0], S0)
        test_case.assertEqual(linear.bias.sbp[0], S0)

        # In the evaluation graph, parameters' sbp are flow.sbp.split(0),
        # but their consumers will consume them as flow.sbp.broadcast.
        one_eval_iter()

    iter_num = 1
    graph_check_list = train_with_graph(iter_num)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n2d()
class TestLinearTrainGraphWithZeRO(oneflow.unittest.TestCase):
    """One test per ZeRO optimizer stage; requires 1 node with 2 devices."""

    def test_linear_train_graph_with_zero_1(test_case):
        _test_linear_train_graph_with_zero(test_case, 1)

    def test_linear_train_graph_with_zero_2(test_case):
        _test_linear_train_graph_with_zero(test_case, 2)

    def test_linear_train_graph_with_zero_3(test_case):
        _test_linear_train_graph_with_zero(test_case, 3)
# Allow running this test file directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.nn.Linear",
"oneflow.boxing.nccl.enable_use_compute_stream",
"oneflow.randint",
"oneflow.sbp.split",
"oneflow.placement",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.nn.init.constant_",
"oneflow.amp.StaticGradScaler",
"oneflow.boxing.nccl.disable_group_boxing_by_dst_parallel"
] | [((3877, 3909), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (3907, 3909), True, 'import oneflow as flow\n'), ((3817, 3851), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3826, 3851), False, 'import os\n'), ((4347, 4362), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4360, 4362), False, 'import unittest\n'), ((800, 837), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '{(0): [0, 1]}'], {}), "('cuda', {(0): [0, 1]})\n", (814, 837), True, 'import oneflow as flow\n'), ((880, 897), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (894, 897), True, 'import oneflow as flow\n'), ((915, 935), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(8)', '(4)'], {}), '(8, 4)\n', (929, 935), True, 'import oneflow as flow\n'), ((1002, 1049), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.068758)'], {}), '(linear.weight, 2.068758)\n', (1024, 1049), True, 'import oneflow as flow\n'), ((1058, 1099), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.bias', '(0.23)'], {}), '(linear.bias, 0.23)\n', (1080, 1099), True, 'import oneflow as flow\n'), ((1199, 1229), 'oneflow.amp.StaticGradScaler', 'flow.amp.StaticGradScaler', (['(200)'], {}), '(200)\n', (1224, 1229), True, 'import oneflow as flow\n'), ((1243, 1312), 'oneflow.randint', 'flow.randint', (['(1)', '(100)', '(4, 8)'], {'dtype': 'flow.float32', 'placement': 'P', 'sbp': 'S0'}), '(1, 100, (4, 8), dtype=flow.float32, placement=P, sbp=S0)\n', (1255, 1312), True, 'import oneflow as flow\n'), ((2107, 2155), 'oneflow.boxing.nccl.enable_use_compute_stream', 'flow.boxing.nccl.enable_use_compute_stream', (['(True)'], {}), '(True)\n', (2149, 2155), True, 'import oneflow as flow\n'), ((2441, 2489), 'oneflow.boxing.nccl.enable_use_compute_stream', 'flow.boxing.nccl.enable_use_compute_stream', (['(True)'], {}), '(True)\n', (2483, 2489), True, 'import oneflow as flow\n'), ((2510, 
2569), 'oneflow.boxing.nccl.disable_group_boxing_by_dst_parallel', 'flow.boxing.nccl.disable_group_boxing_by_dst_parallel', (['(True)'], {}), '(True)\n', (2563, 2569), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence, Union
import oneflow
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow._oneflow_internal
@oneflow_export("pad")
def pad(
    x: oneflow._oneflow_internal.BlobDesc,
    paddings: Sequence[int],
    constant_value: Union[int, float] = 0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Pad the input Blob with a constant value.

    ``paddings`` gives one ``(before, after)`` pair per input dimension.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): The input Blob.
        paddings (Sequence[int]): One length-2 tuple/list per input dimension;
            its length must equal ``len(x.shape)``.
        constant_value (Union[int, float], optional): The constant value to pad. Defaults to 0.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Raises:
        ValueError: The parameter `paddings` must be a tuple or a list.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The Blob after padding.

    For example:

    .. code-block:: python

        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np


        @flow.global_function()
        def pad_Job(x: tp.Numpy.Placeholder((3, 3))
        ) -> tp.Numpy:
            return flow.pad(x,
                            paddings=((2, 2), (1, 1)),
                            constant_value=5)


        x = np.array([[1, 1, 1],
                    [1, 1, 1],
                    [1, 1, 1]]).astype(np.float32)
        out = pad_Job(x)

        # out [[5. 5. 5. 5. 5.]
        #      [5. 5. 5. 5. 5.]
        #      [5. 1. 1. 1. 5.]
        #      [5. 1. 1. 1. 5.]
        #      [5. 1. 1. 1. 5.]
        #      [5. 5. 5. 5. 5.]
        #      [5. 5. 5. 5. 5.]]

    """
    if not isinstance(paddings, (list, tuple)):
        raise ValueError("paddings must be a tuple or a list.")
    assert len(paddings) == len(x.shape), ValueError(
        "paddings must be the same size of input dims"
    )
    padding_before = []
    padding_after = []
    for pair in paddings:
        assert isinstance(pair, (list, tuple)) and len(pair) == 2, ValueError(
            "the elem of paddings must be a tuple or a list with length of 2"
        )
        padding_before.append(pair[0])
        padding_after.append(pair[1])
    # The op carries both constants; only the one matching x's dtype is used.
    if x.dtype in [oneflow.float32, oneflow.float16, oneflow.float64]:
        floating_constant_value = float(constant_value)
        integral_constant_value = int(0)
    else:
        floating_constant_value = float(0)
        integral_constant_value = int(constant_value)
    op = (
        oneflow.user_op_builder(name if name is not None else id_util.UniqueStr("Pad_"))
        .Op("pad")
        .Input("x", [x])
        .Output("y")
        .Attr("padding_before", padding_before)
        .Attr("padding_after", padding_after)
        .Attr("floating_constant_value", floating_constant_value)
        .Attr("integral_constant_value", integral_constant_value)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("pad_grad")
def pad_grad(
    x: oneflow._oneflow_internal.BlobDesc,
    paddings: Sequence[int],
    constant_value: Union[int, float] = 0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Gradient counterpart of ``pad``: builds the "pad_grad" user op.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): The incoming gradient Blob
            (fed to the op's "dy" input).
        paddings (Sequence[int]): One length-2 tuple/list per input dimension;
            its length must equal ``len(x.shape)``.
        constant_value (Union[int, float], optional): The constant pad value. Defaults to 0.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Raises:
        ValueError: The parameter `paddings` must be a tuple or a list.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The op's "dx" output Blob.
    """
    if not isinstance(paddings, (list, tuple)):
        raise ValueError("paddings must be a tuple or a list.")
    assert len(paddings) == len(x.shape), ValueError(
        "paddings must be the same size of input dims"
    )
    padding_before = []
    padding_after = []
    for pair in paddings:
        assert isinstance(pair, (list, tuple)) and len(pair) == 2, ValueError(
            "the elem of paddings must be a tuple or a list with length of 2"
        )
        padding_before.append(pair[0])
        padding_after.append(pair[1])
    if name is None:
        name = id_util.UniqueStr("PadGrad_")
    op = (
        oneflow.user_op_builder(name)
        .Op("pad_grad")
        .Input("dy", [x])
        .Output("dx")
        .Attr("padding_before", padding_before)
        .Attr("padding_after", padding_after)
        .Attr("floating_constant_value", float(constant_value))
        .Attr("integral_constant_value", int(constant_value))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("same_padding")
def same_padding(
    x: oneflow._oneflow_internal.BlobDesc,
    padding: Sequence[int],
    data_format: str,
    kernel_size: Sequence[int],
    strides: Sequence[int],
    dilation_rate: Sequence[int],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Apply "SAME" padding so a following conv-like op keeps the spatial size.

    The pad width per spatial dimension is derived from ``kernel_size``,
    ``strides`` and ``dilation_rate`` by the underlying "same_padding" op.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): The input blob.
        padding (Sequence[int]): Must be the string "SAME_UPPER" or "SAME_LOWER".
        data_format (str): Strings starting with "NC" mean channels-first;
            anything else means channels-last.
        kernel_size (Sequence[int]): Kernel size, one entry per spatial dim.
        strides (Sequence[int]): Strides, one entry per spatial dim.
        dilation_rate (Sequence[int]): Dilation rate, one entry per spatial dim.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The Blob after padding.

    For example:

    .. code-block:: python

        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np


        @flow.global_function()
        def same_pad_Job(x: tp.Numpy.Placeholder((1, 1, 3, 3))
        ) -> tp.Numpy:
            return flow.same_padding(x,
                                    padding="SAME_UPPER",
                                    data_format="NCHW",
                                    kernel_size=(3, 3),
                                    strides=(1, 1),
                                    dilation_rate=(1, 1))


        x = np.ones(shape=(1, 1, 3, 3)).astype(np.float32)
        out = same_pad_Job(x)

        # out [[[[0. 0. 0. 0. 0.]
        #        [0. 1. 1. 1. 0.]
        #        [0. 1. 1. 1. 0.]
        #        [0. 1. 1. 1. 0.]
        #        [0. 0. 0. 0. 0.]]]]

    """
    assert isinstance(padding, str) and padding.upper() in (
        "SAME_LOWER",
        "SAME_UPPER",
    ), 'padding must be "SAME_LOWER" or "SAME_UPPER".'
    if data_format.startswith("NC"):
        channel_pos = "channels_first"
    else:
        channel_pos = "channels_last"
    assert isinstance(kernel_size, (list, tuple))
    assert isinstance(strides, (list, tuple))
    assert isinstance(dilation_rate, (list, tuple))
    # Every spatial attribute must have one entry per spatial dimension.
    num_spatial_dims = len(x.shape) - 2
    assert len(kernel_size) == num_spatial_dims
    assert len(strides) == num_spatial_dims
    assert len(dilation_rate) == num_spatial_dims
    if name is None:
        name = id_util.UniqueStr("SamePadding_")
    op = (
        oneflow.user_op_builder(name)
        .Op("same_padding")
        .Input("x", [x])
        .Output("y")
        .Attr("padding", padding.lower())
        .Attr("data_format", channel_pos)
        .Attr("kernel_size", kernel_size)
        .Attr("strides", strides)
        .Attr("dilation_rate", dilation_rate)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("reflection_pad2d")
def reflection_pad2d(
    x: oneflow._oneflow_internal.BlobDesc,
    padding: Union[int, tuple, list],
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """Pads the input tensor using the reflection of the input boundary.

    Args:
        x (oneflow._oneflow_internal.BlobDesc): input blob, only support "NCHW" format.
        padding (Union[int, tuple, list]): The padding width. An int pads all
            four sides equally; a 4-element tuple/list is interpreted as
            (padding_left, padding_right, padding_top, padding_bottom).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Raises:
        ValueError: If ``padding`` is neither an int nor a list/tuple, or a
            padding width is not smaller than the matching input dimension.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The Blob after padding.

    For example:

    .. code-block:: python

        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np


        @flow.global_function()
        def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
        ) -> tp.Numpy:
            return flow.reflection_pad2d(x, padding=[2, 2, 1, 1])


        x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
        out = pad_Job(x)

        # out [[[[ 5.  4.  3.  4.  5.  4.  3.]
        #        [ 2.  1.  0.  1.  2.  1.  0.]
        #        [ 5.  4.  3.  4.  5.  4.  3.]
        #        [ 8.  7.  6.  7.  8.  7.  6.]
        #        [ 5.  4.  3.  4.  5.  4.  3.]]

        #       [[ 14. 13. 12. 13. 14. 13. 12.]
        #        [ 11. 10.  9. 10. 11. 10.  9.]
        #        [ 14. 13. 12. 13. 14. 13. 12.]
        #        [ 17. 16. 15. 16. 17. 16. 15.]
        #        [ 14. 13. 12. 13. 14. 13. 12.]]]]

    """
    H, W = x.shape[2], x.shape[3]
    if isinstance(padding, (tuple, list)):
        assert len(padding) == len(x.shape), ValueError(
            "padding boundary must be the same size of input dims"
        )
        # Reflection padding must be strictly smaller than the padded dim.
        assert (
            padding[2] < H and padding[3] < H and padding[0] < W and padding[1] < W
        ), ValueError(
            "Padding size should be less than the corresponding input dimension!"
        )
        boundary = [padding[0], padding[1], padding[2], padding[3]]
    elif isinstance(padding, int):
        assert padding < H and padding < W, ValueError(
            "Padding size should be less than the corresponding input dimension!"
        )
        boundary = [padding, padding, padding, padding]
    else:
        # Fix: the original message read "must be in or list or tuple".
        raise ValueError("padding must be int or list or tuple!")
    return (
        oneflow.user_op_builder(
            name if name is not None else id_util.UniqueStr("Reflection_Pad2d_")
        )
        .Op("reflection_pad2d")
        .Input("x", [x])
        .Output("y")
        .Attr("padding", list(boundary))
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("replication_pad2d")
def replication_pad2d(
x: oneflow._oneflow_internal.BlobDesc,
padding: Union[int, tuple, list],
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""Pads the input tensor using the replication of the input boundary.
Args:
x (oneflow._oneflow_internal.BlobDesc): input blob, only support "NCHW" format.
padding (Union[int, oneflow._oneflow_internal.BlobDesc]): The size or bundary of padding, if is int uses the same padding in all dimension;
if 4-dims tuple, uses (\text{padding\_left}padding_left , \text{padding\_right}padding_right , \text{padding\_top}padding_top , \text{padding\_bottom}padding_bottom )
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: [description]
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3))
) -> tp.Numpy:
return flow.reflection_pad2d(x, padding=[2, 2, 1, 1])
x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
out = pad_Job(x)
# out [[[[ 0. 0. 0. 1. 2. 2. 2.]
# [ 0. 0. 0. 1. 2. 2. 2.]
# [ 3. 3. 3. 4. 5. 5. 5.]
# [ 6. 6. 6. 7. 8. 8. 8.]
# [ 6. 6. 6. 7. 8. 8. 8.]]
# [[ 9. 9. 9. 10. 11. 11. 11.]
# [ 9. 9. 9. 10. 11. 11. 11.]
# [ 12. 12. 12. 13. 14. 14. 14.]
# [ 15. 15. 15. 16. 17. 17. 17.]
# [ 15. 15. 15. 16. 17. 17. 17.]]]]
"""
H, W = x.shape[2], x.shape[3]
if isinstance(padding, (tuple, list)):
assert len(padding) == len(x.shape), ValueError(
"padding boundry must be the same size of input dims"
)
assert (
padding[2] < H and padding[3] < H and padding[0] < W and padding[1] < W
), ValueError(
"Padding size should be less than the corresponding input dimension!"
)
boundry = [padding[0], padding[1], padding[2], padding[3]]
elif isinstance(padding, int):
assert padding < H and padding < W, ValueError(
"Padding size should be less than the corresponding input dimension!"
)
boundry = [padding, padding, padding, padding]
else:
raise ValueError("padding must be in or list or tuple!")
return (
oneflow.user_op_builder(
name if name is not None else id_util.UniqueStr("Replication_Pad2d_")
)
.Op("replication_pad2d")
.Input("x", [x])
.Output("y")
.Attr("padding", list(boundry))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("constant_pad2d")
def constant_pad2d(
x: oneflow._oneflow_internal.BlobDesc,
padding: Union[int, tuple, list],
constant_value: Union[int, float] = 0,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""Pads the input tensor using an input constant value.
Args:
x (oneflow._oneflow_internal.BlobDesc): input blob, only support "NCHW" format.
padding (Union[int, oneflow._oneflow_internal.BlobDesc]): The size or bundary of padding, if is int uses the same padding in all dimension;
if 4-dims tuple, uses (\text{padding\_left}padding_left , \text{padding\_right}padding_right , \text{padding\_top}padding_top , \text{padding\_bottom}padding_bottom )
constant_value (Union[int, float]): The constant value used for padding. Defaults to Zero.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: [description]
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3), const_value)
) -> tp.Numpy:
return flow.constant_pad2d(x, padding=[2, 2, 1, 1], const_value)
x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
const_value = 1.5
out = pad_Job(x, const_value)
# out [[[[ 1.5 1.5 1.5 1.5 1.5 1.5 1.5]
# [ 1.5 1.5 0. 1. 2. 1.5 1.5]
# [ 1.5 1.5 3. 4. 5. 1.5 1.5]
# [ 1.5 1.5 6. 7. 8. 1.5 1.5]
# [ 1.5 1.5 1.5 1.5 1.5 1.5 1.5]]
# [[ 1.5 1.5 1.5 1.5 1.5 1.5 1.5.]
# [ 1.5 1.5 9. 10. 11. 1.5 1.5]
# [ 1.5 1.5 12. 13. 14. 1.5 1.5]
# [ 1.5 1.5 15. 16. 17. 1.5 1.5]
# [ 1.5 1.5 1.5 1.5 1.5 1.5 1.5]]]]
"""
H, W = x.shape[2], x.shape[3]
if isinstance(padding, (tuple, list)):
assert len(padding) == len(x.shape), ValueError(
"padding boundry must be the same size of input dims"
)
assert (
padding[2] < H and padding[3] < H and padding[0] < W and padding[1] < W
), ValueError(
"Padding size should be less than the corresponding input dimension!"
)
boundry = [padding[0], padding[1], padding[2], padding[3]]
elif isinstance(padding, int):
assert padding < H and padding < W, ValueError(
"Padding size should be less than the corresponding input dimension!"
)
boundry = [padding, padding, padding, padding]
else:
raise ValueError("padding must be in or list or tuple!")
if x.dtype in [
oneflow.float32,
oneflow.float16,
oneflow.float64,
]:
floating_value = float(constant_value)
integral_value = int(0)
else:
floating_value = float(0)
integral_value = int(constant_value)
return (
oneflow.user_op_builder(
name if name is not None else id_util.UniqueStr("Constant_Pad2d_")
)
.Op("constant_pad2d")
.Input("x", [x])
.Output("y")
.Attr("padding", list(boundry))
.Attr("floating_value", floating_value)
.Attr("integral_value", integral_value)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("zero_pad2d")
def zero_pad2d(
x: oneflow._oneflow_internal.BlobDesc,
padding: Union[int, tuple, list],
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""Pads the input tensor using zeros.
Args:
x (oneflow._oneflow_internal.BlobDesc): input blob, only support "NCHW" format.
padding (Union[int, oneflow._oneflow_internal.BlobDesc]): The size or bundary of padding, if is int uses the same padding in all dimension;
if 4-dims tuple, uses (\text{padding\_left}padding_left , \text{padding\_right}padding_right , \text{padding\_top}padding_top , \text{padding\_bottom}padding_bottom )
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: [description]
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def pad_Job(x: tp.Numpy.Placeholder((1, 2, 3, 3), const_value)
) -> tp.Numpy:
return flow.constant_pad2d(x, padding=[2, 2, 1, 1], const_value)
x = np.arange(18).reshape((1, 2, 3, 3)).astype(np.float32)
const_value = 1.5
out = pad_Job(x, const_value)
# out [[[[ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 0. 1. 2. 0. 0.]
# [ 0. 0. 3. 4. 5. 0. 0.]
# [ 0. 0. 6. 7. 8. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]]
# [[ 0. 0. 0. 0. 0. 0. 0.]
# [ 0. 0. 9. 10. 11. 0. 0.]
# [ 0. 0. 12. 13. 14. 0. 0.]
# [ 0. 0. 15. 16. 17. 0. 0.]
# [ 0. 0. 0. 0. 0. 0. 0.]]]]
"""
if name is None:
name = id_util.UniqueStr("Zero_Pad2d_")
return constant_pad2d(x, padding, 0.0, name)
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((899, 920), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""pad"""'], {}), "('pad')\n", (913, 920), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3940, 3966), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""pad_grad"""'], {}), "('pad_grad')\n", (3954, 3966), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5241, 5271), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""same_padding"""'], {}), "('same_padding')\n", (5255, 5271), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8513, 8547), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""reflection_pad2d"""'], {}), "('reflection_pad2d')\n", (8527, 8547), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11390, 11425), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""replication_pad2d"""'], {}), "('replication_pad2d')\n", (11404, 11425), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((14296, 14328), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""constant_pad2d"""'], {}), "('constant_pad2d')\n", (14310, 14328), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((17780, 17808), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""zero_pad2d"""'], {}), "('zero_pad2d')\n", (17794, 17808), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((19573, 19605), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Zero_Pad2d_"""'], {}), "('Zero_Pad2d_')\n", (19590, 19605), True, 'import oneflow.python.framework.id_util as id_util\n'), ((11142, 11180), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Reflection_Pad2d_"""'], {}), "('Reflection_Pad2d_')\n", (11159, 11180), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14046, 14085), 
'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Replication_Pad2d_"""'], {}), "('Replication_Pad2d_')\n", (14063, 14085), True, 'import oneflow.python.framework.id_util as id_util\n'), ((17440, 17476), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Constant_Pad2d_"""'], {}), "('Constant_Pad2d_')\n", (17457, 17476), True, 'import oneflow.python.framework.id_util as id_util\n'), ((3541, 3566), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Pad_"""'], {}), "('Pad_')\n", (3558, 3566), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4828, 4857), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""PadGrad_"""'], {}), "('PadGrad_')\n", (4845, 4857), True, 'import oneflow.python.framework.id_util as id_util\n'), ((8108, 8141), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SamePadding_"""'], {}), "('SamePadding_')\n", (8125, 8141), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
def argsort_op(input, dim: int = -1, descending: bool = False):
num_dims = len(input.shape)
dim = dim if dim >= 0 else dim + num_dims
direction = "DESCENDING" if descending else "ASCENDING"
assert 0 <= dim < num_dims, "dim out of range"
if dim == num_dims - 1:
return flow._C.arg_sort(input, direction)
else:
perm = get_perm_when_transpose_axis_to_last_dim(num_dims, dim)
x = flow._C.transpose(input, perm=perm)
x = flow._C.arg_sort(x, direction)
return flow._C.transpose(x, perm=get_inversed_perm(perm))
if __name__ == "__main__":
import doctest
doctest.testmod(raise_on_error=True)
| [
"oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim",
"oneflow._C.transpose",
"oneflow._C.arg_sort",
"oneflow.ops.transpose_util.get_inversed_perm"
] | [((1442, 1478), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (1457, 1478), False, 'import doctest\n'), ((1116, 1150), 'oneflow._C.arg_sort', 'flow._C.arg_sort', (['input', 'direction'], {}), '(input, direction)\n', (1132, 1150), True, 'import oneflow as flow\n'), ((1176, 1231), 'oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_dims', 'dim'], {}), '(num_dims, dim)\n', (1216, 1231), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((1244, 1279), 'oneflow._C.transpose', 'flow._C.transpose', (['input'], {'perm': 'perm'}), '(input, perm=perm)\n', (1261, 1279), True, 'import oneflow as flow\n'), ((1292, 1322), 'oneflow._C.arg_sort', 'flow._C.arg_sort', (['x', 'direction'], {}), '(x, direction)\n', (1308, 1322), True, 'import oneflow as flow\n'), ((1364, 1387), 'oneflow.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (1381, 1387), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import traceback
from functools import reduce
from typing import Any, Optional, Sequence, Union
import numpy as np
import oneflow
import oneflow._oneflow_internal
import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util
import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb
import oneflow.core.operator.interface_blob_conf_pb2 as inter_face_blob_conf_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.compile_context as compile_context
import oneflow.framework.distribute as distribute_util
import oneflow.framework.id_util as id_util
import oneflow.framework.placement_context as placement_ctx
import oneflow.framework.remote_blob as remote_blob_util
class ArgBlobDef(object):
def __init__(
self,
shape,
dtype,
name=None,
distribute=oneflow._oneflow_internal.distribute.auto(),
):
lbi = lbi_util.LogicalBlobId()
if name is None:
name = id_util.UniqueStr("Input_")
lbi.set_op_name(name)
lbi.set_blob_name("out")
self.lbi_ = lbi
assert type(shape) is tuple
for dim in shape:
assert type(dim) is int
assert dim > 0
self.shape_ = shape
self.dtype_ = dtype
self.distribute_ = distribute
@property
def lbi(self):
return self.lbi_
@property
def op_name(self):
return self.lbi_.op_name()
@property
def blob_name(self):
return self.lbi_.blob_name()
@property
def unique_name(self):
return self.op_name + "/" + self.blob_name + self._Distribute2Str()
@property
def shape(self):
return self.shape_
@property
def dtype(self):
return self.dtype_
@property
def is_dynamic(self):
raise NotImplementedError
def with_distribute(self, distribute):
return type(self)(shape=self.shape_, dtype=self.dtype_, name=self.op_name)
def Clone(self, op_name=None):
return type(self)(shape=self.shape_, dtype=self.dtype_, name=op_name)
def AddAndInferOp(self, op_conf):
raise NotImplementedError
def EagerAddAndInferOp(self, op_conf):
raise NotImplementedError
def CheckAndAsyncPush(self, session, arg_ndarray):
self._CheckNdarray(arg_ndarray)
self._AsyncPush(session, arg_ndarray)
def _CheckNdarray(self, ndarray):
raise NotImplementedError
def _AsyncPush(self, session, arg_ndarray):
raise NotImplementedError
def ToInterfaceBlobConf(self):
interface_blob_conf = inter_face_blob_conf_util.InterfaceBlobConf()
interface_blob_conf.shape.dim.extend(self.shape_)
interface_blob_conf.data_type = oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(
self.dtype_
)
interface_blob_conf.is_dynamic = self.is_dynamic
sbp_parallel = sbp_parallel_pb.SbpParallel()
sbp_parallel.split_parallel.axis = 0
interface_blob_conf.parallel_distribution.sbp_parallel.extend([sbp_parallel])
return interface_blob_conf
def _Distribute2Str(self):
if (
type(self.distribute_)
is oneflow._oneflow_internal.distribute.AutoDistribute
):
return ""
elif (
type(self.distribute_)
is oneflow._oneflow_internal.distribute.SplitDistribute
):
return ":S" + str(self.distribute_.axis)
elif (
type(self.distribute_)
is oneflow._oneflow_internal.distribute.BroadcastDistribute
):
return ":B"
else:
raise NotImplementedError
def _AddAndInferMirroredOp(mirrored_lbn, op_conf, sub_consistent_blob_list):
compile_context.CurJobAddMirroredOp(op_conf)
job_name = oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName()
num_sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi(
job_name, mirrored_lbn
)
for i in range(num_sub_lbi):
sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi(
job_name, mirrored_lbn, i
)
lbi = lbi_util.LogicalBlobId()
lbi.set_op_name(sub_lbi.op_name)
lbi.set_blob_name(sub_lbi.blob_name)
sub_consistent_blob_list.append(
oneflow._oneflow_internal.ConsistentBlob(
lbi, "", oneflow._oneflow_internal.distribute.auto()
)
)
def _MakePushNdarrayCallback(ndarray):
copied = np.copy(ndarray, order="C")
def Copy(ofblob):
capacity = reduce(lambda x, y: x * y, ofblob.static_shape, 1)
elem_cnt = reduce(lambda x, y: x * y, copied.shape, 1)
assert elem_cnt <= capacity, "%s v.s. %s" % (copied.shape, ofblob.static_shape)
ofblob.CopyFromNdarray(copied)
return Copy
| [
"oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype",
"oneflow._oneflow_internal.oneflow.core.register.logical_blob_id.LogicalBlobId",
"oneflow.core.operator.interface_blob_conf_pb2.InterfaceBlobConf",
"oneflow.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi",
"oneflow.framework.co... | [((4423, 4467), 'oneflow.framework.compile_context.CurJobAddMirroredOp', 'compile_context.CurJobAddMirroredOp', (['op_conf'], {}), '(op_conf)\n', (4458, 4467), True, 'import oneflow.framework.compile_context as compile_context\n'), ((4483, 4548), 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (4546, 4548), False, 'import oneflow\n'), ((4567, 4646), 'oneflow.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', (['job_name', 'mirrored_lbn'], {}), '(job_name, mirrored_lbn)\n', (4622, 4646), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((5181, 5208), 'numpy.copy', 'np.copy', (['ndarray'], {'order': '"""C"""'}), "(ndarray, order='C')\n", (5188, 5208), True, 'import numpy as np\n'), ((1502, 1545), 'oneflow._oneflow_internal.distribute.auto', 'oneflow._oneflow_internal.distribute.auto', ([], {}), '()\n', (1543, 1545), False, 'import oneflow\n'), ((1568, 1592), 'oneflow._oneflow_internal.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (1590, 1592), True, 'import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util\n'), ((3255, 3300), 'oneflow.core.operator.interface_blob_conf_pb2.InterfaceBlobConf', 'inter_face_blob_conf_util.InterfaceBlobConf', ([], {}), '()\n', (3298, 3300), True, 'import oneflow.core.operator.interface_blob_conf_pb2 as inter_face_blob_conf_util\n'), ((3399, 3470), 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', 'oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype', (['self.dtype_'], {}), '(self.dtype_)\n', (3457, 3470), False, 'import oneflow\n'), ((3573, 3602), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (3600, 3602), True, 'import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb\n'), 
((4712, 4791), 'oneflow.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', (['job_name', 'mirrored_lbn', 'i'], {}), '(job_name, mirrored_lbn, i)\n', (4764, 4791), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((4828, 4852), 'oneflow._oneflow_internal.oneflow.core.register.logical_blob_id.LogicalBlobId', 'lbi_util.LogicalBlobId', ([], {}), '()\n', (4850, 4852), True, 'import oneflow._oneflow_internal.oneflow.core.register.logical_blob_id as lbi_util\n'), ((5251, 5301), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'ofblob.static_shape', '(1)'], {}), '(lambda x, y: x * y, ofblob.static_shape, 1)\n', (5257, 5301), False, 'from functools import reduce\n'), ((5321, 5364), 'functools.reduce', 'reduce', (['(lambda x, y: x * y)', 'copied.shape', '(1)'], {}), '(lambda x, y: x * y, copied.shape, 1)\n', (5327, 5364), False, 'from functools import reduce\n'), ((1637, 1664), 'oneflow.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Input_"""'], {}), "('Input_')\n", (1654, 1664), True, 'import oneflow.framework.id_util as id_util\n'), ((5059, 5102), 'oneflow._oneflow_internal.distribute.auto', 'oneflow._oneflow_internal.distribute.auto', ([], {}), '()\n', (5100, 5102), False, 'import oneflow\n')] |
import os
import time
import argparse
import numpy as np
import glob
import imageio
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import oneflow as flow
def make_dirs(*pathes):
for path in pathes:
# dir path
if not os.path.exists(path):
os.makedirs(path)
def load_mnist(data_dir, transpose=True):
if os.path.exists(data_dir):
print("Found MNIST - skip download")
else:
print("not Found MNIST - start download")
download_mnist(data_dir)
fd = open(os.path.join(data_dir, "train-images-idx3-ubyte"))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trX = loaded[16:].reshape((60000, 28, 28, 1)).astype(np.float32)
fd = open(os.path.join(data_dir, "train-labels-idx1-ubyte"))
loaded = np.fromfile(file=fd, dtype=np.uint8)
trY = loaded[8:].reshape((60000)).astype(np.float32)
fd = open(os.path.join(data_dir, "t10k-images-idx3-ubyte"))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teX = loaded[16:].reshape((10000, 28, 28, 1)).astype(np.float32)
fd = open(os.path.join(data_dir, "t10k-labels-idx1-ubyte"))
loaded = np.fromfile(file=fd, dtype=np.uint8)
teY = loaded[8:].reshape((10000)).astype(np.float32)
X = trX
y = trY.astype(int)
seed = 547
# np.random.seed(seed)
np.random.shuffle(X)
# np.random.seed(seed)
np.random.shuffle(y)
y_vec = np.zeros((len(y), 10), dtype=np.float32)
for i, label in enumerate(y):
y_vec[i, y[i]] = 1.0
if transpose:
X = np.transpose(X, (0, 3, 1, 2))
return (X - 127.5) / 127.5, y_vec
def download_mnist(data_dir):
import subprocess
os.mkdir(data_dir)
url_base = "http://yann.lecun.com/exdb/mnist/"
file_names = [
"train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz",
]
for file_name in file_names:
url = (url_base + file_name).format(**locals())
print(url)
out_path = os.path.join(data_dir, file_name)
cmd = ["curl", url, "-o", out_path]
print("Downloading ", file_name)
subprocess.call(cmd)
cmd = ["gzip", "-d", out_path]
print("Decompressing ", file_name)
subprocess.call(cmd)
def to_numpy(x, mean=True):
if mean:
x = flow.mean(x)
return x.numpy()
def to_tensor(x, grad=True, dtype=flow.float32):
if not isinstance(x, np.ndarray):
x = np.array(x)
return flow.Tensor(x, requires_grad=grad, dtype=dtype)
def save_to_gif(path):
anim_file = os.path.join(path, "dcgan.gif")
with imageio.get_writer(anim_file, mode="I") as writer:
filenames = glob.glob(os.path.join(path, "*image*.png"))
filenames = sorted(filenames)
last = -1
for i, filename in enumerate(filenames):
frame = 2 * (i ** 0.5)
if round(frame) > round(last):
last = frame
else:
continue
image = imageio.imread(filename)
writer.append_data(image)
image = imageio.imread(filename)
writer.append_data(image)
print("Save images gif to {} done.".format(anim_file))
def save_images(x, size, path):
x = x.astype(np.float)
fig = plt.figure(figsize=(4, 4))
for i in range(size):
plt.subplot(4, 4, i + 1)
plt.imshow(x[i, 0, :, :] * 127.5 + 127.5, cmap="gray")
plt.axis("off")
plt.savefig(path)
print("Save image to {} done.".format(path))
| [
"oneflow.Tensor",
"oneflow.mean"
] | [((103, 124), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (117, 124), False, 'import matplotlib\n'), ((367, 391), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (381, 391), False, 'import os\n'), ((610, 646), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (621, 646), True, 'import numpy as np\n'), ((795, 831), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (806, 831), True, 'import numpy as np\n'), ((967, 1003), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (978, 1003), True, 'import numpy as np\n'), ((1151, 1187), 'numpy.fromfile', 'np.fromfile', ([], {'file': 'fd', 'dtype': 'np.uint8'}), '(file=fd, dtype=np.uint8)\n', (1162, 1187), True, 'import numpy as np\n'), ((1329, 1349), 'numpy.random.shuffle', 'np.random.shuffle', (['X'], {}), '(X)\n', (1346, 1349), True, 'import numpy as np\n'), ((1381, 1401), 'numpy.random.shuffle', 'np.random.shuffle', (['y'], {}), '(y)\n', (1398, 1401), True, 'import numpy as np\n'), ((1678, 1696), 'os.mkdir', 'os.mkdir', (['data_dir'], {}), '(data_dir)\n', (1686, 1696), False, 'import os\n'), ((2523, 2570), 'oneflow.Tensor', 'flow.Tensor', (['x'], {'requires_grad': 'grad', 'dtype': 'dtype'}), '(x, requires_grad=grad, dtype=dtype)\n', (2534, 2570), True, 'import oneflow as flow\n'), ((2612, 2643), 'os.path.join', 'os.path.join', (['path', '"""dcgan.gif"""'], {}), "(path, 'dcgan.gif')\n", (2624, 2643), False, 'import os\n'), ((3312, 3338), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(4, 4)'}), '(figsize=(4, 4))\n', (3322, 3338), True, 'import matplotlib.pyplot as plt\n'), ((3489, 3506), 'matplotlib.pyplot.savefig', 'plt.savefig', (['path'], {}), '(path)\n', (3500, 3506), True, 'import matplotlib.pyplot as plt\n'), ((546, 595), 'os.path.join', 'os.path.join', (['data_dir', 
'"""train-images-idx3-ubyte"""'], {}), "(data_dir, 'train-images-idx3-ubyte')\n", (558, 595), False, 'import os\n'), ((731, 780), 'os.path.join', 'os.path.join', (['data_dir', '"""train-labels-idx1-ubyte"""'], {}), "(data_dir, 'train-labels-idx1-ubyte')\n", (743, 780), False, 'import os\n'), ((904, 952), 'os.path.join', 'os.path.join', (['data_dir', '"""t10k-images-idx3-ubyte"""'], {}), "(data_dir, 't10k-images-idx3-ubyte')\n", (916, 952), False, 'import os\n'), ((1088, 1136), 'os.path.join', 'os.path.join', (['data_dir', '"""t10k-labels-idx1-ubyte"""'], {}), "(data_dir, 't10k-labels-idx1-ubyte')\n", (1100, 1136), False, 'import os\n'), ((1550, 1579), 'numpy.transpose', 'np.transpose', (['X', '(0, 3, 1, 2)'], {}), '(X, (0, 3, 1, 2))\n', (1562, 1579), True, 'import numpy as np\n'), ((2050, 2083), 'os.path.join', 'os.path.join', (['data_dir', 'file_name'], {}), '(data_dir, file_name)\n', (2062, 2083), False, 'import os\n'), ((2177, 2197), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (2192, 2197), False, 'import subprocess\n'), ((2288, 2308), 'subprocess.call', 'subprocess.call', (['cmd'], {}), '(cmd)\n', (2303, 2308), False, 'import subprocess\n'), ((2364, 2376), 'oneflow.mean', 'flow.mean', (['x'], {}), '(x)\n', (2373, 2376), True, 'import oneflow as flow\n'), ((2500, 2511), 'numpy.array', 'np.array', (['x'], {}), '(x)\n', (2508, 2511), True, 'import numpy as np\n'), ((2653, 2692), 'imageio.get_writer', 'imageio.get_writer', (['anim_file'], {'mode': '"""I"""'}), "(anim_file, mode='I')\n", (2671, 2692), False, 'import imageio\n'), ((3123, 3147), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (3137, 3147), False, 'import imageio\n'), ((3373, 3397), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(4)', '(4)', '(i + 1)'], {}), '(4, 4, i + 1)\n', (3384, 3397), True, 'import matplotlib.pyplot as plt\n'), ((3406, 3460), 'matplotlib.pyplot.imshow', 'plt.imshow', (['(x[i, 0, :, :] * 127.5 + 127.5)'], {'cmap': '"""gray"""'}), "(x[i, 
0, :, :] * 127.5 + 127.5, cmap='gray')\n", (3416, 3460), True, 'import matplotlib.pyplot as plt\n'), ((3469, 3484), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (3477, 3484), True, 'import matplotlib.pyplot as plt\n'), ((264, 284), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (278, 284), False, 'import os\n'), ((298, 315), 'os.makedirs', 'os.makedirs', (['path'], {}), '(path)\n', (309, 315), False, 'import os\n'), ((2734, 2767), 'os.path.join', 'os.path.join', (['path', '"""*image*.png"""'], {}), "(path, '*image*.png')\n", (2746, 2767), False, 'import os\n'), ((3044, 3068), 'imageio.imread', 'imageio.imread', (['filename'], {}), '(filename)\n', (3058, 3068), False, 'import imageio\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from automated_test_util import *
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestCrossEntropyLossModule(flow.unittest.TestCase):
@unittest.skip("nn.CrossEntropyLoss has bug")
@autotest(n=200)
def test_CrossEntropyLoss_with_random_data(test_case):
num_classes = random()
shape = random_tensor(ndim=random(2, 5), dim1=num_classes).value().shape
m = torch.nn.CrossEntropyLoss(
reduction=oneof("none", "sum", "mean", nothing()),
ignore_index=random(0, num_classes) | nothing(),
)
m.train(random())
device = random_device()
m.to(device)
x = random_pytorch_tensor(len(shape), *shape).to(device)
target = random_pytorch_tensor(
len(shape) - 1, *shape[:1] + shape[2:], low=0, high=num_classes, dtype=int
).to(device)
y = m(x, target)
return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((712, 744), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (742, 744), True, 'import oneflow as flow\n'), ((808, 852), 'unittest.skip', 'unittest.skip', (['"""nn.CrossEntropyLoss has bug"""'], {}), "('nn.CrossEntropyLoss has bug')\n", (821, 852), False, 'import unittest\n'), ((1586, 1601), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1599, 1601), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
config = flow.function_config()
class TestPooling(unittest.TestCase):
    """Base harness comparing pooling results with and without TensorRT.

    Subclasses provide make_job / make_trt_job and set run_test = True.
    """

    # The base class itself runs nothing; subclasses opt in.
    run_test = False

    def _test_body(self, x, ksize, strides, padding, data_format, dtype=np.float32):
        """Build both jobs, run them on `x`, and check the outputs agree."""
        if not self.run_test:
            return
        plain_job = self.make_job(
            x.shape, ksize, strides, padding, data_format, dtype=flow.float32
        )
        trt_job = self.make_trt_job(
            x.shape, ksize, strides, padding, data_format, dtype=flow.float32
        )
        ref_out = plain_job(x).get()
        trt_out = trt_job(x).get()
        print("without trt: ", ref_out)
        print("with tensorrt", trt_out)
        self.assertTrue(ref_out.shape == trt_out.shape)
        self.assertTrue(
            np.allclose(ref_out.numpy(), trt_out.numpy(), rtol=1e-03, atol=1e-05)
        )
        flow.clear_default_session()

    def _test_ones_body(
        self, shape, ksize, strides, padding, data_format, dtype=np.float32
    ):
        """Run the comparison on an all-ones input of the given shape."""
        self._test_body(
            np.ones(shape, dtype=dtype),
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dtype=dtype,
        )

    def _test_random_body(
        self, shape, ksize, strides, padding, data_format, dtype=np.float32
    ):
        """Run the comparison on a uniformly random input of the given shape."""
        self._test_body(
            np.random.random(shape).astype(dtype),
            ksize=ksize,
            strides=strides,
            padding=padding,
            data_format=data_format,
            dtype=dtype,
        )

    def test_ones_input(self):
        print("test ones input: ")
        # (shape, ksize, strides, padding, data_format)
        cases = [
            ((1, 1, 6, 6), 1, 1, "VALID", "NCHW"),
            ((1, 3, 6, 6), 3, 2, "SAME", "NCHW"),
            ((1, 1, 3, 3), 1, 1, "VALID", "NCHW"),
            ((1, 5, 9, 9), 3, 1, "SAME", "NCHW"),
            ((1, 7, 9, 9), 1, 1, "SAME", "NCHW"),
            ((1, 5, 3, 3), 1, 1, "VALID", "NCHW"),
            ((1, 1, 6, 6), 2, 2, "SAME", "NCHW"),
            ((1, 1, 6, 6), 2, 2, "VALID", "NCHW"),
            ((1, 1, 9, 9), 2, 2, "SAME", "NCHW"),
            ((1, 1, 9, 9), 2, 2, "VALID", "NCHW"),
        ]
        for shape, ksize, strides, padding, data_format in cases:
            self._test_ones_body(shape, ksize, strides, padding, data_format)
        # self._test_ones_body((1, 224, 224, 3), 3, 2, "VALID", "NHWC")
        # self._test_ones_body((1, 224, 224, 1), 2, 1, "SAME", "NHWC")

    def test_random_input(self):
        print("test random input: ")
        # (shape, ksize, strides, padding, data_format)
        cases = [
            ((1, 1, 6, 6), 1, 1, "VALID", "NCHW"),
            ((1, 3, 6, 6), 3, 2, "SAME", "NCHW"),
            ((1, 5, 6, 6), 3, 2, "VALID", "NCHW"),
            ((1, 7, 6, 6), 3, 2, "SAME", "NCHW"),
            ((1, 3, 3, 3), 1, 1, "VALID", "NCHW"),
            ((1, 3, 6, 6), 3, 2, "SAME", "NCHW"),
            ((1, 1, 6, 6), 2, 2, "SAME", "NCHW"),
            ((1, 1, 6, 6), 2, 2, "VALID", "NCHW"),
            ((1, 1, 9, 9), 2, 2, "SAME", "NCHW"),
            ((1, 1, 9, 9), 2, 2, "VALID", "NCHW"),
        ]
        for shape, ksize, strides, padding, data_format in cases:
            self._test_random_body(shape, ksize, strides, padding, data_format)
        # self._test_random_body((1, 224, 224, 3), 3, 2, "VALID", "NHWC")
        # self._test_random_body((1, 224, 224, 1), 2, 1, "SAME", "NHWC")
class TestMaxPooling(TestPooling):
    """Runs the TensorRT-vs-plain comparison for max pooling."""

    run_test = True

    def make_job(
        self, x_shape, ksize, strides, padding, data_format, dtype=flow.float32
    ):
        """Return a max-pooling job with both XLA and TensorRT disabled."""
        config.use_tensorrt(False)
        config.use_xla_jit(False)

        @flow.global_function(config)
        def max_pooling_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            pooled = flow.nn.max_pool2d(
                x,
                ksize=ksize,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            return pooled

        return max_pooling_job

    def make_trt_job(
        self, x_shape, ksize, strides, padding, data_format, dtype=flow.float32
    ):
        """Return a max-pooling job with TensorRT enabled (XLA disabled)."""
        config.use_tensorrt(True)
        config.use_xla_jit(False)

        @flow.global_function(config)
        def trt_max_pooling_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            pooled = flow.nn.max_pool2d(
                x,
                ksize=ksize,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            return pooled

        return trt_max_pooling_job
class TestAveragePooling(TestPooling):
    """Runs the TensorRT-vs-plain comparison for average pooling."""

    run_test = True

    def make_job(
        self, x_shape, ksize, strides, padding, data_format, dtype=flow.float32
    ):
        """Return an average-pooling job with both XLA and TensorRT disabled."""
        config.use_tensorrt(False)
        config.use_xla_jit(False)

        @flow.global_function(config)
        def avg_pooling_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            pooled = flow.nn.avg_pool2d(
                x,
                ksize=ksize,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            return pooled

        return avg_pooling_job

    def make_trt_job(
        self, x_shape, ksize, strides, padding, data_format, dtype=flow.float32
    ):
        """Return an average-pooling job with TensorRT enabled (XLA disabled)."""
        config.use_tensorrt(True)
        config.use_xla_jit(False)

        @flow.global_function(config)
        def trt_avg_pooling_job(x=flow.FixedTensorDef(x_shape, dtype=dtype)):
            pooled = flow.nn.avg_pool2d(
                x,
                ksize=ksize,
                strides=strides,
                padding=padding,
                data_format=data_format,
            )
            return pooled

        return trt_avg_pooling_job
# Run the tests in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.nn.max_pool2d",
"oneflow.FixedTensorDef",
"oneflow.function_config",
"oneflow.nn.avg_pool2d"
] | [((659, 681), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (679, 681), True, 'import oneflow as flow\n'), ((6127, 6142), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6140, 6142), False, 'import unittest\n'), ((1365, 1393), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1391, 1393), True, 'import oneflow as flow\n'), ((1515, 1542), 'numpy.ones', 'np.ones', (['shape'], {'dtype': 'dtype'}), '(shape, dtype=dtype)\n', (1522, 1542), True, 'import numpy as np\n'), ((4084, 4112), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (4104, 4112), True, 'import oneflow as flow\n'), ((4615, 4643), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (4635, 4643), True, 'import oneflow as flow\n'), ((5212, 5240), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (5232, 5240), True, 'import oneflow as flow\n'), ((5743, 5771), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (5763, 5771), True, 'import oneflow as flow\n'), ((4143, 4184), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (4162, 4184), True, 'import oneflow as flow\n'), ((4206, 4303), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['x'], {'ksize': 'ksize', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format'}), '(x, ksize=ksize, strides=strides, padding=padding,\n data_format=data_format)\n', (4224, 4303), True, 'import oneflow as flow\n'), ((4678, 4719), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (4697, 4719), True, 'import oneflow as flow\n'), ((4741, 4838), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['x'], {'ksize': 'ksize', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format'}), '(x, ksize=ksize, strides=strides, padding=padding,\n 
data_format=data_format)\n', (4759, 4838), True, 'import oneflow as flow\n'), ((5271, 5312), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (5290, 5312), True, 'import oneflow as flow\n'), ((5334, 5431), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['x'], {'ksize': 'ksize', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format'}), '(x, ksize=ksize, strides=strides, padding=padding,\n data_format=data_format)\n', (5352, 5431), True, 'import oneflow as flow\n'), ((5806, 5847), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (5825, 5847), True, 'import oneflow as flow\n'), ((5869, 5966), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['x'], {'ksize': 'ksize', 'strides': 'strides', 'padding': 'padding', 'data_format': 'data_format'}), '(x, ksize=ksize, strides=strides, padding=padding,\n data_format=data_format)\n', (5887, 5966), True, 'import oneflow as flow\n'), ((1861, 1884), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (1877, 1884), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
import oneflow.typing as tp
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestLinear(flow.unittest.TestCase):
    """Checks flow.nn.Linear against NumPy matmul references with constant weights."""

    def test_linear_v1(test_case):
        # Bias-free layer; all weights initialised to 2.3.
        linear = flow.nn.Linear(3, 8, False)
        features = np.array(
            [
                [-0.94630778, -0.83378579, -0.87060891],
                [2.0289922, -0.28708987, -2.18369248],
                [0.35217619, -0.67095644, -1.58943879],
                [0.08086036, -1.81075924, 1.20752494],
                [0.8901075, -0.49976737, -1.07153746],
                [-0.44872912, -1.07275683, 0.06256855],
                [-0.22556897, 0.74798368, 0.90416439],
                [0.48339456, -2.32742195, -0.59321527],
            ],
            dtype=np.float32,
        )
        ref_weight = np.full((3, 8), 2.3, dtype=np.float32)
        inp = flow.Tensor(features)
        flow.nn.init.constant_(linear.weight, 2.3)
        flow_out = linear(inp)
        ref_out = np.matmul(features, ref_weight)
        test_case.assertTrue(np.allclose(flow_out.numpy(), ref_out, 1e-5, 1e-5))

    def test_linear_v2(test_case):
        # Layer with bias; weights 2.068758, bias 0.23.
        linear = flow.nn.Linear(3, 8)
        features = np.array(
            [
                [-0.94630778, -0.83378579, -0.87060891],
                [2.0289922, -0.28708987, -2.18369248],
                [0.35217619, -0.67095644, -1.58943879],
                [0.08086036, -1.81075924, 1.20752494],
                [0.8901075, -0.49976737, -1.07153746],
                [-0.44872912, -1.07275683, 0.06256855],
                [-0.22556897, 0.74798368, 0.90416439],
                [0.48339456, -2.32742195, -0.59321527],
            ],
            dtype=np.float32,
        )
        ref_weight = np.full((3, 8), 2.068758, dtype=np.float32)
        ref_bias = np.full(8, 0.23)
        inp = flow.Tensor(features)
        flow.nn.init.constant_(linear.weight, 2.068758)
        flow.nn.init.constant_(linear.bias, 0.23)
        flow_out = linear(inp)
        ref_out = np.matmul(features, ref_weight)
        ref_out += ref_bias
        test_case.assertTrue(np.allclose(flow_out.numpy(), ref_out, 1e-5, 1e-5))

    def test_linear_3_dimension_input(test_case):
        # Linear must broadcast over the leading (batch-like) dimensions.
        features = np.random.randn(2, 3, 4)
        inp = flow.Tensor(features)
        layer = flow.nn.Linear(4, 5, True)
        flow.nn.init.constant_(layer.weight, 5.6)
        flow.nn.init.constant_(layer.bias, 0.78)
        flow_out = layer(inp)
        ref_weight = np.full((4, 5), 5.6, dtype=np.float32)
        ref_bias = np.full(5, 0.78)
        ref_out = np.matmul(features, ref_weight)
        ref_out += ref_bias
        test_case.assertTrue(np.allclose(flow_out.numpy(), ref_out, 1e-5, 1e-5))

    def test_linear_4_dimension_input(test_case):
        # Same broadcasting check with a rank-4 input and no bias.
        features = np.random.randn(4, 5, 6, 7)
        inp = flow.Tensor(features)
        layer = flow.nn.Linear(7, 3, False)
        flow.nn.init.constant_(layer.weight, 11.3)
        flow_out = layer(inp)
        ref_weight = np.full((7, 3), 11.3, dtype=np.float32)
        ref_out = np.matmul(features, ref_weight)
        test_case.assertTrue(np.allclose(flow_out.numpy(), ref_out, 1e-5, 1e-5))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestIdentity(flow.unittest.TestCase):
    """nn.Identity must return its input unchanged and ignore constructor args."""

    def test_identity(test_case):
        identity = flow.nn.Identity(54, unused_argument1=0.1, unused_argument2=False)
        inp = flow.Tensor(np.random.rand(2, 3, 4, 5))
        out = identity(inp)
        test_case.assertTrue(np.array_equal(inp.numpy(), out.numpy()))
# Run the tests in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.nn.Linear",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.Tensor",
"oneflow.experimental.nn.init.constant_",
"oneflow.experimental.nn.Identity"
] | [((4480, 4495), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4493, 4495), False, 'import unittest\n'), ((931, 958), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(3)', '(8)', '(False)'], {}), '(3, 8, False)\n', (945, 958), True, 'import oneflow.experimental as flow\n'), ((980, 1344), 'numpy.array', 'np.array', (['[[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987, -\n 2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]]'], {'dtype': 'np.float32'}), '([[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987,\n -2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]], dtype=np.float32)\n', (988, 1344), True, 'import numpy as np\n'), ((1614, 1636), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (1625, 1636), True, 'import oneflow.experimental as flow\n'), ((1646, 1688), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.3)'], {}), '(linear.weight, 2.3)\n', (1668, 1688), True, 'import oneflow.experimental as flow\n'), ((1735, 1766), 'numpy.matmul', 'np.matmul', (['input_arr', 'np_weight'], {}), '(input_arr, np_weight)\n', (1744, 1766), True, 'import numpy as np\n'), ((1902, 1922), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(3)', '(8)'], {}), '(3, 8)\n', (1916, 1922), True, 'import oneflow.experimental as flow\n'), ((1944, 2308), 'numpy.array', 'np.array', (['[[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987, -\n 2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, 
-0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]]'], {'dtype': 'np.float32'}), '([[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987,\n -2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]], dtype=np.float32)\n', (1952, 2308), True, 'import numpy as np\n'), ((2589, 2599), 'numpy.ones', 'np.ones', (['(8)'], {}), '(8)\n', (2596, 2599), True, 'import numpy as np\n'), ((2643, 2665), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (2654, 2665), True, 'import oneflow.experimental as flow\n'), ((2675, 2722), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.068758)'], {}), '(linear.weight, 2.068758)\n', (2697, 2722), True, 'import oneflow.experimental as flow\n'), ((2732, 2773), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', (['linear.bias', '(0.23)'], {}), '(linear.bias, 0.23)\n', (2754, 2773), True, 'import oneflow.experimental as flow\n'), ((2820, 2851), 'numpy.matmul', 'np.matmul', (['input_arr', 'np_weight'], {}), '(input_arr, np_weight)\n', (2829, 2851), True, 'import numpy as np\n'), ((3032, 3056), 'numpy.random.randn', 'np.random.randn', (['(2)', '(3)', '(4)'], {}), '(2, 3, 4)\n', (3047, 3056), True, 'import numpy as np\n'), ((3070, 3092), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (3081, 3092), True, 'import oneflow.experimental as flow\n'), ((3106, 3132), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(4)', '(5)', '(True)'], {}), '(4, 5, True)\n', (3120, 3132), True, 'import oneflow.experimental as flow\n'), ((3142, 3179), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', 
(['m.weight', '(5.6)'], {}), '(m.weight, 5.6)\n', (3164, 3179), True, 'import oneflow.experimental as flow\n'), ((3189, 3225), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', (['m.bias', '(0.78)'], {}), '(m.bias, 0.78)\n', (3211, 3225), True, 'import oneflow.experimental as flow\n'), ((3355, 3365), 'numpy.ones', 'np.ones', (['(5)'], {}), '(5)\n', (3362, 3365), True, 'import numpy as np\n'), ((3414, 3445), 'numpy.matmul', 'np.matmul', (['input_arr', 'np_weight'], {}), '(input_arr, np_weight)\n', (3423, 3445), True, 'import numpy as np\n'), ((3628, 3655), 'numpy.random.randn', 'np.random.randn', (['(4)', '(5)', '(6)', '(7)'], {}), '(4, 5, 6, 7)\n', (3643, 3655), True, 'import numpy as np\n'), ((3669, 3691), 'oneflow.experimental.Tensor', 'flow.Tensor', (['input_arr'], {}), '(input_arr)\n', (3680, 3691), True, 'import oneflow.experimental as flow\n'), ((3705, 3732), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(7)', '(3)', '(False)'], {}), '(7, 3, False)\n', (3719, 3732), True, 'import oneflow.experimental as flow\n'), ((3742, 3780), 'oneflow.experimental.nn.init.constant_', 'flow.nn.init.constant_', (['m.weight', '(11.3)'], {}), '(m.weight, 11.3)\n', (3764, 3780), True, 'import oneflow.experimental as flow\n'), ((3910, 3941), 'numpy.matmul', 'np.matmul', (['input_arr', 'np_weight'], {}), '(input_arr, np_weight)\n', (3919, 3941), True, 'import numpy as np\n'), ((743, 786), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (784, 786), True, 'import oneflow.experimental as flow\n'), ((4237, 4303), 'oneflow.experimental.nn.Identity', 'flow.nn.Identity', (['(54)'], {'unused_argument1': '(0.1)', 'unused_argument2': '(False)'}), '(54, unused_argument1=0.1, unused_argument2=False)\n', (4253, 4303), True, 'import oneflow.experimental as flow\n'), ((4053, 4096), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], 
{}), '()\n', (4094, 4096), True, 'import oneflow.experimental as flow\n'), ((4329, 4355), 'numpy.random.rand', 'np.random.rand', (['(2)', '(3)', '(4)', '(5)'], {}), '(2, 3, 4, 5)\n', (4343, 4355), True, 'import numpy as np\n'), ((1537, 1552), 'numpy.ones', 'np.ones', (['(3, 8)'], {}), '((3, 8))\n', (1544, 1552), True, 'import numpy as np\n'), ((2501, 2516), 'numpy.ones', 'np.ones', (['(3, 8)'], {}), '((3, 8))\n', (2508, 2516), True, 'import numpy as np\n'), ((3272, 3287), 'numpy.ones', 'np.ones', (['(4, 5)'], {}), '((4, 5))\n', (3279, 3287), True, 'import numpy as np\n'), ((3827, 3842), 'numpy.ones', 'np.ones', (['(7, 3)'], {}), '((7, 3))\n', (3834, 3842), True, 'import numpy as np\n')] |
import os
import logging
logging.basicConfig(level = logging.INFO, filename = 'train.log', filemode = 'w', format = "%(asctime)s [%(filename)s:%(lineno)d - %(levelname)s ] %(message)s")
import sys
sys.path.insert(0, "../../../")
import math
import argparse
import numpy as np
# import torch
# from torch.utils.data import DataLoader
# import torch.nn.functional as F
# import torch.optim as optim
# import torch.optim.lr_scheduler as lr_scheduler
import oneflow as of
import oneflow.nn.functional as F
import oneflow.optim as optim
import oneflow.optim.lr_scheduler as lr_scheduler
from oneflow.utils.data import DataLoader
from tqdm import tqdm
from sklearn.metrics.pairwise import cosine_similarity
from libs.trainer import nnet_trainer
from libs.dataio import dataset
from libs.nnet import residual_network
from libs.components import loss
from libs.utils.config_parser import ArgParser
from libs.utils.utils import read_config
from libs.utils.performance_eval import compute_eer
class ResNetTrainer(nnet_trainer.NNetTrainer):
    """Speaker-embedding trainer specialised for the ResNet backbone.

    Builds a SpeakerEmbNet from conf/model/<arch>.yaml and overrides the
    epoch loop to report running loss/accuracy, run a dev evaluation twice
    per epoch, and checkpoint the best dev-loss model.
    """
    def __init__(self, data_opts, model_opts, train_opts, args):
        super(ResNetTrainer, self).__init__(data_opts, model_opts, train_opts, args)
        # summary(self.model, (self.input_dim, 300))
    def build_model(self):
        """Instantiate the speaker-embedding network from its YAML config."""
        model_config = read_config("conf/model/{}.yaml".format(self.model_opts['arch']))
        model_config['input_dim'] = self.input_dim
        self.embedding_dim = model_config['embedding_dim']
        self.model = residual_network.SpeakerEmbNet(model_config)
        # Sync the resolved model options back into the trainer's bookkeeping.
        self._reset_opts('model', model_config)
    def build_criterion(self):
        # Defer to the base class; kept for explicitness.
        super().build_criterion()
    def build_dataloader(self):
        # Defer to the base class; kept for explicitness.
        super().build_dataloader()
    def build_optimizer(self):
        super().build_optimizer()
        # self.lr_scheduler = lr_scheduler.MultiStepLR(self.optim, milestones = self.train_opts['milestones'], gamma = self.train_opts['lr_decay'])
    def train_epoch(self):
        """Run one training epoch with progress reporting and mid-epoch dev checks."""
        self.model.train()
        self.criterion.train()
        sum_samples, sum_loss, correct = 0, 0, 0
        # Evaluate on dev twice per epoch.
        # NOTE(review): if the loader yields fewer than 2 batches this interval is
        # 0 and the modulo below raises ZeroDivisionError -- confirm loader size.
        self.train_opts['check_interval'] = len(self.trainloader) // 2
        progress_bar = tqdm(self.trainloader)
        for batch_idx, (feature, targets_label) in enumerate(progress_bar):
            self.dev_check_count += 1
            self.optim.zero_grad() # zero_grad function only zero parameters which will be updated by optimizer
            feature = feature.to(self.device)
            targets_label = targets_label.to(self.device)
            output = self.model(feature) # output of 2nd fc layer
            # NOTE(review): this local `loss` shadows the `libs.components.loss`
            # module imported at file scope (harmless here, but confusing).
            loss, logits = self.criterion(output, targets_label)
            sum_samples += feature.size(0)
            _, prediction = of.max(logits, dim = 1)
            correct += (prediction == targets_label).sum().item()
            loss.backward()
            self.optim.step()
            # Weight the running loss by batch size for a per-sample average.
            sum_loss += loss.item() * targets_label.size(0)
            progress_bar.set_description(
                'Train Epoch: {:3d} [{:4d}/{:4d} ({:3.3f}%)] AMLoss: {:.4f} Acc: {:.4f}%'.format(
                    self.current_epoch, batch_idx + 1, len(self.trainloader),
                    100. * (batch_idx + 1) / len(self.trainloader),
                    sum_loss / sum_samples, 100. * correct / sum_samples
                )
            )
            if self.dev_check_count % self.train_opts['check_interval'] == 0:
                # dev hyper-parameter adjust
                dev_loss, dev_acc = self._dev()
                # _dev() switches to eval mode; restore training mode afterwards.
                self.criterion.train()
                self.model.train()
                logging.info("Epoch {} Dev loss {:.8f} Dev acc {:.4%} LR {:.8f}".format(self.current_epoch, dev_loss, dev_acc, self.optim.state_dict()['param_groups'][0]['_options']['lr']))
                # logging.info("Epoch {} Dev loss {:.8f} Dev acc {:.4%} LR {:.8f}".format(self.current_epoch, dev_loss, dev_acc, self.optim.state_dict()['param_groups'][0]['lr']))
                if self.best_dev_loss >= dev_loss:
                    self.best_dev_loss = dev_loss
                    self.best_dev_epoch = self.current_epoch
                    logging.info("Best dev loss is {:.8f} at epoch {}".format(self.best_dev_loss, self.best_dev_epoch))
                    # Reset the no-improvement counter and checkpoint the best model.
                    self.count = 0
                    self.save("best_dev_model.pth")
                else:
                    # Count dev checks without improvement (used upstream, e.g. early stopping).
                    self.count += 1
        self.lr_scheduler.step()
        self.save()
    def _dev(self):
        """Evaluate on the dev split; returns (mean dev loss, dev accuracy)."""
        # parallel_model = self.model
        # self.model = self.model.module
        self.model.eval()
        self.criterion.eval()
        dev_loss = 0
        dev_correct = 0
        with of.no_grad():
            for feature, spk in self.trainset.get_dev_data():
                feature = feature.to(self.device)
                spk = spk.to(self.device)
                output = self.model(feature)
                am_loss, logits = self.criterion(output, spk)
                _, prediction = of.max(logits, dim = 1)
                dev_correct += (prediction == spk).sum().item()
                dev_loss += am_loss.item()
        # self.model = parallel_model
        return dev_loss / self.trainset.dev_number, dev_correct / self.trainset.dev_number
    def extract_embedding(self, feature):
        """Compute the speaker embedding for one feature batch.

        Which model output holds the embedding depends on the configured loss.
        """
        feature = feature.to(self.device)
        if self.train_opts['loss'] == 'CrossEntropy':
            _, xv = self.model.extract_embedding(feature)
        else:
            xv, _ = self.model.extract_embedding(feature)
        # L2-normalise the embedding (F.normalize default: p=2 along dim 1).
        xv = F.normalize(xv)
        return xv
def add_argument(parser):
    """Register the training CLI options on *parser* and return it.

    Every option with a ``choices`` list has a default drawn from that list,
    so ``parser.parse_args([])`` yields a configuration that is valid end to
    end.

    Args:
        parser: an ``argparse.ArgumentParser`` to populate.

    Returns:
        The same parser, with all options registered.
    """
    parser.add_argument("--feat-type", type=str, default='python_mfcc', dest="feat_type", help='input feature')
    parser.add_argument("--input-dim", type=int, default=30, dest="input_dim", help="dimension of input feature")
    parser.add_argument("--arch", type=str, default="tdnn", choices=["resnet", "tdnn", "etdnn", "ftdnn", "rawnet", "wav2spk", "wavenet"], help="specify model architecture")
    parser.add_argument("--loss", type=str, default="AMSoftmax", choices=["AMSoftmax", "CrossEntropy", "ASoftmax", "TripletLoss"], help="specify loss function")
    parser.add_argument("--bs", type=int, default=64, help="specify batch size for training")
    parser.add_argument("--resume", type=str, default='none', help="if you give a ckpt path to this argument and if the ckpt file exists, it will resume training based on this ckpt file. Otherwise, it will start a new training process")
    # BUG FIX: the previous default 'gpu' was not one of the declared choices.
    # argparse does not validate defaults against `choices`, so a run without
    # --device silently produced the invalid device string 'gpu'.
    parser.add_argument("--device", default='cuda', choices=['cuda', 'cpu'], help='designate the device on which the model will run')
    parser.add_argument("--mode", default='train', choices=['train', 'test'], help='train or test mode')
    return parser
def main():
    """Entry point: parse CLI options, load configs, and launch training."""
    cli = add_argument(argparse.ArgumentParser())
    args = vars(cli.parse_args())
    data_config = read_config("conf/data.yaml")
    model_config = read_config("conf/model.yaml")
    train_config = read_config("conf/train.yaml")
    ResNetTrainer(data_config, model_config, train_config, args)()
# Script entry point: start training when run directly.
if __name__ == "__main__":
    main()
| [
"oneflow.max",
"oneflow.no_grad",
"oneflow.nn.functional.normalize"
] | [((25, 186), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'filename': '"""train.log"""', 'filemode': '"""w"""', 'format': '"""%(asctime)s [%(filename)s:%(lineno)d - %(levelname)s ] %(message)s"""'}), "(level=logging.INFO, filename='train.log', filemode='w',\n format='%(asctime)s [%(filename)s:%(lineno)d - %(levelname)s ] %(message)s'\n )\n", (44, 186), False, 'import logging\n'), ((197, 228), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""../../../"""'], {}), "(0, '../../../')\n", (212, 228), False, 'import sys\n'), ((6868, 6893), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6891, 6893), False, 'import argparse\n'), ((6999, 7028), 'libs.utils.utils.read_config', 'read_config', (['"""conf/data.yaml"""'], {}), "('conf/data.yaml')\n", (7010, 7028), False, 'from libs.utils.utils import read_config\n'), ((7048, 7078), 'libs.utils.utils.read_config', 'read_config', (['"""conf/model.yaml"""'], {}), "('conf/model.yaml')\n", (7059, 7078), False, 'from libs.utils.utils import read_config\n'), ((7098, 7128), 'libs.utils.utils.read_config', 'read_config', (['"""conf/train.yaml"""'], {}), "('conf/train.yaml')\n", (7109, 7128), False, 'from libs.utils.utils import read_config\n'), ((1491, 1535), 'libs.nnet.residual_network.SpeakerEmbNet', 'residual_network.SpeakerEmbNet', (['model_config'], {}), '(model_config)\n', (1521, 1535), False, 'from libs.nnet import residual_network\n'), ((2174, 2196), 'tqdm.tqdm', 'tqdm', (['self.trainloader'], {}), '(self.trainloader)\n', (2178, 2196), False, 'from tqdm import tqdm\n'), ((2731, 2752), 'oneflow.max', 'of.max', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (2737, 2752), True, 'import oneflow as of\n'), ((2846, 2861), 'libs.components.loss.backward', 'loss.backward', ([], {}), '()\n', (2859, 2861), False, 'from libs.components import loss\n'), ((4691, 4703), 'oneflow.no_grad', 'of.no_grad', ([], {}), '()\n', (4701, 4703), True, 'import oneflow as of\n'), ((5550, 
5565), 'oneflow.nn.functional.normalize', 'F.normalize', (['xv'], {}), '(xv)\n', (5561, 5565), True, 'import oneflow.nn.functional as F\n'), ((2918, 2929), 'libs.components.loss.item', 'loss.item', ([], {}), '()\n', (2927, 2929), False, 'from libs.components import loss\n'), ((5000, 5021), 'oneflow.max', 'of.max', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (5006, 5021), True, 'import oneflow as of\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
from oneflow.nn.common_types import _size_2_t
@flow.unittest.skip_unless_1n1d()
class TestFold(flow.unittest.TestCase):
    """Autotest cases comparing nn.Fold against PyTorch over several parameter combos."""

    @autotest(n=3, auto_backward=True, rtol=1e-4, atol=1e-4)
    def test_fold_with_random_data_1(test_case):
        # 3x3 kernel, unit stride/dilation, padding 1, 4x4 output.
        fold = torch.nn.Fold(
            output_size=constant((4, 4)),
            kernel_size=constant(3),
            dilation=constant(1),
            padding=constant(1),
            stride=constant(1),
        )
        fold.train(random())
        device = random_device()
        fold.to(device)
        inp = random_pytorch_tensor(
            ndim=3, dim0=constant(2), dim1=constant(36), dim2=constant(16)
        ).to(device)
        return fold(inp)

    @autotest(n=3, auto_backward=True, rtol=1e-4, atol=1e-4)
    def test_fold_with_random_data_2(test_case):
        # Same as case 1 but with zero padding (fewer sliding blocks).
        fold = torch.nn.Fold(
            output_size=constant((4, 4)),
            kernel_size=constant(3),
            dilation=constant(1),
            padding=constant(0),
            stride=constant(1),
        )
        fold.train(random())
        device = random_device()
        fold.to(device)
        inp = random_pytorch_tensor(
            ndim=3, dim0=constant(2), dim1=constant(36), dim2=constant(4)
        ).to(device)
        return fold(inp)

    @autotest(n=3, auto_backward=True, rtol=1e-4, atol=1e-4)
    def test_fold_with_random_data_3(test_case):
        # Larger 8x8 output with stride 2.
        fold = torch.nn.Fold(
            output_size=constant((8, 8)),
            kernel_size=constant(3),
            dilation=constant(1),
            padding=constant(1),
            stride=constant(2),
        )
        fold.train(random())
        device = random_device()
        fold.to(device)
        inp = random_pytorch_tensor(
            ndim=3, dim0=constant(2), dim1=constant(72), dim2=constant(16)
        ).to(device)
        return fold(inp)

    @autotest(n=3, auto_backward=True, rtol=1e-4, atol=1e-4)
    def test_fold_with_random_data_4(test_case):
        # Dilated (dilation 2) variant with stride 2.
        fold = torch.nn.Fold(
            output_size=constant((8, 8)),
            kernel_size=constant(3),
            dilation=constant(2),
            padding=constant(1),
            stride=constant(2),
        )
        fold.train(random())
        device = random_device()
        fold.to(device)
        inp = random_pytorch_tensor(
            ndim=3, dim0=constant(2), dim1=constant(9), dim2=constant(9)
        ).to(device)
        return fold(inp)
# Run the tests in this module when executed as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((757, 789), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (787, 789), True, 'import oneflow as flow\n'), ((3143, 3158), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3156, 3158), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
from typing import Union, Optional, Sequence, List, Tuple
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.module as module_util
import oneflow.python.ops.math_unary_elementwise_ops as math_unary_elementwise_ops
from oneflow.python.oneflow_export import oneflow_export
from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim
from oneflow.python.ops.transpose_util import get_inversed_perm
import oneflow_api
@oneflow_export("math.add")
def add(
x: Union[int, float, oneflow_api.BlobDesc],
y: Union[int, float, oneflow_api.BlobDesc],
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""Compute :math:`X + Y` element-wise, math.add supports broadcasting.
The equation is:
.. math::
out = X + Y
Args:
x (Union[int, float, oneflow_api.BlobDesc]): A Blob.
y (Union[int, float, oneflow_api.BlobDesc]): A Blob has the same type of x.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: A Blob is added by x and y, and has the same type of x.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def addJob(x: tp.Numpy.Placeholder((3, )),
y: tp.Numpy.Placeholder((3, ))
)->tp.Numpy:
return flow.math.add(x, y)
x = np.array([1, 2, 3]).astype(np.float32)
y = np.array([1, 1, 1]).astype(np.float32)
out = addJob(x, y)
# out [2., 3., 4.]
"""
if isinstance(x, (int, float)):
return scalar_add(y, x, name)
elif isinstance(y, (int, float)):
return scalar_add(x, y, name)
elif x.shape == y.shape and x.is_dynamic == y.is_dynamic:
return element_wise_add(x, y, name)
elif x.shape == (1,):
return scalar_add_by_tensor(y, x, name)
elif y.shape == (1,):
return scalar_add_by_tensor(x, y, name)
else:
return broadcast_add(x, y, name)
def _recursive_build_add_n(inputs, name=None):
    """Sum an arbitrary number of blobs by chaining "add_n" ops.

    The "add_n" kernel accepts at most 8 inputs, so longer input lists are
    reduced recursively: the first 8 inputs are summed into a partial result
    that is appended to the remainder.

    Args:
        inputs: sequence of blobs to sum; must be non-empty.
        name: optional name for the final "add_n" op. Intermediate partial
            sums always receive generated unique names.

    Returns:
        A blob holding the element-wise sum of all inputs (or the sole input
        itself when len(inputs) == 1, in which case `name` is not applied).
    """
    inputs = list(inputs)
    kernel_max_inputs = 8  # hard fan-in limit of the add_n kernel
    if len(inputs) == 1:
        return inputs[0]
    elif len(inputs) <= kernel_max_inputs:
        return (
            flow.user_op_builder(
                name if name is not None else id_util.UniqueStr("AddN_")
            )
            .Op("add_n")
            .Input("in", inputs)
            .Output("out")
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
    else:
        assert len(inputs) > kernel_max_inputs
        new_inputs = inputs[kernel_max_inputs:]
        new_inputs.append(_recursive_build_add_n(inputs[:kernel_max_inputs]))
        # BUG FIX: forward `name` so the caller-supplied op name is not
        # silently dropped whenever there are more than `kernel_max_inputs`
        # inputs (previously only generated names were used in that case).
        return _recursive_build_add_n(new_inputs, name)
@oneflow_export("math.add_n")
def add_n(
    inputs: Sequence[oneflow_api.BlobDesc], name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """Sum all input blobs element-wise.

    Args:
        inputs (Sequence[oneflow_api.BlobDesc]): Blobs sharing one shape and dtype.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The element-wise sum, same shape and dtype as the inputs.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def add_n_Job(x: tp.Numpy.Placeholder((3, )),
                      y: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.add_n([x, y])

        out = add_n_Job(np.array([1, 2, 3]).astype(np.float32),
                        np.array([1, 1, 1]).astype(np.float32))
        # out [2., 3., 4.]
    """
    # Chained add_n ops are built under the hood when len(inputs) > 8.
    summed = _recursive_build_add_n(inputs, name)
    return summed
@oneflow_export("math.subtract")
def subtract(
    x: Union[int, float, oneflow_api.BlobDesc],
    y: Union[int, float, oneflow_api.BlobDesc],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Element-wise subtraction ``out = x - y``.

    Args:
        x (Union[int, float, oneflow_api.BlobDesc]): A Blob or Python scalar.
        y (Union[int, float, oneflow_api.BlobDesc]): A Blob with the same type as x, or a Python scalar.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob holding ``x - y``, same type as x.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def subtractJob(x: tp.Numpy.Placeholder((3, )),
                        y: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.subtract(x, y)

        out = subtractJob(np.array([1, 2, 3]).astype(np.float32),
                          np.array([2, 4, 1]).astype(np.float32))
        # out [-1., -2., 2.]
    """
    if isinstance(x, (int, float)):
        # scalar - blob == (-blob) + scalar
        return scalar_add(-1 * y, x, name)
    if isinstance(y, (int, float)):
        # blob - scalar == blob + (-scalar)
        return scalar_add(x, -1 * y, name)
    if x.shape == y.shape:
        # TODO: add element-wise op
        return broadcast_sub(x, y, name)
    if x.shape == (1,):
        return scalar_sub_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_sub_by_tensor(x, y, name)
    return broadcast_sub(x, y, name)
@oneflow_export("math.multiply")
def multiply(
    x: Union[int, float, oneflow_api.BlobDesc],
    y: Union[int, float, oneflow_api.BlobDesc],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Element-wise multiplication :math:`out = x \times y`.

    Args:
        x (Union[int, float, oneflow_api.BlobDesc]): A Blob or Python scalar.
        y (Union[int, float, oneflow_api.BlobDesc]): A Blob with the same type as x, or a Python scalar.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob holding the product, same type as x.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def multiplyJob(x: tp.Numpy.Placeholder((3, )),
                        y: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.multiply(x, y)

        out = multiplyJob(np.array([1, 2, 3]).astype(np.float32),
                          np.array([2, 3, 3]).astype(np.float32))
        # out [2., 6., 9.]
    """
    # Scalar operand on either side: one scalar_mul op suffices.
    if isinstance(x, (int, float)):
        return scalar_mul(y, x, name)
    if isinstance(y, (int, float)):
        return scalar_mul(x, y, name)
    if x.shape == y.shape:
        return element_wise_mul(x, y, name)
    # Length-1 blobs act as tensor-held scalars.
    if x.shape == (1,):
        return scalar_mul_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_mul_by_tensor(x, y, name)
    return broadcast_mul(x, y, name)
@oneflow_export("math.divide")
def divide(
    x: Union[int, float, oneflow_api.BlobDesc],
    y: Union[int, float, oneflow_api.BlobDesc],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Element-wise division :math:`out = \frac{X}{Y}`.

    Args:
        x (Union[int, float, oneflow_api.BlobDesc]): A Blob or Python scalar.
        y (Union[int, float, oneflow_api.BlobDesc]): A Blob or Python scalar.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob with the same shape as x.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def divideJob(x: tp.Numpy.Placeholder((3, )),
                      y: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.divide(x, y)

        out = divideJob(np.array([25, 16, 9]).astype(np.float32),
                        np.array([10, 4, 2]).astype(np.float32))
        # out [2.5, 4., 4.5]
    """
    if isinstance(x, (int, float)):
        # scalar / blob: multiply the scalar by the NaN-free reciprocal of y.
        return scalar_mul(math_unary_elementwise_ops.reciprocal_no_nan(y), x, name)
    if isinstance(y, (int, float)):
        # blob / scalar: fold into a single scalar multiply. A zero divisor
        # deliberately yields a zero multiplier rather than inf/nan.
        factor = 0.0 if y == 0 else 1.0 / float(y)
        return scalar_mul(x, factor, name)
    if x.shape == y.shape:
        # TODO: add element-wise op
        return broadcast_div(x, y, name)
    if x.shape == (1,):
        return scalar_div_by_tensor(y, x, name)
    if y.shape == (1,):
        return scalar_div_by_tensor(x, y, name)
    return broadcast_div(x, y, name)
@oneflow_export("math.mod")
def floor_mod(
    x: Union[int, float, oneflow_api.BlobDesc],
    y: Union[int, float, oneflow_api.BlobDesc],
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Element-wise modulo :math:`out = X \bmod Y` of two Blobs.

    Args:
        x (Union[int, float, oneflow_api.BlobDesc]): A Blob.
        y (Union[int, float, oneflow_api.BlobDesc]): A Blob with the same type as x.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Raises:
        NotImplementedError: If x is a Python int or float.
        NotImplementedError: If y is a Python int or float.

    Returns:
        oneflow_api.BlobDesc: A Blob with the same type as x.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def modJob(x: tp.Numpy.Placeholder((3, )),
                   y: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.mod(x, y)

        out = modJob(np.array([16, 9, 5]).astype(np.float32),
                     np.array([6, 4, 3]).astype(np.float32))
        # out [4., 1., 2.]
    """
    # Python-scalar operands are not supported by this op.
    if isinstance(x, (int, float)) or isinstance(y, (int, float)):
        raise NotImplementedError
    # The broadcast kernel also covers the equal-shape case.
    return broadcast_floor_mod(x, y, name)
def scalar_add(x, operand, name=None):
    """Add the Python scalar ``operand`` to every element of blob ``x``.

    Args:
        x: Input blob.
        operand: Python int or float to add element-wise.
        name: Optional op name; a unique one is generated when omitted.

    Returns:
        A blob with the same shape/type as ``x`` holding ``x + operand``.

    Raises:
        TypeError: If ``operand`` is neither int nor float.
    """
    # Validate up front: previously an unsupported operand type built an op
    # with no operand attrs at all, failing much later with an opaque
    # backend error instead of a clear Python exception.
    if not isinstance(operand, (int, float)):
        raise TypeError("operand must be an int or a float")
    if name is None:
        name = id_util.UniqueStr("ScalarAdd_")
    builder = flow.user_op_builder(name).Op("scalar_add").Input("in", [x]).Output("out")
    if isinstance(operand, int):
        builder = (
            builder.Attr("has_int_operand", True)
            .Attr("has_float_operand", False)
            .Attr("int_operand", operand)
            .Attr("float_operand", 0.0)
        )
    else:
        # operand is a float (guaranteed by the check above).
        builder = (
            builder.Attr("has_int_operand", False)
            .Attr("has_float_operand", True)
            .Attr("int_operand", 0)
            .Attr("float_operand", operand)
        )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def scalar_add_by_tensor(x, scalar, name=None):
    """Add a shape-(1,) blob ``scalar`` to every element of blob ``x``."""
    op_name = name or id_util.UniqueStr("ScalarAddByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_add_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def element_wise_add(x, y, name=None):
    """Element-wise sum of two identically shaped blobs via ``add_n``."""
    operands = [x, y]
    return flow.math.add_n(operands, name)
def build_broadcast_binary_op(math_op, x, y, name=None):
    """Build and run a binary broadcast user op.

    Args:
        math_op: Registered op type name, e.g. ``"broadcast_add"``.
        x: First input blob.
        y: Second input blob (shape broadcast-compatible with ``x``).
        name: Optional op name; derived from ``math_op`` when omitted.

    Returns:
        The op's single output blob ``z``.
    """
    if name is None:
        name = id_util.UniqueStr(math_op + "_")
    builder = flow.user_op_builder(name).Op(math_op)
    builder = builder.Input("x", [x]).Input("y", [y]).Output("z")
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def broadcast_add(x, y, name=None):
    """Broadcasting element-wise sum of blobs ``x`` and ``y``."""
    op_type_name = "broadcast_add"
    return build_broadcast_binary_op(op_type_name, x, y, name)
def broadcast_sub(x, y, name=None):
    """Broadcasting element-wise difference ``x - y`` of two blobs."""
    op_type_name = "broadcast_sub"
    return build_broadcast_binary_op(op_type_name, x, y, name)
def scalar_sub_by_tensor(x, scalar, name=None):
    """Subtract a shape-(1,) blob ``scalar`` from every element of ``x``."""
    op_name = name or id_util.UniqueStr("ScalarSubByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_sub_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def element_wise_mul(x, y, name=None):
    """Element-wise product of two identically shaped blobs."""
    op_name = name or id_util.UniqueStr("ElementWiseMul_")
    op = (
        flow.user_op_builder(op_name)
        .Op("multiply")
        .Input("x", [x])
        .Input("y", [y])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def broadcast_mul(x, y, name=None):
    """Broadcasting element-wise product of blobs ``x`` and ``y``."""
    op_type_name = "broadcast_mul"
    return build_broadcast_binary_op(op_type_name, x, y, name)
def scalar_mul(x, operand, name=None):
    """Multiply every element of blob ``x`` by the Python scalar ``operand``.

    Args:
        x: Input blob.
        operand: Python int or float multiplier.
        name: Optional op name; a unique one is generated when omitted.

    Returns:
        A blob with the same shape/type as ``x`` holding ``x * operand``.

    Raises:
        TypeError: If ``operand`` is neither int nor float.
    """
    # Validate up front (consistent with scalar_add): previously an
    # unsupported operand type built an op missing its operand attrs and
    # failed later with an opaque backend error.
    if not isinstance(operand, (int, float)):
        raise TypeError("operand must be an int or a float")
    if name is None:
        name = id_util.UniqueStr("ScalarMul_")
    builder = flow.user_op_builder(name).Op("scalar_mul").Input("in", [x]).Output("out")
    if isinstance(operand, int):
        builder = (
            builder.Attr("has_int_operand", True)
            .Attr("has_float_operand", False)
            .Attr("int_operand", operand)
            .Attr("float_operand", 0.0)
        )
    else:
        # operand is a float (guaranteed by the check above).
        builder = (
            builder.Attr("has_int_operand", False)
            .Attr("has_float_operand", True)
            .Attr("int_operand", 0)
            .Attr("float_operand", operand)
        )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
def scalar_mul_by_tensor(x, scalar, name=None):
    """Multiply every element of ``x`` by a shape-(1,) blob ``scalar``."""
    op_name = name or id_util.UniqueStr("ScalarMulByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_mul_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def broadcast_div(x, y, name=None):
    """Broadcasting element-wise quotient ``x / y`` of two blobs."""
    op_type_name = "broadcast_div"
    return build_broadcast_binary_op(op_type_name, x, y, name)
def scalar_div_by_tensor(x, scalar, name=None):
    """Divide every element of ``x`` by a shape-(1,) blob ``scalar``."""
    op_name = name or id_util.UniqueStr("ScalarDivByTensor_")
    op = (
        flow.user_op_builder(op_name)
        .Op("scalar_div_by_tensor")
        .Input("x", [x])
        .Input("scalar", [scalar])
        .Output("y")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
def broadcast_floor_mod(x, y, name=None):
    """Broadcasting element-wise floor-mod of blobs ``x`` and ``y``."""
    op_type_name = "broadcast_floor_mod"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.gelu")
def gelu(x: oneflow_api.BlobDesc, name: Optional[str] = None) -> oneflow_api.BlobDesc:
    r"""Gelu activation operator.

    .. math::

        out = 0.5 * x * (1 + tanh(\sqrt{\frac{2}{\pi}} * (x + 0.044715x^{3})))

    Args:
        x (oneflow_api.BlobDesc): Input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The activated Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def geluJob(x: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.gelu(x)

        out = geluJob(np.array([-0.5, 0, 0.5]).astype(np.float32))
        # out [-0.15426877, 0., 0.34573123]
    """
    op_name = name if name is not None else id_util.UniqueStr("Gelu_")
    op = (
        flow.user_op_builder(op_name)
        .Op("gelu")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.relu", "nn.relu")
def relu(x: oneflow_api.BlobDesc, name: Optional[str] = None) -> oneflow_api.BlobDesc:
    r"""Relu activation: :math:`out = max(X, 0)`.

    Args:
        x (oneflow_api.BlobDesc): Input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The activated Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def reluJob(x: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.relu(x)

        out = reluJob(np.array([-1, 0, 5]).astype(np.float32))
        # out [0., 0., 5.]
    """
    op_name = name if name is not None else id_util.UniqueStr("Relu_")
    op = (
        flow.user_op_builder(op_name)
        .Op("relu")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.sigmoid")
def sigmoid(
    x: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Sigmoid activation: :math:`out = \frac{1}{1 + e^{-x}}`.

    Args:
        x (oneflow_api.BlobDesc): Input Blob.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The activated Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def sigmoidJob(x: tp.Numpy.Placeholder((3, ))
        ) -> tp.Numpy:
            return flow.math.sigmoid(x)

        out = sigmoidJob(np.array([-1, 0, 1]).astype(np.float32))
        # out [0.26894143, 0.5, 0.7310586]
    """
    op_name = name if name is not None else id_util.UniqueStr("Sigmoid_")
    op = (
        flow.user_op_builder(op_name)
        .Op("sigmoid")
        .Input("in", [x])
        .Output("out")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.sigmoid_grad")
def sigmoid_grad(
    y: oneflow_api.BlobDesc, dy: oneflow_api.BlobDesc, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Backward pass of sigmoid.

    Args:
        y: Output of the forward sigmoid.
        dy: Upstream gradient with respect to ``y``.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: Gradient ``dx`` with respect to the sigmoid
        input (computed by the ``sigmoid_grad`` kernel; presumably
        ``dy * y * (1 - y)`` — kernel source not visible here).
    """
    op_name = name if name is not None else id_util.UniqueStr("SigmoidGrad_")
    op = (
        flow.user_op_builder(op_name)
        .Op("sigmoid_grad")
        .Input("y", [y])
        .Input("dy", [dy])
        .Output("dx")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_segment_sum", "unsorted_segment_sum")
def unsorted_segment_sum(
    data: oneflow_api.BlobDesc,
    segment_ids: oneflow_api.BlobDesc,
    num_segments: int,
    axis: int = 0,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the sum along segments of a Blob.

    Args:
        data (oneflow_api.BlobDesc): Input Blob.
        segment_ids (oneflow_api.BlobDesc): Segment id per entry along ``axis``; ids lie in ``[0, num_segments)``.
        num_segments (int): Number of distinct segment ids.
        axis (int, optional): The axis of data to segment over. Defaults to 0.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob with the same type as data.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def unsorted_segment_sumJob(data: tp.Numpy.Placeholder((3, 4)),
                                    segment_ids: tp.Numpy.Placeholder((3, ), dtype=flow.int32)
        ) -> tp.Numpy:
            return flow.math.unsorted_segment_sum(data, segment_ids, num_segments=2, axis=0)

        input_blob = np.array([[1, 2, 3, 4],
                               [5, 6, 7, 8],
                               [9, 10, 11, 12]]).astype(np.float32)
        segment_ids = np.array([0, 1, 0]).astype(np.int32)
        out = unsorted_segment_sumJob(input_blob, segment_ids)
        # out [[10. 12. 14. 16.]
        #      [ 5.  6.  7.  8.]]

        # With axis=1 and segment_ids of shape (4,), e.g. [0, 1, 0, 1],
        # columns are summed instead, producing shape (3, 2).
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedSegmentSum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_segment_sum")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Output("out")
        .Attr("axis", int(axis))
        .Attr("num_segments", int(num_segments))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_segment_sum_like", "unsorted_segment_sum_like")
def unsorted_segment_sum_like(
    data: oneflow_api.BlobDesc,
    segment_ids: oneflow_api.BlobDesc,
    like: oneflow_api.BlobDesc,
    axis: int = 0,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the sum along segments of a Blob; the output takes the shape of ``like``.

    Args:
        data (oneflow_api.BlobDesc): Input Blob.
        segment_ids (oneflow_api.BlobDesc): Segment id per entry along ``axis``.
        like (oneflow_api.BlobDesc): Blob whose shape the output adopts.
        axis (int, optional): The axis of data to segment over. Defaults to 0.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob shaped like ``like``.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def unsorted_segment_sum_like_Job(data: tp.Numpy.Placeholder((3, 4)),
                                          segment_ids: tp.Numpy.Placeholder((3, ), dtype=flow.int32),
                                          like: tp.Numpy.Placeholder((2, 4), dtype=flow.float32)
        ) -> tp.Numpy:
            return flow.math.unsorted_segment_sum_like(data, segment_ids, like, axis=0)

        input_blob = np.array([[1, 2, 3, 4],
                               [5, 6, 7, 8],
                               [9, 10, 11, 12]]).astype(np.float32)
        segment_ids = np.array([0, 1, 0]).astype(np.int32)
        like = np.zeros(shape=(2, 4), dtype=np.float32)
        out = unsorted_segment_sum_like_Job(input_blob, segment_ids, like)
        # out [[10. 12. 14. 16.]
        #      [ 5.  6.  7.  8.]]
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedSegmentSumLike_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_segment_sum_like")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Input("like", [like])
        .Output("out")
        .Attr("axis", int(axis))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.unsorted_batch_segment_sum", "unsorted_batch_segment_sum")
def unsorted_batch_segment_sum(
    data: oneflow_api.BlobDesc,
    segment_ids: oneflow_api.BlobDesc,
    num_segments: int,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Like ``unsorted_segment_sum`` but with a leading batch axis: each
    batch row of ``data`` is segment-summed independently using the
    matching row of ``segment_ids``.

    For example, segment ids may look like::

        [[0 0 0 1 2 2 3 3],
         [0 0 1 1 2 3 3 3]]

    Args:
        data (oneflow_api.BlobDesc): Input Blob.
        segment_ids (oneflow_api.BlobDesc): A Blob of shape (d0, d1) where d0, d1 are data's first two dims.
        num_segments (int): Number of distinct segment ids.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def unsorted_batch_segment_sum_Job(data: tp.Numpy.Placeholder((3, 4)),
                                           segment_ids: tp.Numpy.Placeholder((3, 4), dtype=flow.int32)
        ) -> tp.Numpy:
            return flow.math.unsorted_batch_segment_sum(data, segment_ids, 2)

        input_blob = np.array([[1, 2, 3, 4],
                               [1, 2, 3, 4],
                               [1, 2, 3, 4]]).astype(np.float32)
        segment_ids = np.array([[0, 0, 0, 1],
                                [0, 0, 1, 0],
                                [0, 1, 0, 0]]).astype(np.int32)
        out = unsorted_batch_segment_sum_Job(input_blob, segment_ids)
        # out [[6. 4.]
        #      [7. 3.]
        #      [8. 2.]]
    """
    op_name = name if name is not None else id_util.UniqueStr("UnsortedBatchSegmentSum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("unsorted_batch_segment_sum")
        .Input("data", [data])
        .Input("segment_ids", [segment_ids])
        .Output("out")
        .Attr("num_segments", int(num_segments))
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("cast")
def cast(
    x: oneflow_api.BlobDesc, dtype: flow.dtype, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Casts the input Blob ``x`` to ``dtype``.

    Args:
        x (oneflow_api.BlobDesc): Input Blob.
        dtype (flow.dtype): Target data type.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob with dtype ``dtype`` (or ``x`` itself when the dtype already matches).

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def cast_Job(x: tp.Numpy.Placeholder((3, ), dtype=flow.float32)
        ) -> tp.Numpy:
            return flow.cast(x, dtype=flow.int32)

        out = cast_Job(np.array([1, 2, 3]).astype(np.float32))
        # out.dtype = "int32"
    """
    # Short-circuit: no op is emitted when the dtype already matches.
    if x.dtype == dtype:
        return x
    op_name = name if name is not None else id_util.UniqueStr("Cast_")
    op = (
        flow.user_op_builder(op_name)
        .Op("cast")
        .Input("in", [x])
        .Output("out")
        .Attr("dtype", dtype)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.equal")
def equal(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`{x}=={y}` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``equal([1, 2, 3], [1, 2, 1]) -> [1 1 0]``
    """
    op_type_name = "broadcast_equal"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.not_equal")
def not_equal(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`{x}!={y}` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``not_equal([1, 2, 3], [1, 2, 1]) -> [0 0 1]``
    """
    op_type_name = "broadcast_not_equal"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.less")
def less(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`x < y` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``less([1, 2, 3], [1, 2, 4]) -> [0 0 1]``
    """
    op_type_name = "broadcast_less"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.less_equal")
def less_equal(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`x <= y` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``less_equal([1, 2, 3], [1, 1, 4]) -> [1 0 1]``
    """
    op_type_name = "broadcast_less_equal"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.greater")
def greater(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`x > y` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``greater([1, 1, 4], [1, 2, 3]) -> [0 0 1]``
    """
    op_type_name = "broadcast_greater"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.greater_equal")
def greater_equal(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Returns the truth value of :math:`x >= y` element-wise.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where the relation holds, 0 elsewhere.

    Example: ``greater_equal([1, 1, 4], [1, 2, 3]) -> [1 0 1]``
    """
    op_type_name = "broadcast_greater_equal"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.logical_and")
def logical_and(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Logical AND: each element is :math:`out = X \land Y`.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob (broadcast-compatible with x).
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int8 Blob, 1 where both operands are truthy, 0 elsewhere.

    Example: ``logical_and([1, 0, 1], [0, 0, 1]) -> [0 0 1]``
    """
    op_type_name = "broadcast_logical_and"
    return build_broadcast_binary_op(op_type_name, x, y, name)
@oneflow_export("math.minimum")
def minimum(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Element-wise minimum of x and y, with broadcasting support.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob with the same type as x.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob with the same type as x.

    Example: ``minimum([2, 3, 4], [4, 2, 1]) -> [2. 2. 1.]``
    """
    if x.shape != y.shape:
        # Differing shapes: fall back to the broadcasting kernel.
        return build_broadcast_binary_op("broadcast_minimum", x, y, name)
    op_name = name or id_util.UniqueStr("ElementWiseMinimum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("elementwise_minimum")
        .Input("x", [x])
        .Input("y", [y])
        .Output("z")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.maximum")
def maximum(
    x: oneflow_api.BlobDesc, y: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    r"""Element-wise maximum of x and y, with broadcasting support.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        y (oneflow_api.BlobDesc): A Blob with the same type as x.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob with the same type as x.

    Example: ``maximum([2, 3, 4], [4, 2, 1]) -> [4. 3. 4.]``
    """
    if x.shape != y.shape:
        # Differing shapes: fall back to the broadcasting kernel.
        return build_broadcast_binary_op("broadcast_maximum", x, y, name)
    op_name = name or id_util.UniqueStr("ElementWiseMaximum_")
    op = (
        flow.user_op_builder(op_name)
        .Op("elementwise_maximum")
        .Input("x", [x])
        .Input("y", [y])
        .Output("z")
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.reduced_shape_elem_cnt")
def elem_cnt(
    input_blob: oneflow_api.BlobDesc,
    axis: Optional[Sequence[int]] = None,
    dtype: Optional[flow.dtype] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Computes the product of input_blob's dimensions along the parameter `axis`. By default, all the dimensions will be computed.

    Args:
        input_blob (oneflow_api.BlobDesc): Input Blob
        axis (Optional[Sequence[int]], optional): The dimensions along which the op is performed. Defaults to None.
        dtype (Optional[flow.dtype], optional): The data type. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: A Blob

    For example:

    .. code-block:: python

        # Example 1:
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def elem_cnt_Job(x: tp.Numpy.Placeholder((3, 4, 5))
        )->tp.Numpy:
            return flow.math.reduced_shape_elem_cnt(x, axis=[0, 1])

        x = np.ones(shape=(3, 4, 5), dtype=np.float32)
        out = elem_cnt_Job(x) # 3 x 4 = 12
        # out [12]

        # Example 2:
        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def elem_cnt_Job(x: tp.Numpy.Placeholder((3, 4, 5))
        )->tp.Numpy:
            return flow.math.reduced_shape_elem_cnt(x)

        x = np.ones(shape=(3, 4, 5), dtype=np.float32)
        out = elem_cnt_Job(x) # 3 x 4 x 5 = 60
        # out [60]
    """
    # This op predates the user-op builder API, so it is assembled as a raw
    # OperatorConf protobuf message and dispatched through interpret_util.
    op_conf = op_conf_util.OperatorConf()
    setattr(
        op_conf,
        "name",
        name if name is not None else id_util.UniqueStr("ShapeElemCnt_"),
    )
    op_conf.shape_elem_cnt_conf.x = input_blob.unique_name
    if axis is None:
        # No axis given: mark the (empty) exclude-axis message as present,
        # which selects the "count every dimension" behavior.
        op_conf.shape_elem_cnt_conf.exclude_axis_conf.SetInParent()
    else:
        assert isinstance(axis, (tuple, list))
        op_conf.shape_elem_cnt_conf.include_axis_conf.axis.extend(axis)
    if dtype is not None:
        # Translate the Python-facing dtype into its protobuf enum value.
        op_conf.shape_elem_cnt_conf.data_type = oneflow_api.deprecated.GetProtoDtype4OfDtype(
            dtype
        )
    op_conf.shape_elem_cnt_conf.y = "y"
    interpret_util.Forward(op_conf)
    # Build a handle to this op's single output blob "y" and wrap it as a
    # remote blob for the caller.
    out_lbi = logical_blob_id_util.LogicalBlobId()
    out_lbi.op_name = op_conf.name
    out_lbi.blob_name = "y"
    return remote_blob_util.RemoteBlob(out_lbi)
def _top_k_at_last_dim(
    input: oneflow_api.BlobDesc,
    k: int = 1,
    sorted: bool = True,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Indices of the k largest entries along the last dimension of ``input``."""
    op_name = name if name is not None else id_util.UniqueStr("TopK_")
    op = (
        flow.user_op_builder(op_name)
        .Op("top_k")
        .Input("in", [input])
        .Output("out")
        .Attr("k", k)
        .Attr("sorted", sorted)
        .Build()
    )
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.top_k")
def top_k(
    input: oneflow_api.BlobDesc,
    axis: int = -1,
    k: int = 1,
    sorted: bool = True,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Finds the indices of the k largest entries along ``axis``.

    Unlike other frameworks, OneFlow returns only the indices, not the values.

    Args:
        input (oneflow_api.BlobDesc): The input Blob.
        axis (int, optional): Dimension to search. Defaults to the last dim (-1).
        k (int, optional): How many top elements to select. Defaults to 1.
        sorted (bool, optional): If True the k indices are ordered by descending value. Defaults to True.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: An int32 Blob of indices of the k largest elements.

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def topk_Job(x: tp.Numpy.Placeholder((5, ))
        ) -> tp.Numpy:
            return flow.math.top_k(x, 2)

        out = topk_Job(np.array([1, 3, 8, 7, 2], dtype=np.float32))
        # out [2 3]
    """
    name = id_util.UniqueStr("TopK_") if name is None else name
    num_axes = len(input.shape)
    if axis < 0:
        axis += num_axes
    assert 0 <= axis < num_axes, "axis out of range"
    if axis == num_axes - 1:
        return _top_k_at_last_dim(input, k, sorted, name)
    # The kernel only works on the last dim: transpose `axis` there,
    # run top_k, then invert the transpose to restore the layout.
    perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis)
    transposed = flow.transpose(input, perm, False, True, name + "_transpose")
    picked = _top_k_at_last_dim(transposed, k, sorted, name)
    return flow.transpose(
        picked, get_inversed_perm(perm), False, True, name + "_inverse_transpose"
    )
def _argmax_at_last_dim(
    input: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
    """Build the raw ``argmax`` user op, which reduces over the last dimension only."""
    op_name = name if name is not None else id_util.UniqueStr("ArgMax_")
    builder = (
        flow.user_op_builder(op_name)
        .Op("argmax")
        .Input("in", [input])
        .Output("out")
    )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.argmax")
def argmax(
input: oneflow_api.BlobDesc, axis: int = -1, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""The op computes the index with the largest value of a Blob at specified axis.
Args:
input (oneflow_api.BlobDesc): Input Blob
axis (int, optional): dimension to be calculated. Defaults to the last dim (-1)
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: A Blob(dtype=int32) contains the index with the largest value of `input`
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def argmax_Job(x: tp.Numpy.Placeholder((2, 5))
)->tp.Numpy:
return flow.math.argmax(x)
x = np.array([[1, 3, 8, 7, 2],
[1, 9, 4, 3, 2]], dtype=np.float32)
out = argmax_Job(x)
# out [2 1]
"""
name = name if name is not None else id_util.UniqueStr("ArgMax_")
num_axes = len(input.shape)
axis = axis if axis >= 0 else axis + num_axes
assert 0 <= axis < num_axes, "axis out of range"
if axis == num_axes - 1:
return _argmax_at_last_dim(input, name)
else:
perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis)
x = flow.transpose(input, perm, False, True, name + "_transpose")
x = _argmax_at_last_dim(x, name)
x = flow.expand_dims(x, -1, name + "_expand_dims")
x = flow.transpose(
x, get_inversed_perm(perm), False, True, name + "_inverse_transpose"
)
x = flow.squeeze(x, [axis], name + "_squeeze")
return x
@oneflow_export("math.broadcast_to_compatible_with", "broadcast_to_compatible_with")
def broadcast_to_compatible_with(
x: oneflow_api.BlobDesc,
compatible: Sequence[oneflow_api.BlobDesc],
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
r"""Returns a 'Blob' with the shape can be broadcasted by other shapes
Args:
x (oneflow_api.BlobDesc): a 'Blob'
compatible (Sequence[oneflow_api.BlobDesc]): Sequence of different shape
name (Optional[str], optional): This operator's name. Defaults to None.
Returns:
oneflow_api.BlobDesc: A 'Blob' with the biggest shape
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def broadcast_to_compatible_with_Job(x: tp.Numpy.Placeholder((4, 1, 1))
)->tp.Numpy:
blob_a = flow.constant(value=1, dtype=flow.float32, shape=(1, 2, 1))
blob_b = flow.constant(value=1, dtype=flow.float32, shape=(1, 1, 3))
return flow.math.broadcast_to_compatible_with(x, [blob_a, blob_b])
x = np.ones(shape=(4, 1, 1), dtype=np.float32)
out = broadcast_to_compatible_with_Job(x)
# out.shape (4, 2, 3)
"""
assert isinstance(compatible, (list, tuple))
if name is None:
name = id_util.UniqueStr("BroadcastToCompatibleWith_")
op_conf = op_conf_util.OperatorConf()
setattr(op_conf, "name", name)
setattr(op_conf.broadcast_to_compatible_with_conf, "x", x.unique_name)
setattr(op_conf.broadcast_to_compatible_with_conf, "y", "y")
op_conf.broadcast_to_compatible_with_conf.compatible.extend(
[cp.unique_name for cp in compatible]
)
interpret_util.Forward(op_conf)
ret_lbi = logical_blob_id_util.LogicalBlobId()
ret_lbi.op_name = op_conf.name
ret_lbi.blob_name = "y"
return remote_blob_util.RemoteBlob(ret_lbi)
@oneflow_export(
    "math.clip_by_value", "clip_by_value", "clip_by_scalar", "clip", "clamp"
)
def clip_by_value(
    values: oneflow_api.BlobDesc,
    min_value: Optional[Union[int, float]] = None,
    max_value: Optional[Union[int, float]] = None,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This op clips Blob values to a specified min value and max value.

    The equation is:

    .. math::
        out = MIN(MAX(x, min), max)

    Args:
        values (oneflow_api.BlobDesc): Input Blob
        min_value (Optional[Union[int, float]], optional): The minimum value to clip by. Defaults to None.
        max_value (Optional[Union[int, float]], optional): The maximum value to clip by. Defaults to None.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Raises:
        ValueError: min_value and max_value `cannot be None at the same time`

    Returns:
        oneflow_api.BlobDesc: A clipped Blob

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def clip_by_value_Job(x: tp.Numpy.Placeholder((4, ))
        )->tp.Numpy:
            return flow.math.clip_by_value(x, min_value=-1, max_value=5)

        x = np.array([-2, 1, 4, 7], dtype=np.float32)
        out = clip_by_value_Job(x)

        # out [-1. 1. 4. 5.]
    """
    if name is None:
        name = id_util.UniqueStr("ClipByValue_")
    # Three distinct kernels exist depending on which bounds are given; each
    # takes both a float and an int variant of every bound attribute.
    if min_value is not None and max_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar")
            .Attr("floating_min", float(min_value))
            .Attr("integral_min", int(min_value))
            .Attr("floating_max", float(max_value))
            .Attr("integral_max", int(max_value))
        )
    elif min_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar_min")
            .Attr("floating_min", float(min_value))
            .Attr("integral_min", int(min_value))
        )
    elif max_value is not None:
        op_builder = (
            flow.user_op_builder(name)
            .Op("clip_by_scalar_max")
            .Attr("floating_max", float(max_value))
            .Attr("integral_max", int(max_value))
        )
    else:
        raise ValueError("min_value and max_value cannot be None at the same time")
    op = op_builder.Input("x", [values]).Output("y").Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("math.l2_normalize")
def l2_normalize(
input: oneflow_api.BlobDesc,
axis: Optional[int] = None,
epsilon: float = 1e-12,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
r"""Use L2 norm to normalizes along dimension `axis`
The equation is:
.. math::
out = \frac{x}{\sqrt{\Sigma{x^2}+\epsilon}}
Args:
input (oneflow_api.BlobDesc): Input Blob
axis (Optional[int], optional): The axis on which to apply L2 normalization. Defaults to None.
epsilon (float, optional): The epsilon value is used to avoid division by zero. Defaults to 1e-12.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: The normalized Blob
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def l2_normalize_Job(x: tp.Numpy.Placeholder((4, ))
)->tp.Numpy:
return flow.math.l2_normalize(x, axis=0)
x = np.array([1, 2, 3, 4], dtype=np.float32)
out = l2_normalize_Job(x)
# out [0.18257418 0.36514837 0.5477226 0.73029673]
"""
if axis < 0:
axis += len(input.shape)
assert axis >= 0 and axis < len(input.shape)
y, square_x_sum = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("L2Normalize_")
)
.Op("l2_normalize")
.Input("x", [input])
.Output("y")
.Output("square_x_sum")
.Attr("axis", int(axis))
.Attr("epsilon", float(epsilon))
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
return y
@oneflow_export("math.squared_difference")
def squared_difference(
x: Union[int, float, oneflow_api.BlobDesc],
y: Union[int, float, oneflow_api.BlobDesc],
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
"""This op computes :math:`(x - y)^2` element-wise.
Args:
x (Union[int, float, oneflow_api.BlobDesc]): A Blob
y (Union[int, float, oneflow_api.BlobDesc]): A Blob with the same type of x
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: A Blob
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def squared_difference_Job(x: tp.Numpy.Placeholder((4, )),
y: tp.Numpy.Placeholder((4, ))
)->tp.Numpy:
return flow.math.squared_difference(x, y)
x = np.array([1, 2, 3, 4], dtype=np.float32)
y = np.array([2, 4, 6, 8], dtype=np.float32)
out = squared_difference_Job(x, y)
# out [ 1. 4. 9. 16.]
"""
name_subtract, name_square = None, None
if name is not None:
name_subtract = name + "_subtract"
name_square = name + "_square"
return flow.math.square(flow.math.subtract(x, y, name_subtract), name_square)
@oneflow_export("math.gelu_grad")
def gelu_grad(
x: oneflow_api.BlobDesc, dy: oneflow_api.BlobDesc, name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("GeluGrad_")
)
.Op("gelu_grad")
.Input("x", [x])
.Input("dy", [dy])
.Output("dx")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.tril", "nn.tril")
def tril(
x: oneflow_api.BlobDesc,
diagonal: int = 0,
fill_value: Union[int, float] = 0,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
r"""Compute lower triangle of an matrix.
Args:
x (oneflow_api.BlobDesc): Input Blob.
diagonal (int): Diagonal offset, when diagonal > 0, diagonal offset up,
otherwise, offset downward.
fill_value(Union[int, float]): The value filled into the upper triangle.
name (Optional[str], optional): The name for the operation. Defaults to None.
Attention:
The dimension of x must greater or equal to 2.
Returns:
oneflow_api.BlobDesc: The lower triangle blob of input.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def tril_Job(x: tp.Numpy.Placeholder((4, 4))
)->tp.Numpy:
return flow.math.tril(x, 0)
x = np.array([[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],
dtype=np.float32)
out = tril_Job(x).get()
# output [[1, 0, 0, 0],
[1, 2, 0, 0],
[1, 2, 3, 0],
[1, 2, 3, 4]]
"""
if isinstance(fill_value, float):
is_floating_fill_value = True
floating_fill_value = float(fill_value)
integer_fill_value = int(0)
else:
is_floating_fill_value = False
floating_fill_value = float(0)
integer_fill_value = int(fill_value)
return (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("Tril_"))
.Op("tril")
.Input("in", [x])
.Attr("diagonal", diagonal)
.Attr("is_floating_fill_value", is_floating_fill_value)
.Attr("floating_fill_value", floating_fill_value)
.Attr("integer_fill_value", integer_fill_value)
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.fused_scale_tril", "nn.fused_scale_tril")
def fused_scale_tril(
x: oneflow_api.BlobDesc,
diagonal: int = 0,
fill_value: Union[int, float] = 0,
scale: Union[int, float] = 1,
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
if isinstance(fill_value, float):
is_floating_fill_value = True
floating_fill_value = float(fill_value)
integer_fill_value = int(0)
else:
is_floating_fill_value = False
floating_fill_value = float(0)
integer_fill_value = int(fill_value)
if isinstance(scale, float):
is_floating_scale_value = True
floating_scale_value = float(scale)
integer_scale_value = int(1)
else:
is_floating_scale_value = False
floating_scale_value = float(1)
integer_scale_value = int(scale)
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("FusedScaleTril_")
)
.Op("fused_scale_tril")
.Input("in", [x])
.Attr("diagonal", diagonal)
.Attr("is_floating_fill_value", is_floating_fill_value)
.Attr("floating_fill_value", floating_fill_value)
.Attr("integer_fill_value", integer_fill_value)
.Attr("is_floating_scale_value", is_floating_scale_value)
.Attr("floating_scale_value", floating_scale_value)
.Attr("integer_scale_value", integer_scale_value)
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("math.polyval")
def polyval(
coeffs: Union[List, Tuple], x: oneflow_api.BlobDesc, name: Optional[str] = None
) -> oneflow_api.BlobDesc:
r"""Computes the elementwise value of a polynomial.
Args:
coeffs (Union[List, Tuple]): The coefficients of the polynomial.
x (oneflow_api.BlobDesc): A Blob.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: A Blob, has the same data type of x.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def polyval_Job(
x: tp.Numpy.Placeholder((3,), dtype=flow.float32)
) -> tp.Numpy:
coeffs = [1.0, 3.0, -2.0]
return flow.math.polyval(coeffs, x)
x = np.array([1.0, 2.0, 3.0]).astype(np.float32)
out = polyval_Job(x)
# output [ 2. 8. 16.]
"""
if name is None:
name = id_util.UniqueStr("Polyval_")
if not isinstance(coeffs, (list, tuple)):
raise ValueError(
"Argument coeffs must be list type " "found {}".format(type(coeffs))
)
if len(coeffs) < 1:
return flow.zeros_like(x, name=name)
p = flow.zeros_like(x, name=name)
for c in coeffs:
p = flow.math.add(c, flow.math.multiply(p, x))
return p
@oneflow_export("math.in_top_k", "in_top_k")
def in_top_k(
targets: oneflow_api.BlobDesc,
predictions: oneflow_api.BlobDesc,
k: Optional[int],
name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
r"""Says whether the targets are in the top K predictions.
Args:
targets (oneflow_api.BlobDesc): A Blob of type int32 or int64.
predictions (oneflow_api.BlobDesc): A Blob of type float32.
k (Optional[int], optional): Number of top elements to look at for computing precision.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow_api.BlobDesc: A Blob of type bool. Computed Precision at k as a bool Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def intopk_Job(
targets: tp.Numpy.Placeholder((2,), dtype=flow.int32),
predictions: tp.Numpy.Placeholder((2, 4), dtype=flow.float32),
) -> tp.Numpy:
return flow.math.in_top_k(targets, predictions, 1)
targets = np.array([3, 1], dtype=np.int32)
predictions = np.array([[0.0, 1.0, 2.0, 3.0], [3.0, 2.0, 1.0, 0.0],], dtype=np.float32)
out = intopk_Job(targets, predictions)
# out [1 0]
"""
return (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("InTopK_"))
.Op("in_top_k")
.Input("targets", [targets])
.Input("predictions", [predictions])
.Attr("k", k)
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("range")
def range(start, limit=None, delta=1, dtype=None, name="range") -> oneflow_api.BlobDesc:
r"""This operator is similar to python `range`, the difference is that `oneflow.range` generates
a Blob.
Args:
start ([type]): The start of interval. Its type should be `int`.
limit ([type], optional): The limit of interval. Its type should be `int`.
delta (int, optional): The numerical spacing between elements. Defaults to 1.
dtype ([type], optional): The output's data type. Currently we only support `oneflow.int64`. Defaults to None.
name (str, optional): The name for the operation. Defaults to "range".
Returns:
oneflow_api.BlobDesc: The result Blob
For example:
Example 1:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
@flow.global_function()
def range_job()->tp.Numpy:
with flow.scope.placement("cpu", "0:0"):
out = flow.range(10, dtype=flow.int64)
return out
out = range_job()
# out [0 1 2 3 4 5 6 7 8 9]
Example2:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
@flow.global_function()
def range_job()->tp.Numpy:
with flow.scope.placement("cpu", "0:0"):
out = flow.range(1, 10, 3, dtype=flow.int64)
return out
out = range_job()
# out [1 4 7]
"""
# Ensure the dtype is not None
assert dtype is not None, "Please specified data type"
if limit is None:
# If limit is None, We start from zero.
start, limit = 0, start
assert limit > start, "Limit should be larger than start"
assert delta <= limit - start, "Delta is ilegal"
# Ensure start, limit, delta's dtype is int, We will Add dtype hierarchy in Later version.
assert type(start) == int, "Params `start`'s type should be int"
assert type(limit) == int, "Params `limit`'s type should be int"
assert type(delta) == int, "Params `delta`'s type should be int"
# Build User OP
return (
flow.user_op_builder(name if name is not None else id_util.UniqueStr("Range_"))
.Op("range")
.Attr("start", start)
.Attr("delta", delta)
.Attr("limit", limit)
.Attr("dtype", dtype)
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.python.framework.interpret_util.Forward",
"oneflow.math.add_n",
"oneflow.math.multiply",
"oneflow.python.ops.transpose_util.get_inversed_perm",
"oneflow.python.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.user_op_build... | [((1399, 1425), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.add"""'], {}), "('math.add')\n", (1413, 1425), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3764, 3792), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.add_n"""'], {}), "('math.add_n')\n", (3778, 3792), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4858, 4889), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.subtract"""'], {}), "('math.subtract')\n", (4872, 4889), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6487, 6518), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.multiply"""'], {}), "('math.multiply')\n", (6501, 6518), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8082, 8111), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.divide"""'], {}), "('math.divide')\n", (8096, 8111), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9800, 9826), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.mod"""'], {}), "('math.mod')\n", (9814, 9826), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15455, 15482), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.gelu"""'], {}), "('math.gelu')\n", (15469, 15482), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16583, 16621), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.relu"""', '"""nn.relu"""'], {}), "('math.relu', 'nn.relu')\n", (16597, 16621), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((17648, 17678), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.sigmoid"""'], {}), "('math.sigmoid')\n", (17662, 17678), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), 
((18780, 18815), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.sigmoid_grad"""'], {}), "('math.sigmoid_grad')\n", (18794, 18815), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((19256, 19323), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_segment_sum"""', '"""unsorted_segment_sum"""'], {}), "('math.unsorted_segment_sum', 'unsorted_segment_sum')\n", (19270, 19323), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((22155, 22232), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_segment_sum_like"""', '"""unsorted_segment_sum_like"""'], {}), "('math.unsorted_segment_sum_like', 'unsorted_segment_sum_like')\n", (22169, 22232), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((24426, 24505), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.unsorted_batch_segment_sum"""', '"""unsorted_batch_segment_sum"""'], {}), "('math.unsorted_batch_segment_sum', 'unsorted_batch_segment_sum')\n", (24440, 24505), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((26732, 26754), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""cast"""'], {}), "('cast')\n", (26746, 26754), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((27950, 27978), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.equal"""'], {}), "('math.equal')\n", (27964, 27978), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((28987, 29019), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.not_equal"""'], {}), "('math.not_equal')\n", (29001, 29019), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((30052, 30079), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.less"""'], {}), "('math.less')\n", (30066, 30079), False, 'from 
oneflow.python.oneflow_export import oneflow_export\n'), ((31080, 31113), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.less_equal"""'], {}), "('math.less_equal')\n", (31094, 31113), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((32149, 32179), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.greater"""'], {}), "('math.greater')\n", (32163, 32179), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((33199, 33235), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.greater_equal"""'], {}), "('math.greater_equal')\n", (33213, 33235), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((34290, 34324), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.logical_and"""'], {}), "('math.logical_and')\n", (34304, 34324), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((35410, 35440), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.minimum"""'], {}), "('math.minimum')\n", (35424, 35440), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((36876, 36906), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.maximum"""'], {}), "('math.maximum')\n", (36890, 36906), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((38341, 38386), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduced_shape_elem_cnt"""'], {}), "('math.reduced_shape_elem_cnt')\n", (38355, 38386), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((41290, 41318), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.top_k"""'], {}), "('math.top_k')\n", (41304, 41318), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((43611, 43640), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.argmax"""'], {}), 
"('math.argmax')\n", (43625, 43640), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((45369, 45456), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.broadcast_to_compatible_with"""', '"""broadcast_to_compatible_with"""'], {}), "('math.broadcast_to_compatible_with',\n 'broadcast_to_compatible_with')\n", (45383, 45456), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((47323, 47415), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.clip_by_value"""', '"""clip_by_value"""', '"""clip_by_scalar"""', '"""clip"""', '"""clamp"""'], {}), "('math.clip_by_value', 'clip_by_value', 'clip_by_scalar',\n 'clip', 'clamp')\n", (47337, 47415), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((49923, 49958), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.l2_normalize"""'], {}), "('math.l2_normalize')\n", (49937, 49958), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((51676, 51717), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.squared_difference"""'], {}), "('math.squared_difference')\n", (51690, 51717), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((53053, 53085), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.gelu_grad"""'], {}), "('math.gelu_grad')\n", (53067, 53085), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((53517, 53555), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.tril"""', '"""nn.tril"""'], {}), "('math.tril', 'nn.tril')\n", (53531, 53555), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((55599, 55661), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.fused_scale_tril"""', '"""nn.fused_scale_tril"""'], {}), "('math.fused_scale_tril', 'nn.fused_scale_tril')\n", (55613, 55661), False, 'from 
oneflow.python.oneflow_export import oneflow_export\n'), ((57139, 57169), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.polyval"""'], {}), "('math.polyval')\n", (57153, 57169), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((58568, 58611), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.in_top_k"""', '"""in_top_k"""'], {}), "('math.in_top_k', 'in_top_k')\n", (58582, 58611), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((60265, 60288), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""range"""'], {}), "('range')\n", (60279, 60288), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((12486, 12515), 'oneflow.math.add_n', 'flow.math.add_n', (['[x, y]', 'name'], {}), '([x, y], name)\n', (12501, 12515), True, 'import oneflow as flow\n'), ((40005, 40032), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (40030, 40032), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((40628, 40659), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (40650, 40659), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((40674, 40710), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (40708, 40710), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((40785, 40821), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['out_lbi'], {}), '(out_lbi)\n', (40812, 40821), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((46801, 46828), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (46826, 46828), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((47125, 47156), 
'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (47147, 47156), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((47172, 47208), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (47206, 47208), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((47283, 47319), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['ret_lbi'], {}), '(ret_lbi)\n', (47310, 47319), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((58446, 58475), 'oneflow.zeros_like', 'flow.zeros_like', (['x'], {'name': 'name'}), '(x, name=name)\n', (58461, 58475), True, 'import oneflow as flow\n'), ((11429, 11460), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarAdd_"""'], {}), "('ScalarAdd_')\n", (11446, 11460), True, 'import oneflow.python.framework.id_util as id_util\n'), ((12611, 12643), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (["(math_op + '_')"], {}), "(math_op + '_')\n", (12628, 12643), True, 'import oneflow.python.framework.id_util as id_util\n'), ((13891, 13922), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarMul_"""'], {}), "('ScalarMul_')\n", (13908, 13922), True, 'import oneflow.python.framework.id_util as id_util\n'), ((27694, 27720), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Cast_"""'], {}), "('Cast_')\n", (27711, 27720), True, 'import oneflow.python.framework.id_util as id_util\n'), ((40510, 40561), 'oneflow_api.deprecated.GetProtoDtype4OfDtype', 'oneflow_api.deprecated.GetProtoDtype4OfDtype', (['dtype'], {}), '(dtype)\n', (40554, 40561), False, 'import oneflow_api\n'), ((42661, 42687), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TopK_"""'], {}), "('TopK_')\n", (42678, 42687), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((42935, 42991), 'oneflow.python.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_axes', 'axis'], {}), '(num_axes, axis)\n', (42975, 42991), False, 'from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim\n'), ((43004, 43065), 'oneflow.transpose', 'flow.transpose', (['input', 'perm', '(False)', '(True)', "(name + '_transpose')"], {}), "(input, perm, False, True, name + '_transpose')\n", (43018, 43065), True, 'import oneflow as flow\n'), ((44678, 44706), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ArgMax_"""'], {}), "('ArgMax_')\n", (44695, 44706), True, 'import oneflow.python.framework.id_util as id_util\n'), ((44944, 45000), 'oneflow.python.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_axes', 'axis'], {}), '(num_axes, axis)\n', (44984, 45000), False, 'from oneflow.python.ops.transpose_util import get_perm_when_transpose_axis_to_last_dim\n'), ((45013, 45074), 'oneflow.transpose', 'flow.transpose', (['input', 'perm', '(False)', '(True)', "(name + '_transpose')"], {}), "(input, perm, False, True, name + '_transpose')\n", (45027, 45074), True, 'import oneflow as flow\n'), ((45128, 45174), 'oneflow.expand_dims', 'flow.expand_dims', (['x', '(-1)', "(name + '_expand_dims')"], {}), "(x, -1, name + '_expand_dims')\n", (45144, 45174), True, 'import oneflow as flow\n'), ((45306, 45348), 'oneflow.squeeze', 'flow.squeeze', (['x', '[axis]', "(name + '_squeeze')"], {}), "(x, [axis], name + '_squeeze')\n", (45318, 45348), True, 'import oneflow as flow\n'), ((46738, 46785), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BroadcastToCompatibleWith_"""'], {}), "('BroadcastToCompatibleWith_')\n", (46755, 46785), True, 'import oneflow.python.framework.id_util as id_util\n'), ((48784, 48817), 
'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ClipByValue_"""'], {}), "('ClipByValue_')\n", (48801, 48817), True, 'import oneflow.python.framework.id_util as id_util\n'), ((52996, 53035), 'oneflow.math.subtract', 'flow.math.subtract', (['x', 'y', 'name_subtract'], {}), '(x, y, name_subtract)\n', (53014, 53035), True, 'import oneflow as flow\n'), ((58176, 58205), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Polyval_"""'], {}), "('Polyval_')\n", (58193, 58205), True, 'import oneflow.python.framework.id_util as id_util\n'), ((58408, 58437), 'oneflow.zeros_like', 'flow.zeros_like', (['x'], {'name': 'name'}), '(x, name=name)\n', (58423, 58437), True, 'import oneflow as flow\n'), ((9260, 9307), 'oneflow.python.ops.math_unary_elementwise_ops.reciprocal_no_nan', 'math_unary_elementwise_ops.reciprocal_no_nan', (['y'], {}), '(y)\n', (9304, 9307), True, 'import oneflow.python.ops.math_unary_elementwise_ops as math_unary_elementwise_ops\n'), ((40117, 40151), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ShapeElemCnt_"""'], {}), "('ShapeElemCnt_')\n", (40134, 40151), True, 'import oneflow.python.framework.id_util as id_util\n'), ((43163, 43186), 'oneflow.python.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (43180, 43186), False, 'from oneflow.python.ops.transpose_util import get_inversed_perm\n'), ((45218, 45241), 'oneflow.python.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (45235, 45241), False, 'from oneflow.python.ops.transpose_util import get_inversed_perm\n'), ((58526, 58550), 'oneflow.math.multiply', 'flow.math.multiply', (['p', 'x'], {}), '(p, x)\n', (58544, 58550), True, 'import oneflow as flow\n'), ((11475, 11501), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (11495, 11501), True, 'import oneflow as flow\n'), ((13937, 13963), 'oneflow.user_op_builder', 
'flow.user_op_builder', (['name'], {}), '(name)\n', (13957, 13963), True, 'import oneflow as flow\n'), ((49252, 49278), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (49272, 49278), True, 'import oneflow as flow\n'), ((49496, 49522), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (49516, 49522), True, 'import oneflow as flow\n'), ((48910, 48936), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (48930, 48936), True, 'import oneflow as flow\n'), ((12665, 12691), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (12685, 12691), True, 'import oneflow as flow\n'), ((16405, 16431), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Gelu_"""'], {}), "('Gelu_')\n", (16422, 16431), True, 'import oneflow.python.framework.id_util as id_util\n'), ((17470, 17496), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Relu_"""'], {}), "('Relu_')\n", (17487, 17496), True, 'import oneflow.python.framework.id_util as id_util\n'), ((18587, 18616), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Sigmoid_"""'], {}), "('Sigmoid_')\n", (18604, 18616), True, 'import oneflow.python.framework.id_util as id_util\n'), ((27743, 27769), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (27763, 27769), True, 'import oneflow as flow\n'), ((43425, 43453), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ArgMax_"""'], {}), "('ArgMax_')\n", (43442, 43453), True, 'import oneflow.python.framework.id_util as id_util\n'), ((3308, 3334), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""AddN_"""'], {}), "('AddN_')\n", (3325, 3334), True, 'import oneflow.python.framework.id_util as id_util\n'), ((12198, 12237), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarAddByTensor_"""'], {}), "('ScalarAddByTensor_')\n", 
(12215, 12237), True, 'import oneflow.python.framework.id_util as id_util\n'), ((13170, 13209), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarSubByTensor_"""'], {}), "('ScalarSubByTensor_')\n", (13187, 13209), True, 'import oneflow.python.framework.id_util as id_util\n'), ((13497, 13533), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ElementWiseMul_"""'], {}), "('ElementWiseMul_')\n", (13514, 13533), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14660, 14699), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarMulByTensor_"""'], {}), "('ScalarMulByTensor_')\n", (14677, 14699), True, 'import oneflow.python.framework.id_util as id_util\n'), ((15100, 15139), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ScalarDivByTensor_"""'], {}), "('ScalarDivByTensor_')\n", (15117, 15139), True, 'import oneflow.python.framework.id_util as id_util\n'), ((19029, 19062), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SigmoidGrad_"""'], {}), "('SigmoidGrad_')\n", (19046, 19062), True, 'import oneflow.python.framework.id_util as id_util\n'), ((53296, 53326), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""GeluGrad_"""'], {}), "('GeluGrad_')\n", (53313, 53326), True, 'import oneflow.python.framework.id_util as id_util\n'), ((36531, 36571), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ElementWiseMinimum_"""'], {}), "('ElementWiseMinimum_')\n", (36548, 36571), True, 'import oneflow.python.framework.id_util as id_util\n'), ((37996, 38036), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ElementWiseMaximum_"""'], {}), "('ElementWiseMaximum_')\n", (38013, 38036), True, 'import oneflow.python.framework.id_util as id_util\n'), ((26405, 26450), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedBatchSegmentSum_"""'], {}), 
"('UnsortedBatchSegmentSum_')\n", (26422, 26450), True, 'import oneflow.python.framework.id_util as id_util\n'), ((41053, 41079), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TopK_"""'], {}), "('TopK_')\n", (41070, 41079), True, 'import oneflow.python.framework.id_util as id_util\n'), ((60003, 60031), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""InTopK_"""'], {}), "('InTopK_')\n", (60020, 60031), True, 'import oneflow.python.framework.id_util as id_util\n'), ((51357, 51390), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""L2Normalize_"""'], {}), "('L2Normalize_')\n", (51374, 51390), True, 'import oneflow.python.framework.id_util as id_util\n'), ((21806, 21846), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedSegmentSum_"""'], {}), "('UnsortedSegmentSum_')\n", (21823, 21846), True, 'import oneflow.python.framework.id_util as id_util\n'), ((24086, 24130), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""UnsortedSegmentSumLike_"""'], {}), "('UnsortedSegmentSumLike_')\n", (24103, 24130), True, 'import oneflow.python.framework.id_util as id_util\n'), ((62483, 62510), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Range_"""'], {}), "('Range_')\n", (62500, 62510), True, 'import oneflow.python.framework.id_util as id_util\n'), ((55207, 55233), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Tril_"""'], {}), "('Tril_')\n", (55224, 55233), True, 'import oneflow.python.framework.id_util as id_util\n'), ((56532, 56568), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""FusedScaleTril_"""'], {}), "('FusedScaleTril_')\n", (56549, 56568), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class Where(Module):
    """Element-wise selection: take entries of ``x`` where ``condition`` is
    nonzero, entries of ``y`` elsewhere.

    Python scalars passed as ``x``/``y`` are promoted to 1-element float32
    tensors on ``condition``'s device, and all three operands are manually
    broadcast to a common shape before invoking the ``flow.F.where`` kernel
    (presumably because the kernel requires already-matching shapes — see the
    explicit ``broadcast_like`` calls below).
    """

    def __init__(self) -> None:
        super().__init__()

    def forward(self, condition, x, y):
        # The selection mask must be an integer tensor (int32 or int8).
        assert condition.dtype == flow.int32 or condition.dtype == flow.int8
        # Promote scalar operands to 1-element float32 tensors on the same
        # device as the condition.
        if isinstance(x, int) or isinstance(x, float):
            x = flow.Tensor(
                [float(x)],
                dtype=flow.float32,
                device=flow.device(condition.device.type),
            )
        if isinstance(y, int) or isinstance(y, float):
            y = flow.Tensor(
                [float(y)],
                dtype=flow.float32,
                device=flow.device(condition.device.type),
            )
        # All three operands must live on the same device type.
        assert (
            condition.device.type == x.device.type
            and condition.device.type == y.device.type
        )
        broadcast_cond = condition
        broadcast_x = x
        broadcast_y = y
        # Compute the per-axis broadcast target shape (max over operands) and
        # record, per operand, the axes that need expanding.
        # NOTE(review): this loop assumes condition, x and y all have the same
        # number of dimensions — confirm callers guarantee equal rank.
        broadcast_like_shape = []
        broadcast_condition_axes = []
        broadcast_x_axes = []
        broadcast_y_axes = []
        for i in range(len(x.shape)):
            max_dim = max(x.shape[i], max(y.shape[i], condition.shape[i]))
            broadcast_like_shape.append(max_dim)
            if max_dim != condition.shape[i]:
                broadcast_condition_axes.append(i)
            if max_dim != x.shape[i]:
                broadcast_x_axes.append(i)
            if max_dim != y.shape[i]:
                broadcast_y_axes.append(i)
        # Reference tensor with the fully-broadcast shape; only used as the
        # "like" argument of broadcast_like below.
        broadcast_like_tensor = flow.experimental.zeros(
            tuple(broadcast_like_shape), dtype=flow.float32
        )
        broadcast_like_tensor = broadcast_like_tensor.to(x.device.type)
        broadcast_like_tensor.requires_grad = x.requires_grad or y.requires_grad
        if len(broadcast_condition_axes) != 0:
            # Round-trip through float32 — broadcast_like appears to need a
            # float input; the mask is cast back to int32 afterwards.
            condition = flow.experimental.cast(condition, flow.float32)
            broadcast_cond = flow.experimental.broadcast_like(
                condition, broadcast_like_tensor, tuple(broadcast_condition_axes)
            )
            broadcast_cond = flow.experimental.cast(broadcast_cond, flow.int32)
        if len(broadcast_x_axes) != 0:
            broadcast_x = flow.experimental.broadcast_like(
                x, broadcast_like_tensor, broadcast_axes=tuple(broadcast_x_axes)
            )
        if len(broadcast_y_axes) != 0:
            broadcast_y = flow.experimental.broadcast_like(
                y, broadcast_like_tensor, broadcast_axes=tuple(broadcast_y_axes)
            )
        return flow.F.where(broadcast_cond, broadcast_x, broadcast_y)
@oneflow_export("where")
@register_tensor_op("where")
@experimental_api
def where_op(condition, x, y):
    """Select elements from either :attr:`x` or :attr:`y` according to
    :attr:`condition`.

    Wherever an entry of :attr:`condition` is nonzero the corresponding entry
    of :attr:`x` is taken; otherwise the entry of :attr:`y` is taken.

    .. note::
        The tensors :attr:`condition`, :attr:`x`, :attr:`y` must be
        broadcastable with each other.

    Args:
        condition (IntTensor): When 1 (nonzero), yield x, otherwise yield y
        x (Tensor or Scalar): value (if :attr:`x` is a scalar) or values
            selected at indices where :attr:`condition` is True
        y (Tensor or Scalar): value (if :attr:`y` is a scalar) or values
            selected at indices where :attr:`condition` is False

    Returns:
        Tensor: A tensor whose shape is the broadcast of :attr:`condition`,
        :attr:`x` and :attr:`y`

    For example:

    .. code-block:: python

        >>> import numpy as np
        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> x = flow.Tensor(
        ...     np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]),
        ...     dtype=flow.float32,
        ... )
        >>> y = flow.Tensor(np.ones(shape=(3, 2)), dtype=flow.float32)
        >>> condition = flow.Tensor(np.array([[0, 1], [1, 0], [1, 0]]), dtype=flow.int32)
        >>> out = condition.where(x, y).numpy()
        >>> print(out)
        [[1.     0.3139]
         [0.3898 1.    ]
         [0.0478 1.    ]]

    """
    selector = Where()
    return selector(condition, x, y)
# Run the doctests embedded in this module's docstrings (e.g. where_op's
# example) when the file is executed directly; raise_on_error aborts with a
# traceback on the first failing example instead of summarizing at the end.
if __name__ == "__main__":
    import doctest
    doctest.testmod(raise_on_error=True)
| [
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.device",
"oneflow.experimental.cast",
"oneflow.F.where",
"oneflow.python.oneflow_export.oneflow_export"
] | [((3313, 3336), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""where"""'], {}), "('where')\n", (3327, 3336), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((3338, 3365), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""where"""'], {}), "('where')\n", (3356, 3365), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((5066, 5102), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (5081, 5102), False, 'import doctest\n'), ((3255, 3309), 'oneflow.F.where', 'flow.F.where', (['broadcast_cond', 'broadcast_x', 'broadcast_y'], {}), '(broadcast_cond, broadcast_x, broadcast_y)\n', (3267, 3309), True, 'import oneflow as flow\n'), ((2562, 2609), 'oneflow.experimental.cast', 'flow.experimental.cast', (['condition', 'flow.float32'], {}), '(condition, flow.float32)\n', (2584, 2609), True, 'import oneflow as flow\n'), ((2798, 2848), 'oneflow.experimental.cast', 'flow.experimental.cast', (['broadcast_cond', 'flow.int32'], {}), '(broadcast_cond, flow.int32)\n', (2820, 2848), True, 'import oneflow as flow\n'), ((1166, 1200), 'oneflow.device', 'flow.device', (['condition.device.type'], {}), '(condition.device.type)\n', (1177, 1200), True, 'import oneflow as flow\n'), ((1387, 1421), 'oneflow.device', 'flow.device', (['condition.device.type'], {}), '(condition.device.type)\n', (1398, 1421), True, 'import oneflow as flow\n')] |
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.config import configurable
from libai.layers import (
Embedding,
LayerNorm,
Linear,
LMLogits,
ParallelCrossEntropyLoss,
TransformerLayer,
VocabEmbedding,
build_activation,
)
from libai.utils import distributed as dist
from .utils import init_method_normal, scaled_init_method_normal
class BertExtendedAttnMask(nn.Module):
    """Expand a 2D padding mask of shape ``[batch, seq]`` into the 4D boolean
    attention mask ``[batch, 1, seq, seq]`` consumed by attention layers."""

    def forward(self, attention_mask):
        # Outer product of the per-token mask with itself:
        # [b, 1, s] * [b, s, 1] -> [b, s, s]. Entry (i, j) is nonzero only
        # when neither token i nor token j is masked out.
        pairwise_mask = attention_mask.unsqueeze(1) * attention_mask.unsqueeze(2)
        # Insert the broadcastable head axis ([b, 1, s, s]) and binarize.
        return pairwise_mask.unsqueeze(1) > 0.5
class BertEmbeddings(nn.Module):
    """Sum of word, position and (optional) token-type embeddings, followed by
    dropout.

    Default index tensors (``position_ids`` and, when enabled,
    ``tokentype_ids``) are precomputed so callers may omit the corresponding
    forward arguments.

    Args:
        vocab_size: size of the vocabulary table.
        hidden_size: embedding feature dimension.
        max_sequence_length: longest supported sequence; sizes the position table.
        embedding_dropout_prob: dropout probability applied to the summed embeddings.
        num_tokentypes: number of segment types; 0 disables token-type embeddings.
        init_method: weight initializer for the embedding tables.
        amp_enabled: whether to keep embedding weights in fp16 (forwarded to the
            embedding layers).
    """

    def __init__(
        self,
        vocab_size,
        hidden_size,
        max_sequence_length,
        embedding_dropout_prob,
        num_tokentypes=0,
        init_method=nn.init.xavier_normal_,
        amp_enabled=False,
    ):
        super().__init__()
        self.vocab_embeddings = VocabEmbedding(
            vocab_size, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        self.position_embeddings = Embedding(
            max_sequence_length, hidden_size, init_method=init_method, amp_enabled=amp_enabled
        )
        # NOTE(l1aoxingyu): Set position_ids sbp sign to [B, B] initially, because position_ids is a
        # 1D-tensor from 0 to seq_length, if set to [S(0), B] at first, then position_ids
        # will split at the first dim of hierarchy.
        self.position_ids = flow.arange(
            max_sequence_length,
            dtype=flow.long,
            sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
            placement=dist.get_layer_placement(0),
        ).unsqueeze(0)
        if num_tokentypes > 0:
            self.tokentype_embeddings = Embedding(
                num_tokentypes, hidden_size, init_method=init_method, amp_enabled=amp_enabled
            )
            # Default segment ids: all zeros, same layout as position_ids.
            self.tokentype_ids = flow.zeros(
                self.position_ids.size(),
                dtype=flow.long,
                sbp=self.position_ids.sbp,
                placement=self.position_ids.placement,
            )
        else:
            self.tokentype_embeddings = None
        self.embedding_dropout = nn.Dropout(embedding_dropout_prob)

    def forward(self, input_ids, tokentype_ids=None, position_ids=None):
        seq_length = input_ids.size()[1]
        word_embeddings = self.vocab_embeddings(input_ids)
        if position_ids is None:
            # Change position_ids sbp sign: [B, B] -> [S(0), B]
            position_ids = (
                self.position_ids[:, :seq_length].expand_as(input_ids).to_global(sbp=input_ids.sbp)
            )
        position_embeddings = self.position_embeddings(position_ids)
        embeddings = word_embeddings + position_embeddings
        if self.tokentype_embeddings is not None:
            if tokentype_ids is None:
                # Fall back to the all-zero default segment ids, trimmed to
                # the current sequence length and matching input_ids' layout.
                tokentype_ids = (
                    self.tokentype_ids[:, :seq_length]
                    .expand_as(input_ids)
                    .to_global(sbp=input_ids.sbp)
                )
            embeddings = embeddings + self.tokentype_embeddings(tokentype_ids)
        embeddings = self.embedding_dropout(embeddings)
        return embeddings

    def word_embeddings(self):
        # Expose the vocabulary embedding weight matrix (callers pass it to
        # the LM head for output-weight tying).
        return self.vocab_embeddings.weight
class BertLMPredictionHead(nn.Module):
    """Transform applied to encoder output before the LM logits:
    dense projection -> gelu -> layernorm.

    Args:
        hidden_size: hidden state feature dimension.
        init_method: weight initializer for the dense projection.
    """

    def __init__(self, hidden_size, init_method):
        super().__init__()
        self.dense = Linear(
            hidden_size,
            hidden_size,
            bias=True,
            parallel="col",
            init_method=init_method,
            layer_idx=-1,
        )
        self.activation_func = build_activation("gelu")
        self.layernorm = LayerNorm((hidden_size,), layer_idx=-1)

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.activation_func(hidden_states)
        # Set the gradient sbp sign for the backward pass of the activation.
        hidden_states = hidden_states.to_global(
            grad_sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.split(2)])
        )
        # NOTE(l1aoxingyu): hidden_states shape is [B, S, H] whose sbp sign: [S(0), S(2)]
        # Change from [S(0), S(2)] -> [S(0), B] because layernorm cannot get inputs with sbp S(2)
        hidden_states = hidden_states.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.split(0), flow.sbp.broadcast])
        )
        hidden_states = self.layernorm(hidden_states)
        return hidden_states
class BertPooler(nn.Module):
    """Pool a sequence of hidden states into one vector per example.

    The hidden state of the first ([CLS]) token is passed through a dense
    projection followed by a tanh activation.

    Args:
        hidden_size: hidden state feature dimension
    """

    def __init__(self, hidden_size, init_method):
        super().__init__()
        self.dense = Linear(
            hidden_size,
            hidden_size,
            bias=True,
            parallel="col",
            init_method=init_method,
            layer_idx=-1,
        )
        self.activation_func = build_activation("tanh")

    def forward(self, hidden_states):
        """Project and squash the first-position ([CLS]) hidden state."""
        # hidden_states: [bsz, seq_len, hidden_size] -> keep position 0 only.
        cls_state = hidden_states[:, 0, :]
        return self.activation_func(self.dense(cls_state))
class BertLoss(nn.Module):
    """Pre-training loss: masked-LM cross-entropy plus, optionally, the binary
    sentence-order classification loss.

    Returns a dict with key ``"lm_loss"`` and, when ``add_binary_head`` is
    set, ``"sop_loss"``.
    """

    def __init__(self, add_binary_head):
        super().__init__()
        self.add_binary_head = add_binary_head
        self.lm_loss = ParallelCrossEntropyLoss()

    def forward(self, lm_output, lm_labels, loss_mask, binary_logits, ns_labels):
        lm_loss = self.lm_loss(lm_output, lm_labels)
        loss_mask = loss_mask.float()
        # Change loss_mask.sum() sbp sign from [P, B] -> [B, B]
        # because (lm_loss * loss_mask) / loss_mask.sum() cannot accept P / P
        denominator = (
            loss_mask.sum().to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
            + 1e-7
        )
        # Average the LM loss over un-masked tokens only; 1e-7 above guards
        # against a zero denominator when no token is selected.
        masked_lm_loss = flow.sum(lm_loss.view(-1) * loss_mask.view(-1)) / denominator
        # NOTE(l1aoxingyu): Change lm loss sbp sign [P, P] -> [P, B] to add with sop loss
        # whose sbp sign: [P, B]
        masked_lm_loss = masked_lm_loss.to_global(
            sbp=dist.get_nd_sbp([flow.sbp.partial_sum, flow.sbp.broadcast])
        )
        loss_dict = {"lm_loss": masked_lm_loss}
        if self.add_binary_head:
            # Sentence-order prediction: 2-way classification; label -1 marks
            # examples to ignore.
            sop_loss = flow._C.cross_entropy(
                binary_logits, ns_labels, ignore_index=-1, reduction="none"
            ).mean()
            loss_dict["sop_loss"] = sop_loss
        return loss_dict
class BertModel(nn.Module):
    """The bare Bert Model transformer outputting raw hidden-states without
    any specific head on top.

    Args:
        vocab_size (int): The size of vocabulary file.
        hidden_size (int): The size of hidden states.
        hidden_layers (int): The number of ``TransformerLayer`` in encoder.
        num_attention_heads (int):
            The number of attention heads for each attention layer of ``TransformerLayer``.
        intermediate_size (int):
            The size of intermediate layer in feed-forward network for each ``TransformerLayer``.
        hidden_dropout_prob (float, optional):
            The dropout ratio for the output for each TransformerLayer. Defaults to 0.0.
        attention_probs_dropout_prob (float, optional):
            The dropout ratio for the output of each attention layer in ``TransformerLayer``.
            Defaults to 0.0.
        max_position_embeddings (int):
            Max sequence length of input, defines the shape of Position Embeddings
            in ``BertEmbedding``.
        num_tokentypes (int, optional):
            Number of segment token indices. Defaults to 2.
        add_pooling_layer (bool, optional):
            Whether or not averaging or pooling the sequence of hidden-states for the
            whole input sequence. Defaults to ``True``.
        initializer_range (float, optional):
            Sigma of the normal distribution in the initialization method. Defaults to 0.02.
        layernorm_eps (float, optional):
            The epsilon of LayerNorm layer. Defaults to 1e-12.
        bias_gelu_fusion (bool, optional):
            Whether or not to fuse the computing of bias and gelu. Defaults to ``False``.
        bias_dropout_fusion (bool, optional):
            Whether or not to fuse the computing of dropout and bias. Defaults to ``False``.
        scale_mask_softmax_fusion (bool, optional):
            Whether to fuse the computing of mask and softmax in attention layers.
            Defaults to ``False``.
        apply_query_key_layer_scaling (bool, optional):
            Whether or not to use layer index related scaling in computing attention scores.
            If ``True``, the scaling factor equals to sqrt(d) * (layer_index + 1).
            Defaults to ``True``.
        apply_residual_post_layernorm (bool, optional):
            If set ``True``, use original BERT residual connection ordering otherwise use Megatron
            BERT residual connection which is more stable when scaling model size introduced in
            https://arxiv.org/pdf/1909.08053.pdf.
            Default: ``False``.
        amp_enabled (bool, optional):
            Whether or not to set fp16 for embedding weight in Bert model. Defaults to ``False``.
    """

    @configurable
    def __init__(
        self,
        vocab_size,
        hidden_size,
        hidden_layers,
        num_attention_heads,
        intermediate_size,
        hidden_dropout_prob,
        attention_probs_dropout_prob,
        max_position_embeddings,
        num_tokentypes=2,
        add_pooling_layer=True,
        initializer_range=0.02,
        layernorm_eps=1e-12,
        bias_gelu_fusion=True,
        bias_dropout_fusion=True,
        scale_mask_softmax_fusion=True,
        apply_query_key_layer_scaling=True,
        apply_residual_post_layernorm=False,
        amp_enabled=False,
    ):
        super().__init__()
        init_method = init_method_normal(initializer_range)
        # Output-layer weights get the depth-scaled initializer to keep
        # residual-branch variance under control as hidden_layers grows.
        scaled_init_method = scaled_init_method_normal(initializer_range, hidden_layers)
        # Embeddings
        self.embeddings = BertEmbeddings(
            vocab_size,
            hidden_size,
            max_position_embeddings,
            hidden_dropout_prob,
            num_tokentypes,
            init_method,
            amp_enabled,
        )
        # Mask generation
        self.extended_attn_mask = BertExtendedAttnMask()
        # Encoders
        self.encoders = nn.ModuleList(
            [
                TransformerLayer(
                    hidden_size,
                    intermediate_size,
                    num_attention_heads,
                    attention_dropout_prob=attention_probs_dropout_prob,
                    output_dropout_prob=hidden_dropout_prob,
                    layernorm_epsilon=layernorm_eps,
                    bias_gelu_fusion=bias_gelu_fusion,
                    bias_dropout_fusion=bias_dropout_fusion,
                    scale_mask_softmax_fusion=scale_mask_softmax_fusion,
                    apply_query_key_layer_scaling=apply_query_key_layer_scaling,
                    init_method=init_method,
                    output_layer_init_method=scaled_init_method,
                    apply_residual_post_layernorm=apply_residual_post_layernorm,
                    layer_idx=i,
                )
                for i in range(hidden_layers)
            ]
        )
        self.final_layernorm = LayerNorm((hidden_size,), eps=layernorm_eps, layer_idx=-1)
        self.pooler = BertPooler(hidden_size, init_method) if add_pooling_layer else None

    @classmethod
    def from_config(cls, cfg):
        # Map config fields onto constructor keyword arguments (presumably
        # consumed by the @configurable decorator — confirm against libai.config).
        return {
            "vocab_size": cfg.vocab_size,
            "hidden_size": cfg.hidden_size,
            "hidden_layers": cfg.hidden_layers,
            "num_attention_heads": cfg.num_attention_heads,
            "intermediate_size": cfg.intermediate_size,
            "hidden_dropout_prob": cfg.hidden_dropout_prob,
            "attention_probs_dropout_prob": cfg.attention_probs_dropout_prob,
            "max_position_embeddings": cfg.max_position_embeddings,
            "num_tokentypes": cfg.num_tokentypes,
            "add_pooling_layer": cfg.add_pooling_layer,
            "initializer_range": cfg.initializer_range,
            "layernorm_eps": cfg.layernorm_eps,
            "bias_gelu_fusion": cfg.bias_gelu_fusion,
            "bias_dropout_fusion": cfg.bias_dropout_fusion,
            "scale_mask_softmax_fusion": cfg.scale_mask_softmax_fusion,
            "apply_query_key_layer_scaling": cfg.apply_query_key_layer_scaling,
            "apply_residual_post_layernorm": cfg.apply_residual_post_layernorm,
            "amp_enabled": cfg.amp_enabled,
        }

    def forward(self, input_ids, attention_mask, tokentype_ids=None):
        """
        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.
            attention_mask (flow.LongTensor): Mask to avoid performing attention
                on padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first and
                second portions of the inputs. Indices are selected in `[0, 1]`. Defaults to None.
        """
        extended_attention_mask = self.extended_attn_mask(attention_mask)
        embedding_output = self.embeddings(input_ids, tokentype_ids)
        hidden_states = embedding_output
        for layer in self.encoders:
            hidden_states = layer(hidden_states, extended_attention_mask)
        encoder_output = self.final_layernorm(hidden_states)
        # pooled_output is None when the model was built without a pooler.
        pooled_output = self.pooler(encoder_output) if self.pooler is not None else None
        return encoder_output, pooled_output

    def word_embeddings_weight(self):
        # Vocabulary embedding weight, exposed for LM output-weight tying.
        return self.embeddings.word_embeddings()
class BertPreTrainingHeads(nn.Module):
    """Pre-training output heads: the masked-LM transform + tied-weight logits
    plus the 2-way sentence-relationship classifier, with loss computation.

    When ``lm_labels`` is provided, ``forward`` returns the loss dict from
    ``BertLoss``; otherwise it returns the raw scores.
    """

    def __init__(self, vocab_size, hidden_size, init_method, add_binary_head=True):
        super().__init__()
        self.predictions = BertLMPredictionHead(hidden_size, init_method)
        self.seq_relationship = Linear(
            hidden_size,
            2,
            bias=True,
            parallel="data",
            init_method=init_method,
            layer_idx=-1,
        )
        self.lm_logits = LMLogits(vocab_size, bias=True)
        self.loss_func = BertLoss(add_binary_head)

    def forward(
        self,
        sequence_output,
        pooled_output,
        word_embeddings_weight,
        ns_labels,
        lm_labels,
        loss_mask,
    ):
        transformed = self.predictions(sequence_output)
        binary_scores = self.seq_relationship(pooled_output)
        # LM logits share weights with the input word embeddings.
        lm_scores = self.lm_logits(transformed, word_embeddings_weight)
        if lm_labels is None:
            # Inference path: hand back the raw scores.
            return {
                "prediction_scores": lm_scores,
                "seq_relationship_score": binary_scores,
            }
        # Training path: compute the combined loss dict.
        return self.loss_func(lm_scores, lm_labels, loss_mask, binary_scores, ns_labels)
class BertForPreTraining(nn.Module):
    """Bert Model with two heads on top as done during the pretraining: a
    `masked language modeling` head and a `next sentence prediction (classification)` head.
    """

    def __init__(self, cfg):
        super().__init__()
        self.bert = BertModel(cfg)
        self.cls_head = BertPreTrainingHeads(
            cfg.vocab_size,
            cfg.hidden_size,
            init_method_normal(cfg.initializer_range),
            cfg.add_binary_head,
        )

    def forward(
        self,
        input_ids,
        attention_mask,
        tokentype_ids=None,
        ns_labels=None,
        lm_labels=None,
        loss_mask=None,
    ):
        """
        Args:
            input_ids (flow.LongTensor): Indices of input sequence tokens in vocabulary.
            attention_mask (flow.LongTensor): Mask to avoid performing attention on
                padding token indices. Mask values selected in `[0, 1]`:
                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.
            tokentype_ids (flow.LongTensor, optional): Segment token indices to indicate first
                and second portions of the inputs. Indices are selected in `[0, 1]`.
                Defaults to None.
            ns_labels (flow.LongTensor, optional): Labels for computing the next sequence prediction
                (classification) loss. Input should be a sequence pair (see `input_ids` docstring).
                Indices should be in `[0, 1]`:
                - 0 indicates sequence B is a continuation of sequence A,
                - 1 indicates sequence B is a random sequence.
            lm_labels (flow.LongTensor, optional): Labels for computing the masked
                language modeling loss. Indices should be in `[-1, 0, ..., config.vocab_size]`.
            loss_mask (flow.LongTensor, optional): Mask to avoid performing loss computing
                on ignored tokens. Tokens with indices set to `-1` are ignored (masked), the
                loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.bert(input_ids, attention_mask, tokentype_ids)
        sequence_output, pooled_output = outputs[:2]
        # The encoder's word embedding weight is passed through for LM
        # output-weight tying in the prediction head.
        return self.cls_head(
            sequence_output,
            pooled_output,
            self.bert.word_embeddings_weight(),
            ns_labels,
            lm_labels,
            loss_mask,
        )

    @staticmethod
    def set_pipeline_stage_id(model):
        """Assign pipeline-parallel stage ids to ``model``'s submodules based
        on their role: embeddings/mask first, transformer layers by their
        ``layer_idx``, pooler and heads on the last stage."""
        dist_utils = dist.get_dist_util()
        # Set pipeline parallelism stage_id
        for module_block in model.modules():
            # module.origin can get the original module
            if isinstance(module_block.origin, BertEmbeddings):
                module_block.config.stage_id = dist_utils.get_layer_stage_id(0)
            elif isinstance(module_block.origin, BertExtendedAttnMask):
                module_block.config.stage_id = dist_utils.get_layer_stage_id(0)
            elif isinstance(module_block.origin, TransformerLayer):
                module_block.config.stage_id = dist_utils.get_layer_stage_id(module_block.layer_idx)
            elif isinstance(module_block.origin, BertPooler):
                module_block.config.stage_id = dist_utils.get_layer_stage_id(-1)
            elif isinstance(module_block.origin, BertPreTrainingHeads):
                module_block.config.stage_id = dist_utils.get_layer_stage_id(-1)
        # Set the last layernorm stage id
        model.bert.final_layernorm.config.stage_id = dist_utils.get_layer_stage_id(-1)
| [
"oneflow._C.cross_entropy",
"oneflow.sbp.split",
"oneflow.nn.Dropout"
] | [((1950, 2043), 'libai.layers.VocabEmbedding', 'VocabEmbedding', (['vocab_size', 'hidden_size'], {'init_method': 'init_method', 'amp_enabled': 'amp_enabled'}), '(vocab_size, hidden_size, init_method=init_method,\n amp_enabled=amp_enabled)\n', (1964, 2043), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((2097, 2194), 'libai.layers.Embedding', 'Embedding', (['max_sequence_length', 'hidden_size'], {'init_method': 'init_method', 'amp_enabled': 'amp_enabled'}), '(max_sequence_length, hidden_size, init_method=init_method,\n amp_enabled=amp_enabled)\n', (2106, 2194), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((3225, 3259), 'oneflow.nn.Dropout', 'nn.Dropout', (['embedding_dropout_prob'], {}), '(embedding_dropout_prob)\n', (3235, 3259), False, 'from oneflow import nn\n'), ((4468, 4571), 'libai.layers.Linear', 'Linear', (['hidden_size', 'hidden_size'], {'bias': '(True)', 'parallel': '"""col"""', 'init_method': 'init_method', 'layer_idx': '(-1)'}), "(hidden_size, hidden_size, bias=True, parallel='col', init_method=\n init_method, layer_idx=-1)\n", (4474, 4571), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((4681, 4705), 'libai.layers.build_activation', 'build_activation', (['"""gelu"""'], {}), "('gelu')\n", (4697, 4705), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((4731, 4770), 'libai.layers.LayerNorm', 'LayerNorm', (['(hidden_size,)'], {'layer_idx': '(-1)'}), '((hidden_size,), layer_idx=-1)\n', (4740, 4770), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, 
VocabEmbedding, build_activation\n'), ((5781, 5884), 'libai.layers.Linear', 'Linear', (['hidden_size', 'hidden_size'], {'bias': '(True)', 'parallel': '"""col"""', 'init_method': 'init_method', 'layer_idx': '(-1)'}), "(hidden_size, hidden_size, bias=True, parallel='col', init_method=\n init_method, layer_idx=-1)\n", (5787, 5884), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((5994, 6018), 'libai.layers.build_activation', 'build_activation', (['"""tanh"""'], {}), "('tanh')\n", (6010, 6018), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((6587, 6613), 'libai.layers.ParallelCrossEntropyLoss', 'ParallelCrossEntropyLoss', ([], {}), '()\n', (6611, 6613), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((12676, 12734), 'libai.layers.LayerNorm', 'LayerNorm', (['(hidden_size,)'], {'eps': 'layernorm_eps', 'layer_idx': '(-1)'}), '((hidden_size,), eps=layernorm_eps, layer_idx=-1)\n', (12685, 12734), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((15462, 15555), 'libai.layers.Linear', 'Linear', (['hidden_size', '(2)'], {'bias': '(True)', 'parallel': '"""data"""', 'init_method': 'init_method', 'layer_idx': '(-1)'}), "(hidden_size, 2, bias=True, parallel='data', init_method=init_method,\n layer_idx=-1)\n", (15468, 15555), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((15660, 15691), 'libai.layers.LMLogits', 'LMLogits', (['vocab_size'], {'bias': '(True)'}), '(vocab_size, bias=True)\n', (15668, 15691), False, 'from libai.layers import Embedding, 
LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((19014, 19034), 'libai.utils.distributed.get_dist_util', 'dist.get_dist_util', ([], {}), '()\n', (19032, 19034), True, 'from libai.utils import distributed as dist\n'), ((2781, 2874), 'libai.layers.Embedding', 'Embedding', (['num_tokentypes', 'hidden_size'], {'init_method': 'init_method', 'amp_enabled': 'amp_enabled'}), '(num_tokentypes, hidden_size, init_method=init_method, amp_enabled\n =amp_enabled)\n', (2790, 2874), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((7361, 7420), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.partial_sum, flow.sbp.broadcast]'], {}), '([flow.sbp.partial_sum, flow.sbp.broadcast])\n', (7376, 7420), True, 'from libai.utils import distributed as dist\n'), ((11745, 12311), 'libai.layers.TransformerLayer', 'TransformerLayer', (['hidden_size', 'intermediate_size', 'num_attention_heads'], {'attention_dropout_prob': 'attention_probs_dropout_prob', 'output_dropout_prob': 'hidden_dropout_prob', 'layernorm_epsilon': 'layernorm_eps', 'bias_gelu_fusion': 'bias_gelu_fusion', 'bias_dropout_fusion': 'bias_dropout_fusion', 'scale_mask_softmax_fusion': 'scale_mask_softmax_fusion', 'apply_query_key_layer_scaling': 'apply_query_key_layer_scaling', 'init_method': 'init_method', 'output_layer_init_method': 'scaled_init_method', 'apply_residual_post_layernorm': 'apply_residual_post_layernorm', 'layer_idx': 'i'}), '(hidden_size, intermediate_size, num_attention_heads,\n attention_dropout_prob=attention_probs_dropout_prob,\n output_dropout_prob=hidden_dropout_prob, layernorm_epsilon=\n layernorm_eps, bias_gelu_fusion=bias_gelu_fusion, bias_dropout_fusion=\n bias_dropout_fusion, scale_mask_softmax_fusion=\n scale_mask_softmax_fusion, apply_query_key_layer_scaling=\n apply_query_key_layer_scaling, 
init_method=init_method,\n output_layer_init_method=scaled_init_method,\n apply_residual_post_layernorm=apply_residual_post_layernorm, layer_idx=i)\n', (11761, 12311), False, 'from libai.layers import Embedding, LayerNorm, Linear, LMLogits, ParallelCrossEntropyLoss, TransformerLayer, VocabEmbedding, build_activation\n'), ((6996, 7053), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (7011, 7053), True, 'from libai.utils import distributed as dist\n'), ((7537, 7624), 'oneflow._C.cross_entropy', 'flow._C.cross_entropy', (['binary_logits', 'ns_labels'], {'ignore_index': '(-1)', 'reduction': '"""none"""'}), "(binary_logits, ns_labels, ignore_index=-1, reduction=\n 'none')\n", (7558, 7624), True, 'import oneflow as flow\n'), ((2576, 2633), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (2591, 2633), True, 'from libai.utils import distributed as dist\n'), ((2657, 2684), 'libai.utils.distributed.get_layer_placement', 'dist.get_layer_placement', (['(0)'], {}), '(0)\n', (2681, 2684), True, 'from libai.utils import distributed as dist\n'), ((5007, 5024), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5021, 5024), True, 'import oneflow as flow\n'), ((5026, 5043), 'oneflow.sbp.split', 'flow.sbp.split', (['(2)'], {}), '(2)\n', (5040, 5043), True, 'import oneflow as flow\n'), ((5327, 5344), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5341, 5344), True, 'import oneflow as flow\n')] |
import oneflow as flow
def GANloss(dis_output, t_real, gan_mode="hinge", dis_update=True):
    """Hinge GAN loss.

    Args:
        dis_output: discriminator output logits.
        t_real: whether ``dis_output`` came from real samples (only
            meaningful when ``dis_update`` is True).
        gan_mode: only ``"hinge"`` is supported.
        dis_update: True for the discriminator loss, False for the
            generator loss.

    Returns:
        A scalar loss blob.
    """
    assert gan_mode == "hinge"
    if not dis_update:
        # Generator objective: maximize D(G(z)) -> loss = -mean(D(G(z)))
        return flow.math.negative(flow.math.reduce_mean(dis_output))
    # Discriminator hinge terms:
    #   real: -mean(min(D(x) - 1, 0))
    #   fake: -mean(min(-D(G(z)) - 1, 0))
    signed = dis_output if t_real else flow.math.negative(dis_output)
    clipped = flow.math.minimum(
        signed - flow.ones_like(dis_output),
        flow.zeros_like(dis_output),
    )
    return flow.math.negative(flow.math.reduce_mean(clipped))
def image_recon_loss(gen_recon, src_content):
    # L1 reconstruction loss between the generated image and its source.
    # NOTE(review): flow.nn.L1Loss is conventionally a Module that is
    # instantiated first and then called on (input, target); calling it
    # directly with two blobs looks suspicious — confirm against the
    # oneflow version this targets.
    return flow.nn.L1Loss(gen_recon, src_content)
def feature_matching_loss(fake_features_trans, real_features_style):
    # L1 distance between discriminator features of translated (fake) and
    # style (real) images — the classic feature-matching GAN regularizer.
    # NOTE(review): same nn.L1Loss module-vs-functional concern as
    # image_recon_loss above — confirm.
    return flow.nn.L1Loss(fake_features_trans, real_features_style)
| [
"oneflow.math.reduce_mean",
"oneflow.ones_like",
"oneflow.math.negative",
"oneflow.zeros_like",
"oneflow.nn.L1Loss"
] | [((796, 834), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['gen_recon', 'src_content'], {}), '(gen_recon, src_content)\n', (810, 834), True, 'import oneflow as flow\n'), ((916, 972), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['fake_features_trans', 'real_features_style'], {}), '(fake_features_trans, real_features_style)\n', (930, 972), True, 'import oneflow as flow\n'), ((682, 715), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['dis_output'], {}), '(dis_output)\n', (703, 715), True, 'import oneflow as flow\n'), ((274, 301), 'oneflow.zeros_like', 'flow.zeros_like', (['dis_output'], {}), '(dis_output)\n', (289, 301), True, 'import oneflow as flow\n'), ((354, 381), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['loss'], {}), '(loss)\n', (375, 381), True, 'import oneflow as flow\n'), ((529, 556), 'oneflow.zeros_like', 'flow.zeros_like', (['dis_output'], {}), '(dis_output)\n', (544, 556), True, 'import oneflow as flow\n'), ((609, 636), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['loss'], {}), '(loss)\n', (630, 636), True, 'import oneflow as flow\n'), ((229, 255), 'oneflow.ones_like', 'flow.ones_like', (['dis_output'], {}), '(dis_output)\n', (243, 255), True, 'import oneflow as flow\n'), ((451, 481), 'oneflow.math.negative', 'flow.math.negative', (['dis_output'], {}), '(dis_output)\n', (469, 481), True, 'import oneflow as flow\n'), ((484, 510), 'oneflow.ones_like', 'flow.ones_like', (['dis_output'], {}), '(dis_output)\n', (498, 510), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
from collections import OrderedDict
from functools import partial
from typing import Dict, Optional, Union, List
from google.protobuf import text_format
import oneflow
import oneflow._oneflow_internal
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.framework.session_context as session_ctx
from oneflow.amp import GradScaler, StaticGradScaler
from oneflow.env import get_rank
from oneflow.framework.multi_client_session import MultiClientSession
from oneflow.framework.tensor import Tensor, TensorTuple
from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple
from oneflow.nn.graph.block import Block, BlockType, get_block_cls
from oneflow.nn.graph.graph_config import GraphConfig
from oneflow.nn.graph.optimizer import OptDict, VariableConfig
from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg
from oneflow.nn.module import Module
from oneflow.nn.optimizer.lr_scheduler import LrScheduler
from oneflow.nn.optimizer.optimizer import Optimizer
from oneflow.nn.optimizer.sparse_optimizer import SparseOptimizer
class Graph(object):
r"""Base class for training or evaluating a neural network in graph mode.
To use graph mode for model training or evaluation in OneFlow, you should:
1. Define your customized graph as a subclass of ``nn.Graph``.
2. Add ``super().__init__()`` in your subclass's ``__init__()``.
3. Add modules to your graph as regular attributes.
4. Define computation logical in ``build()`` method.
5. Instantiate your graph then call it.
.. code-block:: python
>>> import oneflow as flow
>>> class LinearGraph(flow.nn.Graph):
... def __init__(self):
... super().__init__()
... # Add a module to the graph.
... self.linear = flow.nn.Linear(3, 8, False)
... def build(self, x):
... # Use the module to build the computation logic of the graph.
... return self.linear(x)
# Instantiate the graph
>>> linear_graph = LinearGraph()
>>> x = flow.randn(4, 3)
# First call on graph will run graph's build() method to
# trace a computatioin graph. Then the computation graph will be
# optimized and executed for the first time.
>>> linear_graph(x).shape
oneflow.Size([4, 8])
# Later call on graph will execute the computation graph directly.
>>> linear_graph(x).shape
oneflow.Size([4, 8])
Note that Graph cannot be nested at the moment.
"""
_child_init_cnt = dict()
def __init__(self):
"""
Initializes internal Graph states. It MUST be called in ``__init__`` method of subclass.
.. code-block:: python
>>> import oneflow as flow
>>> class SubclassGraph(flow.nn.Graph):
... def __init__(self):
... super().__init__() # MUST be called
... # Then define the graph attributes
... def build(self):
... pass
"""
self._generate_name()
self.config = GraphConfig()
self._blocks = OrderedDict()
self._opts = []
self._grad_scaler = None
self._variables_conf = OrderedDict()
self._is_compiled = False
# forward graph job proto
self._forward_job_proto = None
# forward, backward and optimized graph job proto
self._full_job_proto = None
self._args_repr = []
self._outs_repr = []
self._debug = False
self._debug_min_s_level = 2
self._debug_max_v_level = 0
self._outputs_buffer_size = 2
self._cur_index_of_ouputs_buffer = 0
self._c_nn_graph = oneflow._oneflow_internal.nn.graph.CNNGraph(self._name)
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
session.TryInit()
session.AddCGraph(self._c_nn_graph)
def build(self, *args):
r"""The ``build()`` method must be overridden to define neural network
computaion logic.
The ``build()`` method of nn.Graph is very similar to the ``forward()``
method of nn.Module. It is used to describe the computatioin logical of
a neural network.
When a graph object being called for the first time, the ``build()``
method will be called implicitly to build the computatioin graph.
Make sure to call modules's ``train()`` or ``eval()`` method before the
first call of your graph to make the module executing the right
training or evaluation logic if needed.
.. code-block:: python
>>> import oneflow as flow
>>> class MyGraph(flow.nn.Graph):
... def __init__(self):
... super().__init__()
... self.linear = flow.nn.Linear(3, 8, False)
... def build(self, x):
... return self.linear(x)
>>> linear_graph = MyGraph()
>>> x = flow.randn(4, 3)
>>> y = linear_graph(x) # The build() method is called implicitly
Note that ``build()`` method's inputs and outputs only accept positional
arguements at the moment, each argument must be one of these types:
* ``Tensor``
* ``list`` of ``Tensor``
* ``None``
"""
raise NotImplementedError()
def add_optimizer(
self, optim: Optimizer, *, lr_sch: LrScheduler = None,
):
r"""Add an optimizer, an learning rate scheduler to the graph.
To do training with nn.Graph, you should do 2 more things:
1. Add at least one optimizer(learning rate schedulers are optional) with ``add_optimizer()`` method.
2. Call loss tensor's ``backward()`` method in ``build()`` method.
Note that the computaion graph will automatically execute these methods:
* optimizer's ``clip_grad()`` if a optimizer is set to do grad cliping.
* optimizer's ``step()``.
* optimizer's ``zero_grad()``.
* learn rate scheduler's ``step()``.
Also note that only scalar tensor are allowed to call ``backward()``
in ``nn.Graph.build()`` for the moment. So you may call ``Tensor.sum()``
or ``Tensor.mean()`` to make the loss tensor a scalar tensor.
.. code-block:: python
>>> import oneflow as flow
>>> loss_fn = flow.nn.MSELoss(reduction="sum")
>>> model = flow.nn.Sequential(flow.nn.Linear(3, 1), flow.nn.Flatten(0, 1))
>>> optimizer = flow.optim.SGD(model.parameters(), lr=1e-6)
>>> class LinearTrainGraph(flow.nn.Graph):
... def __init__(self):
... super().__init__()
... self.model = model
... self.loss_fn = loss_fn
... # Add an optimizer
... self.add_optimizer(optimizer)
... def build(self, x, y):
... y_pred = self.model(x)
... loss = self.loss_fn(y_pred, y)
... # Call loss tensor's backward(), loss tensor must be a scalar tensor
... loss.backward()
... return loss
>>> linear_graph = LinearTrainGraph()
>>> x = flow.randn(10, 3)
>>> y = flow.randn(10)
>>> for t in range(3):
... loss = linear_graph(x, y)
Args:
optim (oneflow.optim.Optimizer): The optimizer.
lr_sch : The learning rate scheduler, see oneflow.optim.lr_scheduler.
"""
opt_dict = dict()
assert optim is not None, "optimizer cannot be None"
assert isinstance(
optim, (Optimizer, SparseOptimizer)
), "optimizer must be an instance of Optimizer"
opt_dict["optim"] = optim
if lr_sch is not None:
assert isinstance(lr_sch, LrScheduler)
assert (
lr_sch._optimizer is optim
), "lr_scheduler's optimizer must be the same optimizer in add_optimizer."
opt_dict["lr_sch"] = lr_sch
self._opts.append(opt_dict)
# Set the training config if there is an optimizer add in graph.
if len(self._opts) == 1:
self.config._train(True)
    def set_grad_scaler(self, grad_scaler: GradScaler = None):
        r"""Set the GradScaler for gradient and loss scaling.

        Accepts a ``GradScaler`` or ``StaticGradScaler``; its configuration
        is written into the graph's train conf later, during
        ``_generate_config_proto``.
        """
        assert isinstance(grad_scaler, (GradScaler, StaticGradScaler))
        self._grad_scaler = grad_scaler
def __call__(self, *args):
r"""Call nn.Graph subclass instance to run your customized graph.
Call your customized graph after the instantiation:
.. code-block:: python
g = CustomGraph()
out_tensors = g(input_tensors)
The inputs of ``__call__`` method must match the inputs of ``build()``
method. And the ``__call__`` method will return outputs matching the
outputs of ``build()`` method.
Note that the first call takes longer than later calls, because nn.Graph
will do the computaion graph generation and optimization at the first call.
Donot override this function.
"""
if not self._is_compiled:
self._compile(*args)
return self._run(*args)
    @property
    def name(self) -> str:
        r"""Name auto-generated for this graph.

        The name has the form ``"<SubclassName>_<N>"`` where ``N`` is a
        per-subclass instantiation counter (see ``_generate_name``).
        """
        return self._name
    @property
    def training(self):
        r"""In training mode if the graph has an optimizer.

        The flag is flipped when the first optimizer is added via
        ``add_optimizer``.
        """
        return self.config.training
def debug(
self,
v_level: int = 0,
ranks: Optional[Union[int, List[int]]] = None,
mode: bool = True,
) -> None:
r"""Open or close debug mode of the graph.
If in debug mode, logs of computation graph building infos or warnings will be
printed. Otherwise, only errors will be printed.
Each nn.Module inside a nn.Graph also has a debug() method to enable debug mode.
Use ``v_level`` to choose verbose debug info level, default level is 0, max level is 3.
``v_level`` 0 will print warning and graph building stages. ``v_level`` 1 will additionally
print graph build info of each nn.Module. ``v_level`` 2 will additionally print graph build
info of each operation. ``v_level`` 3 will additionally print more detailed info of each
operation.
Use ``ranks`` to choose which rank to print the debug information.
.. code-block:: python
g = CustomGraph()
g.debug() # Open debug mode
out_tensors = g(input_tensors) # Will print log for debug at the first call
Args:
v_level (int): choose verbose debug info level, default v_level is 0, max v_level is 3.
ranks (int or list(int)): choose ranks to print the debug information. Default rank ``0``.
You can choose any valid rank. Ranks equals ``-1`` means debug on all ranks.
mode (bool): whether to set debug mode (``True``) or not (``False``). Default: ``True``.
"""
assert isinstance(v_level, int)
assert v_level >= 0, "The min verbose debug info level is 0."
assert v_level <= 3, "The max verbose debug info level is 3."
assert isinstance(mode, bool)
if ranks is None:
rank_list = [0]
elif isinstance(ranks, int):
rank_list = [ranks]
elif isinstance(ranks, list):
rank_list = ranks
else:
raise ValueError("ranks must be int or List[int].")
my_rank = get_rank()
if -1 in rank_list or my_rank in rank_list:
self._debug = mode
if self._debug:
self._debug_min_s_level = 0
self._debug_max_v_level = v_level
for name, block in self._blocks.items():
assert block.type == BlockType.MODULE
block.debug(v_level, ranks, mode)
def __repr__(self):
r"""For printing the graph structure.
The graph structure can be printed after graph instantiation.
After the first call of graph, inputs and outputs will be added to
the graph structure.
.. code-block:: python
g = CustomGraph()
print(g)
out_tensors = g(input_tensors)
print(g) # Inputs and Outputs infos are added
"""
child_lines = []
child_lines.append(add_indent(repr(self.config), 2))
if len(self._args_repr) > 0:
for in_str in self._args_repr:
input_str = add_indent(in_str, 2)
child_lines.append(input_str)
if len(self._blocks) > 0:
for n, m in self._blocks.items():
mod_str = repr(m)
mod_str = add_indent(mod_str, 2)
child_lines.append(mod_str)
if len(self._outs_repr) > 0:
for out_str in self._outs_repr:
output_str = add_indent(out_str, 2)
child_lines.append(output_str)
main_str = self._shallow_repr() + ": ("
if len(child_lines) > 0:
main_str += "\n " + "\n ".join(child_lines) + "\n"
main_str += ")"
return main_str
def _shallow_repr(self):
shallow_repr = "(GRAPH:" + self._name + ":" + self.__class__.__name__ + ")"
return shallow_repr
def _print(self, s_level=2, v_level=0, msg: str = ""):
r"""Do print according to info level.
"""
assert isinstance(s_level, int)
assert isinstance(v_level, int)
assert isinstance(msg, str)
if s_level >= self._debug_min_s_level:
if (s_level > 0) or (s_level == 0 and v_level <= self._debug_max_v_level):
print(msg, flush=True)
@property
def _config_proto(self):
return self.config.proto
@property
def _optimization_conf_proto(self):
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
return session.resource
@property
def _graph_proto(self):
if not self._is_compiled:
self._print(
2,
0,
f"[ERROR]{self._shallow_repr()} has not been compiled, so it's graph proto is None."
" You can call the graph to trigger it's compilation.",
)
return self._forward_job_proto
@property
def _full_graph_proto(self):
if not self._is_compiled:
self._print(
2,
0,
f"[ERROR]{self._shallow_repr()} has not been compiled, so it's full graph proto is None."
" You can call the graph to trigger it's compilation.",
)
return self._full_job_proto
def _generate_name(self):
child_name = self.__class__.__name__
if Graph._child_init_cnt.get(child_name) is None:
Graph._child_init_cnt[child_name] = 0
self._name = child_name + "_" + str(Graph._child_init_cnt[child_name])
Graph._child_init_cnt[child_name] += 1
def _state(self):
for _, b in self._blocks.items():
pa_gen = b.parameters(recurse=True)
for pa in pa_gen:
yield pa
bu_gen = b.buffers(recurse=True)
for bu in bu_gen:
yield bu
    def _filter_states(self):
        """Collect the unique state tensors (parameters and buffers) of the graph.

        A tensor shared by several blocks is kept only once. Each parameter
        tensor additionally gets a ``VariableConfig`` entry in
        ``self._variables_conf`` so optimizers can be wired to it later.
        The deduplicated tensors are cached on ``self._state_tensor_tuple``.

        Returns:
            list[str]: operator names of the unique state tensors, in
            discovery order.
        """
        state_tensor_set = set()
        state_tensors = []
        state_op_names = []
        for state_block in self._state():
            state_tensor = state_block.origin
            if state_tensor in state_tensor_set:
                continue
            op_name = state_block.name_prefix + state_block.name
            state_tensor_set.add(state_tensor)
            state_tensors.append(state_tensor)
            state_op_names.append(op_name)
            if state_block.type == BlockType.PARAMETER:
                # Only parameters get a VariableConfig; buffers are state
                # but are not optimized.
                self._variables_conf[state_tensor] = VariableConfig(op_name)
        self._state_tensor_tuple = convert_to_tensor_tuple(state_tensors)
        return state_op_names
def _generate_config_proto(self):
self.config.proto.set_job_name(self._name)
self._outputs_buffer_size = self.config._outputs_buffer_size
if self._grad_scaler is not None:
self._grad_scaler._generate_conf_for_graph(
self.config.proto.mutable_train_conf()
)
for opt in self._opts:
opt_dict = OptDict(opt)
self.config._generate_optimizer_and_variable_configs(
opt_dict, self._variables_conf
)
def _create_states_builder(self):
state2lazy_builder = dict()
for state_block in self._state():
state_tensor = state_block.origin
op_name = state_block.name_prefix + state_block.name
if state_tensor in state2lazy_builder:
# Differe tensor block shares the same tensor, so they need to share the same
# builder.
state_block.set_lazy_origin_builder(state2lazy_builder[state_tensor])
else:
if state_block.type == BlockType.PARAMETER:
assert state_tensor in self._variables_conf
state_config = self._variables_conf[state_tensor]
op_name = state_config.name
else:
state_config = None
# Init a new lazy tensor builder
state_block.lazy_origin_builder().name = op_name
state_block.lazy_origin_builder().method = partial(
graph_build_util.build_graph_state,
op_name,
state_tensor,
state_config,
)
state2lazy_builder[state_tensor] = state_block.lazy_origin_builder()
def _compile(self, *args):
# Build graph
try:
self._print(0, 0, self._shallow_repr() + " start building graph.")
assert not self._is_compiled, (
"nn.Graph " + self._name + " has already been compiled."
)
build_graph_start = time.perf_counter()
with graph_build_util.GLogScopeContext(
self._debug_min_s_level, self._debug_max_v_level
):
eager_outputs = self._build_graph(*args)
build_graph_end = time.perf_counter()
self._print(
0,
0,
self._shallow_repr()
+ " building graph Done! Cost time: "
+ str(round(build_graph_end - build_graph_start, 2))
+ "s."
+ "\n",
)
except:
self._print(
2,
0,
"[ERROR]"
+ self._shallow_repr()
+ " building graph got error: "
+ sys_exc_error_msg(),
)
raise
# Complie graph to execution plan and init Runtime
try:
self._print(
0, 0, self._shallow_repr() + " start building plan.",
)
compile_and_init_start = time.perf_counter()
self._c_nn_graph.complie_and_init_runtime()
compile_and_init_end = time.perf_counter()
self._print(
0,
0,
self._shallow_repr()
+ " building plan Done! Cost time: "
+ str(round(compile_and_init_end - compile_and_init_start, 2))
+ "s."
+ "\n"
+ self._shallow_repr()
+ "'s total time to build graph and plan : "
+ str(round(compile_and_init_end - build_graph_start, 2))
+ "s."
+ "\n",
)
except:
self._print(
2,
0,
"[ERROR]"
+ self._shallow_repr()
+ " building plan got error: "
+ sys_exc_error_msg(),
)
raise
self._is_compiled = True
return eager_outputs
def _build_graph(self, *args):
session = session_ctx.GetDefaultSession()
assert type(session) is MultiClientSession
# Filter to get unique states in graph
state_op_names = self._filter_states()
self._generate_config_proto()
# Deal with parameter and buffer
self._print(
0,
1,
self._shallow_repr()
+ " start building graph builders of parameters and buffers.",
)
self._create_states_builder()
self._print(
0,
1,
self._shallow_repr()
+ " end building graph builders of parameters and buffers.",
)
with graph_build_util.graph_build_context(self.config.proto, session):
# Deal with inputs
self._print(0, 1, self._shallow_repr() + " start building graph inputs.")
arg_op_names, lazy_args, self._args_repr, _ = self._build_io(
"input", graph_build_util.build_graph_input_arg, *args
)
self._print(0, 1, self._shallow_repr() + " end building graph inputs.")
# Deal with module in self.build(*args)
self._print(0, 1, self._shallow_repr() + " start building graph modules.")
outputs = self.build(*lazy_args)
self._print(0, 1, self._shallow_repr() + " end building graph modules.")
# Deal with outputs
self._print(0, 1, self._shallow_repr() + " start building graph outputs.")
if not (type(outputs) is tuple or type(outputs) is list):
if outputs is None:
outputs = ()
else:
outputs = (outputs,)
(
output_op_names,
self._eager_outputs,
self._outs_repr,
out2name,
) = self._build_io("output", graph_build_util.build_graph_output, *outputs)
self._print(0, 1, self._shallow_repr() + " end building graph outputs.")
# Save forward graph job proto
self._forward_job_proto = c_api_util.GetCurrentJob()
self._print(
0,
1,
self._shallow_repr() + " start building graph with compile passes.",
)
# Complete the graph job proto
oneflow._oneflow_internal.CurJobBuildAndInferCtx_Complete()
# Save full graph job proto after job Complete for find real output blob shape and build it.
self._full_job_proto = c_api_util.GetCurrentJob()
self._print(
0, 1, self._shallow_repr() + " end building graph with compile passes."
)
# Re-build outputs accoring to full graph and outputs buffer config.
self._print(
0,
1,
self._shallow_repr()
+ " start re-building graph outputs for optimizatioin.",
)
self._rebuild_outputs(out2name)
self._print(
0,
1,
self._shallow_repr()
+ " end re-building graph outputs for optimizatioin.",
)
# Register input/output/variable/buffer to _c_nn_graph
self._c_nn_graph.register_input_op_names_and_tensors(
arg_op_names, convert_to_tensor_tuple(self._flatten_io("input", *args))
)
self._c_nn_graph.register_output_op_names_and_tensors(
output_op_names, self._outputs_tensor_tuple
)
self._c_nn_graph.register_variable_op_names_and_tensors(
state_op_names, self._state_tensor_tuple
)
return seq_to_func_return(self._eager_outputs_buffer[0])
def _rebuild_outputs(self, out2name=None):
# NOTE(chengcheng):
# Lazy build output eager tensors.
#
# After JobBuildAndInferCtxt.Complete, the output tensor shape
# could be changed by JobPass, such as GradientAccumulationRewritePass.
def build_real_output(fake_eager_out):
lbn = out2name[fake_eager_out] + "/out"
assert lbn in self._full_job_proto.helper.lbn2logical_blob_desc
blob_conf = self._full_job_proto.helper.lbn2logical_blob_desc[lbn]
shape = tuple(blob_conf.shape.dim)
dtype = fake_eager_out.dtype
with oneflow._oneflow_internal.lazy_mode.guard(False):
if fake_eager_out.is_consistent:
eager_out = oneflow.empty(
shape,
dtype=dtype,
placement=fake_eager_out.placement,
sbp=fake_eager_out.sbp,
)
else:
eager_out = oneflow.empty(
shape, dtype=dtype, device=fake_eager_out.device
)
return eager_out
def convert_to_synced_tensor_tuple(*args):
tensor_tuple = convert_to_tensor_tuple(*args)
# tensors acting as buffer should be synced once upon created.
oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers(
tensor_tuple, self._c_nn_graph
)
return tensor_tuple
self._eager_outputs = self._mapping_io(
"output", build_real_output, *self._eager_outputs
)
self._outputs_tensor_tuple = convert_to_synced_tensor_tuple(
self._flatten_io("output", *self._eager_outputs)
)
self._eager_outputs_buffer = [
self._eager_outputs,
]
self._outputs_tensor_tuple_buffer = [
self._outputs_tensor_tuple,
]
# Make outputs buffer
for i in range(self._outputs_buffer_size - 1):
outputs_buffer_item = self._empty_like_io("output", *self._eager_outputs)
self._eager_outputs_buffer.append(outputs_buffer_item)
outputs_tensor_tuple_buffer_item = convert_to_synced_tensor_tuple(
self._flatten_io("output", *outputs_buffer_item)
)
self._outputs_tensor_tuple_buffer.append(outputs_tensor_tuple_buffer_item)
self._check_outputs_buffer()
    def _check_outputs_buffer(self):
        """Sanity-check the rebuilt outputs buffer.

        Verifies that the number of buffered output tuples matches the
        configured ``_outputs_buffer_size`` and that no tensor object is
        shared between buffer slots (each slot must own distinct tensors,
        otherwise one iteration could clobber another's outputs).
        """
        has_len = len(self._outputs_tensor_tuple_buffer)
        assert (
            has_len == self._outputs_buffer_size
        ), f"nn.Graph's outputs buffer size {has_len} donot match the set value {self._outputs_buffer_size}."
        # Check there is not duplicated outputs buffer tensor.
        out_id_dic = dict()

        def check_id_and_add(t, name):
            # Track tensors by object identity; a repeated id means two
            # buffer slots alias the same tensor.
            if t is not None:
                tid = id(t)
                assert (
                    tid not in out_id_dic
                ), f"nn.Graph's outputs buffer add buffer tensor tid {tid} has conflict, new item name {name}, old item name {out_id_dic[tid]}."
                out_id_dic[tid] = name
        for b_idx, buffer in enumerate(self._outputs_tensor_tuple_buffer):
            for i_idx, item in enumerate(buffer):
                check_id_and_add(
                    item, "graph_ouputs_buffer_" + str(b_idx) + "_" + str(i_idx)
                )
def _run(self, *args):
try:
flattened_eager_args = self._flatten_io("input", *args)
outputs_tensor_tuple = self._outputs_tensor_tuple_buffer[
self._cur_index_of_ouputs_buffer
]
eager_outputs = self._eager_outputs_buffer[self._cur_index_of_ouputs_buffer]
# oneflow._oneflow_internal.eager.multi_client.Sync() NOTE(chengcheng): Need Sync?
oneflow._oneflow_internal.nn.graph.RunLazyNNGraph(
convert_to_tensor_tuple(flattened_eager_args),
outputs_tensor_tuple,
self._state_tensor_tuple,
self._c_nn_graph,
)
# Update outputs buffer reading index
self._cur_index_of_ouputs_buffer += 1
if self._cur_index_of_ouputs_buffer >= self._outputs_buffer_size:
self._cur_index_of_ouputs_buffer = 0
except:
self._print(
2,
0,
"[ERROR]"
+ self._shallow_repr()
+ " run got error: "
+ sys_exc_error_msg(),
)
raise
# Copy outputs from buffer
eager_outputs = self._copy_io("output", *eager_outputs)
# Make sure that last used devices of tensors in `outputs_tensor_tuple` are
# "critical_section".
# NNGraph's execution flow will be broken if `last_used_device` of `outputs_tensor_tuple`
# are not "critical_section".
oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers(
outputs_tensor_tuple, self._c_nn_graph
)
return seq_to_func_return(eager_outputs)
def _build_io(self, io_type, build_func, *args):
assert io_type in ("input", "output")
io_type_upper = io_type.upper()
build_args = []
op_names = []
args_repr = []
tensor2op_name = {}
def build_tensor_or_none(tensor, name, repr_str):
assert tensor is None or (isinstance(tensor, Tensor))
if isinstance(tensor, Tensor):
build_arg = build_func(name, tensor)
op_names.append(name)
tensor2op_name[build_arg] = name
else:
build_arg = None
args_repr.append(repr_str)
self._print(0, 1, repr_str)
return build_arg
for idx, arg in enumerate(args):
if isinstance(arg, Tensor) or arg is None:
if arg is None:
name, repr_str = self._io_item_check_and_gen(
arg, None, io_type, idx
)
else:
name, repr_str = self._io_item_check_and_gen(
arg, Tensor, io_type, idx
)
build_args.append(build_tensor_or_none(arg, name, repr_str))
elif isinstance(arg, (TensorTuple, list)):
if isinstance(arg, TensorTuple):
seq_args = TensorTuple()
else:
seq_args = list()
for i in range(len(arg)):
name, repr_str = self._io_item_check_and_gen(
arg[i], Tensor, io_type, idx, i
)
seq_args.append(build_tensor_or_none(arg[i], name, repr_str))
build_args.append(seq_args)
else:
self._io_item_check_and_gen(arg, Tensor, io_type, idx)
return op_names, build_args, args_repr, tensor2op_name
def _mapping_io(self, io_type, func, *args):
assert io_type in ("input", "output")
io_type_upper = io_type.upper()
mapped_args = []
def mapping_tensor_or_none(tensor):
assert tensor is None or (isinstance(tensor, Tensor))
if isinstance(tensor, Tensor):
mapped_arg = func(tensor)
else:
mapped_arg = None
return mapped_arg
for idx, arg in enumerate(args):
if isinstance(arg, Tensor) or arg is None:
mapped_args.append(mapping_tensor_or_none(arg))
elif isinstance(arg, (TensorTuple, list)):
if isinstance(arg, TensorTuple):
seq_args = TensorTuple()
else:
seq_args = list()
for i in range(len(arg)):
seq_args.append(mapping_tensor_or_none(arg[i]))
mapped_args.append(seq_args)
else:
self._io_item_check(arg, None, io_type, idx)
return mapped_args
    def _empty_like_io(self, io_type, *args):
        """Return an I/O structure of freshly allocated tensors matching the
        shape/dtype/placement of each tensor in ``args`` (used to grow the
        outputs buffer).
        """
        def func(t):
            shape = t.shape
            dtype = t.dtype
            # Allocation must happen in eager mode, outside graph tracing.
            with oneflow._oneflow_internal.lazy_mode.guard(False):
                if t.is_consistent:
                    # Consistent (global) tensors keep placement and sbp.
                    eager_out = oneflow.empty(
                        shape, dtype=dtype, placement=t.placement, sbp=t.sbp,
                    )
                else:
                    eager_out = oneflow.empty(shape, dtype=dtype, device=t.device)
            return eager_out
        return self._mapping_io(io_type, func, *args)
def _copy_io(self, io_type, *args):
def func(tensor):
with oneflow._oneflow_internal.lazy_mode.guard(False):
build_arg = tensor.to(copy=True)
return build_arg
return self._mapping_io(io_type, func, *args)
def _flatten_io(self, io_type, *args):
assert isinstance(args, tuple)
flattened_args = []
for idx, arg in enumerate(args):
if isinstance(arg, Tensor):
flattened_args.append(arg)
elif isinstance(arg, (TensorTuple, list)):
for i in range(len(arg)):
self._io_item_check(arg[i], Tensor, io_type, idx, i)
flattened_args.append(arg[i])
else:
self._io_item_check(arg, None, io_type, idx)
return flattened_args
def _io_item_check(self, item, expect_type, io_type, idx, second_idx=None):
if expect_type is None and item is None:
return
elif expect_type is not None and isinstance(item, expect_type):
return
else:
assert io_type in ("input", "output")
name = (
"_"
+ self.name
+ "-"
+ io_type
+ "_"
+ str(idx)
+ ("" if second_idx is None else "_" + str(second_idx))
)
repr_str = (
"[ERROR](" + io_type.upper() + ":" + name + ":" + str(type(item)) + ")"
)
self._print(2, 0, repr_str)
raise NotImplementedError(
"nn.Graph.build()'s input/output only support types: Tensor/list(Tensor)/None."
)
def _io_item_check_and_gen(self, item, expect_type, io_type, idx, second_idx=None):
assert io_type in ("input", "output")
name = (
"_"
+ self.name
+ "-"
+ io_type
+ "_"
+ str(idx)
+ ("" if second_idx is None else "_" + str(second_idx))
)
if expect_type is None and item is None:
repr_str = (
"[WARNING]("
+ io_type.upper()
+ ":"
+ name
+ ":"
+ str(type(item))
+ ")"
)
return name, repr_str
elif expect_type is not None and isinstance(item, expect_type):
if isinstance(item, Tensor):
repr_str = (
"(" + io_type.upper() + ":" + name + ":" + item._meta_repr() + ")"
)
else:
repr_str = (
"[WARNING]("
+ io_type.upper()
+ ":"
+ name
+ ":"
+ str(type(item))
+ ")"
)
return name, repr_str
else:
repr_str = (
"[ERROR](" + io_type.upper() + ":" + name + ":" + str(type(item)) + ")"
)
self._print(2, 0, repr_str)
raise NotImplementedError(
"nn.Graph.build()'s input/output only support types: Tensor/list(Tensor)/None."
)
def _add_block(self, name: str, module: Module = None) -> None:
r"""Adds module to the graph as a block so that the module will
be called in nn.Graph.build.
Args:
name (str): name of the child block. The child block can be accessed from this graph using the given name.
module (Module): child module to be added to the graph.
Just assign nn.Module in nn.Graph, _add_block will be called to add the
module as a Block:
.. code-block:: python
>>> import oneflow as flow
>>> class LinearGraph(flow.nn.Graph):
... def __init__(self):
... super().__init__()
... # add a nn.Module as a block to graph.
... self.linear = flow.nn.Linear(3, 8, False)
... def build(self, x):
... # call the nn.Module block.
... return self.linear(x)
The block can be accessed as an attribute using the given name.
>>> g = LinearGraph()
>>> print(repr(g.linear))
(MODULE:linear:Linear(in_features=3, out_features=8, bias=False)): (
(PARAMETER:linear.weight:tensor(..., size=(8, 3), dtype=oneflow.float32, requires_grad=True)): ()
)
"""
if "_name" not in self.__dict__:
raise AttributeError(
"Base class nn.Graph has not been initialized, "
"please call super().__init__() in subclass of nn.Graph "
"before assigning any attribute."
)
if not isinstance(module, Module) and module is not None:
raise TypeError("{} is not a Module subclass".format(type(module)))
elif not isinstance(name, str):
raise TypeError("module name should be a string. Got {}".format(type(name)))
elif hasattr(self, name) and name not in self._blocks:
raise KeyError("attribute '{}' already exists".format(name))
elif "." in name:
raise KeyError('module name can\'t contain ".", got: {}'.format(name))
elif name == "":
raise KeyError('module name can\'t be empty string ""')
self._blocks[name] = get_block_cls(module)("", name, module)
    def __setattr__(self, name: str, value=None):
        """Intercept attribute assignment.

        nn.Module values are wrapped as Blocks via ``_add_block`` instead of
        being stored directly; Optimizer and Tensor values are rejected with
        guidance; everything else is stored normally.
        """
        if isinstance(value, Module):
            # Modules become graph blocks so they can be traced in build().
            self._add_block(name, value)
        elif isinstance(value, Optimizer):
            raise AttributeError(
                "'{}' nn.Graph is not allowed to set Optimizer attribute named '{}'. "
                "Please use add_optimizer(...) instead.".format(
                    type(self).__name__, name
                )
            )
        elif isinstance(value, Tensor):
            raise AttributeError(
                "'{}' nn.Graph is not allowed to set Tensor attribute named '{}'. "
                "Please use nn.Module to hold the tensor, then add the nn.Module to nn.Graph.".format(
                    type(self).__name__, name
                )
            )
        else:
            # Plain attributes bypass this class's logic entirely.
            object.__setattr__(self, name, value)
def __getattr__(self, name: str):
if "_blocks" in self.__dict__:
if name in self._blocks:
return self._blocks[name]
if name in self.__dict__:
return self.__dict__[name]
raise AttributeError(
"'{}' object has no attribute '{}'".format(type(self).__name__, name)
)
if __name__ == "__main__":
    # Run the docstring examples in this module as tests; raise_on_error
    # aborts on the first failing example instead of just reporting it.
    import doctest
    doctest.testmod(raise_on_error=True)
| [
"oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple",
"oneflow.nn.graph.util.seq_to_func_return",
"oneflow._oneflow_internal.lazy_mode.guard",
"oneflow.empty",
"oneflow.framework.tensor.TensorTuple",
"oneflow.env.get_rank",
"oneflow.nn.graph.graph_config.GraphConfig",
"oneflow._oneflow_intern... | [((40278, 40314), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (40293, 40314), False, 'import doctest\n'), ((3813, 3826), 'oneflow.nn.graph.graph_config.GraphConfig', 'GraphConfig', ([], {}), '()\n', (3824, 3826), False, 'from oneflow.nn.graph.graph_config import GraphConfig\n'), ((3850, 3863), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3861, 3863), False, 'from collections import OrderedDict\n'), ((3952, 3965), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3963, 3965), False, 'from collections import OrderedDict\n'), ((4436, 4491), 'oneflow._oneflow_internal.nn.graph.CNNGraph', 'oneflow._oneflow_internal.nn.graph.CNNGraph', (['self._name'], {}), '(self._name)\n', (4479, 4491), False, 'import oneflow\n'), ((4510, 4541), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4539, 4541), True, 'import oneflow.framework.session_context as session_ctx\n'), ((12428, 12438), 'oneflow.env.get_rank', 'get_rank', ([], {}), '()\n', (12436, 12438), False, 'from oneflow.env import get_rank\n'), ((14792, 14823), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (14821, 14823), True, 'import oneflow.framework.session_context as session_ctx\n'), ((16878, 16916), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', (['state_tensors'], {}), '(state_tensors)\n', (16901, 16916), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((21072, 21103), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (21101, 21103), True, 'import oneflow.framework.session_context as session_ctx\n'), ((24767, 24816), 'oneflow.nn.graph.util.seq_to_func_return', 'seq_to_func_return', (['self._eager_outputs_buffer[0]'], {}), '(self._eager_outputs_buffer[0])\n', 
(24785, 24816), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((29805, 29906), 'oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers', 'oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers', (['outputs_tensor_tuple', 'self._c_nn_graph'], {}), '(outputs_tensor_tuple,\n self._c_nn_graph)\n', (29862, 29906), False, 'import oneflow\n'), ((29940, 29973), 'oneflow.nn.graph.util.seq_to_func_return', 'seq_to_func_return', (['eager_outputs'], {}), '(eager_outputs)\n', (29958, 29973), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((17329, 17341), 'oneflow.nn.graph.optimizer.OptDict', 'OptDict', (['opt'], {}), '(opt)\n', (17336, 17341), False, 'from oneflow.nn.graph.optimizer import OptDict, VariableConfig\n'), ((19024, 19043), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19041, 19043), False, 'import time\n'), ((19263, 19282), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (19280, 19282), False, 'import time\n'), ((20049, 20068), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (20066, 20068), False, 'import time\n'), ((20160, 20179), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (20177, 20179), False, 'import time\n'), ((21719, 21783), 'oneflow.framework.graph_build_util.graph_build_context', 'graph_build_util.graph_build_context', (['self.config.proto', 'session'], {}), '(self.config.proto, session)\n', (21755, 21783), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((23137, 23163), 'oneflow.framework.c_api_util.GetCurrentJob', 'c_api_util.GetCurrentJob', ([], {}), '()\n', (23161, 23163), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((23382, 23441), 'oneflow._oneflow_internal.CurJobBuildAndInferCtx_Complete', 'oneflow._oneflow_internal.CurJobBuildAndInferCtx_Complete', ([], {}), '()\n', (23439, 23441), False, 'import oneflow\n'), ((23582, 23608), 
'oneflow.framework.c_api_util.GetCurrentJob', 'c_api_util.GetCurrentJob', ([], {}), '()\n', (23606, 23608), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((26081, 26111), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', (['*args'], {}), '(*args)\n', (26104, 26111), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((26199, 26292), 'oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers', 'oneflow._oneflow_internal.nn.graph.SoftSyncNNGraphBuffers', (['tensor_tuple', 'self._c_nn_graph'], {}), '(tensor_tuple,\n self._c_nn_graph)\n', (26256, 26292), False, 'import oneflow\n'), ((38993, 39014), 'oneflow.nn.graph.block.get_block_cls', 'get_block_cls', (['module'], {}), '(module)\n', (39006, 39014), False, 'from oneflow.nn.graph.block import Block, BlockType, get_block_cls\n'), ((13441, 13462), 'oneflow.nn.graph.util.add_indent', 'add_indent', (['in_str', '(2)'], {}), '(in_str, 2)\n', (13451, 13462), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((13650, 13672), 'oneflow.nn.graph.util.add_indent', 'add_indent', (['mod_str', '(2)'], {}), '(mod_str, 2)\n', (13660, 13672), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((13828, 13850), 'oneflow.nn.graph.util.add_indent', 'add_indent', (['out_str', '(2)'], {}), '(out_str, 2)\n', (13838, 13850), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((16818, 16841), 'oneflow.nn.graph.optimizer.VariableConfig', 'VariableConfig', (['op_name'], {}), '(op_name)\n', (16832, 16841), False, 'from oneflow.nn.graph.optimizer import OptDict, VariableConfig\n'), ((18450, 18535), 'functools.partial', 'partial', (['graph_build_util.build_graph_state', 'op_name', 'state_tensor', 'state_config'], {}), '(graph_build_util.build_graph_state, op_name, state_tensor, state_config\n )\n', (18457, 18535), False, 
'from functools import partial\n'), ((19061, 19149), 'oneflow.framework.graph_build_util.GLogScopeContext', 'graph_build_util.GLogScopeContext', (['self._debug_min_s_level', 'self._debug_max_v_level'], {}), '(self._debug_min_s_level, self.\n _debug_max_v_level)\n', (19094, 19149), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((25464, 25512), 'oneflow._oneflow_internal.lazy_mode.guard', 'oneflow._oneflow_internal.lazy_mode.guard', (['(False)'], {}), '(False)\n', (25505, 25512), False, 'import oneflow\n'), ((28788, 28833), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', (['flattened_eager_args'], {}), '(flattened_eager_args)\n', (28811, 28833), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((33068, 33116), 'oneflow._oneflow_internal.lazy_mode.guard', 'oneflow._oneflow_internal.lazy_mode.guard', (['(False)'], {}), '(False)\n', (33109, 33116), False, 'import oneflow\n'), ((33575, 33623), 'oneflow._oneflow_internal.lazy_mode.guard', 'oneflow._oneflow_internal.lazy_mode.guard', (['(False)'], {}), '(False)\n', (33616, 33623), False, 'import oneflow\n'), ((25595, 25693), 'oneflow.empty', 'oneflow.empty', (['shape'], {'dtype': 'dtype', 'placement': 'fake_eager_out.placement', 'sbp': 'fake_eager_out.sbp'}), '(shape, dtype=dtype, placement=fake_eager_out.placement, sbp=\n fake_eager_out.sbp)\n', (25608, 25693), False, 'import oneflow\n'), ((25862, 25925), 'oneflow.empty', 'oneflow.empty', (['shape'], {'dtype': 'dtype', 'device': 'fake_eager_out.device'}), '(shape, dtype=dtype, device=fake_eager_out.device)\n', (25875, 25925), False, 'import oneflow\n'), ((33186, 33253), 'oneflow.empty', 'oneflow.empty', (['shape'], {'dtype': 'dtype', 'placement': 't.placement', 'sbp': 't.sbp'}), '(shape, dtype=dtype, placement=t.placement, sbp=t.sbp)\n', (33199, 33253), False, 'import oneflow\n'), ((33355, 33405), 'oneflow.empty', 'oneflow.empty', (['shape'], {'dtype': 'dtype', 
'device': 't.device'}), '(shape, dtype=dtype, device=t.device)\n', (33368, 33405), False, 'import oneflow\n'), ((19777, 19796), 'oneflow.nn.graph.util.sys_exc_error_msg', 'sys_exc_error_msg', ([], {}), '()\n', (19794, 19796), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((20902, 20921), 'oneflow.nn.graph.util.sys_exc_error_msg', 'sys_exc_error_msg', ([], {}), '()\n', (20919, 20921), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((29393, 29412), 'oneflow.nn.graph.util.sys_exc_error_msg', 'sys_exc_error_msg', ([], {}), '()\n', (29410, 29412), False, 'from oneflow.nn.graph.util import add_indent, seq_to_func_return, sys_exc_error_msg\n'), ((31316, 31329), 'oneflow.framework.tensor.TensorTuple', 'TensorTuple', ([], {}), '()\n', (31327, 31329), False, 'from oneflow.framework.tensor import Tensor, TensorTuple\n'), ((32590, 32603), 'oneflow.framework.tensor.TensorTuple', 'TensorTuple', ([], {}), '()\n', (32601, 32603), False, 'from oneflow.framework.tensor import Tensor, TensorTuple\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import tempfile
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
from optimizer_test_util import clip_grad_norm_np
from oneflow.one_embedding import Ftrl
import oneflow as flow
from oneflow.nn.parameter import Parameter
def compare_with_numpy_ftrl(
    test_case,
    device,
    x_shape,
    learning_rate,
    train_iters,
    weight_decay,
    lr_power,
    initial_accumulator_value,
    lambda1,
    lambda2,
    beta,
    reload_state_step,
    save_load_by_pickle,
):
    """Train one parameter with OneFlow's Ftrl and a numpy reference,
    then assert the resulting parameters agree within 1e-4.

    The same random gradient sequence drives both trainers.  At
    ``reload_state_step`` the optimizer state is round-tripped through
    ``state_dict``/``load_state_dict`` (optionally via flow.save/flow.load
    on disk) to exercise state serialization as well.
    """
    # One random gradient per training iteration, shared by both trainers.
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)
    def train_by_oneflow():
        # Train a single Parameter with the real Ftrl optimizer.
        x = Parameter(flow.Tensor(init_value, device=flow.device(device)))
        ftrl = Ftrl(
            [
                {
                    "params": [x],
                    "lr": learning_rate,
                    "weight_decay": weight_decay,
                    "lr_power": lr_power,
                    "initial_accumulator_value": initial_accumulator_value,
                    "lambda1": lambda1,
                    "lambda2": lambda2,
                    "beta": beta,
                }
            ]
        )
        def train_one_iter(grad):
            grad_tensor = flow.tensor(
                grad,
                dtype=flow.float32,
                requires_grad=False,
                device=flow.device(device),
            )
            # loss = sum(x * grad) so that d(loss)/dx == grad.
            loss = flow.sum(x * grad_tensor)
            loss.backward()
            ftrl.step()
            ftrl.zero_grad()
        for i in range(train_iters):
            train_one_iter(random_grad_seq[i])
            if i == reload_state_step:
                # Round-trip the optimizer state through a fresh instance.
                state_dict = ftrl.state_dict()
                ftrl = Ftrl([{"params": [x],}],)
                if save_load_by_pickle:
                    with tempfile.TemporaryDirectory() as save_dir:
                        flow.save(state_dict, save_dir)
                        state_dict = flow.load(save_dir)
                ftrl.load_state_dict(state_dict)
        return x
    def train_by_numpy():
        # Numpy reference implementation of the FTRL update rule.
        x = init_value
        accum = np.zeros_like(x)
        accum.fill(initial_accumulator_value)
        z_arr = np.zeros_like(x)
        def np_train_one_iter(grad):
            grad = grad + weight_decay * x
            new_accum = accum + grad * grad
            sigma = (
                np.power(new_accum, lr_power) - np.power(accum, lr_power)
            ) / learning_rate
            new_z_val = z_arr + grad - sigma * x
            update_val = (np.sign(new_z_val) * lambda1 - new_z_val) / (
                (beta + np.power(new_accum, lr_power)) / learning_rate + lambda2
            )
            # Sparsity: weights whose |z| stays below lambda1 are zeroed.
            param = np.where(np.abs(new_z_val) < lambda1, 0.0, update_val)
            return (param, new_accum, new_z_val)
        for i in range(1, train_iters + 1):
            (x, accum, z_arr) = np_train_one_iter(random_grad_seq[i - 1])
        return x
    oneflow_res = train_by_oneflow().numpy()
    numpy_res = train_by_numpy()
    test_case.assertTrue(
        np.allclose(oneflow_res.flatten(), numpy_res.flatten(), rtol=1e-4, atol=1e-4)
    )
def compare_with_numpy_ftrl_clip_grad(
    test_case,
    device,
    x_shape,
    learning_rate,
    train_iters,
    weight_decay,
    lr_power,
    initial_accumulator_value,
    lambda1,
    lambda2,
    beta,
    clip_grad_max_norm,
    clip_grad_norm_type,
    reload_state_step,
    save_load_by_pickle,
):
    """Variant of compare_with_numpy_ftrl that additionally exercises
    gradient clipping (`clip_grad_max_norm` / `clip_grad_norm_type`).

    Same structure: the identical random gradient sequence is fed to
    OneFlow's Ftrl (with ftrl.clip_grad() before each step) and to a
    numpy reference that uses clip_grad_norm_np, and the final parameters
    must agree within 1e-4.
    """
    random_grad_seq = []
    for _ in range(train_iters):
        random_grad_seq.append(np.random.uniform(size=x_shape).astype(np.float32))
    init_value = np.random.uniform(size=x_shape).astype(np.float32)
    def train_by_oneflow():
        x = Parameter(flow.Tensor(init_value, device=flow.device(device)))
        ftrl = Ftrl(
            [
                {
                    "params": [x],
                    "lr": learning_rate,
                    "weight_decay": weight_decay,
                    "lr_power": lr_power,
                    "initial_accumulator_value": initial_accumulator_value,
                    "lambda1": lambda1,
                    "lambda2": lambda2,
                    "beta": beta,
                    "clip_grad_max_norm": clip_grad_max_norm,
                    "clip_grad_norm_type": clip_grad_norm_type,
                }
            ]
        )
        def train_one_iter(grad):
            grad_tensor = flow.tensor(
                grad,
                dtype=flow.float32,
                requires_grad=False,
                device=flow.device(device),
            )
            loss = flow.sum(x * grad_tensor)
            loss.backward()
            # Clip gradients before the optimizer step (the feature under test).
            ftrl.clip_grad()
            ftrl.step()
            ftrl.zero_grad()
        for i in range(train_iters):
            train_one_iter(random_grad_seq[i])
            if i == reload_state_step:
                # Round-trip the optimizer state through a fresh instance.
                state_dict = ftrl.state_dict()
                ftrl = Ftrl([{"params": [x],}])
                if save_load_by_pickle:
                    with tempfile.TemporaryDirectory() as save_dir:
                        flow.save(state_dict, save_dir)
                        state_dict = flow.load(save_dir)
                ftrl.load_state_dict(state_dict)
        return x
    def train_by_numpy():
        x = init_value
        accum = np.zeros_like(x)
        accum.fill(initial_accumulator_value)
        z_arr = np.zeros_like(x)
        def np_train_one_iter(grad):
            # Mirror the optimizer's clipping with the numpy reference helper.
            total_norm, grad = clip_grad_norm_np(
                grad, clip_grad_max_norm, clip_grad_norm_type
            )
            grad = grad + weight_decay * x
            new_accum = accum + grad * grad
            sigma = (
                np.power(new_accum, lr_power) - np.power(accum, lr_power)
            ) / learning_rate
            new_z_val = z_arr + grad - sigma * x
            update_val = (np.sign(new_z_val) * lambda1 - new_z_val) / (
                (beta + np.power(new_accum, lr_power)) / learning_rate + lambda2
            )
            # Sparsity: weights whose |z| stays below lambda1 are zeroed.
            param = np.where(np.abs(new_z_val) < lambda1, 0.0, update_val)
            return (param, new_accum, new_z_val)
        for i in range(1, train_iters + 1):
            (x, accum, z_arr) = np_train_one_iter(random_grad_seq[i - 1])
        return x
    oneflow_res = train_by_oneflow().numpy()
    numpy_res = train_by_numpy()
    test_case.assertTrue(
        np.allclose(oneflow_res.flatten(), numpy_res.flatten(), rtol=1e-4, atol=1e-4)
    )
@flow.unittest.skip_unless_1n1d()
class Testftrl(flow.unittest.TestCase):
    """Cross-check OneFlow's Ftrl optimizer against the numpy reference
    over a grid of hyper-parameters."""

    def test_ftrl(test_case):
        """Sweep the plain Ftrl configuration space."""
        # Order matters: GenArgList yields values in insertion order, which
        # must line up with compare_with_numpy_ftrl's positional parameters.
        specs = [
            ("device", ["cuda", "cpu"]),
            ("x_shape", [(10,)]),
            ("learning_rate", [1, 1e-3]),
            ("train_iters", [10]),
            ("weight_decay", [0.9, 0.000]),
            ("lr_power", [-0.5, 0.5]),
            ("initial_accumulator_value", [0.1, 0.05]),
            ("lambda1", [0.01]),
            ("lambda2", [0.0, 0.01]),
            ("beta", [1.0]),
            ("reload_state_step", [5]),  # save and load optim state
            ("save_load_by_pickle", [False, True]),
        ]
        arg_dict = OrderedDict(specs)
        for arg in GenArgList(arg_dict):
            compare_with_numpy_ftrl(test_case, *arg)

    def test_ftrl_clip_grad(test_case):
        """Sweep Ftrl with gradient clipping enabled."""
        specs = [
            ("device", ["cpu", "cuda"]),
            ("x_shape", [(10,)]),
            ("learning_rate", [1, 1e-3]),
            ("train_iters", [10]),
            ("weight_decay", [0.9, 0.000]),
            ("lr_power", [-0.5]),
            ("initial_accumulator_value", [0.1, 0.05]),
            ("lambda1", [0.01]),
            ("lambda2", [0.0]),
            ("beta", [1.0]),
            ("clip_grad_max_norm", [0, 0.5, 1.0]),
            ("clip_grad_norm_type", ["inf", "-inf", 0.0, 1.0, 2.0, 3.5]),
            ("reload_state_step", [5]),  # save and load optim state
            ("save_load_by_pickle", [False, True]),
        ]
        arg_dict = OrderedDict(specs)
        for arg in GenArgList(arg_dict):
            compare_with_numpy_ftrl_clip_grad(test_case, *arg)
if __name__ == "__main__":
    # Entry point: run the Ftrl optimizer tests with the stock unittest runner.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.one_embedding.Ftrl",
"oneflow.sum",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.device",
"oneflow.save",
"oneflow.load"
] | [((7170, 7202), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7200, 7202), True, 'import oneflow as flow\n'), ((8855, 8870), 'unittest.main', 'unittest.main', ([], {}), '()\n', (8868, 8870), False, 'import unittest\n'), ((1471, 1689), 'oneflow.one_embedding.Ftrl', 'Ftrl', (["[{'params': [x], 'lr': learning_rate, 'weight_decay': weight_decay,\n 'lr_power': lr_power, 'initial_accumulator_value':\n initial_accumulator_value, 'lambda1': lambda1, 'lambda2': lambda2,\n 'beta': beta}]"], {}), "([{'params': [x], 'lr': learning_rate, 'weight_decay': weight_decay,\n 'lr_power': lr_power, 'initial_accumulator_value':\n initial_accumulator_value, 'lambda1': lambda1, 'lambda2': lambda2,\n 'beta': beta}])\n", (1475, 1689), False, 'from oneflow.one_embedding import Ftrl\n'), ((2835, 2851), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2848, 2851), True, 'import numpy as np\n'), ((2914, 2930), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (2927, 2930), True, 'import numpy as np\n'), ((4501, 4809), 'oneflow.one_embedding.Ftrl', 'Ftrl', (["[{'params': [x], 'lr': learning_rate, 'weight_decay': weight_decay,\n 'lr_power': lr_power, 'initial_accumulator_value':\n initial_accumulator_value, 'lambda1': lambda1, 'lambda2': lambda2,\n 'beta': beta, 'clip_grad_max_norm': clip_grad_max_norm,\n 'clip_grad_norm_type': clip_grad_norm_type}]"], {}), "([{'params': [x], 'lr': learning_rate, 'weight_decay': weight_decay,\n 'lr_power': lr_power, 'initial_accumulator_value':\n initial_accumulator_value, 'lambda1': lambda1, 'lambda2': lambda2,\n 'beta': beta, 'clip_grad_max_norm': clip_grad_max_norm,\n 'clip_grad_norm_type': clip_grad_norm_type}])\n", (4505, 4809), False, 'from oneflow.one_embedding import Ftrl\n'), ((6019, 6035), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6032, 6035), True, 'import numpy as np\n'), ((6098, 6114), 'numpy.zeros_like', 'np.zeros_like', (['x'], {}), '(x)\n', (6111, 6114), True, 
'import numpy as np\n'), ((7292, 7305), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (7303, 7305), False, 'from collections import OrderedDict\n'), ((7886, 7906), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (7896, 7906), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((8021, 8034), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8032, 8034), False, 'from collections import OrderedDict\n'), ((8737, 8757), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8747, 8757), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((1301, 1332), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1318, 1332), True, 'import numpy as np\n'), ((2155, 2180), 'oneflow.sum', 'flow.sum', (['(x * grad_tensor)'], {}), '(x * grad_tensor)\n', (2163, 2180), True, 'import oneflow as flow\n'), ((4331, 4362), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4348, 4362), True, 'import numpy as np\n'), ((5311, 5336), 'oneflow.sum', 'flow.sum', (['(x * grad_tensor)'], {}), '(x * grad_tensor)\n', (5319, 5336), True, 'import oneflow as flow\n'), ((6184, 6248), 'optimizer_test_util.clip_grad_norm_np', 'clip_grad_norm_np', (['grad', 'clip_grad_max_norm', 'clip_grad_norm_type'], {}), '(grad, clip_grad_max_norm, clip_grad_norm_type)\n', (6201, 6248), False, 'from optimizer_test_util import clip_grad_norm_np\n'), ((2456, 2479), 'oneflow.one_embedding.Ftrl', 'Ftrl', (["[{'params': [x]}]"], {}), "([{'params': [x]}])\n", (2460, 2479), False, 'from oneflow.one_embedding import Ftrl\n'), ((5641, 5664), 'oneflow.one_embedding.Ftrl', 'Ftrl', (["[{'params': [x]}]"], {}), "([{'params': [x]}])\n", (5645, 5664), False, 'from oneflow.one_embedding import Ftrl\n'), ((1232, 1263), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (1249, 1263), 
True, 'import numpy as np\n'), ((1434, 1453), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1445, 1453), True, 'import oneflow as flow\n'), ((2101, 2120), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2112, 2120), True, 'import oneflow as flow\n'), ((3095, 3124), 'numpy.power', 'np.power', (['new_accum', 'lr_power'], {}), '(new_accum, lr_power)\n', (3103, 3124), True, 'import numpy as np\n'), ((3127, 3152), 'numpy.power', 'np.power', (['accum', 'lr_power'], {}), '(accum, lr_power)\n', (3135, 3152), True, 'import numpy as np\n'), ((3429, 3446), 'numpy.abs', 'np.abs', (['new_z_val'], {}), '(new_z_val)\n', (3435, 3446), True, 'import numpy as np\n'), ((4262, 4293), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'x_shape'}), '(size=x_shape)\n', (4279, 4293), True, 'import numpy as np\n'), ((4464, 4483), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4475, 4483), True, 'import oneflow as flow\n'), ((5257, 5276), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5268, 5276), True, 'import oneflow as flow\n'), ((6405, 6434), 'numpy.power', 'np.power', (['new_accum', 'lr_power'], {}), '(new_accum, lr_power)\n', (6413, 6434), True, 'import numpy as np\n'), ((6437, 6462), 'numpy.power', 'np.power', (['accum', 'lr_power'], {}), '(accum, lr_power)\n', (6445, 6462), True, 'import numpy as np\n'), ((6739, 6756), 'numpy.abs', 'np.abs', (['new_z_val'], {}), '(new_z_val)\n', (6745, 6756), True, 'import numpy as np\n'), ((2547, 2576), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (2574, 2576), False, 'import tempfile\n'), ((2614, 2645), 'oneflow.save', 'flow.save', (['state_dict', 'save_dir'], {}), '(state_dict, save_dir)\n', (2623, 2645), True, 'import oneflow as flow\n'), ((2683, 2702), 'oneflow.load', 'flow.load', (['save_dir'], {}), '(save_dir)\n', (2692, 2702), True, 'import oneflow as flow\n'), ((3259, 3277), 'numpy.sign', 'np.sign', (['new_z_val'], {}), 
'(new_z_val)\n', (3266, 3277), True, 'import numpy as np\n'), ((5731, 5760), 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), '()\n', (5758, 5760), False, 'import tempfile\n'), ((5798, 5829), 'oneflow.save', 'flow.save', (['state_dict', 'save_dir'], {}), '(state_dict, save_dir)\n', (5807, 5829), True, 'import oneflow as flow\n'), ((5867, 5886), 'oneflow.load', 'flow.load', (['save_dir'], {}), '(save_dir)\n', (5876, 5886), True, 'import oneflow as flow\n'), ((6569, 6587), 'numpy.sign', 'np.sign', (['new_z_val'], {}), '(new_z_val)\n', (6576, 6587), True, 'import numpy as np\n'), ((3329, 3358), 'numpy.power', 'np.power', (['new_accum', 'lr_power'], {}), '(new_accum, lr_power)\n', (3337, 3358), True, 'import numpy as np\n'), ((6639, 6668), 'numpy.power', 'np.power', (['new_accum', 'lr_power'], {}), '(new_accum, lr_power)\n', (6647, 6668), True, 'import numpy as np\n')] |
from __future__ import absolute_import
import os
import sys
import errno
import shutil
import json
import time
import oneflow as flow
import os.path as osp
import numpy as np
import cv2
import random
def gaussian2D(shape, sigma=1):
    """Return an unnormalized 2-D Gaussian kernel of the given (h, w) shape.

    The peak value is 1 at the center; entries below floating-point noise
    level (relative to the max) are clamped to exactly 0.
    """
    half_h = (shape[0] - 1.) / 2.
    half_w = (shape[1] - 1.) / 2.
    ys, xs = np.ogrid[-half_h:half_h + 1, -half_w:half_w + 1]
    kernel = np.exp(-(xs * xs + ys * ys) / (2 * sigma * sigma))
    # Zero out vanishingly small tails so later max-comparisons stay clean.
    tiny = np.finfo(kernel.dtype).eps * kernel.max()
    kernel[kernel < tiny] = 0
    return kernel
def draw_dense_reg(regmap, heatmap, center, value, radius, is_offset=False):
    """Paint dense regression targets around `center` into `regmap`.

    Inside a window of (2*radius+1) around `center`, pixels where a fresh
    Gaussian exceeds the existing `heatmap` response take the regression
    value(s) from `value`; other pixels keep their previous regmap content.
    With `is_offset` and a 2-channel value, per-pixel x/y offsets from the
    window center are subtracted.  Returns the updated `regmap`.
    """
    diameter = 2 * radius + 1
    gaussian = gaussian2D((diameter, diameter), sigma=diameter / 6)
    value = np.array(value, dtype=np.float32).reshape(-1, 1, 1)
    dim = value.shape[0]
    reg = np.ones((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32) * value
    if is_offset and dim == 2:
        # Subtract per-pixel x/y displacement so each location regresses
        # toward the window center.
        delta = np.arange(diameter * 2 + 1) - radius
        reg[0] = reg[0] - delta.reshape(1, -1)
        reg[1] = reg[1] - delta.reshape(-1, 1)
    x, y = int(center[0]), int(center[1])
    height, width = heatmap.shape[0:2]
    # Clip the paste window against the map borders.
    left, right = min(x, radius), min(width - x, radius + 1)
    top, bottom = min(y, radius), min(height - y, radius + 1)
    masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
    masked_regmap = regmap[:, y - top:y + bottom, x - left:x + right]
    masked_gaussian = gaussian[radius - top:radius + bottom,
                               radius - left:radius + right]
    masked_reg = reg[:, radius - top:radius + bottom,
                     radius - left:radius + right]
    if min(masked_gaussian.shape) > 0 and min(masked_heatmap.shape) > 0: # TODO debug
        # Overwrite regmap only where the new Gaussian dominates the
        # existing heatmap response.
        idx = (masked_gaussian >= masked_heatmap).reshape(
            1, masked_gaussian.shape[0], masked_gaussian.shape[1])
        masked_regmap = (1 - idx) * masked_regmap + idx * masked_reg
    regmap[:, y - top:y + bottom, x - left:x + right] = masked_regmap
    return regmap
def grayscale(image):
    """Convert a BGR image to a single-channel grayscale image via OpenCV."""
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return gray
def lighting_(data_rng, image, alphastd, eigval, eigvec):
    """Apply PCA-based color jitter ("lighting" augmentation) to `image` in place."""
    # Random per-channel weights along the color eigen-directions.
    alpha = data_rng.normal(scale=alphastd, size=(3,))
    shift = np.dot(eigvec, eigval * alpha)
    image += shift
def blend_(alpha, image1, image2):
    """In-place blend: image1 <- alpha * image1 + (1 - alpha) * image2.

    NOTE: image2 is also scaled by (1 - alpha) in place as a side effect
    when it is a mutable array.
    """
    weight2 = 1 - alpha
    image1 *= alpha
    image2 *= weight2
    image1 += image2
def saturation_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter saturation in place by blending toward the grayscale image."""
    scale = 1. + data_rng.uniform(low=-var, high=var)
    blend_(scale, image, gs[:, :, None])
def brightness_(data_rng, image, gs, gs_mean, var):
    """Randomly scale overall brightness of `image` in place.

    `gs` and `gs_mean` are unused here; the signature matches the other
    color-augmentation helpers so they can be called interchangeably.
    """
    factor = 1. + data_rng.uniform(low=-var, high=var)
    image *= factor
def contrast_(data_rng, image, gs, gs_mean, var):
    """Randomly jitter contrast in place by blending toward the mean gray level."""
    factor = 1. + data_rng.uniform(low=-var, high=var)
    blend_(factor, image, gs_mean)
def color_aug(data_rng, image, eig_val, eig_vec):
    """Apply brightness/contrast/saturation jitter in random order, then
    PCA lighting noise — all in place on `image`."""
    augmentations = [brightness_, contrast_, saturation_]
    random.shuffle(augmentations)
    gs = grayscale(image)
    gs_mean = gs.mean()
    for aug in augmentations:
        aug(data_rng, image, gs, gs_mean, 0.4)
    lighting_(data_rng, image, 0.1, eig_val, eig_vec)
def get_3rd_point(a, b):
    """Return the third corner of a right triangle: b plus the 90-degree
    rotation of the vector (a - b)."""
    direction = a - b
    perpendicular = np.array([-direction[1], direction[0]], dtype=np.float32)
    return b + perpendicular
def get_dir(src_point, rot_rad):
    """Rotate the 2-D vector `src_point` by `rot_rad` radians
    (counter-clockwise); returns a [x, y] list."""
    sin_r = np.sin(rot_rad)
    cos_r = np.cos(rot_rad)
    return [
        src_point[0] * cos_r - src_point[1] * sin_r,
        src_point[0] * sin_r + src_point[1] * cos_r,
    ]
def get_affine_transform(center,
                         scale,
                         rot,
                         output_size,
                         shift=None,
                         inv=0):
    """Build the 2x3 affine matrix warping a source region to `output_size`.

    Three reference points of the source box (its center, a point rotated
    `rot` degrees above it, and a derived third corner) are mapped onto
    the corresponding points of the destination box.

    Args:
        center: (x, y) center of the source region.
        scale: scalar or (w, h) extent of the source region.
        rot: rotation angle in degrees.
        output_size: (w, h) of the destination image.
        shift: optional fractional (x, y) shift of the source center in
            units of `scale`; defaults to no shift.
        inv: if truthy, return the inverse (destination -> source) transform.
    """
    # BUG FIX: `shift` previously defaulted to a single mutable np.array
    # created once at import time; any in-place mutation by a caller would
    # silently leak into every subsequent call.  Build a fresh array per call.
    if shift is None:
        shift = np.array([0, 0], dtype=np.float32)
    if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
        scale = np.array([scale, scale], dtype=np.float32)
    scale_tmp = scale
    src_w = scale_tmp[0]
    dst_w = output_size[0]
    dst_h = output_size[1]
    rot_rad = np.pi * rot / 180
    # Second reference point: half the box height above the center, rotated.
    src_dir = get_dir([0, src_w * -0.5], rot_rad)
    dst_dir = np.array([0, dst_w * -0.5], np.float32)
    src = np.zeros((3, 2), dtype=np.float32)
    dst = np.zeros((3, 2), dtype=np.float32)
    src[0, :] = center + scale_tmp * shift
    src[1, :] = center + src_dir + scale_tmp * shift
    dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
    dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
    # Third point completes the (non-degenerate) triangle for both boxes.
    src[2:, :] = get_3rd_point(src[0, :], src[1, :])
    dst[2:, :] = get_3rd_point(dst[0, :], dst[1, :])
    if inv:
        trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
    else:
        trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
    return trans
def affine_transform(pt, t):
    """Apply the 2x3 affine matrix `t` to the 2-D point `pt` and return
    the transformed (x, y)."""
    homogeneous = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
    mapped = np.dot(t, homogeneous)
    return mapped[:2]
def mkdir_if_missing(directory):
    """Create `directory` (including parents) unless it already exists.

    Tolerates the race where another process creates the directory between
    the existence check and makedirs.
    """
    if osp.exists(directory):
        return
    try:
        os.makedirs(directory)
    except OSError as err:
        # Concurrent creation is fine; anything else is a real failure.
        if err.errno != errno.EEXIST:
            raise
class AverageMeter(object):
    """Tracks the latest value plus a running sum, count, and mean.

    Code imported from https://github.com/pytorch/examples/blob/master/imagenet/main.py#L247-L262
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count
def save_checkpoint(is_best, fpath='checkpoint_models'):
    # Persist the current OneFlow model state under `fpath`; when `is_best`
    # is True, also copy the checkpoint next to it as 'best_model.pth.tar'.
    mkdir_if_missing(osp.dirname(fpath))
    check_point = flow.train.CheckPoint() # construct the CheckPoint object
    check_point.init()
    check_point.save(fpath)
    if is_best:
        shutil.copy(fpath, osp.join(osp.dirname(fpath), 'best_model.pth.tar'))
class Logger(object):
    """
    Write console output to external text file.
    Code imported from https://github.com/Cysu/open-reid/blob/master/reid/utils/logging.py.
    """
    def __init__(self, fpath=None):
        self.console = sys.stdout
        self.file = None
        if fpath is not None:
            mkdir_if_missing(os.path.dirname(fpath))
            self.file = open(fpath, 'w')

    def __del__(self):
        self.close()

    def __enter__(self):
        # BUG FIX: previously returned None, so `with Logger(...) as log:`
        # bound `log` to None and any method call inside the block crashed.
        return self

    def __exit__(self, *args):
        self.close()

    def write(self, msg):
        """Echo `msg` to the console and, when a log file is open, to it too."""
        self.console.write(msg)
        if self.file is not None:
            self.file.write(msg)

    def flush(self):
        """Flush both sinks; fsync the file so the log survives a crash."""
        self.console.flush()
        if self.file is not None:
            self.file.flush()
            os.fsync(self.file.fileno())

    def close(self):
        # NOTE(review): this also closes self.console (usually sys.stdout),
        # which the logger does not own — kept for backward compatibility,
        # but stdout becomes unusable afterwards; verify callers expect this.
        self.console.close()
        if self.file is not None:
            self.file.close()
def read_json(fpath):
    """Load and return the JSON document stored at `fpath`."""
    with open(fpath, 'r') as fh:
        return json.load(fh)
def write_json(obj, fpath):
    """Serialize `obj` as pretty-printed JSON at `fpath`, creating parent
    directories as needed."""
    mkdir_if_missing(osp.dirname(fpath))
    with open(fpath, 'w') as fh:
        json.dump(obj, fh, indent=4, separators=(',', ': '))
def match_top_k(predictions, labels, top_k=1):
    """Count rows of `predictions` whose top-k scores include the true label.

    `predictions` must expose .numpy() returning a (rows, classes) array;
    `labels` is a 1-D array of class indices.  Returns (num_matched, num_rows).
    """
    # Indices of the k highest-scoring classes per row (unordered within the k).
    topk_indices = np.argpartition(predictions.numpy(), -top_k)[:, -top_k:]
    hits = np.logical_or.reduce(topk_indices == labels.reshape((-1, 1)), axis=1)
    return hits.sum(), hits.shape[0]
class StopWatch(object):
    """Simple wall-clock timer with split (lap) support."""

    def __init__(self):
        pass

    def start(self):
        """Begin timing; the first split is measured from here."""
        now = time.time()
        self.start_time = now
        self.last_split = now

    def split(self):
        """Return seconds elapsed since the previous split (or start) and
        reset the lap marker."""
        now = time.time()
        elapsed = now - self.last_split
        self.last_split = now
        return elapsed

    def stop(self):
        """Record the stop time used by duration()."""
        self.stop_time = time.time()

    def duration(self):
        """Total seconds between start() and stop()."""
        return self.stop_time - self.start_time
class Metric(object):
    """Accumulates top-1/top-k accuracy and throughput over batches and
    periodically prints (and optionally logs) them.

    NOTE(review): `Summary` is not defined or imported anywhere in this
    module, so constructing Metric raises NameError unless Summary is
    injected into the module namespace elsewhere — verify before use.
    `batch_size` is accepted but never stored.
    """
    def __init__(self, summary=None, save_summary_steps=-1, desc='train', calculate_batches=-1,
                 batch_size=256, top_k=5, prediction_key='predictions', label_key='labels',
                 loss_key=None):
        self.summary = summary
        self.save_summary = isinstance(self.summary, Summary)
        self.save_summary_steps = save_summary_steps
        self.desc = desc
        self.calculate_batches = calculate_batches
        self.top_k = top_k
        self.prediction_key = prediction_key
        self.label_key = label_key
        self.loss_key = loss_key
        # The report line includes loss only when a loss_key is configured.
        if loss_key:
            self.fmt = "{}: epoch {}, iter {}, loss: {:.6f}, top_1: {:.6f}, top_k: {:.6f}, samples/s: {:.3f}"
        else:
            self.fmt = "{}: epoch {}, iter {}, top_1: {:.6f}, top_k: {:.6f}, samples/s: {:.3f}"
        self.timer = StopWatch()
        self.timer.start()
        self._clear()
    def _clear(self):
        # Reset the per-window accumulators.
        self.top_1_num_matched = 0
        self.top_k_num_matched = 0
        self.num_samples = 0.0
    def metric_cb(self, epoch, step):
        """Return a callback(outputs) that accumulates stats for this step
        and reports every `calculate_batches` steps."""
        def callback(outputs):
            if step == 0: self._clear()
            if self.prediction_key:
                # Top-1 and top-k hit counts for this batch.
                num_matched, num_samples = match_top_k(outputs[self.prediction_key],
                                                       outputs[self.label_key])
                self.top_1_num_matched += num_matched
                num_matched, _ = match_top_k(outputs[self.prediction_key],
                                             outputs[self.label_key], self.top_k)
                self.top_k_num_matched += num_matched
            else:
                num_samples = outputs[self.label_key].shape[0]
            self.num_samples += num_samples
            if (step + 1) % self.calculate_batches == 0:
                # timer.split() measures the wall time of the last window.
                throughput = self.num_samples / self.timer.split()
                if self.prediction_key:
                    top_1_accuracy = self.top_1_num_matched / self.num_samples
                    top_k_accuracy = self.top_k_num_matched / self.num_samples
                else:
                    top_1_accuracy = 0.0
                    top_k_accuracy = 0.0
                if self.loss_key:
                    loss = outputs[self.loss_key].mean()
                    print(self.fmt.format(self.desc, epoch, step + 1, loss, top_1_accuracy,
                                          top_k_accuracy, throughput), time.time())
                    if self.save_summary:
                        self.summary.scalar(self.desc+"_" + self.loss_key, loss, epoch, step)
                else:
                    print(self.fmt.format(self.desc, epoch, step + 1, top_1_accuracy,
                                          top_k_accuracy, throughput), time.time())
                self._clear()
                if self.save_summary:
                    self.summary.scalar(self.desc + "_throughput", throughput, epoch, step)
                    if self.prediction_key:
                        self.summary.scalar(self.desc + "_top_1", top_1_accuracy, epoch, step)
                        self.summary.scalar(self.desc + "_top_{}".format(self.top_k),
                                            top_k_accuracy, epoch, step)
            if self.save_summary:
                # Flush the summary writer every save_summary_steps steps.
                if (step + 1) % self.save_summary_steps == 0:
                    self.summary.save()
        return callback
| [
"oneflow.train.CheckPoint"
] | [((338, 384), 'numpy.exp', 'np.exp', (['(-(x * x + y * y) / (2 * sigma * sigma))'], {}), '(-(x * x + y * y) / (2 * sigma * sigma))\n', (344, 384), True, 'import numpy as np\n'), ((1976, 2015), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (1988, 2015), False, 'import cv2\n'), ((2148, 2178), 'numpy.dot', 'np.dot', (['eigvec', '(eigval * alpha)'], {}), '(eigvec, eigval * alpha)\n', (2154, 2178), True, 'import numpy as np\n'), ((2825, 2850), 'random.shuffle', 'random.shuffle', (['functions'], {}), '(functions)\n', (2839, 2850), False, 'import random\n'), ((3573, 3607), 'numpy.array', 'np.array', (['[0, 0]'], {'dtype': 'np.float32'}), '([0, 0], dtype=np.float32)\n', (3581, 3607), True, 'import numpy as np\n'), ((3986, 4025), 'numpy.array', 'np.array', (['[0, dst_w * -0.5]', 'np.float32'], {}), '([0, dst_w * -0.5], np.float32)\n', (3994, 4025), True, 'import numpy as np\n'), ((4039, 4073), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (4047, 4073), True, 'import numpy as np\n'), ((4085, 4119), 'numpy.zeros', 'np.zeros', (['(3, 2)'], {'dtype': 'np.float32'}), '((3, 2), dtype=np.float32)\n', (4093, 4119), True, 'import numpy as np\n'), ((4753, 4770), 'numpy.dot', 'np.dot', (['t', 'new_pt'], {}), '(t, new_pt)\n', (4759, 4770), True, 'import numpy as np\n'), ((729, 797), 'numpy.ones', 'np.ones', (['(dim, diameter * 2 + 1, diameter * 2 + 1)'], {'dtype': 'np.float32'}), '((dim, diameter * 2 + 1, diameter * 2 + 1), dtype=np.float32)\n', (736, 797), True, 'import numpy as np\n'), ((3095, 3146), 'numpy.array', 'np.array', (['[-direct[1], direct[0]]'], {'dtype': 'np.float32'}), '([-direct[1], direct[0]], dtype=np.float32)\n', (3103, 3146), True, 'import numpy as np\n'), ((3199, 3214), 'numpy.sin', 'np.sin', (['rot_rad'], {}), '(rot_rad)\n', (3205, 3214), True, 'import numpy as np\n'), ((3216, 3231), 'numpy.cos', 'np.cos', (['rot_rad'], {}), '(rot_rad)\n', (3222, 
3231), True, 'import numpy as np\n'), ((3735, 3777), 'numpy.array', 'np.array', (['[scale, scale]'], {'dtype': 'np.float32'}), '([scale, scale], dtype=np.float32)\n', (3743, 3777), True, 'import numpy as np\n'), ((4279, 4327), 'numpy.array', 'np.array', (['[dst_w * 0.5, dst_h * 0.5]', 'np.float32'], {}), '([dst_w * 0.5, dst_h * 0.5], np.float32)\n', (4287, 4327), True, 'import numpy as np\n'), ((4690, 4737), 'numpy.array', 'np.array', (['[pt[0], pt[1], 1.0]'], {'dtype': 'np.float32'}), '([pt[0], pt[1], 1.0], dtype=np.float32)\n', (4698, 4737), True, 'import numpy as np\n'), ((4842, 4863), 'os.path.exists', 'osp.exists', (['directory'], {}), '(directory)\n', (4852, 4863), True, 'import os.path as osp\n'), ((5666, 5689), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (5687, 5689), True, 'import oneflow as flow\n'), ((6916, 6928), 'json.load', 'json.load', (['f'], {}), '(f)\n', (6925, 6928), False, 'import json\n'), ((7000, 7018), 'os.path.dirname', 'osp.dirname', (['fpath'], {}), '(fpath)\n', (7011, 7018), True, 'import os.path as osp\n'), ((7062, 7113), 'json.dump', 'json.dump', (['obj', 'f'], {'indent': '(4)', 'separators': "(',', ': ')"}), "(obj, f, indent=4, separators=(',', ': '))\n", (7071, 7113), False, 'import json\n'), ((7529, 7540), 'time.time', 'time.time', ([], {}), '()\n', (7538, 7540), False, 'import time\n'), ((7623, 7634), 'time.time', 'time.time', ([], {}), '()\n', (7632, 7634), False, 'import time\n'), ((7782, 7793), 'time.time', 'time.time', ([], {}), '()\n', (7791, 7793), False, 'import time\n'), ((640, 673), 'numpy.array', 'np.array', (['value'], {'dtype': 'np.float32'}), '(value, dtype=np.float32)\n', (648, 673), True, 'import numpy as np\n'), ((855, 882), 'numpy.arange', 'np.arange', (['(diameter * 2 + 1)'], {}), '(diameter * 2 + 1)\n', (864, 882), True, 'import numpy as np\n'), ((4503, 4518), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (4513, 4518), True, 'import numpy as np\n'), ((4520, 4535), 
'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (4530, 4535), True, 'import numpy as np\n'), ((4588, 4603), 'numpy.float32', 'np.float32', (['src'], {}), '(src)\n', (4598, 4603), True, 'import numpy as np\n'), ((4605, 4620), 'numpy.float32', 'np.float32', (['dst'], {}), '(dst)\n', (4615, 4620), True, 'import numpy as np\n'), ((4892, 4914), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (4903, 4914), False, 'import os\n'), ((5623, 5641), 'os.path.dirname', 'osp.dirname', (['fpath'], {}), '(fpath)\n', (5634, 5641), True, 'import os.path as osp\n'), ((6216, 6238), 'os.path.dirname', 'os.path.dirname', (['fpath'], {}), '(fpath)\n', (6231, 6238), False, 'import os\n'), ((396, 413), 'numpy.finfo', 'np.finfo', (['h.dtype'], {}), '(h.dtype)\n', (404, 413), True, 'import numpy as np\n'), ((5831, 5849), 'os.path.dirname', 'osp.dirname', (['fpath'], {}), '(fpath)\n', (5842, 5849), True, 'import os.path as osp\n'), ((10362, 10373), 'time.time', 'time.time', ([], {}), '()\n', (10371, 10373), False, 'import time\n'), ((10695, 10706), 'time.time', 'time.time', ([], {}), '()\n', (10704, 10706), False, 'import time\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.amin,
"""
amin(input, dim, keepdim=False) -> Tensor
This function is equivalent to PyTorch’s amin function.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.amin.html.
Returns the minimum value of each slice of the `input` tensor in the given dimension(s) `dim`.
If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed (see :func:`oneflow.squeeze`), resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).
Parameters:
input (oneflow.Tensor): the input Tensor.
dim (int, Tuple[int]): the dimension or dimensions to reduce.
keepdim (bool): whether the output tensor has `dim` retained or not.
Example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> flow.amin(x, 1)
tensor([[0, 1],
[4, 5]], dtype=oneflow.int64)
>>> flow.amin(x, 0)
tensor([[0, 1],
[2, 3]], dtype=oneflow.int64)
>>> flow.amin(x)
tensor(0, dtype=oneflow.int64)
>>> flow.amin(x, 0, True)
tensor([[[0, 1],
[2, 3]]], dtype=oneflow.int64)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2038), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.amin', '"""\n amin(input, dim, keepdim=False) -> Tensor \n \n This function is equivalent to PyTorch’s amin function. \n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.amin.html.\n \n Returns the minimum value of each slice of the `input` tensor in the given dimension(s) `dim`.\n\n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. Otherwise, `dim` is squeezed (see :func:`oneflow.squeeze`), resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).\n \n Parameters:\n input (oneflow.Tensor): the input Tensor.\n dim (int, Tuple[int]): the dimension or dimensions to reduce. \n keepdim (bool): whether the output tensor has `dim` retained or not.\n \n Example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> flow.amin(x, 1)\n tensor([[0, 1],\n [4, 5]], dtype=oneflow.int64)\n >>> flow.amin(x, 0)\n tensor([[0, 1],\n [2, 3]], dtype=oneflow.int64)\n >>> flow.amin(x)\n tensor(0, dtype=oneflow.int64)\n >>> flow.amin(x, 0, True)\n tensor([[[0, 1],\n [2, 3]]], dtype=oneflow.int64)\n """'], {}), '(oneflow.amin,\n """\n amin(input, dim, keepdim=False) -> Tensor \n \n This function is equivalent to PyTorch’s amin function. \n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.amin.html.\n \n Returns the minimum value of each slice of the `input` tensor in the given dimension(s) `dim`.\n\n If `keepdim` is `True`, the output tensor is of the same size as `input` except in the dimension(s) `dim` where it is of size 1. 
Otherwise, `dim` is squeezed (see :func:`oneflow.squeeze`), resulting in the output tensor having 1 (or `len(dim)`) fewer dimension(s).\n \n Parameters:\n input (oneflow.Tensor): the input Tensor.\n dim (int, Tuple[int]): the dimension or dimensions to reduce. \n keepdim (bool): whether the output tensor has `dim` retained or not.\n \n Example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> flow.amin(x, 1)\n tensor([[0, 1],\n [4, 5]], dtype=oneflow.int64)\n >>> flow.amin(x, 0)\n tensor([[0, 1],\n [2, 3]], dtype=oneflow.int64)\n >>> flow.amin(x)\n tensor(0, dtype=oneflow.int64)\n >>> flow.amin(x, 0, True)\n tensor([[[0, 1],\n [2, 3]]], dtype=oneflow.int64)\n """\n )\n', (670, 2038), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
import os
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import numpy as np
import oneflow as flow
import oneflow._oneflow_internal
from oneflow_gpt.config import get_args
from oneflow_gpt import distribute as dist
from oneflow_gpt.data import GPTDataLoader
from oneflow_gpt.model import GPTModel, Embedding, Logits
from oneflow_gpt.model import Transformer, TransformerLayer, ActivationCheckpointing
from oneflow_gpt.model import ParallelSparseSoftmaxCrossEntropyLoss
from oneflow_gpt.optimizer import make_optimizer, make_lr_scheduler, make_grad_scaler
from oneflow_gpt.logger import print_rank_0, print_rank_last, Logger
from oneflow_gpt.logger import IterationMetric, AccumulationMetric
from oneflow_gpt.logger import LossMetric, ThroughputMetric
class Trainer(object):
    """Top-level GPT training driver.

    Builds the model, data loader, loss, optimizer and (in graph mode) the
    compiled training graph; calling the instance runs the training loop.
    """
    def __init__(self):
        self.args = get_args()
        self.rank = flow.env.get_rank()
        self.world_size = flow.env.get_world_size()
        self.model = GPTModel()
        self.data_loader = GPTDataLoader()
        self.cross_entropy = ParallelSparseSoftmaxCrossEntropyLoss()
        self.optimizer = make_optimizer(self.args, self.model)
        self.lr_scheduler = make_lr_scheduler(self.args, self.optimizer)
        # NOTE(zwx): grad scaler is not available in eager mode
        self.grad_scaler = make_grad_scaler(self.args)
        if self.args.graph:
            # Let NCCL collectives share the compute stream in graph mode.
            flow.boxing.nccl.enable_use_compute_stream(True)
            self.train_graph = GPTGraph(
                self.model,
                self.data_loader,
                self.cross_entropy,
                self.optimizer,
                self.lr_scheduler,
                self.grad_scaler,
            )
        self.logger = Logger(self.rank)
        # Metric registry; the trailing args presumably are a format string
        # and a reset-after-print flag — TODO confirm against Logger's API.
        self.logger.register_metric("iter", IterationMetric())
        self.logger.register_metric("samples", AccumulationMetric())
        self.logger.register_metric("loss", LossMetric(), "loss: {:.5f}", True)
        self.logger.register_metric(
            "throughput", ThroughputMetric(), "throughput: {:.2f}", True
        )
    def __call__(self):
        """Run the training loop for ``args.train_iters`` iterations.

        Only graph mode is implemented; eager mode raises NotImplementedError.
        """
        iteration = 0
        while iteration < self.args.train_iters:
            if self.args.graph:
                loss = self.train_graph()
            else:
                # Eager-mode training is not wired up yet (see train_eager).
                raise NotImplementedError
            if loss.is_consistent:
                # Pull the consistent (global) loss tensor back to this rank.
                loss = loss.to_local()
            iteration += 1
            print(f"[{self.rank}] iteration: {iteration}")
            self.logger.meter("samples", self.args.global_batch_size)
            self.logger.meter("loss", loss)
            self.logger.meter("throughput", self.args.global_batch_size)
            if iteration % self.args.log_interval == 0:
                self.logger.meter("iter", iteration)
                # Print from the last rank — presumably the final pipeline
                # stage that owns the loss; verify against Logger semantics.
                self.logger.print_metrics([self.world_size - 1])
        print(f"[{self.rank}] training finished")
    def train_eager(self):
        """One eager-mode step: zero grads, forward, loss, backward, update.

        Currently unreachable from __call__ (kept for future eager support).
        """
        data, label = self.data_loader()
        logits = self.model(data)
        loss = self.cross_entropy(logits, label)
        # BUG FIX: the original called optimizer.zero_grad() *between*
        # backward() and step(), which would wipe the freshly computed
        # gradients before the update. Gradients are now cleared first.
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        self.lr_scheduler.step()
        return loss
    def save(self, subdir):
        """Save the model state dict under ``checkpoint_save_path/subdir``.

        No-op when no checkpoint save path was configured.
        """
        if self.args.checkpoint_save_path is None:
            return
        save_path = os.path.join(self.args.checkpoint_save_path, subdir)
        print_rank_0(f"Saving model to {save_path}")
        state_dict = self.model.state_dict()
        flow.save(state_dict, save_path, consistent_dst_rank=0)
class GPTGraph(flow.nn.Graph):
    """Static-graph wrapper around the GPT model, data loader and loss.

    When an optimizer is supplied the graph is built in training mode
    (backward pass enabled); the constructor also applies activation
    checkpointing, pipeline stage placement, gradient accumulation, AMP
    and kernel-fusion configuration.
    """
    def __init__(
        self,
        model,
        data_loader,
        cross_entropy,
        optimizer=None,
        lr_scheduler=None,
        grad_scaler=None,
    ):
        super().__init__()
        self.model = model
        self.data_loader = data_loader
        self.cross_entropy = cross_entropy
        # Training mode is implied by the presence of an optimizer.
        self.is_train = False
        if optimizer is not None:
            self.is_train = True
            self.add_optimizer(optimizer, lr_sch=lr_scheduler)
            if grad_scaler is not None:
                self.set_grad_scaler(grad_scaler)
        args = get_args()
        self.set_activation_checkpointing()
        self.set_pipeline_stage_id()
        self.config.set_gradient_accumulation_steps(args.num_accumulation_steps)
        if args.fp16:
            self.config.enable_amp(True)
        # Kernel-fusion options (elementwise add, model update, cast+scale).
        self.config.allow_fuse_add_to_output(True)
        self.config.allow_fuse_model_update_ops(True)
        self.config.allow_fuse_cast_scale(True)
    def set_activation_checkpointing(self):
        """Enable activation checkpointing on every TransformerLayer module."""
        for module_block in self.model.modules():
            if isinstance(module_block.origin, TransformerLayer):
                module_block.config.activation_checkpointing = True
    def set_pipeline_stage_id(self):
        """Assign each module to a pipeline-parallel stage.

        Layout: data loading and embeddings on the first stage, transformer
        layers distributed by their ``layer_idx``, and the final transformer
        block / logits / label decoding / loss on the last stage (-1).
        """
        dist_util = dist.get_dist_util()
        self.data_loader.config.stage_id = dist_util.get_layer_stage_id(0)
        self.data_loader.data_decoder.config.stage_id = dist_util.get_layer_stage_id(0)
        for module_block in self.model.modules():
            if isinstance(module_block.origin, Embedding):
                module_block.config.stage_id = dist_util.get_layer_stage_id(0)
            elif isinstance(
                module_block.origin, (TransformerLayer, ActivationCheckpointing)
            ):
                # Per-layer placement keyed on the layer's index.
                module_block.config.stage_id = dist_util.get_layer_stage_id(
                    module_block.origin.layer_idx
                )
            elif isinstance(module_block.origin, Transformer):
                module_block.config.stage_id = dist_util.get_layer_stage_id(-1)
            elif isinstance(module_block.origin, Logits):
                module_block.config.stage_id = dist_util.get_layer_stage_id(-1)
            else:
                # Other modules keep whatever stage they inherit.
                pass
        self.data_loader.label_decoder.config.stage_id = dist_util.get_layer_stage_id(
            -1
        )
        self.cross_entropy.config.stage_id = dist_util.get_layer_stage_id(-1)
    def build(self):
        """Graph trace: data -> logits -> loss (+ backward when training)."""
        data, label = self.data_loader()
        logits = self.model(data)
        loss = self.cross_entropy(logits, label)
        if self.is_train:
            loss.backward()
        return loss
if __name__ == "__main__":
    # Construct the trainer and immediately run the training loop.
    trainer = Trainer()
    trainer()
    print(f"[{flow.env.get_rank()}] exit")
| [
"oneflow.boxing.nccl.enable_use_compute_stream",
"oneflow.env.get_rank",
"oneflow.env.get_world_size",
"oneflow.save"
] | [((883, 893), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (891, 893), False, 'from oneflow_gpt.config import get_args\n'), ((914, 933), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (931, 933), True, 'import oneflow as flow\n'), ((960, 985), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (983, 985), True, 'import oneflow as flow\n'), ((1007, 1017), 'oneflow_gpt.model.GPTModel', 'GPTModel', ([], {}), '()\n', (1015, 1017), False, 'from oneflow_gpt.model import GPTModel, Embedding, Logits\n'), ((1045, 1060), 'oneflow_gpt.data.GPTDataLoader', 'GPTDataLoader', ([], {}), '()\n', (1058, 1060), False, 'from oneflow_gpt.data import GPTDataLoader\n'), ((1090, 1129), 'oneflow_gpt.model.ParallelSparseSoftmaxCrossEntropyLoss', 'ParallelSparseSoftmaxCrossEntropyLoss', ([], {}), '()\n', (1127, 1129), False, 'from oneflow_gpt.model import ParallelSparseSoftmaxCrossEntropyLoss\n'), ((1155, 1192), 'oneflow_gpt.optimizer.make_optimizer', 'make_optimizer', (['self.args', 'self.model'], {}), '(self.args, self.model)\n', (1169, 1192), False, 'from oneflow_gpt.optimizer import make_optimizer, make_lr_scheduler, make_grad_scaler\n'), ((1221, 1265), 'oneflow_gpt.optimizer.make_lr_scheduler', 'make_lr_scheduler', (['self.args', 'self.optimizer'], {}), '(self.args, self.optimizer)\n', (1238, 1265), False, 'from oneflow_gpt.optimizer import make_optimizer, make_lr_scheduler, make_grad_scaler\n'), ((1424, 1451), 'oneflow_gpt.optimizer.make_grad_scaler', 'make_grad_scaler', (['self.args'], {}), '(self.args)\n', (1440, 1451), False, 'from oneflow_gpt.optimizer import make_optimizer, make_lr_scheduler, make_grad_scaler\n'), ((1849, 1866), 'oneflow_gpt.logger.Logger', 'Logger', (['self.rank'], {}), '(self.rank)\n', (1855, 1866), False, 'from oneflow_gpt.logger import print_rank_0, print_rank_last, Logger\n'), ((3532, 3584), 'os.path.join', 'os.path.join', (['self.args.checkpoint_save_path', 'subdir'], {}), 
'(self.args.checkpoint_save_path, subdir)\n', (3544, 3584), False, 'import os\n'), ((3593, 3637), 'oneflow_gpt.logger.print_rank_0', 'print_rank_0', (['f"""Saving model to {save_path}"""'], {}), "(f'Saving model to {save_path}')\n", (3605, 3637), False, 'from oneflow_gpt.logger import print_rank_0, print_rank_last, Logger\n'), ((3692, 3747), 'oneflow.save', 'flow.save', (['state_dict', 'save_path'], {'consistent_dst_rank': '(0)'}), '(state_dict, save_path, consistent_dst_rank=0)\n', (3701, 3747), True, 'import oneflow as flow\n'), ((4358, 4368), 'oneflow_gpt.config.get_args', 'get_args', ([], {}), '()\n', (4366, 4368), False, 'from oneflow_gpt.config import get_args\n'), ((5036, 5056), 'oneflow_gpt.distribute.get_dist_util', 'dist.get_dist_util', ([], {}), '()\n', (5054, 5056), True, 'from oneflow_gpt import distribute as dist\n'), ((72, 97), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n'), ((1493, 1541), 'oneflow.boxing.nccl.enable_use_compute_stream', 'flow.boxing.nccl.enable_use_compute_stream', (['(True)'], {}), '(True)\n', (1535, 1541), True, 'import oneflow as flow\n'), ((1911, 1928), 'oneflow_gpt.logger.IterationMetric', 'IterationMetric', ([], {}), '()\n', (1926, 1928), False, 'from oneflow_gpt.logger import IterationMetric, AccumulationMetric\n'), ((1977, 1997), 'oneflow_gpt.logger.AccumulationMetric', 'AccumulationMetric', ([], {}), '()\n', (1995, 1997), False, 'from oneflow_gpt.logger import IterationMetric, AccumulationMetric\n'), ((2043, 2055), 'oneflow_gpt.logger.LossMetric', 'LossMetric', ([], {}), '()\n', (2053, 2055), False, 'from oneflow_gpt.logger import LossMetric, ThroughputMetric\n'), ((2142, 2160), 'oneflow_gpt.logger.ThroughputMetric', 'ThroughputMetric', ([], {}), '()\n', (2158, 2160), False, 'from oneflow_gpt.logger import LossMetric, ThroughputMetric\n'), ((6470, 6489), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (6487, 6489), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import oneflow.typing as tp
from tsn_model import restsn
import argparse
import time
import mmcv
import os.path as osp
import os
import sys
import tempfile
import numpy as np
from collections import Counter
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
from video_dataset import *
import shutil
def parse_args(argv=None):
    """Parse command-line options for TSN training/testing.

    Args:
        argv: optional list of argument strings. Defaults to ``None``, in
            which case argparse falls back to ``sys.argv[1:]`` — this keeps
            the original no-argument call sites working while making the
            function unit-testable.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Test an action recognizer')
    parser.add_argument('--launcher',
                        choices=['none', 'pytorch', 'mpi', 'slurm'],
                        default='pytorch',
                        help='job launcher')
    parser.add_argument('--out', help='output result file', default='default.pkl')
    parser.add_argument('--num_classes', help='number of class', type=int, default=400)
    parser.add_argument('--use_softmax', action='store_true',
                        help='whether to use softmax score')
    parser.add_argument("--train_num_segments", type=int, default=3, required=False)
    parser.add_argument("--epoch", type=int, default=100, required=False)
    # for oneflow
    parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
    # BUG FIX: the learning rate is fractional; the original ``type=int``
    # would reject any value actually passed on the command line
    # (int("0.1") raises ValueError), silently working only via the default.
    parser.add_argument("--lr", type=float, default=0.1, required=False)
    parser.add_argument("--use_tensorrt", dest="use_tensorrt", action="store_true", default=False, required=False, help="inference with tensorrt")
    parser.add_argument("--model_load_dir", type=str, default='/home/zjlab/liuxy/mmaction/modelzoo/model_pcb', required=False, help="model load directory")
    parser.add_argument("--log_dir", type=str, default="./output", required=False, help="log info save directory")
    parser.add_argument("--image_height", type=int, default=224, required=False)
    parser.add_argument("--image_width", type=int, default=224, required=False)
    parser.add_argument("--train_batch_size", type=int, default=8, required=False)
    parser.add_argument("--out_dir", type=str, default="output/save_model/", required=False)
    args = parser.parse_args(argv)
    return args
# train config
# Module-level setup: parse CLI args once, then configure OneFlow's lazy-job
# function config (default dtype, GPU count, optional TensorRT inference).
args = parse_args()
func_config = flow.function_config()
func_config.default_data_type(flow.float)
flow.config.gpu_device_num(args.gpu_num_per_node)
if args.use_tensorrt:
    func_config.use_tensorrt(True)
# Compiled OneFlow training job: TSN forward pass, softmax cross-entropy
# against one-hot labels, AdamW update with global-norm gradient clipping.
@flow.global_function('train',func_config)
def train_tsn(image:tp.Numpy.Placeholder((args.train_batch_size*args.train_num_segments,3,224,224)),
            label:tp.Numpy.Placeholder((args.train_batch_size,400))):
    # Backbone forward; trainable=True enables gradient computation.
    features = restsn(image, args.train_batch_size, trainable=True)
    loss = flow.nn.softmax_cross_entropy_with_logits(label, features, name="loss_liu")
    # Alternative for integer (sparse) labels, kept for reference:
    # loss = flow.nn.sparse_softmax_cross_entropy_with_logits(label, features, name="loss_liu")
    # Constant learning rate of 1e-5 (no boundaries -> single piecewise value).
    lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.00001])
    # Clip gradients by global norm (40.0) before the AdamW update
    # (weight decay 1e-4).
    gradient_clip = flow.optimizer.grad_clipping.by_global_norm(40.0)
    flow.optimizer.AdamW(lr_scheduler,
                        weight_decay=0.0001,grad_clipping=gradient_clip).minimize(loss)
    # flow.optimizer.Adam(lr_scheduler,
    #                       grad_clipping=gradient_clip).minimize(loss)
    return loss
# Compiled inference job: a fixed (250, 3, 224, 224) frame batch -> logits.
@flow.global_function('predict')
def val_tsn(image:tp.Numpy.Placeholder((250,3,224,224))):
    output= restsn(image, 1, trainable=False)
    # loss = flow.nn.softmax_cross_entropy_with_logits(label, output, name="loss_liu")
    return output
# Load pretrained weights once at import time so both jobs share them.
check_point = flow.train.CheckPoint()
check_point.load(args.model_load_dir)
class TSNTrain(object):
    """Driver that feeds numpy image batches to the global ``train_tsn`` job."""

    def __init__(self):
        # NOTE(review): the original assigned an unused local
        # ``train_mode = True``; that dead code has been removed. The class
        # keeps no per-instance state.
        pass

    def train(self, imgs, labels):
        """Run one training step.

        Args:
            imgs: clip frames; assumed to carry (batch, segment, 3, H, W)
                leading axes — TODO confirm against the dataset's output.
            labels: integer class ids, shape (batch,).

        Returns:
            numpy array holding the per-sample loss from the training job.
        """
        img_group = np.ascontiguousarray(imgs)
        # Collapse the leading (batch, segment) axes into one batch axis to
        # match the training placeholder shape. (The original also computed
        # unused ``bs``/``num_seg`` locals; they were dead code and removed.)
        img_group = img_group.reshape(
            (-1, 3) + img_group.shape[3:])
        # One-hot encode labels to the (batch, num_classes) placeholder shape.
        one_hot_labels = (np.arange(args.num_classes) == labels[:, None]).astype(np.float32)
        loss = train_tsn(img_group, one_hot_labels).get()
        return loss.numpy()
def multi_train():
    """Train for ``args.epoch`` epochs on Kinetics-400, validating each epoch.

    Uses the module-level ``args``, ``train_tsn``/``val_tsn`` jobs and
    ``check_point``. Whenever validation precision improves, the save
    directory is wiped and the model is re-saved, so it always holds the
    best checkpoint.
    """
    global args
    count = 0
    # train VideoDataset config
    ann_file = "data/kinetics400/kinetics400_train_list_videos.txt"
    img_prefix = "/data/liuxy/videos_train_big"
    img_norm_cfg = {'mean': [123.675, 116.28, 103.53], 'std': [58.395, 57.12, 57.375], 'to_rgb': True}
    # One annotation line per training video; use `with` so the handle is
    # closed even if reading fails (the original left it open on error).
    with open(ann_file, 'r') as anno_open:
        anno_len = len(anno_open.readlines())
    oneflow_dataset = VideoDataset(ann_file,
                        img_prefix,
                        img_norm_cfg,
                        num_segments=args.train_num_segments,
                        new_length=1,
                        new_step=1,
                        random_shift=True,
                        modality='RGB',
                        image_tmpl='img_{:05d}.jpg',
                        img_scale=256,
                        input_size=224,
                        div_255=False,
                        flip_ratio=0.5,
                        resize_keep_ratio=True,
                        oversample=None,
                        random_crop=False,
                        more_fix_crop=False,
                        multiscale_crop=True,
                        scales=[1, 0.875, 0.75, 0.66],
                        max_distort=1,
                        test_mode=False)
    # val VideoDataset config
    ann_file_val = "data/kinetics400/kinetics400_val_list_videos.txt"
    img_prefix_val = "/data/liuxy/videos_val_mp4"
    with open(ann_file_val, 'r') as anno_open_val:
        anno_len_val = len(anno_open_val.readlines())
    oneflow_dataset_val = VideoDataset(ann_file_val, img_prefix_val, img_norm_cfg)
    flow.env.grpc_use_no_signal()
    flow.env.log_dir(args.log_dir)
    obj = TSNTrain()
    val_prec = 0
    for j in range(args.epoch):
        wrong_count = 0
        iter_num = anno_len // args.train_batch_size
        for i in range(iter_num):
            img_group = []
            labels = []
            for k in range(args.train_batch_size):
                try:
                    img, label = oneflow_dataset[args.train_batch_size * i + k]
                    img_group.append(img)
                    labels.append(label)
                # BUG FIX: the original bare ``except:`` also swallowed
                # KeyboardInterrupt/SystemExit; only data-loading failures
                # should skip the rest of this batch.
                except Exception:
                    print(args.train_batch_size * i + k)
                    break
            img_group = np.array(img_group)
            labels = np.array(labels)
            loss = obj.train(img_group, labels)
            if i % 20 == 0:
                print("Epoch: {}\t iter: [{}/{}]\t loss: {}\t".format(j, i, iter_num, loss.mean()))
        # Validate on the full validation list after each epoch.
        for i in range(anno_len_val):
            img_group, label = oneflow_dataset_val[i]
            array = np.ascontiguousarray(img_group)
            feature = val_tsn(array).get()
            flow_result = np.argmax(feature.numpy().flatten())
            if label != flow_result:
                wrong_count = wrong_count + 1
            count = count + 1
        final_precision = float(anno_len_val - wrong_count) / anno_len_val
        print("val precision is: {}".format(final_precision))
        # Keep only the best checkpoint: wipe the save dir and re-save.
        if final_precision > val_prec:
            val_prec = final_precision
            shutil.rmtree(args.out_dir)
            os.mkdir(args.out_dir)
            check_point.save(args.out_dir)
            print("saving model...")
def main():
    """CLI entry point: parse arguments, validate the output path, train."""
    global args
    args = parse_args()
    # Guard clause: reject a non-pickle output file before any work starts.
    out_path = args.out
    if out_path is not None:
        if not out_path.endswith(('.pkl', '.pickle')):
            raise ValueError('The output file must be a pkl file.')
    multi_train()
# Script entry point.
if __name__ == '__main__':
    main()
| [
"oneflow.function_config",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.train.CheckPoint",
"oneflow.env.log_dir",
"oneflow.optimizer.grad_clipping.by_global_norm",
"oneflow.optimizer.AdamW",
"oneflow.nn.softmax_cross_entropy_with_logits",
"oneflow.global_function",
"oneflow.typing.Numpy.... | [((834, 889), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'UserWarning'}), "('ignore', category=UserWarning)\n", (857, 889), False, 'import warnings\n'), ((2676, 2698), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (2696, 2698), True, 'import oneflow as flow\n'), ((2741, 2790), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (2767, 2790), True, 'import oneflow as flow\n'), ((2855, 2897), 'oneflow.global_function', 'flow.global_function', (['"""train"""', 'func_config'], {}), "('train', func_config)\n", (2875, 2897), True, 'import oneflow as flow\n'), ((3783, 3814), 'oneflow.global_function', 'flow.global_function', (['"""predict"""'], {}), "('predict')\n", (3803, 3814), True, 'import oneflow as flow\n'), ((4038, 4061), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (4059, 4061), True, 'import oneflow as flow\n'), ((964, 1028), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test an action recognizer"""'}), "(description='Test an action recognizer')\n", (987, 1028), False, 'import argparse\n'), ((3086, 3138), 'tsn_model.restsn', 'restsn', (['image', 'args.train_batch_size'], {'trainable': '(True)'}), '(image, args.train_batch_size, trainable=True)\n', (3092, 3138), False, 'from tsn_model import restsn\n'), ((3151, 3226), 'oneflow.nn.softmax_cross_entropy_with_logits', 'flow.nn.softmax_cross_entropy_with_logits', (['label', 'features'], {'name': '"""loss_liu"""'}), "(label, features, name='loss_liu')\n", (3192, 3226), True, 'import oneflow as flow\n'), ((3373, 3427), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[1e-05]'], {}), '([], [1e-05])\n', (3414, 3427), True, 'import oneflow as flow\n'), ((3506, 3555), 'oneflow.optimizer.grad_clipping.by_global_norm', 
'flow.optimizer.grad_clipping.by_global_norm', (['(40.0)'], {}), '(40.0)\n', (3549, 3555), True, 'import oneflow as flow\n'), ((3885, 3918), 'tsn_model.restsn', 'restsn', (['image', '(1)'], {'trainable': '(False)'}), '(image, 1, trainable=False)\n', (3891, 3918), False, 'from tsn_model import restsn\n'), ((5941, 5970), 'oneflow.env.grpc_use_no_signal', 'flow.env.grpc_use_no_signal', ([], {}), '()\n', (5968, 5970), True, 'import oneflow as flow\n'), ((5975, 6005), 'oneflow.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (5991, 6005), True, 'import oneflow as flow\n'), ((2917, 3006), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(args.train_batch_size * args.train_num_segments, 3, 224, 224)'], {}), '((args.train_batch_size * args.train_num_segments, 3, \n 224, 224))\n', (2937, 3006), True, 'import oneflow.typing as tp\n'), ((3019, 3069), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(args.train_batch_size, 400)'], {}), '((args.train_batch_size, 400))\n', (3039, 3069), True, 'import oneflow.typing as tp\n'), ((3833, 3873), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(250, 3, 224, 224)'], {}), '((250, 3, 224, 224))\n', (3853, 3873), True, 'import oneflow.typing as tp\n'), ((4229, 4255), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['imgs'], {}), '(imgs)\n', (4249, 4255), True, 'import numpy as np\n'), ((3560, 3649), 'oneflow.optimizer.AdamW', 'flow.optimizer.AdamW', (['lr_scheduler'], {'weight_decay': '(0.0001)', 'grad_clipping': 'gradient_clip'}), '(lr_scheduler, weight_decay=0.0001, grad_clipping=\n gradient_clip)\n', (3580, 3649), True, 'import oneflow as flow\n'), ((6601, 6620), 'numpy.array', 'np.array', (['img_group'], {}), '(img_group)\n', (6609, 6620), True, 'import numpy as np\n'), ((6642, 6658), 'numpy.array', 'np.array', (['labels'], {}), '(labels)\n', (6650, 6658), True, 'import numpy as np\n'), ((6979, 7010), 'numpy.ascontiguousarray', 'np.ascontiguousarray', 
(['img_group'], {}), '(img_group)\n', (6999, 7010), True, 'import numpy as np\n'), ((7461, 7488), 'shutil.rmtree', 'shutil.rmtree', (['args.out_dir'], {}), '(args.out_dir)\n', (7474, 7488), False, 'import shutil\n'), ((7501, 7523), 'os.mkdir', 'os.mkdir', (['args.out_dir'], {}), '(args.out_dir)\n', (7509, 7523), False, 'import os\n'), ((4439, 4466), 'numpy.arange', 'np.arange', (['args.num_classes'], {}), '(args.num_classes)\n', (4448, 4466), True, 'import numpy as np\n')] |
import math
from ..activations import ACT2FN
from .config_bert import BertConfig
from .tokenization_bert import BertTokenizer
from ..utils import load_state_dict_from_url, load_state_dict_from_file
import oneflow as flow
from oneflow import nn
from oneflow.nn import CrossEntropyLoss
# Download URLs for pretrained BERT weights converted to OneFlow format,
# keyed by the standard model identifiers used by ``from_pretrained``-style
# loaders elsewhere in this package.
model_urls = {
    "bert-base-uncased": "http://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowtext/bert/bert-base-uncased-oneflow.tar.gz",
    "bert-base-cased": "http://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowtext/bert/bert-base-cased-oneflow.tar.gz",
    "bert-large-uncased": "http://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowtext/bert/bert-large-uncased-oneflow.tar.gz",
    "bert-large-cased": "http://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowtext/bert/bert-large-cased-oneflow.tar.gz",
    "bert-base-chinese": "http://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowtext/bert/bert-base-chinese-oneflow.tar.gz",
}
class BertEmbeddings(nn.Module):
    """Construct embeddings from word, position and token-type embeddings,
    followed by LayerNorm and dropout."""
    def __init__(self, config):
        super().__init__()
        # padding_idx keeps the pad token's embedding (and gradient) at zero.
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id
        )
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size
        )
        self.token_type_embeddings = nn.Embedding(
            config.type_vocab_size, config.hidden_size
        )
        # NOTE(review): attribute kept as ``LayerNorm`` (not snake_case),
        # presumably so converted checkpoints load without key remapping.
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids buffer: shape (1, max_position_embeddings), the default
        # 0..N-1 positions broadcast over the batch in forward().
        self.register_buffer(
            "position_ids", flow.arange(config.max_position_embeddings).expand(1, -1)
        )
        # All-zero default token_type_ids; persistent=False keeps it out of
        # the state dict.
        self.register_buffer(
            "token_type_ids",
            flow.zeros(
                self.position_ids.size(),
                dtype=flow.long,
                device=self.position_ids.device,
            ),
            persistent=False,
        )
    def forward(
        self,
        input_ids=None,
        token_type_ids=None,
        position_ids=None,
        inputs_embeds=None,
        past_key_values_length=0,
    ):
        """Sum the three embeddings, then apply LayerNorm and dropout.

        Exactly one of ``input_ids`` / ``inputs_embeds`` is expected; when a
        key/value cache is in use, ``past_key_values_length`` offsets the
        default position ids so new tokens get the correct positions.
        """
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            # inputs_embeds is (batch, seq, hidden); drop the hidden dim.
            input_shape = inputs_embeds.size()[:-1]
        seq_length = input_shape[1]
        if position_ids is None:
            # Default positions start right after any cached prefix.
            position_ids = self.position_ids[
                :, past_key_values_length : seq_length + past_key_values_length
            ]
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                # Use the registered all-zero buffer, broadcast to the batch.
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
                    input_shape[0], seq_length
                )
                token_type_ids = buffered_token_type_ids_expanded
            else:
                # Fallback for instances created without the buffer.
                token_type_ids = flow.zeros(
                    input_shape, dtype=flow.long, device=self.position_ids.device
                )
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + token_type_embeddings
        position_embeddings = self.position_embeddings(position_ids)
        embeddings += position_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings
class BertSelfAttention(nn.Module):
def __init__(self, config):
super().__init__()
if config.hidden_size % config.num_attention_heads != 0 and not hasattr(
config, "embedding_size"
):
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = nn.Linear(config.hidden_size, self.all_head_size)
self.key = nn.Linear(config.hidden_size, self.all_head_size)
self.value = nn.Linear(config.hidden_size, self.all_head_size)
self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
self.is_decoder = config.is_decoder
def transpose_for_scores(self, x):
x = flow.reshape(
x,
[
x.shape[0],
x.shape[1],
self.num_attention_heads,
self.attention_head_size,
],
)
return x.permute(0, 2, 1, 3)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
mixed_query_layer = self.query(hidden_states)
is_cross_attention = encoder_hidden_states is not None
if is_cross_attention and past_key_value is not None:
key_layer = past_key_value[0]
value_layer = past_key_value[1]
attention_mask = encoder_attention_mask
elif is_cross_attention:
key_layer = self.transpose_for_scores(self.key(encoder_hidden_states))
value_layer = self.transpose_for_scores(self.value(encoder_hidden_states))
attention_mask = encoder_attention_mask
elif past_key_value is not None:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
key_layer = flow.cat([past_key_value[0], key_layer], dim=2)
value_layer = flow.cat([past_key_value[1], value_layer], dim=2)
else:
key_layer = self.transpose_for_scores(self.key(hidden_states))
value_layer = self.transpose_for_scores(self.value(hidden_states))
query_layer = self.transpose_for_scores(mixed_query_layer)
if self.is_decoder:
past_key_value = (key_layer, value_layer)
attention_scores = flow.matmul(query_layer, key_layer.transpose(-1, -2))
attention_scores = attention_scores / math.sqrt(self.attention_head_size)
if attention_mask is not None:
attention_scores = attention_scores + attention_mask
attention_probs = nn.Softmax(dim=-1)(attention_scores)
attention_probs = self.dropout(attention_probs)
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = flow.matmul(attention_probs, value_layer)
context_layer = context_layer.permute(0, 2, 1, 3) # .contiguous()
context_layer = flow.reshape(
context_layer,
[context_layer.shape[0], context_layer.shape[1], self.all_head_size],
)
outputs = (
(context_layer, attention_probs) if output_attentions else (context_layer,)
)
if self.is_decoder:
outputs = outputs + (past_key_value,)
return outputs
class BertSelfOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertAttention(nn.Module):
def __init__(self, config):
super().__init__()
self.self = BertSelfAttention(config)
self.output = BertSelfOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
self_outputs = self.self(
hidden_states,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
attention_output = self.output(self_outputs[0], hidden_states)
outputs = (attention_output,) + self_outputs[1:]
return outputs
class BertIntermediate(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class BertOutput(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
def forward(self, hidden_states, input_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class BertLayer(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_len_dim = 1
self.attention = BertAttention(config)
self.is_decoder = config.is_decoder
self.add_cross_attention = config.add_cross_attention
if self.add_cross_attention:
if not self.is_decoder:
raise ValueError(
f"{self} should be used as a decoder model if cross attention is added"
)
self.crossattention = BertAttention(config)
self.intermediate = BertIntermediate(config)
self.output = BertOutput(config)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_value=None,
output_attentions=False,
):
# decoder uni-directional self-attention cached key/values tuple is at positions 1,2
self_attn_past_key_value = (
past_key_value[:2] if past_key_value is not None else None
)
self_attention_outputs = self.attention(
hidden_states,
attention_mask,
head_mask,
output_attentions=output_attentions,
past_key_value=self_attn_past_key_value,
)
attention_output = self_attention_outputs[0]
if self.is_decoder:
outputs = self_attention_outputs[1:-1]
present_key_value = self_attention_outputs[-1]
else:
outputs = self_attention_outputs[1:]
cross_attn_present_key_value = None
if self.is_decoder and encoder_hidden_states is not None:
if not hasattr(self, "crossattention"):
raise ValueError(
f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers by setting `config.add_cross_attention=True`"
)
cross_attn_past_key_value = (
past_key_value[-2:] if past_key_value is not None else None
)
cross_attention_outputs = self.crossattention(
attention_output,
attention_mask,
head_mask,
encoder_hidden_states,
encoder_attention_mask,
cross_attn_past_key_value,
output_attentions,
)
attention_output = cross_attention_outputs[0]
outputs = outputs + cross_attention_outputs[1:-1]
cross_attn_present_key_value = cross_attention_outputs[-1]
present_key_value = present_key_value + cross_attn_present_key_value
intermediate_output = self.intermediate(attention_output)
layer_output = self.output(intermediate_output, attention_output)
outputs = (layer_output,) + outputs
if self.is_decoder:
outputs = outputs + (present_key_value,)
return outputs
class BertEncoder(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.layer = nn.ModuleList(
[BertLayer(config) for _ in range(self.config.num_hidden_layers)]
)
def forward(
self,
hidden_states,
attention_mask=None,
head_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=False,
output_hidden_states=False,
):
all_hidden_states = () if output_hidden_states else None
all_self_attentions = () if output_attentions else None
all_cross_attentions = (
() if output_attentions and self.config.add_cross_attention else None
)
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_head_mask = head_mask[i] if head_mask is not None else None
past_key_value = past_key_values[i] if past_key_values is not None else None
layer_outputs = layer_module(
hidden_states,
attention_mask,
layer_head_mask,
encoder_hidden_states,
encoder_attention_mask,
past_key_value,
output_attentions,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_self_attentions = all_self_attentions + (layer_outputs[1],)
if self.config.add_cross_attention:
all_cross_attentions = all_cross_attentions + (layer_outputs[2],)
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
return tuple(
v
for v in [
hidden_states,
all_hidden_states,
all_self_attentions,
all_cross_attentions,
]
if v is not None
)
class BertPooler(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
self.activation = nn.Tanh()
def forward(self, hidden_states):
first_token_tensor = hidden_states[:, 0]
pooled_output = self.dense(first_token_tensor)
pooled_output = self.activation(pooled_output)
return pooled_output
class BertPredictionHeadTransform(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.hidden_size)
if isinstance(config.hidden_act, str):
self.transform_act_fn = ACT2FN[config.hidden_act]
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
def forward(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class BertLMPredictionHead(nn.Module):
def __init__(self, config):
super().__init__()
self.transform = BertPredictionHeadTransform(config)
self.decoder = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
self.bias = nn.Parameter(flow.zeros(config.vocab_size))
self.decoder.bias = self.bias
def forward(self, hidden_states):
hidden_states = self.transform(hidden_states)
hidden_states = self.decoder(hidden_states)
return hidden_states
class BertOnlyMLMHead(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
def forward(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
class BertOnlyNSPHead(nn.Module):
def __init__(self, config):
super().__init__()
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
class BertPreTrainingHeads(nn.Module):
def __init__(self, config):
super().__init__()
self.predictions = BertLMPredictionHead(config)
self.seq_relationship = nn.Linear(config.hidden_size, 2)
def forward(self, sequence_output, pooled_output):
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
return prediction_scores, seq_relationship_score
class BertModel(nn.Module):
def __init__(self, config, add_pooling_layer=True):
super().__init__()
self.config = config
self.embeddings = BertEmbeddings(config)
self.encoder = BertEncoder(config)
self.pooler = BertPooler(config) if add_pooling_layer else None
self.have_prefix = False
def get_input_embeddings(self):
return self.embeddings.word_embeddings
def set_input_embeddings(self, value):
self.embeddings.word_embeddings = value
def get_extended_attention_mask(self, attention_mask, input_shape, device):
if attention_mask.dim() == 3:
extended_attention_mask = attention_mask[:, None, :, :]
elif attention_mask.dim() == 2:
if self.config.is_decoder:
batch_size, seq_length = input_shape
seq_ids = flow.arange(seq_length, device=device)
causal_mask = (
seq_ids[None, None, :].repeat(batch_size, seq_length, 1)
<= seq_ids[None, :, None]
)
causal_mask = causal_mask.to(attention_mask.dtype)
if causal_mask.shape[1] < attention_mask.shape[1]:
prefix_seq_len = attention_mask.shape[1] - causal_mask.shape[1]
causal_mask = flow.cat(
[
flow.ones(
(batch_size, seq_length, prefix_seq_len),
device=device,
dtype=causal_mask.dtype,
),
causal_mask,
],
axis=-1,
)
extended_attention_mask = (
causal_mask[:, None, :, :] * attention_mask[:, None, None, :]
)
else:
extended_attention_mask = attention_mask[:, None, None, :]
else:
raise ValueError(
f"Wrong shape for input_ids (shape {input_shape}) or attention_mask (shape {attention_mask.shape})"
)
extended_attention_mask = extended_attention_mask.to(dtype=flow.float32)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
def invert_attention_mask(self, encoder_attention_mask):
if encoder_attention_mask.dim() == 3:
encoder_extended_attention_mask = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
encoder_extended_attention_mask = encoder_attention_mask[:, None, None, :]
encoder_extended_attention_mask = encoder_extended_attention_mask.to(
dtype=self.dtype
)
if self.dtype == flow.float16:
encoder_extended_attention_mask = (
1.0 - encoder_extended_attention_mask
) * -1e4
elif self.dtype == flow.float32:
encoder_extended_attention_mask = (
1.0 - encoder_extended_attention_mask
) * -1e9
else:
raise ValueError(
f"{self.dtype} not recognized. `dtype` should be set to either `flow.float32` or `flow.float16`"
)
return encoder_extended_attention_mask
def get_head_mask(
self, head_mask, num_hidden_layers: int, is_attention_chunked: bool = False
):
if head_mask is not None:
head_mask = self._convert_head_mask_to_5d(head_mask, num_hidden_layers)
if is_attention_chunked is True:
head_mask = head_mask.unsqueeze(-1)
else:
head_mask = [None] * num_hidden_layers
return head_mask
def _convert_head_mask_to_5d(self, head_mask, num_hidden_layers):
"""-> [num_hidden_layers , batch , num_heads , seq_length , seq_length]"""
if head_mask.dim() == 1:
head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)
head_mask = head_mask.expand(num_hidden_layers, -1, -1, -1, -1)
elif head_mask.dim() == 2:
head_mask = head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)
assert head_mask.dim() == 5, f"head_mask.dim != 5, instead {head_mask.dim()}"
head_mask = head_mask.to(dtype=self.dtype)
return head_mask
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
past_key_values=None,
output_attentions=None,
output_hidden_states=None,
):
output_attentions = (
output_attentions
if output_attentions is not None
else self.config.output_attentions
)
output_hidden_states = (
output_hidden_states
if output_hidden_states is not None
else self.config.output_hidden_states
)
if input_ids is not None and inputs_embeds is not None:
raise ValueError(
"You cannot specify both input_ids and inputs_embeds at the same time"
)
elif input_ids is not None:
input_shape = input_ids.size()
elif inputs_embeds is not None:
input_shape = inputs_embeds.size()[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
batch_size, seq_length = input_shape
device = input_ids.device if input_ids is not None else inputs_embeds.device
past_key_values_length = (
past_key_values[0][0].shape[2] if past_key_values is not None else 0
)
if attention_mask is None:
attention_mask = flow.ones(
(batch_size, seq_length + past_key_values_length), device=device
)
if token_type_ids is None:
if hasattr(self.embeddings, "token_type_ids"):
buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
buffered_token_type_ids_expanded = buffered_token_type_ids.expand(
batch_size, seq_length
)
token_type_ids = buffered_token_type_ids_expanded
else:
token_type_ids = flow.zeros(input_shape, dtype=flow.long, device=device)
extended_attention_mask = self.get_extended_attention_mask(
attention_mask, input_shape, device
)
if self.config.is_decoder and encoder_hidden_states is not None:
(
encoder_batch_size,
encoder_sequence_length,
_,
) = encoder_hidden_states.size()
encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
if encoder_attention_mask is None:
encoder_attention_mask = flow.ones(encoder_hidden_shape, device=device)
encoder_extended_attention_mask = self.invert_attention_mask(
encoder_attention_mask
)
else:
encoder_extended_attention_mask = None
embedding_output = self.embeddings(
input_ids=input_ids,
position_ids=position_ids,
token_type_ids=token_type_ids,
inputs_embeds=inputs_embeds,
past_key_values_length=past_key_values_length,
)
encoder_outputs = self.encoder(
embedding_output,
attention_mask=extended_attention_mask,
head_mask=head_mask,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_extended_attention_mask,
past_key_values=past_key_values,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output = encoder_outputs[0]
pooled_output = (
self.pooler(sequence_output) if self.pooler is not None else None
)
return (sequence_output, pooled_output) + encoder_outputs[1:]
class BertForPreTraining(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.bert = BertModel(config)
self.cls = BertPreTrainingHeads(config)
self.have_prefix = True
self.init_weights()
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
input_embeds=None,
labels=None,
next_sentence_label=None,
output_attentions=None,
output_hidden_states=None,
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=input_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
sequence_output, pooled_output = outputs[:2]
prediction_scores, seq_relationship_score = self.cls(
sequence_output, pooled_output
)
total_loss = None
if labels is not None and next_sentence_label is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
prediction_scores.view(-1, self.config.vocab_size), labels.view(-1)
)
next_sentence_loss = loss_fct(
seq_relationship_score.view(-1, 2), next_sentence_label.view(-1)
)
total_loss = masked_lm_loss + next_sentence_loss
return (
total_loss,
prediction_scores,
seq_relationship_score,
outputs.hidden_states,
outputs.attentions,
)
def init_weights(self):
self.apply(self._init_weights)
self.clone_weights(
self.get_output_embeddings(), self.bert.get_input_embeddings()
)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.fill_(0.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zeros_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.fill_(0.0)
def clone_weights(self, output_embeddings, input_embeddings):
output_embeddings.weight = input_embeddings.weight
def get_output_embeddings(self):
return self.cls.predictions.decoder
class BertForSequenceClassification(nn.Module):
def __init__(self, config):
super().__init__()
self.config = config
self.num_labels = config.num_labels
self.bert = BertModel(config)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.classifier = nn.Linear(config.hidden_size, config.num_labels)
self.have_prefix = True
self.init_weights()
def forward(
self,
input_ids,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
labels=None,
output_attentions=None,
output_hidden_states=None,
):
outputs = self.bert(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (labels.dtype == flow.long or labels.dtype == flow.int):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = nn.MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
def init_weights(self):
self.apply(self._init_weights)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.fill_(0.0)
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zeros_()
elif isinstance(module, nn.LayerNorm):
module.weight.data.fill_(1.0)
module.bias.data.fill_(0.0)
BertType = [BertModel, BertForPreTraining, BertForSequenceClassification]
def load_states_from_checkpoint(model, checkpoint):
# TODO: Add weight loading prompt. such as: weights that failed to load, weights that did not load, and weights that need training.
have_prefix = model.have_prefix
load_dict = {}
if not have_prefix:
for n, _ in model.named_parameters():
load_dict[n] = checkpoint['bert.' + n]
load_dict['embeddings.position_ids'] = checkpoint['bert.embeddings.position_ids']
else:
for n, w in model.named_parameters():
if n in checkpoint.keys():
load_dict[n] = checkpoint[n]
else:
load_dict[n] = w.detach().cpu()
load_dict['bert.embeddings.position_ids'] = checkpoint['bert.embeddings.position_ids']
if isinstance(model, BertForPreTraining):
load_dict["cls.predictions.decoder.weight"] = checkpoint["cls.predictions.decoder.weight"]
load_dict["cls.predictions.decoder.bias"] = checkpoint["cls.predictions.decoder.bias"]
model.load_state_dict(load_dict)
return model
def bert(
pretrained: bool = True,
model_type: str = "bert-base-uncased",
checkpoint_path: str = None,
bert_type: object = BertModel
):
assert (bert_type in BertType), f"The bert_type: {bert_type} not in {BertType}."
config = BertConfig()
if pretrained == False:
return bert_type(config), None, config
if checkpoint_path != None:
cpt, config_file, vocab_file = load_state_dict_from_file(checkpoint_path)
config.load_from_json(config_file)
bert = bert_type(config)
tokenizer = BertTokenizer(vocab_file)
try:
bert = load_states_from_checkpoint(bert, cpt)
except:
print("Checkpoint loading failed.")
return bert, tokenizer, config
assert (
model_type in model_urls
), f"The model_type {model_type} not identifiable, please confirm."
cpt, config_file, vocab_file = load_state_dict_from_url(model_urls[model_type], checkpoint_path)
config.load_from_json(config_file)
bert = bert_type(config)
tokenizer = BertTokenizer(vocab_file)
try:
bert = load_states_from_checkpoint(bert, cpt)
except:
print("Checkpoint loading failed.")
return bert, tokenizer, config
| [
"oneflow.nn.Softmax",
"oneflow.nn.Linear",
"oneflow.ones",
"oneflow.nn.LayerNorm",
"oneflow.nn.MSELoss",
"oneflow.nn.CrossEntropyLoss",
"oneflow.nn.Embedding",
"oneflow.nn.Tanh",
"oneflow.reshape",
"oneflow.nn.BCEWithLogitsLoss",
"oneflow.zeros",
"oneflow.nn.Dropout",
"oneflow.cat",
"onefl... | [((1100, 1189), 'oneflow.nn.Embedding', 'nn.Embedding', (['config.vocab_size', 'config.hidden_size'], {'padding_idx': 'config.pad_token_id'}), '(config.vocab_size, config.hidden_size, padding_idx=config.\n pad_token_id)\n', (1112, 1189), False, 'from oneflow import nn\n'), ((1242, 1306), 'oneflow.nn.Embedding', 'nn.Embedding', (['config.max_position_embeddings', 'config.hidden_size'], {}), '(config.max_position_embeddings, config.hidden_size)\n', (1254, 1306), False, 'from oneflow import nn\n'), ((1366, 1422), 'oneflow.nn.Embedding', 'nn.Embedding', (['config.type_vocab_size', 'config.hidden_size'], {}), '(config.type_vocab_size, config.hidden_size)\n', (1378, 1422), False, 'from oneflow import nn\n'), ((1470, 1529), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (1482, 1529), False, 'from oneflow import nn\n'), ((1553, 1591), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (1563, 1591), False, 'from oneflow import nn\n'), ((4201, 4250), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (4210, 4250), False, 'from oneflow import nn\n'), ((4270, 4319), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (4279, 4319), False, 'from oneflow import nn\n'), ((4341, 4390), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'self.all_head_size'], {}), '(config.hidden_size, self.all_head_size)\n', (4350, 4390), False, 'from oneflow import nn\n'), ((4415, 4462), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.attention_probs_dropout_prob'], {}), '(config.attention_probs_dropout_prob)\n', (4425, 4462), False, 'from oneflow import nn\n'), ((4559, 4657), 'oneflow.reshape', 'flow.reshape', (['x', '[x.shape[0], x.shape[1], 
self.num_attention_heads, self.attention_head_size]'], {}), '(x, [x.shape[0], x.shape[1], self.num_attention_heads, self.\n attention_head_size])\n', (4571, 4657), True, 'import oneflow as flow\n'), ((6798, 6839), 'oneflow.matmul', 'flow.matmul', (['attention_probs', 'value_layer'], {}), '(attention_probs, value_layer)\n', (6809, 6839), True, 'import oneflow as flow\n'), ((6941, 7042), 'oneflow.reshape', 'flow.reshape', (['context_layer', '[context_layer.shape[0], context_layer.shape[1], self.all_head_size]'], {}), '(context_layer, [context_layer.shape[0], context_layer.shape[1],\n self.all_head_size])\n', (6953, 7042), True, 'import oneflow as flow\n'), ((7410, 7459), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (7419, 7459), False, 'from oneflow import nn\n'), ((7485, 7544), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (7497, 7544), False, 'from oneflow import nn\n'), ((7568, 7606), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), '(config.hidden_dropout_prob)\n', (7578, 7606), False, 'from oneflow import nn\n'), ((8814, 8869), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.intermediate_size'], {}), '(config.hidden_size, config.intermediate_size)\n', (8823, 8869), False, 'from oneflow import nn\n'), ((9346, 9401), 'oneflow.nn.Linear', 'nn.Linear', (['config.intermediate_size', 'config.hidden_size'], {}), '(config.intermediate_size, config.hidden_size)\n', (9355, 9401), False, 'from oneflow import nn\n'), ((9427, 9486), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (9439, 9486), False, 'from oneflow import nn\n'), ((9510, 9548), 'oneflow.nn.Dropout', 'nn.Dropout', (['config.hidden_dropout_prob'], {}), 
'(config.hidden_dropout_prob)\n', (9520, 9548), False, 'from oneflow import nn\n'), ((14982, 15031), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (14991, 15031), False, 'from oneflow import nn\n'), ((15058, 15067), 'oneflow.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (15065, 15067), False, 'from oneflow import nn\n'), ((15423, 15472), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.hidden_size'], {}), '(config.hidden_size, config.hidden_size)\n', (15432, 15472), False, 'from oneflow import nn\n'), ((15675, 15734), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['config.hidden_size'], {'eps': 'config.layer_norm_eps'}), '(config.hidden_size, eps=config.layer_norm_eps)\n', (15687, 15734), False, 'from oneflow import nn\n'), ((16152, 16212), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.vocab_size'], {'bias': '(False)'}), '(config.hidden_size, config.vocab_size, bias=False)\n', (16161, 16212), False, 'from oneflow import nn\n'), ((16903, 16935), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', '(2)'], {}), '(config.hidden_size, 2)\n', (16912, 16935), False, 'from oneflow import nn\n'), ((17271, 17303), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', '(2)'], {}), '(config.hidden_size, 2)\n', (17280, 17303), False, 'from oneflow import nn\n'), ((28911, 28941), 'oneflow.nn.Dropout', 'nn.Dropout', (['classifier_dropout'], {}), '(classifier_dropout)\n', (28921, 28941), False, 'from oneflow import nn\n'), ((28968, 29016), 'oneflow.nn.Linear', 'nn.Linear', (['config.hidden_size', 'config.num_labels'], {}), '(config.hidden_size, config.num_labels)\n', (28977, 29016), False, 'from oneflow import nn\n'), ((6419, 6454), 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), '(self.attention_head_size)\n', (6428, 6454), False, 'import math\n'), ((6586, 6604), 'oneflow.nn.Softmax', 'nn.Softmax', ([], {'dim': '(-1)'}), '(dim=-1)\n', (6596, 
6604), False, 'from oneflow import nn\n'), ((16246, 16275), 'oneflow.zeros', 'flow.zeros', (['config.vocab_size'], {}), '(config.vocab_size)\n', (16256, 16275), True, 'import oneflow as flow\n'), ((23413, 23488), 'oneflow.ones', 'flow.ones', (['(batch_size, seq_length + past_key_values_length)'], {'device': 'device'}), '((batch_size, seq_length + past_key_values_length), device=device)\n', (23422, 23488), True, 'import oneflow as flow\n'), ((26967, 26985), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (26983, 26985), False, 'from oneflow.nn import CrossEntropyLoss\n'), ((2943, 3016), 'oneflow.zeros', 'flow.zeros', (['input_shape'], {'dtype': 'flow.long', 'device': 'self.position_ids.device'}), '(input_shape, dtype=flow.long, device=self.position_ids.device)\n', (2953, 3016), True, 'import oneflow as flow\n'), ((23964, 24019), 'oneflow.zeros', 'flow.zeros', (['input_shape'], {'dtype': 'flow.long', 'device': 'device'}), '(input_shape, dtype=flow.long, device=device)\n', (23974, 24019), True, 'import oneflow as flow\n'), ((24545, 24591), 'oneflow.ones', 'flow.ones', (['encoder_hidden_shape'], {'device': 'device'}), '(encoder_hidden_shape, device=device)\n', (24554, 24591), True, 'import oneflow as flow\n'), ((30425, 30437), 'oneflow.nn.MSELoss', 'nn.MSELoss', ([], {}), '()\n', (30435, 30437), False, 'from oneflow import nn\n'), ((1650, 1693), 'oneflow.arange', 'flow.arange', (['config.max_position_embeddings'], {}), '(config.max_position_embeddings)\n', (1661, 1693), True, 'import oneflow as flow\n'), ((5847, 5894), 'oneflow.cat', 'flow.cat', (['[past_key_value[0], key_layer]'], {'dim': '(2)'}), '([past_key_value[0], key_layer], dim=2)\n', (5855, 5894), True, 'import oneflow as flow\n'), ((5921, 5970), 'oneflow.cat', 'flow.cat', (['[past_key_value[1], value_layer]'], {'dim': '(2)'}), '([past_key_value[1], value_layer], dim=2)\n', (5929, 5970), True, 'import oneflow as flow\n'), ((18409, 18447), 'oneflow.arange', 'flow.arange', (['seq_length'], 
{'device': 'device'}), '(seq_length, device=device)\n', (18420, 18447), True, 'import oneflow as flow\n'), ((30728, 30746), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (30744, 30746), False, 'from oneflow.nn import CrossEntropyLoss\n'), ((30932, 30954), 'oneflow.nn.BCEWithLogitsLoss', 'nn.BCEWithLogitsLoss', ([], {}), '()\n', (30952, 30954), False, 'from oneflow import nn\n'), ((18937, 19033), 'oneflow.ones', 'flow.ones', (['(batch_size, seq_length, prefix_seq_len)'], {'device': 'device', 'dtype': 'causal_mask.dtype'}), '((batch_size, seq_length, prefix_seq_len), device=device, dtype=\n causal_mask.dtype)\n', (18946, 19033), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import re
from contextlib import contextmanager
import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb
import oneflow.core.job.placement_pb2 as placement_pb
import oneflow.core.job.job_conf_pb2 as job_conf_pb
import oneflow.core.job.scope_pb2 as scope_pb
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.core.operator.op_node_signature_pb2 as op_node_signature_pb
import oneflow.core.register.blob_desc_pb2 as blob_desc_pb
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow.python.eager.object_storage as object_storage
import oneflow.python.eager.symbol as symbol_util
import oneflow.python.eager.symbol_storage as symbol_storage
import oneflow_api.oneflow.core.job.scope as scope_cfg
import oneflow.python.framework.balanced_splitter as balanced_splitter
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.python_callback as python_callback
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.framework.python_interpreter_util as python_interpreter_util
from oneflow.python.eager.opkernel_object import OpKernelObject
import oneflow
import oneflow_api.oneflow.core.vm.instruction as instr_cfg
import oneflow_api.oneflow.core.job.placement as placement_cfg
import oneflow_api.oneflow.core.job.job_conf as job_conf_cfg
import oneflow_api.oneflow.core.operator.op_node_signature as op_node_signature_cfg
import oneflow_api.oneflow.core.eager.eager_symbol as eager_symbol_cfg
from google.protobuf import text_format
import oneflow_api
def PhysicalRun(build):
    """Run ``build`` against a builder that emits physical (per-device) instructions."""
    id_generator = oneflow_api.vm.PhysicalIdGenerator()
    run_api = oneflow_api.vm.RunPhysicalInstruction
    return _Run(build, id_generator, run_api, _ReleasePhysicalObject)
def LogicalRun(build):
    """Run ``build`` against a builder that emits logical (global) instructions."""
    id_generator = oneflow_api.vm.LogicalIdGenerator()
    run_api = oneflow_api.vm.RunLogicalInstruction
    return _Run(build, id_generator, run_api, _ReleaseLogicalObject)
def _Run(build, id_generator, run_api, release_object):
    """Collect instructions via ``build`` and execute them through ``run_api``.

    ``build`` receives an ``InstructionsBuilder`` that appends to the default
    session's instruction/eager-symbol lists; after execution both lists are
    cleared so the session buffers can be reused by the next run.
    """
    instruction_list = session_ctx.GetDefaultSession().instruction_list
    eager_symbol_list = session_ctx.GetDefaultSession().eager_symbol_list
    assert isinstance(instruction_list, instr_cfg.InstructionListProto)
    assert isinstance(eager_symbol_list, eager_symbol_cfg.EagerSymbolList)
    build(
        oneflow_api.deprecated.InstructionsBuilder(
            id_generator, instruction_list, eager_symbol_list, release_object
        )
    )
    # Dispatch the accumulated instructions to the VM, then reset the buffers.
    run_api(instruction_list, eager_symbol_list)
    instruction_list.clear_instruction()
    eager_symbol_list.clear_eager_symbol()
def _DefaultBlobObject4Ibn(ibn):
raise NotImplementedError
def StatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object=None):
    """Issue a stateless op-kernel call, boxing inputs across placements as needed.

    Args:
        op_attribute: inferred op attribute (protobuf-like) of the op to run.
        parallel_conf: placement the op runs on.
        bn_in_op2blob_object: mapping blob-name-in-op -> blob object; also used
            to return output blob objects to the caller.

    Fix: the default was a shared mutable ``{}``; downstream helpers write
    output blobs into this dict, so results leaked between calls that omitted
    the argument.  A fresh dict is now created per call.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def FetchDelegateBlobObject(x_blob_object, op_arg_parallel_attr):
        # Box the blob to the placement/parallel attribute the op expects.
        return boxing_util.BoxingTo(self, x_blob_object, op_arg_parallel_attr)

    def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
        return _FindOrCreateDelegateBlobObject(
            self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
        )

    self._StatelessCall(
        "compute",
        op_attribute,
        op_parallel_desc_sym=op_parallel_desc_sym,
        blob_parallel_desc_sym=op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDelegateBlobObject,
    )
def NoBoxingStatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object=None):
    """Stateless call restricted to no-boxing data movement.

    Inputs already on the target placement are used directly; otherwise only
    CPU broadcast or 1:1 copy is permitted (general boxing is disallowed).

    Fix: replaced the shared mutable default ``{}`` (written into by helper
    calls, leaking state across invocations) with a per-call dict.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def FetchDelegateBlobObject(blob_object, op_arg_parallel_attr):
        from_pd = blob_object.parallel_desc_symbol
        to_pd = op_arg_parallel_attr.parallel_desc_symbol
        if from_pd == to_pd:
            return blob_object
        # Only CPU-to-CPU movement with identical parallel size is supported here.
        assert from_pd.device_tag == "cpu"
        assert to_pd.device_tag == "cpu"
        assert from_pd.parallel_num == to_pd.parallel_num
        from_machine_ids = dict(from_pd.machine_id2device_id_list).keys()
        to_machine_ids = dict(to_pd.machine_id2device_id_list).keys()
        if (
            len(from_pd.machine_id2device_id_list) == from_pd.parallel_num
            and from_machine_ids == to_machine_ids
        ):
            # Same machines, one device per machine: broadcast the reference.
            return self.BroadcastBlobReference(blob_object, to_pd)
        return self.Build121To(blob_object, to_pd)

    def GetDirectOr121BlobObject(blob_object, op_arg_parallel_attr):
        return _FindOrCreateDelegateBlobObject(
            self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
        )

    self._StatelessCall(
        "compute",
        op_attribute,
        op_parallel_desc_sym=op_parallel_desc_sym,
        blob_parallel_desc_sym=op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDirectOr121BlobObject,
    )
def NoBoxingCudaD2HStatelessCall(
    self, op_attribute, in_parallel_conf, bn_in_op2blob_object=None
):
    """Device-to-host copy call: op runs on ``in_parallel_conf``'s placement,
    output blobs are placed on the CPU variant of that placement.

    Fix: replaced the shared mutable default ``{}`` with a per-call dict.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = self.GetParallelDescSymbol(in_parallel_conf)
    # Outputs land on the same machines but with device_tag forced to "cpu".
    blob_parallel_desc_sym = boxing_util.TryReplaceDeviceTag(
        self, op_parallel_desc_sym, "cpu"
    )
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, blob_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
        # No boxing: inputs are used exactly where they already live.
        return blob_object

    self._StatelessCall(
        "copy_d2h",
        op_attribute,
        op_parallel_desc_sym=op_parallel_desc_sym,
        blob_parallel_desc_sym=blob_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDirectBlobObject,
    )
def NoBoxingCudaH2DStatelessCall(
    self, op_attribute, out_parallel_conf, bn_in_op2blob_object=None
):
    """Host-to-device copy call on ``out_parallel_conf``'s placement, without boxing.

    Fix: replaced the shared mutable default ``{}`` with a per-call dict.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = self.GetParallelDescSymbol(out_parallel_conf)
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
        # No boxing: inputs are used exactly where they already live.
        return blob_object

    self._StatelessCall(
        "copy_h2d",
        op_attribute,
        op_parallel_desc_sym=op_parallel_desc_sym,
        blob_parallel_desc_sym=op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDirectBlobObject,
    )
def RawStatelessCall(self, op_attribute, parallel_conf, bn_in_op2blob_object=None):
    """Stateless call with no data movement at all: input blobs are passed through
    unchanged, so the caller must guarantee correct placement.

    Fix: replaced the shared mutable default ``{}`` with a per-call dict.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def GetDirectBlobObject(blob_object, op_arg_parallel_attr):
        return blob_object

    self._StatelessCall(
        "compute",
        op_attribute,
        op_parallel_desc_sym=op_parallel_desc_sym,
        blob_parallel_desc_sym=op_parallel_desc_sym,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDirectBlobObject,
    )
def StatefulCall(self, op_attribute, opkernel_object, bn_in_op2blob_object=None):
    """Run a stateful op-kernel (``opkernel_object``) with boxing for inputs.

    The op's placement is taken from the kernel object and must match the
    symbol id recorded in ``op_attribute.parallel_signature``.

    Fix: replaced the shared mutable default ``{}`` with a per-call dict.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    op_parallel_desc_sym = opkernel_object.parallel_desc_symbol
    parallel_sig = op_attribute.parallel_signature
    assert parallel_sig.HasField("op_parallel_desc_symbol_id")
    assert op_parallel_desc_sym.symbol_id == parallel_sig.op_parallel_desc_symbol_id
    self._CheckRefInBlobObjectParallelDesc(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )

    def FetchDelegateBlobObject(x_blob_object, op_arg_parallel_attr):
        return boxing_util.BoxingTo(self, x_blob_object, op_arg_parallel_attr)

    def GetDelegateBlobObject(blob_object, op_arg_parallel_attr):
        return _FindOrCreateDelegateBlobObject(
            self, FetchDelegateBlobObject, blob_object, op_arg_parallel_attr
        )

    self._StatefulCall(
        op_attribute,
        opkernel_object=opkernel_object,
        bn_in_op2blob_object=bn_in_op2blob_object,
        get_delegate_blob_object=GetDelegateBlobObject,
    )
def InsertRemoveForeignCallbackInstruction(self, object_id, callback):
    """Append a ``RemoveForeignCallback`` VM instruction for ``object_id``.

    ``callback`` is registered with the python-callback registry and only its
    integer id is embedded in the instruction.  Operand order (object id, then
    callback id) is part of the instruction's wire contract.
    """
    unique_callback_id = python_callback.GetIdForRegisteredCallback(callback)
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("RemoveForeignCallback")
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.DelObjectOperand(object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.Int64Operand(unique_callback_id)
    )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
def FetchBlobHeader(self, blob_object, callback):
    """Schedule a fetch of ``blob_object``'s header; ``callback`` receives it."""
    instr_name = "FetchBlobHeader"
    return self._FetchBlob(instr_name, blob_object, callback)
def FetchBlobBody(self, blob_object, callback):
    """Schedule a fetch of ``blob_object``'s body; ``callback`` receives it."""
    instr_name = "FetchBlobBody"
    return self._FetchBlob(instr_name, blob_object, callback)
def MakeLazyRefBlobObject(self, interface_op_name):
    """Create an eager blob object that lazily references the output of the
    lazy-graph interface op named ``interface_op_name``.

    The interface op must have exactly one output.  Its parallel conf is
    converted from the protobuf form to the cfg form when needed, then a new
    blob object with matching parallel/blob attributes is created and bound
    via ``LazyReference``.
    """
    sess = session_ctx.GetDefaultSession()
    op_attribute = sess.OpAttribute4InterfaceOpName(interface_op_name)
    assert len(op_attribute.output_bns) == 1
    obn = op_attribute.output_bns[0]
    parallel_conf = sess.ParallelConf4LazyInterfaceOpName(interface_op_name)
    if not isinstance(
        parallel_conf, oneflow_api.oneflow.core.job.placement.ParallelConf
    ):
        # Convert pb ParallelConf -> cfg ParallelConf field by field.
        parallel_conf_cfg = placement_cfg.ParallelConf()
        parallel_conf_cfg.set_device_tag(parallel_conf.device_tag)
        for device_name in parallel_conf.device_name:
            parallel_conf_cfg.add_device_name(device_name)
        parallel_conf = parallel_conf_cfg
    blob_parallel_desc_sym = self.GetParallelDescSymbol(parallel_conf)
    op_arg_parallel_attr = oneflow_api.GetOpArgParallelAttribute(
        blob_parallel_desc_sym, str(op_attribute), obn
    )
    op_arg_blob_attr = oneflow_api.GetOpArgBlobAttribute(str(op_attribute), obn)
    blob_object = self.NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
    self.LazyReference(blob_object, interface_op_name)
    return blob_object
def GetSharedOpKernelObject4ParallelConfSymbol(self, parallel_desc_sym):
    """Return the shared op-kernel object for this placement, creating and
    registering it on first use."""
    if not object_storage.HasSharedOpKernelObject4ParallelConfSymbol(parallel_desc_sym):
        new_object_id = self.NewSharedOpKernelObjectId4ParallelConfSymbolId(
            parallel_desc_sym
        )
        shared_obj = oneflow_api.Object(new_object_id, parallel_desc_sym)
        object_storage.SetSharedOpKernelObject4ParallelConfSymbol(
            parallel_desc_sym, shared_obj
        )
        return shared_obj
    return object_storage.GetSharedOpKernelObject4ParallelConfSymbol(parallel_desc_sym)
@contextmanager
def CudaHostPinBlob(self, blob_object):
    """Context manager pinning ``blob_object``'s memory as CUDA host memory
    for the duration of the ``with`` block.

    The blob is unregistered in a ``finally`` so it is unpinned even when the
    body raises.
    """
    self.CudaHostRegisterBlob(blob_object)
    try:
        yield
    finally:
        self.CudaHostUnregisterBlob(blob_object)
def NewOpKernelObject(self, op_conf):
    """Create a fresh (non-shared) op-kernel object for ``op_conf``.

    ``op_conf`` must carry a scope symbol id; the scope supplies the job
    descriptor, and the parallel symbol is resolved through the C API.
    Returns an ``OpKernelObject`` wrapper that releases the VM object when
    garbage-collected.
    """
    assert op_conf.HasField("scope_symbol_id")
    scope_symbol = oneflow_api.GetScopeSymbol(op_conf.scope_symbol_id)
    op_conf_sym = self._GetOpConfSymbol(op_conf)
    parallel_desc_sym_id = c_api_util.GetOpParallelSymbolId(op_conf)
    parallel_desc_symbol = oneflow_api.GetPlacementSymbol(parallel_desc_sym_id)
    object_id = self._NewOpKernelObject(
        parallel_desc_symbol, scope_symbol.job_desc_symbol, op_conf_sym
    )
    return OpKernelObject(object_id, op_conf, self.object_releaser())
def Build121To(self, blob_object, parallel_desc_symbol):
    """Copy ``blob_object`` one-to-one onto ``parallel_desc_symbol``; return the new blob."""
    target_blob_object = _MakeNewBlobObjectLike(self, blob_object, parallel_desc_symbol)
    self.Build121AssignInstruction(target_blob_object, blob_object)
    return target_blob_object
def _NewOpKernelObject(self, parallel_desc_symbol, job_desc_sym, op_conf_sym):
    """Emit an ``InitOpKernelObject`` instruction and return the new object id.

    Operand order (job desc symbol, op conf symbol, mutable object) is part of
    the instruction's wire contract.
    """
    object_id = self.NewObjectId(parallel_desc_symbol)
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitOpKernelObject")
    instruction.set_parallel_desc_symbol_id(parallel_desc_symbol.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(job_desc_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(op_conf_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.MutOperand(object_id)
    )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
    return object_id
def _StatelessCall(
    self,
    stream_tag,
    op_attribute,
    op_parallel_desc_sym=None,
    blob_parallel_desc_sym=None,
    bn_in_op2blob_object=None,
    get_delegate_blob_object=None,
):
    """Shared implementation behind all stateless-call entry points.

    Gathers symbols and operand blob objects, then emits a
    ``<stream_tag>.[User|System]StatelessCallOpKernel`` instruction.

    Fix: ``bn_in_op2blob_object`` previously defaulted to a shared mutable
    ``{}``; ``_GetMut1/_GetMut2OperandBlobObjects`` write output blobs into
    it, so omitting the argument leaked results across calls.
    """
    assert callable(get_delegate_blob_object)
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    # A symbol id recorded in the parallel signature overrides the caller's
    # op placement.
    if op_attribute.parallel_signature.HasField("op_parallel_desc_symbol_id"):
        symbol_id = op_attribute.parallel_signature.op_parallel_desc_symbol_id
        op_parallel_desc_sym = oneflow_api.GetPlacementSymbol(symbol_id)
    assert op_parallel_desc_sym is not None

    def DelegateBlobObject4Ibn(ibn):
        # Resolve each input blob through the caller-provided delegate policy.
        op_arg_parallel_attr = oneflow_api.GetOpArgParallelAttribute(
            op_parallel_desc_sym, str(op_attribute), ibn
        )
        return get_delegate_blob_object(bn_in_op2blob_object[ibn], op_arg_parallel_attr)

    op_conf = op_attribute.op_conf
    assert op_conf.HasField("scope_symbol_id"), op_conf
    scope_symbol = oneflow_api.GetScopeSymbol(op_conf.scope_symbol_id)
    job_desc_sym = scope_symbol.job_desc_symbol
    op_conf_sym = self._GetOpConfSymbol(op_conf)
    op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
    opkernel_obj = self.GetSharedOpKernelObject4ParallelConfSymbol(op_parallel_desc_sym)
    assert opkernel_obj.parallel_desc_symbol == op_parallel_desc_sym, (
        str(opkernel_obj.parallel_desc_symbol.parallel_conf),
        str(op_parallel_desc_sym.parallel_conf),
    )
    const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
        op_attribute, blob_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )
    mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
        op_attribute, blob_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )
    is_user_op = op_attribute.op_conf.HasField("user_conf")
    instruction_prefix = "User" if is_user_op else "System"
    self._StatelessCallOpKernel(
        "%s.%sStatelessCallOpKernel" % (stream_tag, instruction_prefix),
        op_parallel_desc_sym,
        job_desc_sym,
        op_conf_sym,
        op_node_signature_sym,
        opkernel_obj,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    )
def _StatefulCall(
    self, op_attribute, opkernel_object, bn_in_op2blob_object, get_delegate_blob_object,
):
    """Shared implementation behind ``StatefulCall``.

    Resolves inputs through ``get_delegate_blob_object``, creates output blob
    objects, and emits a ``CallOpKernel`` instruction bound to the mutable
    ``opkernel_object``.  Only user ops are supported (asserted below).
    """
    op_parallel_desc_sym = opkernel_object.parallel_desc_symbol

    def DelegateBlobObject4Ibn(ibn):
        # Resolve each input blob through the caller-provided delegate policy.
        op_arg_parallel_attr = oneflow_api.GetOpArgParallelAttribute(
            op_parallel_desc_sym, str(op_attribute), ibn
        )
        return get_delegate_blob_object(bn_in_op2blob_object[ibn], op_arg_parallel_attr)

    op_node_signature_sym = self._GetOpNodeSignatureSymbol(op_attribute)
    const_input_operand_blob_objects = self._GetConstInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mutable_input_operand_blob_objects = self._GetMutableInputOperandBlobObjects(
        op_attribute, blob_object4ibn=DelegateBlobObject4Ibn
    )
    mut1_operand_blob_objects = self._GetMut1OperandBlobObjects(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )
    mut2_operand_blob_objects = self._GetMut2OperandBlobObjects(
        op_attribute, op_parallel_desc_sym, bn_in_op2blob_object=bn_in_op2blob_object,
    )
    is_user_op = op_attribute.op_conf.HasField("user_conf")
    assert is_user_op
    instruction_prefix = "" if is_user_op else "System"
    self._StatefulCallOpKernel(
        "%sCallOpKernel" % instruction_prefix,
        op_parallel_desc_sym,
        opkernel_object,
        op_node_signature_sym,
        const_input_operand_blob_objects,
        mutable_input_operand_blob_objects,
        mut1_operand_blob_objects,
        mut2_operand_blob_objects,
    )
def _GetOpConfSymbol(self, op_conf):
    """Memoized lookup of the symbol for ``op_conf``, keyed by its serialization."""
    cache_key = op_conf.SerializeToString()
    if not symbol_storage.HasSymbol4SerializedOpConf(cache_key):
        new_symbol_id = self._NewSymbolId4OpConf(op_conf)
        new_symbol = symbol_util.Symbol(new_symbol_id, op_conf)
        symbol_storage.SetSymbol4Id(new_symbol_id, new_symbol)
        symbol_storage.SetSymbol4SerializedOpConf(cache_key, new_symbol)
        return new_symbol
    return symbol_storage.GetSymbol4SerializedOpConf(cache_key)
def _GetOpNodeSignatureSymbol(self, op_attribute):
    """Return the (possibly cached) op-node-signature symbol for ``op_attribute``."""
    signature = oneflow_api.deprecated.MakeOpNodeSignatureFromSerializedOpAttribute(
        str(op_attribute)
    )
    if not oneflow_api.HasOpNodeSignatureSymbol(signature):
        new_symbol_id = self.NewSymbolId4OpNodeSignature(signature)
        oneflow_api.AddOpNodeSignatureSymbol(new_symbol_id, signature)
        return oneflow_api.GetOpNodeSignatureSymbol(new_symbol_id)
    return oneflow_api.GetOpNodeSignatureSymbol(signature)
def _GetConstInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
assert callable(blob_object4ibn)
const_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
const_input_operand_blob_objects.append((ibn_sym, in_object))
return const_input_operand_blob_objects
def _GetMutableInputOperandBlobObjects(self, op_attribute, blob_object4ibn=None):
mutable_input_operand_blob_objects = []
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ibn_sym = self.GetSymbol4String(ibn)
in_object = blob_object4ibn(ibn)
mutable_input_operand_blob_objects.append((ibn_sym, in_object))
return mutable_input_operand_blob_objects
def _GetMut1OperandBlobObjects(
    self, op_attribute, parallel_desc_sym, bn_in_op2blob_object=None
):
    """Create blob objects for outputs whose header is inferred before compute
    (plus all tmp blob names); return (symbol, blob) pairs.

    New blobs are also recorded into ``bn_in_op2blob_object`` so callers can
    retrieve them by blob name.

    Fixes: the default was a shared mutable ``{}`` that this function writes
    into, leaking results across calls; also removed the unused local ``lbi``.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    mut1_operand_blob_objects = []

    def GetOutBlobParallelDescSymbol(obn):
        # Per-output placement override from the parallel signature, falling
        # back to the op-level placement.
        parallel_signature = op_attribute.parallel_signature
        bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
        if obn in bn2symbol_id:
            return oneflow_api.GetPlacementSymbol(bn2symbol_id[obn])
        return parallel_desc_sym

    def OutputBns():
        obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
        for obn in op_attribute.output_bns:
            if obn2modifier[obn].header_infered_before_compute:
                yield obn
        for tmp_bn in op_attribute.tmp_bns:
            yield tmp_bn

    for obn in OutputBns():
        obn_sym = self.GetSymbol4String(obn)
        op_arg_parallel_attr = oneflow_api.GetOpArgParallelAttribute(
            GetOutBlobParallelDescSymbol(obn), str(op_attribute), obn
        )
        op_arg_blob_attr = oneflow_api.GetOpArgBlobAttribute(str(op_attribute), obn)
        out_blob_object = self.NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
        bn_in_op2blob_object[obn] = out_blob_object
        mut1_operand_blob_objects.append((obn_sym, out_blob_object))
    return mut1_operand_blob_objects
def _CheckRefInBlobObjectParallelDesc(
self, op_attribute, op_parallel_desc_sym, bn_in_op2blob_object={}
):
op_conf = op_attribute.op_conf
for ibn in op_attribute.input_bns:
ibn2modifier = op_attribute.arg_modifier_signature.ibn2input_blob_modifier
if not ibn2modifier[ibn].is_mutable:
continue
ref_blob_object = bn_in_op2blob_object[ibn]
assert op_parallel_desc_sym == ref_blob_object.parallel_desc_symbol, (
"op_conf: %s\n%s\nv.s.\n%s"
% (op_conf, op_parallel_desc_sym, ref_blob_object.parallel_desc_symbol)
)
def _GetMut2OperandBlobObjects(
    self, op_attribute, parallel_desc_sym, bn_in_op2blob_object=None
):
    """Create blob objects for outputs whose header is NOT inferred before
    compute; return (symbol, blob) pairs.

    New blobs are also recorded into ``bn_in_op2blob_object``.

    Fix: the default was a shared mutable ``{}`` that this function writes
    into, leaking results across calls; a fresh dict is now created per call.
    """
    if bn_in_op2blob_object is None:
        bn_in_op2blob_object = {}
    mut2_operand_blob_objects = []

    def GetOutBlobParallelDescSymbol(obn):
        # Per-output placement override, falling back to the op placement.
        parallel_signature = op_attribute.parallel_signature
        bn2symbol_id = parallel_signature.bn_in_op2parallel_desc_symbol_id
        if obn in bn2symbol_id:
            return oneflow_api.GetPlacementSymbol(bn2symbol_id[obn])
        return parallel_desc_sym

    for obn in op_attribute.output_bns:
        obn2modifier = op_attribute.arg_modifier_signature.obn2output_blob_modifier
        if obn2modifier[obn].header_infered_before_compute:
            continue  # such outputs are handled by _GetMut1OperandBlobObjects
        obn_sym = self.GetSymbol4String(obn)
        op_arg_parallel_attr = oneflow_api.GetOpArgParallelAttribute(
            GetOutBlobParallelDescSymbol(obn), str(op_attribute), obn
        )
        op_arg_blob_attr = oneflow_api.GetOpArgBlobAttribute(str(op_attribute), obn)
        out_blob_object = self.NewBlobObject(op_arg_parallel_attr, op_arg_blob_attr)
        bn_in_op2blob_object[obn] = out_blob_object
        mut2_operand_blob_objects.append((obn_sym, out_blob_object))
    return mut2_operand_blob_objects
def _NewSymbolId4OpConf(self, op_conf):
symbol_id = self.NewSymbolId()
self._InitOpConfSymbol(symbol_id, op_conf)
return symbol_id
def _StatelessCallOpKernel(
    self,
    instr_name,
    parallel_desc_sym,
    job_desc_sym,
    op_conf_sym,
    op_node_signature_sym,
    shared_opkernel_obj,
    const_input_operand_blob_objects,
    mutable_input_operand_blob_objects,
    mut1_operand_blob_objects,
    mut2_operand_blob_objects,
):
    """Emit a ``<device_tag>.<instr_name>`` stateless-call VM instruction.

    The operand layout is a fixed wire contract: header symbols, the shared
    kernel object, then four separator-delimited groups (const inputs,
    mutable inputs, mut1 outputs, mut2 outputs), each group listing all
    name symbols before all blob-object operands.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name(
        "%s.%s" % (parallel_desc_sym.device_tag, instr_name)
    )
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    # Header operands: job desc, op conf, node signature, shared kernel object.
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(job_desc_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(op_conf_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(op_node_signature_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.MutOperand(shared_opkernel_obj.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 1: const inputs (read-only blob operands).
    for ibn_sym, _ in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.ConstOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 2: mutable inputs (in-place blob operands).
    for ibn_sym, _ in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.MutOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 3: outputs whose header is known before compute (MutOperand).
    for obn_sym, _ in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.MutOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 4: outputs whose header is inferred at compute time (Mut2Operand).
    for obn_sym, _ in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.Mut2Operand(blob_object.object_id)
        )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
def _StatefulCallOpKernel(
    self,
    instr_name,
    parallel_desc_sym,
    opkernel_object,
    op_node_signature_sym,
    const_input_operand_blob_objects,
    mutable_input_operand_blob_objects,
    mut1_operand_blob_objects,
    mut2_operand_blob_objects,
):
    """Emit a ``<device_tag>.<instr_name>`` stateful-call VM instruction.

    Mirrors ``_StatelessCallOpKernel`` but the header carries the mutable
    kernel object and node signature only (no job-desc/op-conf symbols).
    Operand-group layout is a fixed wire contract.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name(
        "%s.%s" % (parallel_desc_sym.device_tag, instr_name,)
    )
    instruction.set_parallel_desc_symbol_id(parallel_desc_sym.symbol_id)
    # Header operands: kernel object (mutable) and node signature symbol.
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.MutOperand(opkernel_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.SymbolOperand(op_node_signature_sym.symbol_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 1: const inputs.
    for ibn_sym, _ in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in const_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.ConstOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 2: mutable inputs.
    for ibn_sym, _ in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(ibn_sym.symbol_id)
        )
    for _, blob_object in mutable_input_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.MutOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 3: outputs with pre-inferred headers.
    for obn_sym, _ in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut1_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.MutOperand(blob_object.object_id)
        )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.OperandSeparator()
    )
    # Group 4: outputs whose headers are inferred at compute time.
    for obn_sym, _ in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.SymbolOperand(obn_sym.symbol_id)
        )
    for _, blob_object in mut2_operand_blob_objects:
        instruction.mutable_operand().Add().CopyFrom(
            oneflow_api.deprecated.vm.Mut2Operand(blob_object.object_id)
        )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
def _InitOpConfSymbol(self, symbol_id, op_conf):
    """Emit an ``InitOperatorConfSymbol`` instruction and register the matching
    eager symbol carrying ``op_conf`` under ``symbol_id``.
    """
    instruction = instr_cfg.InstructionProto()
    instruction.set_instr_type_name("InitOperatorConfSymbol")
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.InitSymbolOperand(symbol_id)
    )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
    # Build the pb EagerSymbol, then convert it to the cfg representation the
    # eager symbol list expects.
    eager_symbol = eager_symbol_pb.EagerSymbol()
    eager_symbol.symbol_id = symbol_id
    eager_symbol.op_conf_symbol.CopyFrom(op_conf)
    eager_symbol = oneflow_api.deprecated.MakeEagerSymbolByString(str(eager_symbol))
    self.eager_symbol_list().mutable_eager_symbol().Add().CopyFrom(eager_symbol)
def _FetchBlob(self, instruction_name, blob_object, fetcher):
    """Emit a ``<device_tag>.<instruction_name>`` instruction that invokes the
    registered ``fetcher`` callback with the blob's data.

    Operand order (const blob object, then callback id) is part of the
    instruction's wire contract.
    """
    unique_callback_id = python_callback.GetIdForRegisteredCallback(fetcher)
    instruction = instr_cfg.InstructionProto()
    device_tag = blob_object.parallel_desc_symbol.device_tag
    instruction.set_instr_type_name("%s.%s" % (device_tag, instruction_name))
    instruction.set_parallel_desc_symbol_id(blob_object.parallel_desc_symbol.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.ConstOperand(blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.Int64Operand(unique_callback_id)
    )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
def FeedBlob(self, blob_object, feeder):
    """Emit a ``<device_tag>.FeedBlob`` instruction that lets the registered
    ``feeder`` callback fill ``blob_object`` with data.

    The blob is passed as a Mut2Operand because feeding may also (re)infer its
    header.  Operand order is part of the instruction's wire contract.
    """
    unique_callback_id = python_callback.GetIdForRegisteredCallback(feeder)
    instruction = instr_cfg.InstructionProto()
    device_tag = blob_object.parallel_desc_symbol.device_tag
    instruction.set_instr_type_name("%s.%s" % (device_tag, "FeedBlob"))
    instruction.set_parallel_desc_symbol_id(blob_object.parallel_desc_symbol.symbol_id)
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.Mut2Operand(blob_object.object_id)
    )
    instruction.mutable_operand().Add().CopyFrom(
        oneflow_api.deprecated.vm.Int64Operand(unique_callback_id)
    )
    self.instruction_list().mutable_instruction().Add().CopyFrom(instruction)
def RegisterMethod4InstructionsBuilder():
    """Monkey-patch the module-level builder functions onto
    ``oneflow_api.deprecated.InstructionsBuilder`` as methods, keyed by each
    function's own name."""
    builder_cls = oneflow_api.deprecated.InstructionsBuilder
    for method in (
        StatelessCall,
        NoBoxingStatelessCall,
        NoBoxingCudaD2HStatelessCall,
        NoBoxingCudaH2DStatelessCall,
        RawStatelessCall,
        StatefulCall,
        InsertRemoveForeignCallbackInstruction,
        FetchBlobHeader,
        FetchBlobBody,
        MakeLazyRefBlobObject,
        GetSharedOpKernelObject4ParallelConfSymbol,
        CudaHostPinBlob,
        NewOpKernelObject,
        Build121To,
        _NewOpKernelObject,
        _StatelessCall,
        _StatefulCall,
        _GetOpConfSymbol,
        _GetOpNodeSignatureSymbol,
        _GetConstInputOperandBlobObjects,
        _GetMutableInputOperandBlobObjects,
        _GetMut1OperandBlobObjects,
        _CheckRefInBlobObjectParallelDesc,
        _GetMut2OperandBlobObjects,
        _NewSymbolId4OpConf,
        _StatelessCallOpKernel,
        _StatefulCallOpKernel,
        _InitOpConfSymbol,
        _FetchBlob,
        FeedBlob,
    ):
        setattr(builder_cls, method.__name__, method)
def _MakeNewBlobObjectLike(builder, blob_object, new_parallel_desc_symbol):
    """Create a new blob object with the same shape/dtype attributes as
    ``blob_object`` but placed on ``new_parallel_desc_symbol``.

    Works by building a synthetic Input op whose interface blob conf is dumped
    from the source blob's attributes, inferring it, and running it via
    ``RawStatelessCall``; the new blob is returned from the call's output dict.
    """
    op_conf = op_conf_pb.OperatorConf()
    op_conf.name = id_util.UniqueStr("Input")
    op_conf.device_tag = new_parallel_desc_symbol.device_tag
    op_conf.input_conf.out = "out"
    cfg_interface_blob_conf = (
        oneflow_api.oneflow.core.operator.interface_blob_conf.InterfaceBlobConf()
    )
    blob_object.op_arg_parallel_attr.DumpToInterfaceBlobConf(cfg_interface_blob_conf)
    blob_object.op_arg_blob_attr.DumpToInterfaceBlobConf(cfg_interface_blob_conf)
    # Round-trip through text format to copy the cfg conf into the pb conf.
    text_format.Parse(str(cfg_interface_blob_conf), op_conf.input_conf.blob_conf)
    op_conf.scope_symbol_id = oneflow.current_scope().symbol_id
    upstream_signature = op_node_signature_pb.OpNodeSignature()
    op_attribute = c_api_util.InferOpConf(op_conf, upstream_signature)
    parallel_conf = new_parallel_desc_symbol.parallel_conf
    bn_in_op2blob_object = {}
    builder.RawStatelessCall(
        op_attribute, parallel_conf, bn_in_op2blob_object=bn_in_op2blob_object
    )
    return bn_in_op2blob_object["out"]
def _FindOrCreateDelegateBlobObject(
builder, Fetch, x_blob_object, op_arg_parallel_attr
):
if x_blob_object.op_arg_parallel_attr == op_arg_parallel_attr:
return x_blob_object
blob_cache = blob_cache_util.FindOrCreateBlobCache(x_blob_object)
return blob_cache.GetCachedDelegateBlobObject(op_arg_parallel_attr, Fetch)
def _GetOpConfBlobNameAttr(pb_message, field):
if hasattr(pb_message, field):
return getattr(pb_message, field)
m = re.search("_(\d+)$", field)
assert m is not None
blob_name = field[0 : -len(m.group(0))]
index = int(m.group(0)[1:])
assert hasattr(pb_message, blob_name), (pb_message, blob_name)
repeated_field = getattr(pb_message, blob_name)
assert index >= 0
assert index < len(repeated_field)
return repeated_field[index]
def _ReleaseLogicalObject(obj, is_shutting_down=python_interpreter_util.IsShuttingDown):
    """Delete ``obj`` via a logical run, unless the interpreter is shutting down.

    ``is_shutting_down`` is bound as a default argument so it remains
    reachable during interpreter teardown.
    """
    if not is_shutting_down():
        LogicalRun(lambda builder: builder.DeleteObject(obj))
def _ReleasePhysicalObject(
    obj, is_shutting_down=python_interpreter_util.IsShuttingDown
):
    """Delete ``obj`` via a physical instruction run.

    Skipped entirely while the interpreter is shutting down, mirroring
    ``_ReleaseLogicalObject``.
    """
    if not is_shutting_down():
        PhysicalRun(lambda b: b.DeleteObject(obj))
| [
"oneflow.python.eager.boxing_util.BoxingTo",
"oneflow.python.eager.symbol.Symbol",
"oneflow.python.eager.object_storage.GetSharedOpKernelObject4ParallelConfSymbol",
"oneflow.core.operator.op_conf_pb2.OperatorConf",
"oneflow.python.eager.symbol_storage.SetSymbol4SerializedOpConf",
"oneflow.core.operator.op... | [((6146, 6212), 'oneflow.python.eager.boxing_util.TryReplaceDeviceTag', 'boxing_util.TryReplaceDeviceTag', (['self', 'op_parallel_desc_sym', '"""cpu"""'], {}), "(self, op_parallel_desc_sym, 'cpu')\n", (6177, 6212), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((9231, 9283), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['callback'], {}), '(callback)\n', (9273, 9283), True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((9302, 9330), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (9328, 9330), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((10014, 10045), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (10043, 10045), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((11179, 11255), 'oneflow.python.eager.object_storage.HasSharedOpKernelObject4ParallelConfSymbol', 'object_storage.HasSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (11236, 11255), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((11468, 11516), 'oneflow_api.Object', 'oneflow_api.Object', (['object_id', 'parallel_desc_sym'], {}), '(object_id, parallel_desc_sym)\n', (11486, 11516), False, 'import oneflow_api\n'), ((11521, 11606), 'oneflow.python.eager.object_storage.SetSharedOpKernelObject4ParallelConfSymbol', 'object_storage.SetSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym', 'obj'], {}), '(parallel_desc_sym,\n obj)\n', (11578, 11606), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((11910, 11961), 'oneflow_api.GetScopeSymbol', 'oneflow_api.GetScopeSymbol', (['op_conf.scope_symbol_id'], {}), '(op_conf.scope_symbol_id)\n', (11936, 11961), False, 'import 
oneflow_api\n'), ((12038, 12079), 'oneflow.python.framework.c_api_util.GetOpParallelSymbolId', 'c_api_util.GetOpParallelSymbolId', (['op_conf'], {}), '(op_conf)\n', (12070, 12079), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((12107, 12159), 'oneflow_api.GetPlacementSymbol', 'oneflow_api.GetPlacementSymbol', (['parallel_desc_sym_id'], {}), '(parallel_desc_sym_id)\n', (12137, 12159), False, 'import oneflow_api\n'), ((12740, 12768), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (12766, 12768), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((14262, 14313), 'oneflow_api.GetScopeSymbol', 'oneflow_api.GetScopeSymbol', (['op_conf.scope_symbol_id'], {}), '(op_conf.scope_symbol_id)\n', (14288, 14313), False, 'import oneflow_api\n'), ((17579, 17640), 'oneflow.python.eager.symbol_storage.HasSymbol4SerializedOpConf', 'symbol_storage.HasSymbol4SerializedOpConf', (['serialized_op_conf'], {}), '(serialized_op_conf)\n', (17620, 17640), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17782, 17820), 'oneflow.python.eager.symbol.Symbol', 'symbol_util.Symbol', (['symbol_id', 'op_conf'], {}), '(symbol_id, op_conf)\n', (17800, 17820), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((17825, 17871), 'oneflow.python.eager.symbol_storage.SetSymbol4Id', 'symbol_storage.SetSymbol4Id', (['symbol_id', 'symbol'], {}), '(symbol_id, symbol)\n', (17852, 17871), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((17876, 17945), 'oneflow.python.eager.symbol_storage.SetSymbol4SerializedOpConf', 'symbol_storage.SetSymbol4SerializedOpConf', (['serialized_op_conf', 'symbol'], {}), '(serialized_op_conf, symbol)\n', (17917, 17945), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18153, 18212), 'oneflow_api.HasOpNodeSignatureSymbol', 'oneflow_api.HasOpNodeSignatureSymbol', (['new_op_node_signature'], {}), 
'(new_op_node_signature)\n', (18189, 18212), False, 'import oneflow_api\n'), ((18365, 18435), 'oneflow_api.AddOpNodeSignatureSymbol', 'oneflow_api.AddOpNodeSignatureSymbol', (['symbol_id', 'new_op_node_signature'], {}), '(symbol_id, new_op_node_signature)\n', (18401, 18435), False, 'import oneflow_api\n'), ((18447, 18494), 'oneflow_api.GetOpNodeSignatureSymbol', 'oneflow_api.GetOpNodeSignatureSymbol', (['symbol_id'], {}), '(symbol_id)\n', (18483, 18494), False, 'import oneflow_api\n'), ((23226, 23254), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (23252, 23254), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((26284, 26312), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (26310, 26312), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((28866, 28894), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (28892, 28894), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((29173, 29202), 'oneflow.core.eager.eager_symbol_pb2.EagerSymbol', 'eager_symbol_pb.EagerSymbol', ([], {}), '()\n', (29200, 29202), True, 'import oneflow.core.eager.eager_symbol_pb2 as eager_symbol_pb\n'), ((29547, 29598), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['fetcher'], {}), '(fetcher)\n', (29589, 29598), True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((29617, 29645), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (29643, 29645), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((30268, 30318), 'oneflow.python.framework.python_callback.GetIdForRegisteredCallback', 'python_callback.GetIdForRegisteredCallback', (['feeder'], {}), '(feeder)\n', (30310, 30318), 
True, 'import oneflow.python.framework.python_callback as python_callback\n'), ((30337, 30365), 'oneflow_api.oneflow.core.vm.instruction.InstructionProto', 'instr_cfg.InstructionProto', ([], {}), '()\n', (30363, 30365), True, 'import oneflow_api.oneflow.core.vm.instruction as instr_cfg\n'), ((34049, 34074), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_pb.OperatorConf', ([], {}), '()\n', (34072, 34074), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((34094, 34120), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Input"""'], {}), "('Input')\n", (34111, 34120), True, 'import oneflow.python.framework.id_util as id_util\n'), ((34257, 34330), 'oneflow_api.oneflow.core.operator.interface_blob_conf.InterfaceBlobConf', 'oneflow_api.oneflow.core.operator.interface_blob_conf.InterfaceBlobConf', ([], {}), '()\n', (34328, 34330), False, 'import oneflow_api\n'), ((34676, 34714), 'oneflow.core.operator.op_node_signature_pb2.OpNodeSignature', 'op_node_signature_pb.OpNodeSignature', ([], {}), '()\n', (34712, 34714), True, 'import oneflow.core.operator.op_node_signature_pb2 as op_node_signature_pb\n'), ((34734, 34785), 'oneflow.python.framework.c_api_util.InferOpConf', 'c_api_util.InferOpConf', (['op_conf', 'upstream_signature'], {}), '(op_conf, upstream_signature)\n', (34756, 34785), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((35240, 35292), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['x_blob_object'], {}), '(x_blob_object)\n', (35277, 35292), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((35506, 35534), 're.search', 're.search', (['"""_(\\\\d+)$"""', 'field'], {}), "('_(\\\\d+)$', field)\n", (35515, 35534), False, 'import re\n'), ((2424, 2460), 'oneflow_api.vm.PhysicalIdGenerator', 'oneflow_api.vm.PhysicalIdGenerator', ([], {}), '()\n', (2458, 2460), False, 'import oneflow_api\n'), ((2612, 2647), 
'oneflow_api.vm.LogicalIdGenerator', 'oneflow_api.vm.LogicalIdGenerator', ([], {}), '()\n', (2645, 2647), False, 'import oneflow_api\n'), ((2813, 2844), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2842, 2844), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((2886, 2917), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (2915, 2917), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((3102, 3215), 'oneflow_api.deprecated.InstructionsBuilder', 'oneflow_api.deprecated.InstructionsBuilder', (['id_generator', 'instruction_list', 'eager_symbol_list', 'release_object'], {}), '(id_generator, instruction_list,\n eager_symbol_list, release_object)\n', (3144, 3215), False, 'import oneflow_api\n'), ((3811, 3874), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['self', 'x_blob_object', 'op_arg_parallel_attr'], {}), '(self, x_blob_object, op_arg_parallel_attr)\n', (3831, 3874), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((8666, 8729), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['self', 'x_blob_object', 'op_arg_parallel_attr'], {}), '(self, x_blob_object, op_arg_parallel_attr)\n', (8686, 8729), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((9450, 9503), 'oneflow_api.deprecated.vm.DelObjectOperand', 'oneflow_api.deprecated.vm.DelObjectOperand', (['object_id'], {}), '(object_id)\n', (9492, 9503), False, 'import oneflow_api\n'), ((9568, 9626), 'oneflow_api.deprecated.vm.Int64Operand', 'oneflow_api.deprecated.vm.Int64Operand', (['unique_callback_id'], {}), '(unique_callback_id)\n', (9606, 9626), False, 'import oneflow_api\n'), ((10410, 10438), 'oneflow_api.oneflow.core.job.placement.ParallelConf', 'placement_cfg.ParallelConf', ([], {}), '()\n', (10436, 10438), True, 'import 
oneflow_api.oneflow.core.job.placement as placement_cfg\n'), ((11272, 11348), 'oneflow.python.eager.object_storage.GetSharedOpKernelObject4ParallelConfSymbol', 'object_storage.GetSharedOpKernelObject4ParallelConfSymbol', (['parallel_desc_sym'], {}), '(parallel_desc_sym)\n', (11329, 11348), True, 'import oneflow.python.eager.object_storage as object_storage\n'), ((12961, 13024), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['job_desc_sym.symbol_id'], {}), '(job_desc_sym.symbol_id)\n', (13000, 13024), False, 'import oneflow_api\n'), ((13089, 13151), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['op_conf_sym.symbol_id'], {}), '(op_conf_sym.symbol_id)\n', (13128, 13151), False, 'import oneflow_api\n'), ((13216, 13263), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['object_id'], {}), '(object_id)\n', (13252, 13263), False, 'import oneflow_api\n'), ((13801, 13842), 'oneflow_api.GetPlacementSymbol', 'oneflow_api.GetPlacementSymbol', (['symbol_id'], {}), '(symbol_id)\n', (13831, 13842), False, 'import oneflow_api\n'), ((17657, 17718), 'oneflow.python.eager.symbol_storage.GetSymbol4SerializedOpConf', 'symbol_storage.GetSymbol4SerializedOpConf', (['serialized_op_conf'], {}), '(serialized_op_conf)\n', (17698, 17718), True, 'import oneflow.python.eager.symbol_storage as symbol_storage\n'), ((18229, 18288), 'oneflow_api.GetOpNodeSignatureSymbol', 'oneflow_api.GetOpNodeSignatureSymbol', (['new_op_node_signature'], {}), '(new_op_node_signature)\n', (18265, 18288), False, 'import oneflow_api\n'), ((23490, 23553), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['job_desc_sym.symbol_id'], {}), '(job_desc_sym.symbol_id)\n', (23529, 23553), False, 'import oneflow_api\n'), ((23618, 23680), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['op_conf_sym.symbol_id'], {}), 
'(op_conf_sym.symbol_id)\n', (23657, 23680), False, 'import oneflow_api\n'), ((23745, 23817), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['op_node_signature_sym.symbol_id'], {}), '(op_node_signature_sym.symbol_id)\n', (23784, 23817), False, 'import oneflow_api\n'), ((23882, 23949), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['shared_opkernel_obj.object_id'], {}), '(shared_opkernel_obj.object_id)\n', (23918, 23949), False, 'import oneflow_api\n'), ((24014, 24058), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (24056, 24058), False, 'import oneflow_api\n'), ((24512, 24556), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (24554, 24556), False, 'import oneflow_api\n'), ((25012, 25056), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (25054, 25056), False, 'import oneflow_api\n'), ((25494, 25538), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (25536, 25538), False, 'import oneflow_api\n'), ((26549, 26612), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['opkernel_object.object_id'], {}), '(opkernel_object.object_id)\n', (26585, 26612), False, 'import oneflow_api\n'), ((26677, 26749), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['op_node_signature_sym.symbol_id'], {}), '(op_node_signature_sym.symbol_id)\n', (26716, 26749), False, 'import oneflow_api\n'), ((26814, 26858), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (26856, 26858), False, 'import oneflow_api\n'), ((27312, 27356), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (27354, 27356), 
False, 'import oneflow_api\n'), ((27812, 27856), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (27854, 27856), False, 'import oneflow_api\n'), ((28294, 28338), 'oneflow_api.deprecated.vm.OperandSeparator', 'oneflow_api.deprecated.vm.OperandSeparator', ([], {}), '()\n', (28336, 28338), False, 'import oneflow_api\n'), ((29015, 29069), 'oneflow_api.deprecated.vm.InitSymbolOperand', 'oneflow_api.deprecated.vm.InitSymbolOperand', (['symbol_id'], {}), '(symbol_id)\n', (29058, 29069), False, 'import oneflow_api\n'), ((29931, 29992), 'oneflow_api.deprecated.vm.ConstOperand', 'oneflow_api.deprecated.vm.ConstOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (29969, 29992), False, 'import oneflow_api\n'), ((30057, 30115), 'oneflow_api.deprecated.vm.Int64Operand', 'oneflow_api.deprecated.vm.Int64Operand', (['unique_callback_id'], {}), '(unique_callback_id)\n', (30095, 30115), False, 'import oneflow_api\n'), ((30645, 30705), 'oneflow_api.deprecated.vm.Mut2Operand', 'oneflow_api.deprecated.vm.Mut2Operand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (30682, 30705), False, 'import oneflow_api\n'), ((30770, 30828), 'oneflow_api.deprecated.vm.Int64Operand', 'oneflow_api.deprecated.vm.Int64Operand', (['unique_callback_id'], {}), '(unique_callback_id)\n', (30808, 30828), False, 'import oneflow_api\n'), ((34617, 34640), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (34638, 34640), False, 'import oneflow\n'), ((19930, 19979), 'oneflow_api.GetPlacementSymbol', 'oneflow_api.GetPlacementSymbol', (['bn2symbol_id[obn]'], {}), '(bn2symbol_id[obn])\n', (19960, 19979), False, 'import oneflow_api\n'), ((21924, 21973), 'oneflow_api.GetPlacementSymbol', 'oneflow_api.GetPlacementSymbol', (['bn2symbol_id[obn]'], {}), '(bn2symbol_id[obn])\n', (21954, 21973), False, 'import oneflow_api\n'), ((24187, 24245), 'oneflow_api.deprecated.vm.SymbolOperand', 
'oneflow_api.deprecated.vm.SymbolOperand', (['ibn_sym.symbol_id'], {}), '(ibn_sym.symbol_id)\n', (24226, 24245), False, 'import oneflow_api\n'), ((24382, 24443), 'oneflow_api.deprecated.vm.ConstOperand', 'oneflow_api.deprecated.vm.ConstOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (24420, 24443), False, 'import oneflow_api\n'), ((24687, 24745), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['ibn_sym.symbol_id'], {}), '(ibn_sym.symbol_id)\n', (24726, 24745), False, 'import oneflow_api\n'), ((24884, 24943), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (24920, 24943), False, 'import oneflow_api\n'), ((25178, 25236), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['obn_sym.symbol_id'], {}), '(obn_sym.symbol_id)\n', (25217, 25236), False, 'import oneflow_api\n'), ((25366, 25425), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (25402, 25425), False, 'import oneflow_api\n'), ((25660, 25718), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['obn_sym.symbol_id'], {}), '(obn_sym.symbol_id)\n', (25699, 25718), False, 'import oneflow_api\n'), ((25848, 25908), 'oneflow_api.deprecated.vm.Mut2Operand', 'oneflow_api.deprecated.vm.Mut2Operand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (25885, 25908), False, 'import oneflow_api\n'), ((26987, 27045), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['ibn_sym.symbol_id'], {}), '(ibn_sym.symbol_id)\n', (27026, 27045), False, 'import oneflow_api\n'), ((27182, 27243), 'oneflow_api.deprecated.vm.ConstOperand', 'oneflow_api.deprecated.vm.ConstOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (27220, 27243), False, 'import 
oneflow_api\n'), ((27487, 27545), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['ibn_sym.symbol_id'], {}), '(ibn_sym.symbol_id)\n', (27526, 27545), False, 'import oneflow_api\n'), ((27684, 27743), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (27720, 27743), False, 'import oneflow_api\n'), ((27978, 28036), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['obn_sym.symbol_id'], {}), '(obn_sym.symbol_id)\n', (28017, 28036), False, 'import oneflow_api\n'), ((28166, 28225), 'oneflow_api.deprecated.vm.MutOperand', 'oneflow_api.deprecated.vm.MutOperand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (28202, 28225), False, 'import oneflow_api\n'), ((28460, 28518), 'oneflow_api.deprecated.vm.SymbolOperand', 'oneflow_api.deprecated.vm.SymbolOperand', (['obn_sym.symbol_id'], {}), '(obn_sym.symbol_id)\n', (28499, 28518), False, 'import oneflow_api\n'), ((28648, 28708), 'oneflow_api.deprecated.vm.Mut2Operand', 'oneflow_api.deprecated.vm.Mut2Operand', (['blob_object.object_id'], {}), '(blob_object.object_id)\n', (28685, 28708), False, 'import oneflow_api\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
from datetime import datetime
import numpy
import oneflow as flow
import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
# Default OFRecord dataset and pretrained-model locations on the test host.
_DATA_DIR = "/dataset/PNGS/PNG227/of_record_repeated"
# Timestamped per-run directory so successive runs do not overwrite models.
_MODEL_SAVE_DIR = "./model_save-{}".format(
    str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
)
_MODEL_LOAD = "/dataset/PNGS/cnns_model_for_test/alexnet/models/of_model_bk"
# Default comma-separated hosts for multi-node runs (see the --node_list flag).
NODE_LIST = "192.168.1.12,192.168.1.14"
class DLNetSpec(object):
    """Bag of hyper-parameters and paths for the AlexNet test run.

    Mirrors the command-line flags declared below, for driving the test
    programmatically instead of via ``argparse``.
    """

    def __init__(self):
        # Dataset locations.
        self.eval_dir = _DATA_DIR
        self.train_dir = _DATA_DIR
        # Checkpoint locations.
        self.model_save_dir = _MODEL_SAVE_DIR
        self.model_load_dir = _MODEL_LOAD
        # Cluster / device topology.
        self.num_nodes = 1
        self.gpu_num_per_node = 1
        # Batch and iteration settings.
        self.batch_size = 8
        self.data_part_num = 32
        self.iter_num = 10
        self.num_unpack = 2
# Command-line interface.  ``args`` is produced in the __main__ guard below
# and is also read as a module-global by the layer helpers.
parser = argparse.ArgumentParser(description="flags for multi-node and resource")
# NOTE(review): --num_nodes is declared type=str but defaults to int 1; it is
# overwritten in __main__ anyway — confirm before relying on its parsed type.
parser.add_argument("-nn", "--num_nodes", type=str, default=1, required=False)
parser.add_argument("-g", "--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("-i", "--iter_num", type=int, default=10, required=False)
parser.add_argument(
    "-m", "--multinode", default=False, action="store_true", required=False
)
parser.add_argument("-n", "--node_list", type=str, default=NODE_LIST, required=False)
# Worker-deployment options: copy the binary over scp, or start by hand.
parser.add_argument(
    "-s", "--skip_scp_binary", default=False, action="store_true", required=False
)
parser.add_argument(
    "-c",
    "--scp_binary_without_uuid",
    default=False,
    action="store_true",
    required=False,
)
parser.add_argument(
    "-r", "--remote_by_hand", default=False, action="store_true", required=False
)
# Dataset and checkpoint paths.
parser.add_argument("-e", "--eval_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument("-t", "--train_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument(
    "-load", "--model_load_dir", type=str, default=_MODEL_LOAD, required=False
)
parser.add_argument(
    "-save", "--model_save_dir", type=str, default=_MODEL_SAVE_DIR, required=False
)
parser.add_argument("-dn", "--data_part_num", type=int, default=32, required=False)
parser.add_argument("-b", "--batch_size", type=int, default=8, required=False)
parser.add_argument("-p", "--num_piece_in_batch", type=int, default=2, required=False)
def _conv2d_layer(
    args,
    name,
    input,
    filters,
    kernel_size=3,
    strides=1,
    padding="SAME",
    data_format="NCHW",
    dilation_rate=1,
    activation=op_conf_util.kRelu,
    use_bias=False,
    weight_initializer=None,
    bias_initializer=None,
):
    """2-D convolution with weights repeated per micro-batch piece.

    Args:
        args: parsed flags; only ``args.num_piece_in_batch`` is read, to
            repeat the variables once per unpacked piece.
        name: op-name prefix for the variables and the conv op.
        input: input blob; ``input.shape[1]`` is taken as the channel count
            (NCHW layout by default).
        filters: number of output channels.
        activation: only ``op_conf_util.kRelu`` (or None) is supported;
            anything else raises NotImplementedError.
        weight_initializer / bias_initializer: variable initializers; when
            None, a fresh ``flow.random_uniform_initializer()`` is created.

    Returns:
        The (optionally biased and activated) convolution output blob.
    """
    # FIX: build default initializers at call time.  The original evaluated
    # flow.random_uniform_initializer() in the ``def`` line, which runs once
    # at import and shares the resulting object across every call (the
    # classic Python default-argument pitfall).
    if weight_initializer is None:
        weight_initializer = flow.random_uniform_initializer()
    if bias_initializer is None:
        bias_initializer = flow.random_uniform_initializer()
    weight_shape = (filters, input.shape[1], kernel_size, kernel_size)
    weight = flow.get_variable(
        name + "-weight",
        shape=weight_shape,
        dtype=input.dtype,
        initializer=weight_initializer,
    )
    weight = flow.identity(weight)
    weight = flow.repeat(weight, args.num_piece_in_batch)
    output = flow.nn.conv2d(
        input, weight, strides, padding, None, data_format, dilation_rate, name=name
    )
    if use_bias:
        bias = flow.get_variable(
            name + "-bias",
            shape=(filters,),
            dtype=input.dtype,
            initializer=bias_initializer,
        )
        bias = flow.identity(bias)
        bias = flow.repeat(bias, args.num_piece_in_batch)
        output = flow.nn.bias_add(output, bias, data_format)
    if activation is not None:
        if activation == op_conf_util.kRelu:
            output = flow.math.relu(output)
        else:
            raise NotImplementedError
    return output
def _data_load_layer(args, data_dir):
    """Decode one minibatch of OFRecord images/labels from ``data_dir`` and
    unpack both blobs into ``args.num_piece_in_batch`` pieces.

    Returns:
        A ``(labels, images)`` pair of unpacked blobs.
    """
    total_batch_size = args.batch_size * args.gpu_num_per_node * args.num_nodes
    ofrecord = flow.data.ofrecord_reader(
        data_dir,
        batch_size=total_batch_size,
        data_part_num=args.data_part_num,
        name="decode",
    )
    image = flow.data.ofrecord_image_decoder(ofrecord, "encoded", color_space="RGB")
    label = flow.data.ofrecord_raw_decoder(
        ofrecord, "class/label", shape=(), dtype=flow.int32
    )
    resized = flow.image.resize(image, resize_x=227, resize_y=227, color_space="RGB")
    # Per-channel RGB means for normalization.
    normalized = flow.image.crop_mirror_normalize(
        resized,
        color_space="RGB",
        output_layout="NCHW",
        mean=[123.68, 116.78, 103.94],
        output_dtype=flow.float,
    )
    return (
        flow.unpack(label, args.num_piece_in_batch),
        flow.unpack(normalized, args.num_piece_in_batch),
    )
def _dense_layer(
    inputs,
    units,
    activation=None,
    use_bias=True,
    kernel_initializer=None,
    bias_initializer=None,
    trainable=True,
    name=None,
):
    """Fully connected layer with weights repeated per micro-batch piece.

    NOTE(review): reads the module-global ``args`` (parsed under __main__)
    for ``num_piece_in_batch`` — this only works when the script is run
    directly; confirm before reusing this as a library function.

    Args:
        inputs: input blob; flattened to 2-D when it has more than 2 axes.
        units: output feature count.
        activation: optional callable applied as ``activation(out, name=...)``.
        use_bias: add a learned bias when True.
        kernel_initializer / bias_initializer: fall back to
            ``flow.constant_initializer(0)`` when None.
        trainable: mark the created variables as trainable.
        name: op-name prefix; a unique prefix is generated when None.

    Returns:
        The layer output, reshaped back to the input's leading axes when
        the input had more than 2 axes.
    """
    in_shape = inputs.shape
    in_num_axes = len(in_shape)
    assert in_num_axes >= 2
    if name is not None:
        name_prefix = name
    else:
        # BUGFIX: the original called id_util.UniqueStr("Dense_"), but
        # ``id_util`` is never imported in this file, so the name=None path
        # raised NameError.  Generate a unique prefix with the stdlib.
        import uuid

        name_prefix = "Dense_" + uuid.uuid4().hex
    inputs = flow.reshape(inputs, (-1, in_shape[-1])) if in_num_axes > 2 else inputs
    weight = flow.get_variable(
        name="{}-weight".format(name_prefix),
        shape=(units, inputs.shape[1]),
        dtype=inputs.dtype,
        initializer=kernel_initializer
        if kernel_initializer is not None
        else flow.constant_initializer(0),
        trainable=trainable,
        model_name="weight",
    )
    weight = flow.identity(weight)
    weight = flow.repeat(weight, args.num_piece_in_batch)
    out = flow.matmul(
        a=inputs, b=weight, transpose_b=True, name="{}_matmul".format(name_prefix)
    )
    if use_bias:
        bias = flow.get_variable(
            name="{}-bias".format(name_prefix),
            shape=(units,),
            dtype=inputs.dtype,
            initializer=bias_initializer
            if bias_initializer is not None
            else flow.constant_initializer(0),
            trainable=trainable,
            model_name="bias",
        )
        bias = flow.identity(bias)
        bias = flow.repeat(bias, args.num_piece_in_batch)
        out = flow.nn.bias_add(out, bias, name="{}_bias_add".format(name_prefix))
    out = (
        activation(out, name="{}_activation".format(name_prefix))
        if activation is not None
        else out
    )
    out = flow.reshape(out, in_shape[:-1] + (units,)) if in_num_axes > 2 else out
    return out
def alexnet(args, images, labels, trainable=True):
    """Build the AlexNet-style forward graph and return the softmax loss blob."""

    def _fc_initializer():
        # Truncated-normal initializer shared in spirit (but created fresh)
        # for every dense layer.
        conf = initializer_conf_util.InitializerConf()
        conf.truncated_normal_conf.std = 0.816496580927726
        return conf

    # Convolutional trunk; pooling uses avg_pool2d as in the original test.
    net = _conv2d_layer(
        args, "conv1", images, filters=64, kernel_size=11, strides=4, padding="VALID"
    )
    net = flow.nn.avg_pool2d(net, 3, 2, "VALID", "NCHW", name="pool1")
    net = _conv2d_layer(args, "conv2", net, filters=192, kernel_size=5)
    net = flow.nn.avg_pool2d(net, 3, 2, "VALID", "NCHW", name="pool2")
    net = _conv2d_layer(args, "conv3", net, filters=384)
    net = _conv2d_layer(args, "conv4", net, filters=384)
    net = _conv2d_layer(args, "conv5", net, filters=256)
    net = flow.nn.avg_pool2d(net, 3, 2, "VALID", "NCHW", name="pool5")
    # Flatten spatial dims before the fully connected head.
    if len(net.shape) > 2:
        net = flow.reshape(net, shape=(net.shape[0], -1))
    # Fully connected head: fc1/fc2 with ReLU, fc3 produces the logits.
    for unit_count, layer_name, act in (
        (4096, "fc1", flow.math.relu),
        (4096, "fc2", flow.math.relu),
        (1001, "fc3", None),
    ):
        net = _dense_layer(
            inputs=net,
            units=unit_count,
            activation=act,
            use_bias=False,
            kernel_initializer=_fc_initializer(),
            bias_initializer=False,
            trainable=trainable,
            name=layer_name,
        )
    return flow.nn.sparse_softmax_cross_entropy_with_logits(
        labels, net, name="softmax_loss"
    )
def main(args):
    """Build the train/eval jobs, load or init a checkpoint, run training for
    ``args.iter_num`` iterations, and dump losses to ``./of_loss/alexnet``."""
    flow.config.machine_num(args.num_nodes)
    flow.config.gpu_device_num(args.gpu_num_per_node)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    func_config.default_data_type(flow.float)
    # Pin cuDNN conv algorithms so results are reproducible across runs.
    func_config.cudnn_conv_force_fwd_algo(0)
    func_config.cudnn_conv_force_bwd_data_algo(1)
    func_config.cudnn_conv_force_bwd_filter_algo(1)
    # Training job: captures ``args`` and ``func_config`` at definition time.
    @flow.global_function(type="train", function_config=func_config)
    def alexnet_train_job():
        (labels, images) = _data_load_layer(args, args.train_dir)
        loss = alexnet(args, images, labels)
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [1e-05]), momentum=0
        ).minimize(loss)
        # Pack the per-piece losses back into one full-batch blob.
        return flow.pack(loss, args.num_piece_in_batch)
    # Fresh, simpler config for the eval job (no forced cuDNN algorithms).
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    @flow.global_function(function_config=func_config)
    def alexnet_eval_job():
        with flow.scope.consistent_view():
            (labels, images) = _data_load_layer(args, args.eval_dir)
            loss = alexnet(args, images, labels)
            return flow.pack(loss, args.num_piece_in_batch)
    # Initialize fresh weights or restore from the given model directory.
    check_point = flow.train.CheckPoint()
    if not args.model_load_dir:
        check_point.init()
    else:
        check_point.load(args.model_load_dir)
    num_nodes = args.num_nodes
    print(
        "Traning alexnet: num_gpu_per_node = {}, num_nodes = {}.".format(
            args.gpu_num_per_node, num_nodes
        )
    )
    print("{:>12} {:>12} {:>12}".format("iter", "loss type", "loss value"))
    loss = []
    for i in range(args.iter_num):
        # One training step; .get() synchronizes and returns the packed loss.
        train_loss = alexnet_train_job().get().mean()
        loss.append(train_loss)
        fmt_str = "{:>12} {:>12} {:>12.6f}"
        print(fmt_str.format(i, "train loss:", train_loss))
        # Periodic checkpointing every 100 iterations.
        if (i + 1) % 100 == 0:
            check_point.save(_MODEL_SAVE_DIR + str(i))
    # Persist the loss curve, keyed by node and total device count.
    loss_file = "{}n{}c.npy".format(
        str(num_nodes), str(args.gpu_num_per_node * num_nodes)
    )
    loss_path = "./of_loss/alexnet"
    if not os.path.exists(loss_path):
        os.makedirs(loss_path)
    numpy.save(os.path.join(loss_path, loss_file), loss)
if __name__ == "__main__":
    args = parser.parse_args()
    # Derive the node count from --node_list only when --multinode is set.
    args.num_nodes = len(args.node_list.strip().split(",")) if args.multinode else 1
    flow.env.ctrl_port(9788)
    if args.multinode:
        # Multi-node run: switch control port and register every worker host.
        flow.env.ctrl_port(12138)
        nodes = []
        for n in args.node_list.strip().split(","):
            addr_dict = {}
            addr_dict["addr"] = n
            nodes.append(addr_dict)
        flow.env.machine(nodes)
        # Ship and start the worker binary unless workers were started by hand.
        if args.remote_by_hand is False:
            if args.scp_binary_without_uuid:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=False)
            elif args.skip_scp_binary:
                flow.deprecated.init_worker(scp_binary=False, use_uuid=False)
            else:
                flow.deprecated.init_worker(scp_binary=True, use_uuid=True)
    main(args)
    # Tear down remote workers only if this process started them via scp.
    if (
        args.multinode
        and args.skip_scp_binary is False
        and (args.scp_binary_without_uuid is False)
    ):
        flow.deprecated.delete_worker()
| [
"oneflow.core.job.initializer_conf_pb2.InitializerConf",
"oneflow.constant_initializer",
"oneflow.image.crop_mirror_normalize",
"oneflow.repeat",
"oneflow.data.ofrecord_image_decoder",
"oneflow.data.ofrecord_reader",
"oneflow.pack",
"oneflow.math.relu",
"oneflow.unpack",
"oneflow.optimizer.Piecewi... | [((1478, 1550), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for multi-node and resource"""'}), "(description='flags for multi-node and resource')\n", (1501, 1550), False, 'import argparse\n'), ((3188, 3221), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (3219, 3221), True, 'import oneflow as flow\n'), ((3244, 3277), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (3275, 3277), True, 'import oneflow as flow\n'), ((3366, 3476), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer)\n", (3383, 3476), True, 'import oneflow as flow\n'), ((3525, 3546), 'oneflow.identity', 'flow.identity', (['weight'], {}), '(weight)\n', (3538, 3546), True, 'import oneflow as flow\n'), ((3560, 3604), 'oneflow.repeat', 'flow.repeat', (['weight', 'args.num_piece_in_batch'], {}), '(weight, args.num_piece_in_batch)\n', (3571, 3604), True, 'import oneflow as flow\n'), ((3618, 3714), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'None', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, None, data_format,\n dilation_rate, name=name)\n', (3632, 3714), True, 'import oneflow as flow\n'), ((4460, 4577), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['data_dir'], {'batch_size': 'total_batch_size', 'data_part_num': 'args.data_part_num', 'name': '"""decode"""'}), "(data_dir, batch_size=total_batch_size,\n data_part_num=args.data_part_num, name='decode')\n", (4485, 4577), True, 'import oneflow as flow\n'), ((4625, 4697), 'oneflow.data.ofrecord_image_decoder', 'flow.data.ofrecord_image_decoder', (['ofrecord', '"""encoded"""'], {'color_space': '"""RGB"""'}), 
"(ofrecord, 'encoded', color_space='RGB')\n", (4657, 4697), True, 'import oneflow as flow\n'), ((4710, 4798), 'oneflow.data.ofrecord_raw_decoder', 'flow.data.ofrecord_raw_decoder', (['ofrecord', '"""class/label"""'], {'shape': '()', 'dtype': 'flow.int32'}), "(ofrecord, 'class/label', shape=(), dtype=\n flow.int32)\n", (4740, 4798), True, 'import oneflow as flow\n'), ((4818, 4889), 'oneflow.image.resize', 'flow.image.resize', (['image'], {'resize_x': '(227)', 'resize_y': '(227)', 'color_space': '"""RGB"""'}), "(image, resize_x=227, resize_y=227, color_space='RGB')\n", (4835, 4889), True, 'import oneflow as flow\n'), ((4903, 5026), 'oneflow.image.crop_mirror_normalize', 'flow.image.crop_mirror_normalize', (['rsz'], {'color_space': '"""RGB"""', 'output_layout': '"""NCHW"""', 'mean': 'rgb_mean', 'output_dtype': 'flow.float'}), "(rsz, color_space='RGB', output_layout=\n 'NCHW', mean=rgb_mean, output_dtype=flow.float)\n", (4935, 5026), True, 'import oneflow as flow\n'), ((5968, 5989), 'oneflow.identity', 'flow.identity', (['weight'], {}), '(weight)\n', (5981, 5989), True, 'import oneflow as flow\n'), ((6003, 6047), 'oneflow.repeat', 'flow.repeat', (['weight', 'args.num_piece_in_batch'], {}), '(weight, args.num_piece_in_batch)\n', (6014, 6047), True, 'import oneflow as flow\n'), ((7116, 7178), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv1', '(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool1"""'}), "(conv1, 3, 2, 'VALID', 'NCHW', name='pool1')\n", (7134, 7178), True, 'import oneflow as flow\n'), ((7267, 7329), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv2', '(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool2"""'}), "(conv2, 3, 2, 'VALID', 'NCHW', name='pool2')\n", (7285, 7329), True, 'import oneflow as flow\n'), ((7525, 7587), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['conv5', '(3)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool5"""'}), "(conv5, 3, 2, 'VALID', 'NCHW', name='pool5')\n", (7543, 7587), True, 'import 
oneflow as flow\n'), ((8709, 8796), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'fc3'], {'name': '"""softmax_loss"""'}), "(labels, fc3, name=\n 'softmax_loss')\n", (8757, 8796), True, 'import oneflow as flow\n'), ((8844, 8883), 'oneflow.config.machine_num', 'flow.config.machine_num', (['args.num_nodes'], {}), '(args.num_nodes)\n', (8867, 8883), True, 'import oneflow as flow\n'), ((8888, 8937), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (8914, 8937), True, 'import oneflow as flow\n'), ((8956, 8977), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8975, 8977), True, 'import oneflow as flow\n'), ((9244, 9307), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (9264, 9307), True, 'import oneflow as flow\n'), ((9655, 9676), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (9674, 9676), True, 'import oneflow as flow\n'), ((9729, 9778), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (9749, 9778), True, 'import oneflow as flow\n'), ((10047, 10070), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (10068, 10070), True, 'import oneflow as flow\n'), ((11185, 11209), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['(9788)'], {}), '(9788)\n', (11203, 11209), True, 'import oneflow as flow\n'), ((3757, 3861), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-bias')"], {'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer)\n", (3774, 3861), True, 'import oneflow as flow\n'), ((3932, 3951), 'oneflow.identity', 'flow.identity', (['bias'], {}), 
'(bias)\n', (3945, 3951), True, 'import oneflow as flow\n'), ((3967, 4009), 'oneflow.repeat', 'flow.repeat', (['bias', 'args.num_piece_in_batch'], {}), '(bias, args.num_piece_in_batch)\n', (3978, 4009), True, 'import oneflow as flow\n'), ((4027, 4070), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (4043, 4070), True, 'import oneflow as flow\n'), ((5090, 5133), 'oneflow.unpack', 'flow.unpack', (['label', 'args.num_piece_in_batch'], {}), '(label, args.num_piece_in_batch)\n', (5101, 5133), True, 'import oneflow as flow\n'), ((5143, 5187), 'oneflow.unpack', 'flow.unpack', (['normal', 'args.num_piece_in_batch'], {}), '(normal, args.num_piece_in_batch)\n', (5154, 5187), True, 'import oneflow as flow\n'), ((5549, 5589), 'oneflow.reshape', 'flow.reshape', (['inputs', '(-1, in_shape[-1])'], {}), '(inputs, (-1, in_shape[-1]))\n', (5561, 5589), True, 'import oneflow as flow\n'), ((6540, 6559), 'oneflow.identity', 'flow.identity', (['bias'], {}), '(bias)\n', (6553, 6559), True, 'import oneflow as flow\n'), ((6575, 6617), 'oneflow.repeat', 'flow.repeat', (['bias', 'args.num_piece_in_batch'], {}), '(bias, args.num_piece_in_batch)\n', (6586, 6617), True, 'import oneflow as flow\n'), ((6845, 6888), 'oneflow.reshape', 'flow.reshape', (['out', '(in_shape[:-1] + (units,))'], {}), '(out, in_shape[:-1] + (units,))\n', (6857, 6888), True, 'import oneflow as flow\n'), ((7646, 7685), 'oneflow.core.job.initializer_conf_pb2.InitializerConf', 'initializer_conf_util.InitializerConf', ([], {}), '()\n', (7683, 7685), True, 'import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util\n'), ((7839, 7886), 'oneflow.reshape', 'flow.reshape', (['pool5'], {'shape': '(pool5.shape[0], -1)'}), '(pool5, shape=(pool5.shape[0], -1))\n', (7851, 7886), True, 'import oneflow as flow\n'), ((9015, 9043), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (9041, 9043), True, 'import oneflow as flow\n'), 
((9595, 9635), 'oneflow.pack', 'flow.pack', (['loss', 'args.num_piece_in_batch'], {}), '(loss, args.num_piece_in_batch)\n', (9604, 9635), True, 'import oneflow as flow\n'), ((10921, 10946), 'os.path.exists', 'os.path.exists', (['loss_path'], {}), '(loss_path)\n', (10935, 10946), False, 'import os\n'), ((10956, 10978), 'os.makedirs', 'os.makedirs', (['loss_path'], {}), '(loss_path)\n', (10967, 10978), False, 'import os\n'), ((10994, 11028), 'os.path.join', 'os.path.join', (['loss_path', 'loss_file'], {}), '(loss_path, loss_file)\n', (11006, 11028), False, 'import os\n'), ((11241, 11266), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['(12138)'], {}), '(12138)\n', (11259, 11266), True, 'import oneflow as flow\n'), ((11443, 11466), 'oneflow.env.machine', 'flow.env.machine', (['nodes'], {}), '(nodes)\n', (11459, 11466), True, 'import oneflow as flow\n'), ((11997, 12028), 'oneflow.deprecated.delete_worker', 'flow.deprecated.delete_worker', ([], {}), '()\n', (12026, 12028), True, 'import oneflow as flow\n'), ((4168, 4190), 'oneflow.math.relu', 'flow.math.relu', (['output'], {}), '(output)\n', (4182, 4190), True, 'import oneflow as flow\n'), ((9820, 9848), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (9846, 9848), True, 'import oneflow as flow\n'), ((9987, 10027), 'oneflow.pack', 'flow.pack', (['loss', 'args.num_piece_in_batch'], {}), '(loss, args.num_piece_in_batch)\n', (9996, 10027), True, 'import oneflow as flow\n'), ((918, 932), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (930, 932), False, 'from datetime import datetime\n'), ((5861, 5889), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (5886, 5889), True, 'import oneflow as flow\n'), ((11569, 11629), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', ([], {'scp_binary': '(True)', 'use_uuid': '(False)'}), '(scp_binary=True, use_uuid=False)\n', (11596, 11629), True, 'import oneflow as flow\n'), ((6421, 6449), 
'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (6446, 6449), True, 'import oneflow as flow\n'), ((9488, 9542), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[1e-05]'], {}), '([], [1e-05])\n', (9529, 9542), True, 'import oneflow as flow\n'), ((11685, 11746), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', ([], {'scp_binary': '(False)', 'use_uuid': '(False)'}), '(scp_binary=False, use_uuid=False)\n', (11712, 11746), True, 'import oneflow as flow\n'), ((11781, 11840), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', ([], {'scp_binary': '(True)', 'use_uuid': '(True)'}), '(scp_binary=True, use_uuid=True)\n', (11808, 11840), True, 'import oneflow as flow\n')] |
import oneflow as flow
import argparse
import numpy as np
import os
import time
from tqdm import tqdm
import sys
sys.path.append(".")
from models.resnet50 import resnet50
from utils.ofrecord_data_utils import OFRecordDataLoader
def _parse_args():
parser = argparse.ArgumentParser("flags for train resnet50")
parser.add_argument(
"--save_checkpoint_path",
type=str,
default="./checkpoints",
help="save checkpoint root dir",
)
parser.add_argument(
"--load_checkpoint", type=str, default="", help="load checkpoint"
)
parser.add_argument(
"--ofrecord_path", type=str, default="./ofrecord/", help="dataset path"
)
# training hyper-parameters
parser.add_argument(
"--learning_rate", type=float, default=0.001, help="learning rate"
)
parser.add_argument("--mom", type=float, default=0.9, help="momentum")
parser.add_argument("--epochs", type=int, default=10, help="training epochs")
parser.add_argument(
"--train_batch_size", type=int, default=16, help="train batch size"
)
parser.add_argument("--val_batch_size", type=int, default=4, help="val batch size")
parser.add_argument("--results", type=str, default="./results", help="tensorboard file path")
parser.add_argument("--tag", type=str, default="default", help="tag of experiment")
parser.add_argument(
"--print_interval", type=int, default=10, help="print info frequency"
)
return parser.parse_args()
def setup(args):
    """Build everything the eager-vs-graph comparison needs.

    Creates the train/val OFRecord data loaders, two identically initialized
    ResNet-50 models (one run eagerly, one compiled as an ``nn.Graph``),
    one SGD optimizer per model, and a shared CrossEntropy criterion.

    Args:
        args: parsed CLI namespace from ``_parse_args``.

    Returns:
        dict with keys:
            "train_dataloader", "val_dataloader",
            "eager": [eager_model, eager_optimizer, criterion],
            "graph": [graph_model, model_train_graph, model_eval_graph].
    """
    train_data_loader = OFRecordDataLoader(
        ofrecord_root=args.ofrecord_path,
        mode="train",
        dataset_size=9469,
        batch_size=args.train_batch_size,
    )
    val_data_loader = OFRecordDataLoader(
        ofrecord_root=args.ofrecord_path,
        mode="val",
        dataset_size=3925,
        batch_size=args.val_batch_size,
    )
    # model setup: the graph model is loaded from the eager model's weights so
    # both training paths start from the same initialization.
    eager_model = resnet50()
    graph_model = resnet50()
    graph_model.load_state_dict(eager_model.state_dict())
    eager_model.to("cuda")
    graph_model.to("cuda")
    # optimizer setup
    eager_optimizer = flow.optim.SGD(
        eager_model.parameters(), lr=args.learning_rate, momentum=args.mom
    )
    graph_optimizer = flow.optim.SGD(
        graph_model.parameters(), lr=args.learning_rate, momentum=args.mom
    )
    # criterion setup (the original created the criterion twice; the first
    # instance was dead code and has been removed)
    criterion = flow.nn.CrossEntropyLoss()
    criterion = criterion.to("cuda")

    class ModelTrainGraph(flow.nn.Graph):
        """Static-graph wrapper that runs forward, loss, and backward."""

        def __init__(self):
            super().__init__()
            self.graph_model = graph_model
            self.criterion = criterion
            self.add_optimizer("sgd", graph_optimizer)

        def build(self, image, label):
            logits = self.graph_model(image)
            loss = self.criterion(logits, label)
            loss.backward()
            return loss

    class ModelEvalGraph(flow.nn.Graph):
        """Static-graph wrapper for inference (softmax probabilities)."""

        def __init__(self):
            super().__init__()
            self.graph_model = graph_model

        def build(self, image):
            with flow.no_grad():
                logits = self.graph_model(image)
                predictions = logits.softmax()
            return predictions

    model_train_graph = ModelTrainGraph()
    model_eval_graph = ModelEvalGraph()
    dic = {
        "train_dataloader": train_data_loader,
        "val_dataloader": val_data_loader,
        "eager": [eager_model, eager_optimizer, criterion],
        "graph": [graph_model, model_train_graph, model_eval_graph],
    }
    return dic
class Trainer(object):
    """Runs the same ResNet-50 workload twice per step -- once eagerly and
    once through an nn.Graph -- and records losses, accuracies, wall-clock
    timings, and parameter divergence so the two execution modes can be
    compared in a report.
    """

    def __init__(self, args):
        """Initialize empty metric/timing accumulators.

        Args:
            args: parsed CLI namespace (epochs, batch sizes, paths, ...).
        """
        super().__init__()
        # loss samples recorded every `print_interval` iterations
        self.graph_losses = []
        self.eager_losses = []
        # per-epoch top-1 validation accuracy
        self.graph_acc = []
        self.eager_acc = []
        # single-iteration wall times, sampled every `print_interval`
        self.graph_train_step_time_list = []
        self.eager_train_step_time_list = []
        # per-epoch wall times; each excludes the other mode's iteration time
        self.graph_train_epoch_time_list = []
        self.eager_train_epoch_time_list = []
        self.graph_eval_epoch_time_list = []
        self.eager_eval_epoch_time_list = []
        # mean absolute parameter difference between the two models, per iter
        self.eager_graph_model_diff_list = []
        # NOTE(review): these running totals are never updated by any method
        # in this class -- presumably leftovers; confirm before relying on them
        self.graph_train_total_time = 0.0
        self.eager_train_total_time = 0.0
        self.graph_eval_total_time = 0.0
        self.eager_val_total_time = 0.0
        self.args = args

    def compare_eager_graph(self, compare_dic):
        """Train and validate both models for ``args.epochs`` epochs.

        Every training iteration runs the graph step first, then the eager
        step, on the same batch; timings are measured around each step and
        the model parameter difference is recorded after both have updated.

        Args:
            compare_dic: dict produced by ``setup`` containing the data
                loaders and the eager/graph model bundles.
        """
        train_data_loader = compare_dic["train_dataloader"]
        val_data_loader = compare_dic["val_dataloader"]
        eager_model, eager_optimizer, criterion = compare_dic["eager"]
        graph_model, model_train_graph, model_eval_graph = compare_dic["graph"]
        all_samples = len(val_data_loader) * self.args.val_batch_size
        print_interval = self.args.print_interval
        print("start training")
        for epoch in range(self.args.epochs):
            # train
            eager_model.train()
            graph_model.train()
            start_training_time = time.time()
            total_graph_iter_time, total_eager_iter_time = 0, 0
            for b in range(len(train_data_loader)):
                image, label = train_data_loader()
                image = image.to("cuda")
                label = label.to("cuda")
                # oneflow graph train
                graph_iter_start_time = time.time()
                graph_loss = model_train_graph(image, label)
                graph_loss.numpy()  # synchronize CPU and GPU so the timing is accurate
                graph_iter_end_time = time.time()
                # oneflow eager train
                eager_iter_start_time = time.time()
                logits = eager_model(image)
                eager_loss = criterion(logits, label)
                eager_loss.backward()
                eager_optimizer.step()
                eager_optimizer.zero_grad()
                eager_loss.numpy()  # synchronize CPU and GPU so the timing is accurate
                eager_iter_end_time = time.time()
                # mean abs difference between eager and graph parameters
                model_param_diff = compare_model_params(eager_model, model_train_graph)
                self.eager_graph_model_diff_list.append(model_param_diff)
                # get time
                graph_iter_time = graph_iter_end_time - graph_iter_start_time
                eager_iter_time = eager_iter_end_time - eager_iter_start_time
                total_graph_iter_time += graph_iter_time
                total_eager_iter_time += eager_iter_time
                if b % print_interval == 0:
                    gl, el = graph_loss.numpy(), eager_loss.numpy()
                    print(
                        "epoch {} train iter {} ; graph loss {} eager loss {}; graph train time: {} eager train time {}".format(
                            epoch, b, gl, el, graph_iter_time, eager_iter_time
                        )
                    )
                    self.graph_losses.append(gl)
                    self.graph_train_step_time_list.append(graph_iter_time)
                    self.eager_losses.append(el)
                    self.eager_train_step_time_list.append(eager_iter_time)
            end_training_time = time.time()
            # each mode's epoch time = total wall time minus the OTHER mode's
            # accumulated iteration time (both modes share one loop)
            self.graph_train_epoch_time_list.append(end_training_time - start_training_time- total_eager_iter_time)
            self.eager_train_epoch_time_list.append(end_training_time - start_training_time- total_graph_iter_time)
            print("epoch %d train done, start validation" % epoch)
            # validate
            eager_model.eval()
            graph_model.eval()
            graph_correct, eager_correct = 0.0, 0.0
            eval_start_time = time.time()
            total_graph_infer_time, total_eager_infer_time = 0, 0
            for b in tqdm(range(len(val_data_loader))):
                image, label = val_data_loader()
                image = image.to("cuda")
                # graph val
                graph_infer_time = time.time()
                predictions = model_eval_graph(image)
                graph_preds = predictions.numpy()
                graph_clsidxs = np.argmax(graph_preds, axis=1)
                total_graph_infer_time += time.time() - graph_infer_time
                # eager val
                eager_infer_time = time.time()
                with flow.no_grad():
                    logits = eager_model(image)
                    predictions = logits.softmax()
                eager_preds = predictions.numpy()
                eager_clsidxs = np.argmax(eager_preds, axis=1)
                total_eager_infer_time += time.time() - eager_infer_time
                # count top-1 hits for both modes on this batch
                label_nd = label.numpy()
                for i in range(self.args.val_batch_size):
                    if graph_clsidxs[i] == label_nd[i]:
                        graph_correct += 1
                    if eager_clsidxs[i] == label_nd[i]:
                        eager_correct += 1
            eval_end_time = time.time()
            # same accounting as training: subtract the other mode's time
            self.graph_eval_epoch_time_list.append(eval_end_time - eval_start_time - total_eager_infer_time)
            self.eager_eval_epoch_time_list.append(eval_end_time - eval_start_time - total_graph_infer_time)
            graph_top1_acc, eager_top1_acc = graph_correct / all_samples, eager_correct / all_samples
            self.graph_acc.append(graph_top1_acc)
            self.eager_acc.append(eager_top1_acc)
            print("epoch %d, graph top1 val acc: %f, eager top1 val acc: %f" % (epoch, graph_top1_acc, eager_top1_acc))

    def save_report(self,):
        """Write a summary report (loss/accuracy correlation, loss and
        parameter difference ranges, timing ratios) to
        ``<args.results>/check_report.txt``.
        """
        print("***** Save Report *****")
        # folder setup
        report_path = os.path.join(self.args.results)
        os.makedirs(report_path, exist_ok=True)
        # calculate absolute loss difference
        abs_loss_diff = abs(np.array(self.eager_losses) - np.array(self.graph_losses))
        # calculate losses linear correlation
        loss_corr = calc_corr(self.eager_losses, self.graph_losses)
        # calculate accuracy linear correlation
        acc_corr = calc_corr(self.eager_acc, self.graph_acc)
        # training time compare (mean graph/eager per-epoch ratio)
        train_time_compare = time_compare(self.graph_train_epoch_time_list, self.eager_train_epoch_time_list)
        # validate time compare
        val_time_compare = time_compare(self.graph_eval_epoch_time_list, self.eager_eval_epoch_time_list)
        # eager graph model diff compare
        model_diff_compare = np.array(self.eager_graph_model_diff_list)
        # save report
        save_path = os.path.join(report_path, 'check_report.txt')
        writer = open(save_path, "w")
        writer.write("Check Report\n")
        writer.write("Model: Resnet50\n")
        writer.write("Check Results Between Eager Model and Graph Model\n")
        writer.write("=================================================\n")
        writer.write("Loss Correlation: %.4f\n\n" % loss_corr)
        writer.write("Max Loss Difference: %.4f\n" % abs_loss_diff.max())
        writer.write("Min Loss Difference: %.4f\n" % abs_loss_diff.min())
        writer.write("Loss Difference Range: (%.4f, %.4f)\n\n" % (abs_loss_diff.min(), abs_loss_diff.max()))
        writer.write("Model Param Difference Range: (%.4f, %.4f)\n\n" % (model_diff_compare.min(), model_diff_compare.max()))
        writer.write("Accuracy Correlation: %.4f\n\n" % acc_corr)
        writer.write("Train Time Compare: %.4f (Eager) : %.4f (Graph)\n\n" % (1.0, train_time_compare))
        writer.write("Val Time Compare: %.4f (Eager) : %.4f (Graph)" % (1.0, val_time_compare))
        writer.close()
        print("Report saved to: ", save_path)

    def save_result(self,):
        """Dump every recorded metric series to its own text file under
        ``<args.results>/<args.tag>/`` (one value per line).
        """
        # create folder
        training_results_path = os.path.join(self.args.results, self.args.tag)
        os.makedirs(training_results_path, exist_ok=True)
        print("***** Save Results *****")
        save_results(self.graph_losses, os.path.join(training_results_path, 'graph_losses.txt'))
        save_results(self.eager_losses, os.path.join(training_results_path, 'eager_losses.txt'))
        save_results(self.graph_acc, os.path.join(training_results_path, 'graph_acc.txt'))
        save_results(self.eager_acc, os.path.join(training_results_path, 'eager_acc.txt'))
        save_results(self.graph_train_step_time_list, os.path.join(training_results_path, 'graph_train_step_time_list.txt'))
        save_results(self.eager_train_step_time_list, os.path.join(training_results_path, 'eager_train_step_time_list.txt'))
        save_results(self.graph_train_epoch_time_list, os.path.join(training_results_path, 'graph_train_epoch_time_list.txt'))
        save_results(self.eager_train_epoch_time_list, os.path.join(training_results_path, 'eager_train_epoch_time_list.txt'))
        save_results(self.graph_eval_epoch_time_list, os.path.join(training_results_path, 'graph_eval_epoch_time_list.txt'))
        save_results(self.eager_eval_epoch_time_list, os.path.join(training_results_path, 'eager_eval_epoch_time_list.txt'))
        save_results(self.eager_graph_model_diff_list, os.path.join(training_results_path, 'eager_graph_model_diff_list.txt'))
        print("Results saved to: ", training_results_path)
def compare_model_params(eager_model, graph_model):
    """Return the mean absolute parameter difference between an eager model
    and the model wrapped inside a graph.

    Args:
        eager_model: module exposing ``state_dict()``.
        graph_model: graph wrapper whose ``graph_model`` attribute exposes
            ``state_dict()``; each graph-side entry holds the underlying
            tensor in its ``_origin`` attribute.

    Returns:
        float: mean (over parameters) of the per-parameter mean abs diff.
    """
    # Hoist the state dicts out of the loop: the original rebuilt them on
    # every iteration (three state_dict() calls per parameter).
    eager_state = eager_model.state_dict()
    graph_state = graph_model.graph_model.state_dict()
    num_params = len(eager_state.keys())
    sum_diff = 0.0
    for key in eager_state:
        # `_origin` unwraps the graph-side tensor so it can be compared with
        # the eager tensor directly
        mean_single_diff = (eager_state[key] - graph_state[key]._origin).abs().mean()
        sum_diff += mean_single_diff
    mean_diff = float(sum_diff.numpy() / num_params)
    return mean_diff
def save_results(training_info, file_path):
    """Write each value of *training_info* to *file_path*, one ``%f`` per line.

    Args:
        training_info: iterable of numbers to record.
        file_path: destination text file; overwritten if it already exists.
    """
    # Context manager guarantees the handle is closed even if a write fails
    # (the original left the file open on exception).
    with open(file_path, "w") as writer:
        for info in training_info:
            writer.write("%f\n" % info)
# report helpers
def square(lst):
    """Return a new list holding the square of every element of *lst*."""
    return [item * item for item in lst]
# calculate correlation
def calc_corr(a, b):
    """Pearson linear correlation coefficient between sequences *a* and *b*.

    Computed from the moment identities cov(a, b) = E[ab] - E[a]E[b] and
    var(x) = E[x^2] - E[x]^2.
    """
    mean_a = np.mean(a)
    mean_b = np.mean(b)
    mean_ab = np.mean([x * y for x, y in zip(a, b)])
    covariance = mean_ab - mean_a * mean_b
    var_a = np.mean([x * x for x in a]) - mean_a ** 2
    var_b = np.mean([y * y for y in b]) - mean_b ** 2
    # corr = cov / (sigma_a * sigma_b)
    return covariance / (np.sqrt(var_a) * np.sqrt(var_b))
def time_compare(a, b):
    """Return the mean elementwise ratio ``a[i] / b[i]`` of two timing lists."""
    per_entry_ratio = np.divide(a, b)
    return np.mean(per_entry_ratio)
if __name__ == "__main__":
    # Parse CLI flags, build loaders/models/graphs, then run the
    # eager-vs-graph comparison for the configured number of epochs.
    args = _parse_args()
    trainer = Trainer(args)
    compare_dic = setup(args)
    print("init done")
    trainer.compare_eager_graph(compare_dic)
    # drop references to the models/graphs before report generation
    del compare_dic
    # save results
    trainer.save_result()
    trainer.save_report()
| [
"oneflow.nn.CrossEntropyLoss",
"oneflow.no_grad"
] | [((122, 142), 'sys.path.append', 'sys.path.append', (['"""."""'], {}), "('.')\n", (137, 142), False, 'import sys\n'), ((277, 328), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for train resnet50"""'], {}), "('flags for train resnet50')\n", (300, 328), False, 'import argparse\n'), ((1591, 1714), 'utils.ofrecord_data_utils.OFRecordDataLoader', 'OFRecordDataLoader', ([], {'ofrecord_root': 'args.ofrecord_path', 'mode': '"""train"""', 'dataset_size': '(9469)', 'batch_size': 'args.train_batch_size'}), "(ofrecord_root=args.ofrecord_path, mode='train',\n dataset_size=9469, batch_size=args.train_batch_size)\n", (1609, 1714), False, 'from utils.ofrecord_data_utils import OFRecordDataLoader\n'), ((1780, 1899), 'utils.ofrecord_data_utils.OFRecordDataLoader', 'OFRecordDataLoader', ([], {'ofrecord_root': 'args.ofrecord_path', 'mode': '"""val"""', 'dataset_size': '(3925)', 'batch_size': 'args.val_batch_size'}), "(ofrecord_root=args.ofrecord_path, mode='val',\n dataset_size=3925, batch_size=args.val_batch_size)\n", (1798, 1899), False, 'from utils.ofrecord_data_utils import OFRecordDataLoader\n'), ((1959, 1985), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (1983, 1985), True, 'import oneflow as flow\n'), ((2026, 2036), 'models.resnet50.resnet50', 'resnet50', ([], {}), '()\n', (2034, 2036), False, 'from models.resnet50 import resnet50\n'), ((2056, 2066), 'models.resnet50.resnet50', 'resnet50', ([], {}), '()\n', (2064, 2066), False, 'from models.resnet50 import resnet50\n'), ((2493, 2519), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {}), '()\n', (2517, 2519), True, 'import oneflow as flow\n'), ((14180, 14190), 'numpy.mean', 'np.mean', (['a'], {}), '(a)\n', (14187, 14190), True, 'import numpy as np\n'), ((14202, 14212), 'numpy.mean', 'np.mean', (['b'], {}), '(b)\n', (14209, 14212), True, 'import numpy as np\n'), ((14409, 14421), 'numpy.sqrt', 'np.sqrt', (['D_a'], {}), '(D_a)\n', (14416, 14421), True, 'import 
numpy as np\n'), ((14433, 14445), 'numpy.sqrt', 'np.sqrt', (['D_b'], {}), '(D_b)\n', (14440, 14445), True, 'import numpy as np\n'), ((9802, 9833), 'os.path.join', 'os.path.join', (['self.args.results'], {}), '(self.args.results)\n', (9814, 9833), False, 'import os\n'), ((9843, 9882), 'os.makedirs', 'os.makedirs', (['report_path'], {'exist_ok': '(True)'}), '(report_path, exist_ok=True)\n', (9854, 9882), False, 'import os\n'), ((10620, 10662), 'numpy.array', 'np.array', (['self.eager_graph_model_diff_list'], {}), '(self.eager_graph_model_diff_list)\n', (10628, 10662), True, 'import numpy as np\n'), ((10709, 10754), 'os.path.join', 'os.path.join', (['report_path', '"""check_report.txt"""'], {}), "(report_path, 'check_report.txt')\n", (10721, 10754), False, 'import os\n'), ((11911, 11957), 'os.path.join', 'os.path.join', (['self.args.results', 'self.args.tag'], {}), '(self.args.results, self.args.tag)\n', (11923, 11957), False, 'import os\n'), ((11967, 12016), 'os.makedirs', 'os.makedirs', (['training_results_path'], {'exist_ok': '(True)'}), '(training_results_path, exist_ok=True)\n', (11978, 12016), False, 'import os\n'), ((5140, 5151), 'time.time', 'time.time', ([], {}), '()\n', (5149, 5151), False, 'import time\n'), ((7342, 7353), 'time.time', 'time.time', ([], {}), '()\n', (7351, 7353), False, 'import time\n'), ((7830, 7841), 'time.time', 'time.time', ([], {}), '()\n', (7839, 7841), False, 'import time\n'), ((9122, 9133), 'time.time', 'time.time', ([], {}), '()\n', (9131, 9133), False, 'import time\n'), ((12101, 12156), 'os.path.join', 'os.path.join', (['training_results_path', '"""graph_losses.txt"""'], {}), "(training_results_path, 'graph_losses.txt')\n", (12113, 12156), False, 'import os\n'), ((12199, 12254), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_losses.txt"""'], {}), "(training_results_path, 'eager_losses.txt')\n", (12211, 12254), False, 'import os\n'), ((12304, 12356), 'os.path.join', 'os.path.join', (['training_results_path', 
'"""graph_acc.txt"""'], {}), "(training_results_path, 'graph_acc.txt')\n", (12316, 12356), False, 'import os\n'), ((12396, 12448), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_acc.txt"""'], {}), "(training_results_path, 'eager_acc.txt')\n", (12408, 12448), False, 'import os\n'), ((12515, 12584), 'os.path.join', 'os.path.join', (['training_results_path', '"""graph_train_step_time_list.txt"""'], {}), "(training_results_path, 'graph_train_step_time_list.txt')\n", (12527, 12584), False, 'import os\n'), ((12641, 12710), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_train_step_time_list.txt"""'], {}), "(training_results_path, 'eager_train_step_time_list.txt')\n", (12653, 12710), False, 'import os\n'), ((12778, 12848), 'os.path.join', 'os.path.join', (['training_results_path', '"""graph_train_epoch_time_list.txt"""'], {}), "(training_results_path, 'graph_train_epoch_time_list.txt')\n", (12790, 12848), False, 'import os\n'), ((12906, 12976), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_train_epoch_time_list.txt"""'], {}), "(training_results_path, 'eager_train_epoch_time_list.txt')\n", (12918, 12976), False, 'import os\n'), ((13043, 13112), 'os.path.join', 'os.path.join', (['training_results_path', '"""graph_eval_epoch_time_list.txt"""'], {}), "(training_results_path, 'graph_eval_epoch_time_list.txt')\n", (13055, 13112), False, 'import os\n'), ((13169, 13238), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_eval_epoch_time_list.txt"""'], {}), "(training_results_path, 'eager_eval_epoch_time_list.txt')\n", (13181, 13238), False, 'import os\n'), ((13306, 13376), 'os.path.join', 'os.path.join', (['training_results_path', '"""eager_graph_model_diff_list.txt"""'], {}), "(training_results_path, 'eager_graph_model_diff_list.txt')\n", (13318, 13376), False, 'import os\n'), ((14550, 14565), 'numpy.divide', 'np.divide', (['a', 'b'], {}), '(a, b)\n', (14559, 14565), True, 'import numpy as 
np\n'), ((3238, 3252), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3250, 3252), True, 'import oneflow as flow\n'), ((5490, 5501), 'time.time', 'time.time', ([], {}), '()\n', (5499, 5501), False, 'import time\n'), ((5696, 5707), 'time.time', 'time.time', ([], {}), '()\n', (5705, 5707), False, 'import time\n'), ((5790, 5801), 'time.time', 'time.time', ([], {}), '()\n', (5799, 5801), False, 'import time\n'), ((6159, 6170), 'time.time', 'time.time', ([], {}), '()\n', (6168, 6170), False, 'import time\n'), ((8125, 8136), 'time.time', 'time.time', ([], {}), '()\n', (8134, 8136), False, 'import time\n'), ((8276, 8306), 'numpy.argmax', 'np.argmax', (['graph_preds'], {'axis': '(1)'}), '(graph_preds, axis=1)\n', (8285, 8306), True, 'import numpy as np\n'), ((8448, 8459), 'time.time', 'time.time', ([], {}), '()\n', (8457, 8459), False, 'import time\n'), ((8683, 8713), 'numpy.argmax', 'np.argmax', (['eager_preds'], {'axis': '(1)'}), '(eager_preds, axis=1)\n', (8692, 8713), True, 'import numpy as np\n'), ((9968, 9995), 'numpy.array', 'np.array', (['self.eager_losses'], {}), '(self.eager_losses)\n', (9976, 9995), True, 'import numpy as np\n'), ((9998, 10025), 'numpy.array', 'np.array', (['self.graph_losses'], {}), '(self.graph_losses)\n', (10006, 10025), True, 'import numpy as np\n'), ((8350, 8361), 'time.time', 'time.time', ([], {}), '()\n', (8359, 8361), False, 'import time\n'), ((8482, 8496), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (8494, 8496), True, 'import oneflow as flow\n'), ((8757, 8768), 'time.time', 'time.time', ([], {}), '()\n', (8766, 8768), False, 'import time\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import oneflow as flow
from oneflow.nn import init
from oneflow.nn.common_types import _size_1_t, _size_2_t, _size_3_t
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _single, _pair, _triple
def slice(x, begin, size):
    """Extract a sub-tensor of *x* described by per-dimension begin/size pairs.

    Validates ``begin`` and ``size`` against ``x.shape`` and converts them to
    the (start, stop, step) triples consumed by ``flow.slice``. A size entry
    of -1 means "to the end of the dimension"; an entry of ``None`` in either
    list leaves that bound open.

    Raises:
        ValueError: if either list has the wrong length, contains a value
            that is neither int nor None, or holds an out-of-range entry.
    """
    ndim = len(x.shape)
    begin_is_valid = isinstance(begin, (list, tuple)) and len(begin) == ndim
    if not begin_is_valid:
        raise ValueError(
            "begin must be a list/tuple with the same length as input tensor's number of dimensions"
        )
    if any(not (isinstance(b, int) or b is None) for b in begin):
        raise ValueError("element of begin must be a int or None")
    size_is_valid = isinstance(size, (list, tuple)) and len(size) == ndim
    if not size_is_valid:
        raise ValueError(
            "size must be a list/tuple with the same length as input tensor's number of dimensions."
        )
    if any(not (isinstance(s, int) or s is None) for s in size):
        raise ValueError("element of size must be a int or None")
    slice_tup_list = []
    for (offset, extent, dim_size) in zip(begin, size, x.shape):
        start = None
        stop = None
        if offset is not None:
            # negative offsets index from the end, so the valid window is
            # [-dim_size, dim_size)
            if offset < -dim_size or offset >= dim_size:
                raise ValueError("element of begin is out of range")
            start = offset
        if extent is not None:
            if extent == -1:
                stop = dim_size
            elif extent <= 0 or extent > dim_size:
                raise ValueError("element of size is invalid")
            elif offset + extent < dim_size:
                stop = offset + extent
        slice_tup_list.append((start, stop, 1))
    return flow.slice(x, slice_tup_list)
class ConvUtil(object):
    """Helpers for splitting convolution tensors into equal chunks."""

    @classmethod
    def split(cls, x, axis, split_num):
        """Split *x* into *split_num* equal pieces along *axis*.

        Each piece is produced by the module-level ``slice`` helper with a
        size of ``x.shape[axis] // split_num`` on the split axis and the full
        extent (-1) everywhere else.
        """
        chunk_len = x.shape[axis] // split_num
        offsets = [0] * len(x.shape)
        extents = [-1] * len(x.shape)
        extents[axis] = chunk_len
        chunks = []
        for idx in range(split_num):
            offsets[axis] = idx * chunk_len
            chunks.append(slice(x, offsets, extents))
        return chunks
class Conv1d(Module):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/master/generated/torch.nn.Conv1d.html#conv1d
    Applies a 1D convolution over an input signal composed of several input
    planes.
    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\\text{in}}, L)` and output :math:`(N, C_{\\text{out}}, L_{\\text{out}})` can be
    precisely described as:
    .. math::
        \\text{out}(N_i, C_{\\text{out}_j}) = \\text{bias}(C_{\\text{out}_j}) +
        \\sum_{k = 0}^{C_{in} - 1} \\text{weight}(C_{\\text{out}_j}, k)
        \\star \\text{input}(N_i, k)
    where :math:`\\star` is the valid `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`L` is a length of signal sequence.
    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a one-element tuple.
    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.
    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.
    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.
    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel
            elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    Shape:
        - Input: :math:`(N, C_{in}, L_{in})`
        - Output: :math:`(N, C_{out}, L_{out})` where
          .. math::
              L_{out} = \\left\\lfloor\\frac{L_{in} + 2 \\times \\text{padding} - \\text{dilation}
                        \\times (\\text{kernel\\_size} - 1) - 1}{\\text{stride}} + 1\\right\\rfloor
    Attributes:
        weight (Tensor): the learnable weights of the module of shape
            :math:`(\\text{out\\_channels},
            \\frac{\\text{in\\_channels}}{\\text{groups}}, \\text{kernel\\_size})`.
            The values of these weights are sampled from
            :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
            :math:`k = \\frac{groups}{C_\\text{in} * \\text{kernel\\_size}}`
        bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``, then the values of these weights are
            sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
            :math:`k = \\frac{groups}{C_\\text{in} * \\text{kernel\\_size}}`
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> arr = np.random.randn(20, 16, 50)
        >>> input = flow.Tensor(arr)
        >>> m = nn.Conv1d(16, 33, 3, stride=2)
        >>> output = m(input)
    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation
    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_1_t,
        stride: _size_1_t = 1,
        padding: _size_1_t = 0,
        dilation: _size_1_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
    ):
        super().__init__()
        assert padding_mode == "zeros"
        self.padding_mode = padding_mode
        self.kernel_size = _single(kernel_size)
        self.stride = _single(stride)
        self.padding = _single(padding)
        self.dilation = _single(dilation)
        self.groups = groups
        assert in_channels % groups == 0
        assert out_channels % groups == 0
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = flow.nn.Parameter(
            flow.Tensor(out_channels, in_channels // groups, *self.kernel_size)
        )
        # Number of output channels produced per group; used to slice the
        # weight/bias when grouped conv is emulated on CPU in forward().
        self.out_channel_groups = out_channels // groups
        self.bias = None
        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform init for the weight; uniform fan-in-scaled init for the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            (fan_in, _) = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, x):
        """Apply the 1D convolution to ``x`` (expected layout ``(N, C_in, L)``)."""
        # Fix: validate the channel dim up front for a clear error message,
        # consistent with Conv2d/Conv3d (this check was previously missing here).
        if x.shape[1] != self.in_channels:
            raise ValueError("The input channels should be equal to self.in_channels")
        if x.device.type == "cpu" and self.groups > 1:
            # The CPU kernel lacks native grouped conv: split the input
            # channels, convolve each group with groups=1, then concatenate
            # the per-group outputs along the channel axis.
            in_channel_axis = 1
            in_split_list = ConvUtil.split(
                x, axis=in_channel_axis, split_num=self.groups
            )
            out_list = []
            for i in range(len(in_split_list)):
                out_list.append(
                    flow.F.conv1d(
                        in_split_list[i],
                        self.weight[
                            i
                            * self.out_channel_groups : (i + 1)
                            * self.out_channel_groups,
                            :,
                            :,
                        ],
                        self.bias[
                            i
                            * self.out_channel_groups : (i + 1)
                            * self.out_channel_groups
                        ]
                        if self.bias is not None
                        else None,
                        stride=self.stride,
                        padding=self.padding,
                        dilation=self.dilation,
                        groups=1,
                    )
                )
            res = flow.cat(out_list, dim=in_channel_axis)
        else:
            res = flow.F.conv1d(
                x,
                self.weight,
                self.bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
            )
        return res

    def extra_repr(self):
        """Repr fragment listing constructor arguments that differ from defaults."""
        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias is None:
            s += ", bias=False"
        if self.padding_mode != "zeros":
            s += ", padding_mode={padding_mode}"
        return s.format(**self.__dict__)
class Conv2d(Module):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/master/generated/torch.nn.Conv2d.html#conv2d
    Applies a 2D convolution over an input signal composed of several input
    planes.
    In the simplest case, the output value of the layer with input size
    :math:`(N, C_{\\text{in}}, H, W)` and output :math:`(N, C_{\\text{out}}, H_{\\text{out}}, W_{\\text{out}})`
    can be precisely described as:
    .. math::
        \\text{out}(N_i, C_{\\text{out}_j}) = \\text{bias}(C_{\\text{out}_j}) +
        \\sum_{k = 0}^{C_{\\text{in}} - 1} \\text{weight}(C_{\\text{out}_j}, k) \\star \\text{input}(N_i, k)
    where :math:`\\star` is the valid 2D `cross-correlation`_ operator,
    :math:`N` is a batch size, :math:`C` denotes a number of channels,
    :math:`H` is a height of input planes in pixels, and :math:`W` is
    width in pixels.
    * :attr:`stride` controls the stride for the cross-correlation, a single
      number or a tuple.
    * :attr:`padding` controls the amount of implicit padding on both
      sides for :attr:`padding` number of points for each dimension.
    * :attr:`dilation` controls the spacing between the kernel points; also
      known as the à trous algorithm. It is harder to describe, but this `link`_
      has a nice visualization of what :attr:`dilation` does.
    * :attr:`groups` controls the connections between inputs and outputs.
      :attr:`in_channels` and :attr:`out_channels` must both be divisible by
      :attr:`groups`. For example,
        * At groups=1, all inputs are convolved to all outputs.
        * At groups=2, the operation becomes equivalent to having two conv
          layers side by side, each seeing half the input channels
          and producing half the output channels, and both subsequently
          concatenated.
        * At groups= :attr:`in_channels`, each input channel is convolved with
          its own set of filters (of size
          :math:`\\frac{\\text{out_channels}}{\\text{in_channels}}`).,
    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
        - a single ``int`` -- in which case the same value is used for the height and width dimension
        - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
          and the second `int` for the width dimension
    Note:
        When `groups == in_channels` and `out_channels == K * in_channels`,
        where `K` is a positive integer, this operation is also known as a "depthwise convolution".
        In other words, for an input of size :math:`(N, C_{in}, L_{in})`,
        a depthwise convolution with a depthwise multiplier `K` can be performed with the arguments
        :math:`(C_\\text{in}=C_\\text{in}, C_\\text{out}=C_\\text{in} \\times \\text{K}, ..., \\text{groups}=C_\\text{in})`.
    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int or tuple, optional): Zero-padding added to both sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input
            channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the
            output. Default: ``True``
    Shape:
        - Input: :math:`(N, C_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, H_{out}, W_{out})` where
          .. math::
              H_{out} = \\left\\lfloor\\frac{H_{in} + 2 \\times \\text{padding}[0] - \\text{dilation}[0]
                        \\times (\\text{kernel_size}[0] - 1) - 1}{\\text{stride}[0]} + 1\\right\\rfloor
          .. math::
              W_{out} = \\left\\lfloor\\frac{W_{in} + 2 \\times \\text{padding}[1] - \\text{dilation}[1]
                        \\times (\\text{kernel_size}[1] - 1) - 1}{\\text{stride}[1]} + 1\\right\\rfloor
    Attr:
        - weight (Tensor): the learnable weights of the module of shape
            :math:`(\\text{out_channels}, \\frac{\\text{in_channels}}{\\text{groups}},`
            :math:`\\text{kernel_size[0]}, \\text{kernel_size[1]})`.
            The values of these weights are sampled from
            :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
            :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel_size}[i]}`
        - bias (Tensor): the learnable bias of the module of shape
            (out_channels). If :attr:`bias` is ``True``,
            then the values of these weights are
            sampled from :math:`\\mathcal{U}(-\\sqrt{k}, \\sqrt{k})` where
            :math:`k = \\frac{groups}{C_\\text{in} * \\prod_{i=0}^{1}\\text{kernel_size}[i]}`
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> arr = np.random.randn(20, 16, 50, 100)
        >>> input = flow.Tensor(arr)
        >>> m = nn.Conv2d(16, 33, (3, 5), stride=(2, 1), padding=(4, 2), dilation=(3, 1))
        >>> output = m(input)
    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation
    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_2_t,
        stride: _size_2_t = 1,
        padding: _size_2_t = 0,
        dilation: _size_2_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",
    ):
        super().__init__()
        assert padding_mode == "zeros"
        assert in_channels % groups == 0
        assert out_channels % groups == 0
        self.padding_mode = padding_mode
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.groups = groups
        # Output channels produced per group; used to slice weight/bias in forward().
        self.out_channel_groups = out_channels // groups
        self.weight = flow.nn.Parameter(
            flow.Tensor(out_channels, in_channels // groups, *self.kernel_size)
        )
        self.bias = flow.nn.Parameter(flow.Tensor(out_channels)) if bias else None
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform init for the weight; uniform fan-in-scaled init for the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
        limit = 1 / math.sqrt(fan_in)
        init.uniform_(self.bias, -limit, limit)

    def forward(self, x):
        """Apply the 2D convolution to ``x`` (expected layout ``(N, C_in, H, W)``)."""
        if x.shape[1] != self.in_channels:
            raise ValueError("The input channels should be equal to self.in_channels")
        # TODO(zwx): Use `tensor.device_type()` method to help checking if x is on cpu.
        # `x.device == flow.device("cpu")` fails for consistent tensors (no device),
        # so `x.is_cuda` is used here even though it is not a great choice.
        if not x.is_cuda and self.groups > 1:
            # Emulate grouped conv on CPU: convolve each channel group
            # independently with groups=1, then concatenate the results.
            channel_dim = 1
            splits = ConvUtil.split(x, axis=channel_dim, split_num=self.groups)
            outputs = []
            for idx, piece in enumerate(splits):
                lo = idx * self.out_channel_groups
                hi = lo + self.out_channel_groups
                group_bias = self.bias[lo:hi] if self.bias is not None else None
                outputs.append(
                    flow.F.conv2d(
                        piece,
                        self.weight[lo:hi, :, :, :],
                        group_bias,
                        stride=self.stride,
                        padding=self.padding,
                        dilation=self.dilation,
                        groups=1,
                    )
                )
            return flow.cat(outputs, dim=channel_dim)
        return flow.F.conv2d(
            x,
            self.weight,
            self.bias,
            stride=self.stride,
            padding=self.padding,
            dilation=self.dilation,
            groups=self.groups,
        )

    def extra_repr(self):
        """Repr fragment listing constructor arguments that differ from defaults."""
        fmt = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
        if self.padding != (0,) * len(self.padding):
            fmt += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            fmt += ", dilation={dilation}"
        if self.groups != 1:
            fmt += ", groups={groups}"
        if self.bias is None:
            fmt += ", bias=False"
        if self.padding_mode != "zeros":
            fmt += ", padding_mode={padding_mode}"
        return fmt.format(**self.__dict__)
class Conv3d(Module):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/master/generated/torch.nn.Conv3d.html#conv3d
    Applies a 3D convolution over an input signal composed of several input
    planes.
    In the simplest case, the output value of the layer with input size :math:`(N, C_{in}, D, H, W)`
    and output :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` can be precisely described as:
    .. math::
        out(N_i, C_{out_j}) = bias(C_{out_j}) +
                                \sum_{k = 0}^{C_{in} - 1} weight(C_{out_j}, k) \star input(N_i, k)
    where :math:`\star` is the valid 3D `cross-correlation`_ operator
    * :attr:`stride` controls the stride for the cross-correlation.
    * :attr:`padding` controls the amount of padding applied to the input. It
      can be either a string {{'valid', 'same'}} or a tuple of ints giving the
      amount of implicit padding applied on both sides.
    * :attr:`dilation` controls the spacing between the kernel points; also known as the à trous algorithm.
      It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
    The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
        - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
        - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
          the second `int` for the height dimension and the third `int` for the width dimension
    Note:
        ``padding='valid'`` is the same as no padding. ``padding='same'`` pads
        the input so the output has the shape as the input. However, this mode
        doesn't support any stride values other than 1.
    Args:
        in_channels (int): Number of channels in the input image
        out_channels (int): Number of channels produced by the convolution
        kernel_size (int or tuple): Size of the convolving kernel
        stride (int or tuple, optional): Stride of the convolution. Default: 1
        padding (int, tuple or str, optional): Padding added to all six sides of
            the input. Default: 0
        padding_mode (string, optional): ``'zeros'``. Default: ``'zeros'``
        dilation (int or tuple, optional): Spacing between kernel elements. Default: 1
        groups (int, optional): Number of blocked connections from input channels to output channels. Default: 1
        bias (bool, optional): If ``True``, adds a learnable bias to the output. Default: ``True``
    Shape:
        - Input: :math:`(N, C_{in}, D_{in}, H_{in}, W_{in})`
        - Output: :math:`(N, C_{out}, D_{out}, H_{out}, W_{out})` where
          .. math::
              D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0]
                        \times (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
          .. math::
              H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1]
                        \times (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
          .. math::
              W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2]
                        \times (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
    Attributes:
        weight (Tensor): the learnable weights of the module of shape
                         :math:`(\text{out\_channels}, \frac{\text{in\_channels}}{\text{groups}},`
                         :math:`\text{kernel\_size[0]}, \text{kernel\_size[1]}, \text{kernel\_size[2]})`.
                         The values of these weights are sampled from
                         :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                         :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
        bias (Tensor): the learnable bias of the module of shape (out_channels). If :attr:`bias` is ``True``,
                       then the values of these weights are
                       sampled from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})` where
                       :math:`k = \frac{groups}{C_\text{in} * \prod_{i=0}^{2}\text{kernel\_size}[i]}`
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> arr = np.random.randn(1, 2, 5, 5, 5)
        >>> input = flow.Tensor(arr)
        >>> m = nn.Conv3d(2, 4, kernel_size=3, stride=1)
        >>> output = m(input)
    .. _cross-correlation:
        https://en.wikipedia.org/wiki/Cross-correlation
    .. _link:
        https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size: _size_3_t,
        stride: _size_3_t = 1,
        padding: _size_3_t = 0,
        dilation: _size_3_t = 1,
        groups: int = 1,
        bias: bool = True,
        padding_mode: str = "zeros",  # TODO: refine this type
    ):
        super().__init__()
        assert padding_mode == "zeros"
        # Fix: store padding_mode. extra_repr() reads self.padding_mode, which
        # previously was never assigned here and raised AttributeError on repr
        # (Conv1d and Conv2d both store it).
        self.padding_mode = padding_mode
        self.kernel_size = _triple(kernel_size)
        self.stride = _triple(stride)
        self.padding = _triple(padding)
        self.dilation = _triple(dilation)
        self.groups = groups
        assert in_channels % groups == 0, "in_channels must be divisible by groups"
        assert out_channels % groups == 0, "out_channels must be divisible by groups"
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.weight = flow.nn.Parameter(
            flow.Tensor(out_channels, in_channels // groups, *self.kernel_size)
        )
        # Number of output channels produced per group; used to slice the
        # weight/bias when grouped conv is emulated on CPU in forward().
        self.out_channel_groups = out_channels // groups
        self.bias = None
        if bias:
            self.bias = flow.nn.Parameter(flow.Tensor(out_channels))
        self.reset_parameters()

    def reset_parameters(self) -> None:
        """Kaiming-uniform init for the weight; uniform fan-in-scaled init for the bias."""
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)

    def forward(self, x):
        """Apply the 3D convolution to ``x`` (expected layout ``(N, C_in, D, H, W)``)."""
        if x.shape[1] != self.in_channels:
            raise ValueError("The input channels should be equal to self.in_channels")
        if x.device.type == "cpu" and self.groups > 1:
            # The CPU kernel lacks native grouped conv: split the input
            # channels, convolve each group with groups=1, then concatenate
            # the per-group outputs along the channel axis.
            in_channel_axis = 1
            in_split_list = ConvUtil.split(
                x, axis=in_channel_axis, split_num=self.groups
            )
            out_list = []
            for i in range(len(in_split_list)):
                out_list.append(
                    flow.F.conv3d(
                        in_split_list[i],
                        self.weight[
                            i
                            * self.out_channel_groups : (i + 1)
                            * self.out_channel_groups,
                            :,
                            :,
                            :,
                        ],
                        self.bias[
                            i
                            * self.out_channel_groups : (i + 1)
                            * self.out_channel_groups
                        ]
                        if self.bias is not None
                        else None,
                        stride=self.stride,
                        padding=self.padding,
                        dilation=self.dilation,
                        groups=1,
                    )
                )
            res = flow.cat(out_list, dim=in_channel_axis)
        else:
            res = flow.F.conv3d(
                x,
                self.weight,
                self.bias,
                stride=self.stride,
                padding=self.padding,
                dilation=self.dilation,
                groups=self.groups,
            )
        return res

    def extra_repr(self):
        """Repr fragment listing constructor arguments that differ from defaults."""
        s = "{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}"
        if self.padding != (0,) * len(self.padding):
            s += ", padding={padding}"
        if self.dilation != (1,) * len(self.dilation):
            s += ", dilation={dilation}"
        if self.groups != 1:
            s += ", groups={groups}"
        if self.bias is None:
            s += ", bias=False"
        if self.padding_mode != "zeros":
            s += ", padding_mode={padding_mode}"
        return s.format(**self.__dict__)
if __name__ == "__main__":
    import doctest
    # Run the doctest examples embedded in the Conv1d/Conv2d/Conv3d docstrings;
    # raise_on_error=True aborts at the first failing example.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.F.conv3d",
"oneflow.F.conv1d",
"oneflow.nn.modules.utils._triple",
"oneflow.slice",
"oneflow.F.conv2d",
"oneflow.nn.modules.utils._pair",
"oneflow.nn.modules.utils._single",
"oneflow.cat",
"oneflow.Tensor",
"oneflow.nn.init._calculate_fan_in_and_fan_out",
"oneflow.nn.init.uniform_"
] | [((2174, 2203), 'oneflow.slice', 'flow.slice', (['x', 'slice_tup_list'], {}), '(x, slice_tup_list)\n', (2184, 2203), True, 'import oneflow as flow\n'), ((28574, 28610), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (28589, 28610), False, 'import doctest\n'), ((7145, 7165), 'oneflow.nn.modules.utils._single', '_single', (['kernel_size'], {}), '(kernel_size)\n', (7152, 7165), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((7188, 7203), 'oneflow.nn.modules.utils._single', '_single', (['stride'], {}), '(stride)\n', (7195, 7203), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((7227, 7243), 'oneflow.nn.modules.utils._single', '_single', (['padding'], {}), '(padding)\n', (7234, 7243), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((7268, 7285), 'oneflow.nn.modules.utils._single', '_single', (['dilation'], {}), '(dilation)\n', (7275, 7285), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((16431, 16449), 'oneflow.nn.modules.utils._pair', '_pair', (['kernel_size'], {}), '(kernel_size)\n', (16436, 16449), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((16472, 16485), 'oneflow.nn.modules.utils._pair', '_pair', (['stride'], {}), '(stride)\n', (16477, 16485), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((16509, 16523), 'oneflow.nn.modules.utils._pair', '_pair', (['padding'], {}), '(padding)\n', (16514, 16523), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((16548, 16563), 'oneflow.nn.modules.utils._pair', '_pair', (['dilation'], {}), '(dilation)\n', (16553, 16563), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((25185, 25205), 'oneflow.nn.modules.utils._triple', '_triple', (['kernel_size'], {}), '(kernel_size)\n', (25192, 25205), False, 'from oneflow.nn.modules.utils import _single, _pair, 
_triple\n'), ((25228, 25243), 'oneflow.nn.modules.utils._triple', '_triple', (['stride'], {}), '(stride)\n', (25235, 25243), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((25267, 25283), 'oneflow.nn.modules.utils._triple', '_triple', (['padding'], {}), '(padding)\n', (25274, 25283), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((25308, 25325), 'oneflow.nn.modules.utils._triple', '_triple', (['dilation'], {}), '(dilation)\n', (25315, 25325), False, 'from oneflow.nn.modules.utils import _single, _pair, _triple\n'), ((7531, 7598), 'oneflow.Tensor', 'flow.Tensor', (['out_channels', '(in_channels // groups)', '*self.kernel_size'], {}), '(out_channels, in_channels // groups, *self.kernel_size)\n', (7542, 7598), True, 'import oneflow as flow\n'), ((7969, 8016), 'oneflow.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (8003, 8016), False, 'from oneflow.nn import init\n'), ((8071, 8110), 'oneflow.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (8084, 8110), False, 'from oneflow.nn import init\n'), ((9398, 9437), 'oneflow.cat', 'flow.cat', (['out_list'], {'dim': 'in_channel_axis'}), '(out_list, dim=in_channel_axis)\n', (9406, 9437), True, 'import oneflow as flow\n'), ((9470, 9601), 'oneflow.F.conv1d', 'flow.F.conv1d', (['x', 'self.weight', 'self.bias'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(x, self.weight, self.bias, stride=self.stride, padding=self.\n padding, dilation=self.dilation, groups=self.groups)\n', (9483, 9601), True, 'import oneflow as flow\n'), ((16809, 16876), 'oneflow.Tensor', 'flow.Tensor', (['out_channels', '(in_channels // groups)', '*self.kernel_size'], {}), '(out_channels, in_channels // groups, *self.kernel_size)\n', (16820, 16876), True, 'import oneflow as flow\n'), ((17247, 17294), 
'oneflow.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (17281, 17294), False, 'from oneflow.nn import init\n'), ((17349, 17388), 'oneflow.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (17362, 17388), False, 'from oneflow.nn import init\n'), ((19003, 19042), 'oneflow.cat', 'flow.cat', (['out_list'], {'dim': 'in_channel_axis'}), '(out_list, dim=in_channel_axis)\n', (19011, 19042), True, 'import oneflow as flow\n'), ((19075, 19206), 'oneflow.F.conv2d', 'flow.F.conv2d', (['x', 'self.weight', 'self.bias'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(x, self.weight, self.bias, stride=self.stride, padding=self.\n padding, dilation=self.dilation, groups=self.groups)\n', (19088, 19206), True, 'import oneflow as flow\n'), ((25658, 25725), 'oneflow.Tensor', 'flow.Tensor', (['out_channels', '(in_channels // groups)', '*self.kernel_size'], {}), '(out_channels, in_channels // groups, *self.kernel_size)\n', (25669, 25725), True, 'import oneflow as flow\n'), ((26094, 26141), 'oneflow.nn.init._calculate_fan_in_and_fan_out', 'init._calculate_fan_in_and_fan_out', (['self.weight'], {}), '(self.weight)\n', (26128, 26141), False, 'from oneflow.nn import init\n'), ((26196, 26235), 'oneflow.nn.init.uniform_', 'init.uniform_', (['self.bias', '(-bound)', 'bound'], {}), '(self.bias, -bound, bound)\n', (26209, 26235), False, 'from oneflow.nn import init\n'), ((27614, 27653), 'oneflow.cat', 'flow.cat', (['out_list'], {'dim': 'in_channel_axis'}), '(out_list, dim=in_channel_axis)\n', (27622, 27653), True, 'import oneflow as flow\n'), ((27686, 27817), 'oneflow.F.conv3d', 'flow.F.conv3d', (['x', 'self.weight', 'self.bias'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': 'self.groups'}), '(x, self.weight, self.bias, stride=self.stride, padding=self.\n 
padding, dilation=self.dilation, groups=self.groups)\n', (27699, 27817), True, 'import oneflow as flow\n'), ((7750, 7775), 'oneflow.Tensor', 'flow.Tensor', (['out_channels'], {}), '(out_channels)\n', (7761, 7775), True, 'import oneflow as flow\n'), ((7895, 7907), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (7904, 7907), False, 'import math\n'), ((8041, 8058), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (8050, 8058), False, 'import math\n'), ((17028, 17053), 'oneflow.Tensor', 'flow.Tensor', (['out_channels'], {}), '(out_channels)\n', (17039, 17053), True, 'import oneflow as flow\n'), ((17173, 17185), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (17182, 17185), False, 'import math\n'), ((17319, 17336), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (17328, 17336), False, 'import math\n'), ((25877, 25902), 'oneflow.Tensor', 'flow.Tensor', (['out_channels'], {}), '(out_channels)\n', (25888, 25902), True, 'import oneflow as flow\n'), ((26022, 26034), 'math.sqrt', 'math.sqrt', (['(5)'], {}), '(5)\n', (26031, 26034), False, 'import math\n'), ((26166, 26183), 'math.sqrt', 'math.sqrt', (['fan_in'], {}), '(fan_in)\n', (26175, 26183), False, 'import math\n'), ((8543, 8859), 'oneflow.F.conv1d', 'flow.F.conv1d', (['in_split_list[i]', 'self.weight[i * self.out_channel_groups:(i + 1) * self.out_channel_groups, :, :\n ]', '(self.bias[i * self.out_channel_groups:(i + 1) * self.out_channel_groups] if\n self.bias is not None else None)'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': '(1)'}), '(in_split_list[i], self.weight[i * self.out_channel_groups:(i +\n 1) * self.out_channel_groups, :, :], self.bias[i * self.\n out_channel_groups:(i + 1) * self.out_channel_groups] if self.bias is not\n None else None, stride=self.stride, padding=self.padding, dilation=self\n .dilation, groups=1)\n', (8556, 8859), True, 'import oneflow as flow\n'), ((18117, 18436), 'oneflow.F.conv2d', 'flow.F.conv2d', 
(['in_split_list[i]', 'self.weight[i * self.out_channel_groups:(i + 1) * self.out_channel_groups,\n :, :, :]', '(self.bias[i * self.out_channel_groups:(i + 1) * self.out_channel_groups] if\n self.bias is not None else None)'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': '(1)'}), '(in_split_list[i], self.weight[i * self.out_channel_groups:(i +\n 1) * self.out_channel_groups, :, :, :], self.bias[i * self.\n out_channel_groups:(i + 1) * self.out_channel_groups] if self.bias is not\n None else None, stride=self.stride, padding=self.padding, dilation=self\n .dilation, groups=1)\n', (18130, 18436), True, 'import oneflow as flow\n'), ((26728, 27047), 'oneflow.F.conv3d', 'flow.F.conv3d', (['in_split_list[i]', 'self.weight[i * self.out_channel_groups:(i + 1) * self.out_channel_groups,\n :, :, :]', '(self.bias[i * self.out_channel_groups:(i + 1) * self.out_channel_groups] if\n self.bias is not None else None)'], {'stride': 'self.stride', 'padding': 'self.padding', 'dilation': 'self.dilation', 'groups': '(1)'}), '(in_split_list[i], self.weight[i * self.out_channel_groups:(i +\n 1) * self.out_channel_groups, :, :, :], self.bias[i * self.\n out_channel_groups:(i + 1) * self.out_channel_groups] if self.bias is not\n None else None, stride=self.stride, padding=self.padding, dilation=self\n .dilation, groups=1)\n', (26741, 27047), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import cv2
import numpy as np
import oneflow as flow
from PIL import Image
import oneflow.typing as oft
def _of_image_target_resize(images, image_static_shape, target_size, max_size):
    """Run OneFlow's ``image_target_resize`` op on a batch of images.

    Args:
        images: list of numpy image arrays (each with a leading batch dim of 1,
            matching image_static_shape — see the _read_images_* helpers).
        image_static_shape: static (max) shape covering every image in the batch.
        target_size: desired length of the shorter image side.
        max_size: cap on the longer side (presumably 0 disables it — mirrors
            _get_target_resize_size; confirm against the op's docs).

    Returns:
        Tuple ``(resized_images, size, scale)``: resized image arrays plus the
        per-image output sizes and scale factors reported by the op.
    """
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    # NOTE(review): mirrored view appears to be required for the dynamic-shape
    # ListListNumpy input below — confirm against OneFlow's mirrored-view docs.
    func_config.default_logical_view(flow.scope.mirrored_view())
    @flow.global_function(function_config=func_config)
    def image_target_resize_job(
        images_def: oft.ListListNumpy.Placeholder(
            shape=image_static_shape, dtype=flow.float
        )
    ):
        # The resize op consumes a TensorBuffer, so convert from tensor list first.
        images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
        resized_images_buffer, size, scale = flow.image_target_resize(
            images_buffer,
            target_size=target_size,
            max_size=max_size,
            resize_side="shorter",
        )
        # Convert back to a tensor list; (target_size, max_size, C) is the
        # static upper-bound shape for the resized outputs.
        resized_images = flow.tensor_buffer_to_tensor_list(
            resized_images_buffer,
            shape=(target_size, max_size, image_static_shape[-1]),
            dtype=flow.float,
        )
        return resized_images, size, scale
    resized_images, size, scale = image_target_resize_job([images]).get()
    # Unwrap the single mirrored entry from each of the three results.
    resized_images = resized_images.numpy_lists()[0]
    size = size.numpy_list()[0]
    scale = scale.numpy_list()[0]
    return resized_images, size, scale
def _read_images_by_pil(image_files):
    """Load images via PIL as float32 BGR arrays, each with a leading batch dim of 1."""
    batched = []
    for path in image_files:
        rgb = np.array(Image.open(path)).astype(np.single)
        # PIL decodes to RGB; reverse the channel axis to get BGR (OpenCV order).
        bgr = rgb[:, :, ::-1]
        batched.append(np.expand_dims(bgr, axis=0))
    return batched
def _read_images_by_cv(image_files):
    """Load images via OpenCV (already BGR) as float32 arrays, each with a leading batch dim of 1."""
    batched = []
    for path in image_files:
        img = cv2.imread(path).astype(np.single)
        batched.append(np.expand_dims(img, axis=0))
    return batched
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _target_resize_by_cv(images, target_size, max_size):
resized_images = []
resized_sizes = []
resized_scales = []
for image in images:
squeeze_image = image.squeeze()
resized_size, resized_scale = _get_target_resize_size(
squeeze_image.shape[1], squeeze_image.shape[0], target_size, max_size
)
resized_images.append(cv2.resize(squeeze_image, resized_size))
resized_sizes.append(resized_size)
resized_scales.append(resized_scale)
return resized_images, resized_sizes, resized_scales
def _get_target_resize_size(w, h, target_size, max_size):
min_original_size = float(min((w, h)))
max_original_size = float(max((w, h)))
min_resized_size = target_size
max_resized_size = int(
round(max_original_size / min_original_size * min_resized_size)
)
if max_size > 0 and max_resized_size > max_size:
max_resized_size = max_size
min_resized_size = int(
round(max_resized_size * min_original_size / max_original_size)
)
if w < h:
res_w = min_resized_size
res_h = max_resized_size
else:
res_w = max_resized_size
res_h = min_resized_size
scale_w = res_w / w
scale_h = res_h / h
return (res_w, res_h), (scale_w, scale_h)
def _compare_image_target_resize_with_cv(
test_case, image_files, target_size, max_size, print_debug_info=False
):
images = _read_images_by_cv(image_files)
image_static_shape = _get_images_static_shape(images)
resized_images, size, scale = _of_image_target_resize(
images, tuple(image_static_shape), target_size, max_size
)
cv_resized_images, cv_resized_sizes, cv_resized_scales = _target_resize_by_cv(
images, target_size, max_size
)
for (
resized_image,
cv_resized_image,
image_size,
image_scale,
resized_size,
resized_scale,
) in zip(
resized_images,
cv_resized_images,
size,
scale,
cv_resized_sizes,
cv_resized_scales,
):
if print_debug_info:
print("resized_image shape:", resized_image.shape)
print("cv_resized_image shape:", cv_resized_image.shape)
print("resized w & h:", image_size, resized_size)
print("resize w_scale & h_scale:", image_scale, resized_scale)
test_case.assertTrue(np.allclose(resized_image, cv_resized_image))
test_case.assertTrue(np.allclose(image_size, resized_size))
test_case.assertTrue(np.allclose(image_scale, resized_scale))
def test_image_target_resize(test_case):
_compare_image_target_resize_with_cv(
test_case,
[
"/dataset/mscoco_2017/val2017/000000000139.jpg",
"/dataset/mscoco_2017/val2017/000000000632.jpg",
],
800,
1333,
# True,
)
| [
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.image_target_resize",
"oneflow.scope.mirrored_view",
"oneflow.typing.ListListNumpy.Placeholder",
"oneflow.tensor_buffer_to_tensor_list",
"oneflow.FunctionConfig",
"oneflow.tensor_list_to_tensor_buffer"
] | [((780, 808), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (806, 808), True, 'import oneflow as flow\n'), ((827, 848), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (846, 848), True, 'import oneflow as flow\n'), ((966, 1015), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (986, 1015), True, 'import oneflow as flow\n'), ((2550, 2579), 'numpy.amax', 'np.amax', (['image_shapes'], {'axis': '(0)'}), '(image_shapes, axis=0)\n', (2557, 2579), True, 'import numpy as np\n'), ((932, 958), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (956, 958), True, 'import oneflow as flow\n'), ((1196, 1241), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['images_def'], {}), '(images_def)\n', (1229, 1241), True, 'import oneflow as flow\n'), ((1287, 1398), 'oneflow.image_target_resize', 'flow.image_target_resize', (['images_buffer'], {'target_size': 'target_size', 'max_size': 'max_size', 'resize_side': '"""shorter"""'}), "(images_buffer, target_size=target_size, max_size=\n max_size, resize_side='shorter')\n", (1311, 1398), True, 'import oneflow as flow\n'), ((1478, 1611), 'oneflow.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['resized_images_buffer'], {'shape': '(target_size, max_size, image_static_shape[-1])', 'dtype': 'flow.float'}), '(resized_images_buffer, shape=(target_size,\n max_size, image_static_shape[-1]), dtype=flow.float)\n', (1511, 1611), True, 'import oneflow as flow\n'), ((1985, 2007), 'PIL.Image.open', 'Image.open', (['image_file'], {}), '(image_file)\n', (1995, 2007), False, 'from PIL import Image\n'), ((2183, 2212), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, axis=0)\n', (2197, 2212), True, 'import numpy as np\n'), ((2381, 2410), 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), '(image, 
axis=0)\n', (2395, 2410), True, 'import numpy as np\n'), ((1069, 1142), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', ([], {'shape': 'image_static_shape', 'dtype': 'flow.float'}), '(shape=image_static_shape, dtype=flow.float)\n', (1098, 1142), True, 'import oneflow.typing as oft\n'), ((3329, 3368), 'cv2.resize', 'cv2.resize', (['squeeze_image', 'resized_size'], {}), '(squeeze_image, resized_size)\n', (3339, 3368), False, 'import cv2\n'), ((5374, 5418), 'numpy.allclose', 'np.allclose', (['resized_image', 'cv_resized_image'], {}), '(resized_image, cv_resized_image)\n', (5385, 5418), True, 'import numpy as np\n'), ((5449, 5486), 'numpy.allclose', 'np.allclose', (['image_size', 'resized_size'], {}), '(image_size, resized_size)\n', (5460, 5486), True, 'import numpy as np\n'), ((5517, 5556), 'numpy.allclose', 'np.allclose', (['image_scale', 'resized_scale'], {}), '(image_scale, resized_scale)\n', (5528, 5556), True, 'import numpy as np\n'), ((2297, 2319), 'cv2.imread', 'cv2.imread', (['image_file'], {}), '(image_file)\n', (2307, 2319), False, 'import cv2\n'), ((2099, 2114), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2107, 2114), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow as flow
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.id_util as id_util
from oneflow.python.oneflow_export import oneflow_export
from oneflow.python.framework.remote_blob import BlobDef
from typing import Optional, Sequence
@oneflow_export("tensor_buffer_to_tensor")
def tensor_buffer_to_tensor(
x: BlobDef,
dtype: dtype_util.dtype,
instance_shape: Sequence[int],
name: Optional[str] = None,
) -> BlobDef:
r"""This operator converts the Blob's type from TensorBuffer to Tensor.
Some operator's output data type is `TensorBuffer`, you can use this operator to convert back
to `Tensor`.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (BlobDef): Input `Blob`.
dtype (dtype_util.dtype): The data dtype.
instance_shape (Sequence[int]): The shape of each TensorBuffer instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: A `Blob`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64)
"""
if name is None:
name = id_util.UniqueStr("TensorBufferToTensor_")
return (
flow.user_op_builder(name)
.Op("tensor_buffer_to_tensor")
.Input("in", [x])
.Output("out")
.Attr("dtype", dtype)
.Attr("instance_shape", instance_shape)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("tensor_to_tensor_buffer")
def tensor_to_tensor_buffer(
x: BlobDef, instance_dims: int, name: Optional[str] = None,
) -> BlobDef:
r"""This operator converts the Blob's type from Tensor to TensorBuffer.
Refer to `Concept Explanation <https://docs.oneflow.org/basics_topics/concept_explanation.html#3tensorbuffer-tensorlist>`_
for more about TensorBuffer.
Args:
x (BlobDef): Input `Blob`.
instance_dims (int): The dimensions of dynamic tensor instance.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def tensor_buffer_to_tensor_Job(x: tp.Numpy.Placeholder(shape=(4, 16, 64, 64), dtype=flow.float32),
) -> tp.Numpy:
x = flow.tensor_to_tensor_buffer(x,
instance_dims=2)
return flow.tensor_buffer_to_tensor(x,
instance_shape=(64, 64),
dtype=flow.float)
x = np.random.randn(4, 16, 64, 64).astype(np.float32)
out = tensor_buffer_to_tensor_Job(x)
# out.shape (4, 16, 64, 64)
"""
if name is None:
name = id_util.UniqueStr("TensorToTensorBuffer_")
return (
flow.user_op_builder(name)
.Op("tensor_to_tensor_buffer")
.Input("in", [x])
.Output("out")
.Attr("instance_dims", instance_dims)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((912, 953), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_buffer_to_tensor"""'], {}), "('tensor_buffer_to_tensor')\n", (926, 953), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2912, 2953), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor_to_tensor_buffer"""'], {}), "('tensor_to_tensor_buffer')\n", (2926, 2953), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2574, 2616), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorBufferToTensor_"""'], {}), "('TensorBufferToTensor_')\n", (2591, 2616), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4356, 4398), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TensorToTensorBuffer_"""'], {}), "('TensorToTensorBuffer_')\n", (4373, 4398), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4420, 4446), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (4440, 4446), True, 'import oneflow as flow\n'), ((2638, 2664), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (2658, 2664), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import torch
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type
from oneflow.test_utils.automated_test_util.generators import *
from oneflow.test_utils.automated_test_util.torch_flow_dual_object import globaltest
def _compare_eager_global_with_torch(
placement, logits_sbp, labels_sbp, data_type, label_type, batch_size, num_classes,
):
data_type = type_name_to_flow_type[data_type]
label_type = type_name_to_flow_type[label_type]
np_labels = np.random.randint(0, num_classes, size=(batch_size,)).astype(np.int32)
np_logits = np.random.random((batch_size, num_classes)).astype(np.float32)
if flow.env.get_rank() == 0:
torch_logits = torch.tensor(np_logits, dtype=torch.float32, requires_grad=True)
torch_labels = torch.tensor(np_labels, dtype=torch.int64)
torch_output = torch.nn.functional.cross_entropy(
torch_logits, torch_labels, reduction="none"
)
torch_output.sum().backward()
of_logits = flow.tensor(np_logits, dtype=data_type, requires_grad=True).to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_logits = of_logits.to_global(placement, logits_sbp)
of_logits.retain_grad()
of_labels = flow.tensor(np_labels, dtype=label_type).to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_labels = of_labels.to_global(placement, labels_sbp)
of_output = flow.nn.functional.sparse_softmax_cross_entropy(
labels=of_labels, logits=of_logits
)
of_output.sum().backward()
of_logits_grad = of_logits.grad.to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_logits_grad = of_logits_grad.to_local()
of_output = of_output.to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_output = of_output.to_local()
if flow.env.get_rank() == 0:
assert np.allclose(
of_output.numpy(), torch_output.detach().numpy(), rtol=1e-03, atol=1e-04
)
assert np.allclose(
of_logits_grad.numpy(), torch_logits.grad, rtol=1e-03, atol=1e-04
)
def _compare_lazy_global_with_torch(
placement, logits_sbp, labels_sbp, data_type, label_type, batch_size, num_classes,
):
data_type = type_name_to_flow_type[data_type]
label_type = type_name_to_flow_type[label_type]
np_labels = np.random.randint(0, num_classes, size=(batch_size,)).astype(np.int32)
np_logits = np.random.random((batch_size, num_classes)).astype(np.float32)
if flow.env.get_rank() == 0:
torch_logits = torch.tensor(np_logits, dtype=torch.float32, requires_grad=True)
torch_labels = torch.tensor(np_labels, dtype=torch.int64)
torch_output = torch.nn.functional.cross_entropy(
torch_logits, torch_labels, reduction="none"
)
class MyModule(flow.nn.Graph):
def __init__(self):
super(MyModule, self).__init__()
# nn.graph no support get input.grad
def build(self, logits, labels):
output = flow.nn.functional.sparse_softmax_cross_entropy(
labels=labels, logits=logits
)
return output
of_logits = flow.tensor(np_logits, dtype=data_type, requires_grad=True).to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_logits = of_logits.to_global(placement, logits_sbp)
of_labels = flow.tensor(np_labels, dtype=label_type).to_global(
flow.env.all_device_placement("cpu"), flow.sbp.broadcast
)
of_labels = of_labels.to_global(placement, labels_sbp)
graph = MyModule()
of_output = graph(of_logits, of_labels)
of_output = of_output.to_global(
placement=flow.env.all_device_placement("cpu"), sbp=[flow.sbp.broadcast]
)
of_output = of_output.to_local()
flow._oneflow_internal.eager.multi_client.Sync()
if flow.env.get_rank() == 0:
assert np.allclose(
of_output.numpy(), torch_output.detach().numpy(), rtol=1e-03, atol=1e-04
)
class TestConsistentSparseSoftmaxCrossEntropyWithLogits(flow.unittest.TestCase):
@globaltest
def test_eager_global_sparse_softmax_cross_entropy(test_case):
arg_dict = OrderedDict()
arg_dict["data_type"] = ["float32", "double"]
arg_dict["label_type"] = ["int32", "int64"]
arg_dict["batch_size"] = [64]
arg_dict["num_classes"] = [1024]
for arg in GenArgList(arg_dict):
for placement in all_placement():
for logits_sbp in all_sbp(placement, max_dim=2):
for labels_sbp in all_sbp(placement, max_dim=1):
_compare_eager_global_with_torch(
placement, logits_sbp, labels_sbp, *arg
)
# TODO: Too many streams will cause bugs, open the graph mode after solving
# @globaltest
# def test_lazy_global_sparse_softmax_cross_entropy(test_case):
# arg_dict = OrderedDict()
# arg_dict["data_type"] = ["float32", "double"]
# arg_dict["label_type"] = ["int32", "int64"]
# arg_dict["batch_size"] = [64]
# arg_dict["num_classes"] = [1024]
# for arg in GenArgList(arg_dict):
# for placement in all_placement():
# for logits_sbp in all_sbp(placement, max_dim=2):
# for labels_sbp in all_sbp(placement, max_dim=1):
# _compare_lazy_global_with_torch(
# placement, logits_sbp, labels_sbp, *arg
# )
if __name__ == "__main__":
unittest.main()
| [
"oneflow.env.all_device_placement",
"oneflow.tensor",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.nn.functional.sparse_softmax_cross_entropy",
"oneflow.env.get_rank",
"oneflow._oneflow_internal.eager.multi_client.Sync"
] | [((2170, 2258), 'oneflow.nn.functional.sparse_softmax_cross_entropy', 'flow.nn.functional.sparse_softmax_cross_entropy', ([], {'labels': 'of_labels', 'logits': 'of_logits'}), '(labels=of_labels, logits=\n of_logits)\n', (2217, 2258), True, 'import oneflow as flow\n'), ((4592, 4640), 'oneflow._oneflow_internal.eager.multi_client.Sync', 'flow._oneflow_internal.eager.multi_client.Sync', ([], {}), '()\n', (4638, 4640), True, 'import oneflow as flow\n'), ((6369, 6384), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6382, 6384), False, 'import unittest\n'), ((1364, 1383), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (1381, 1383), True, 'import oneflow as flow\n'), ((1413, 1477), 'torch.tensor', 'torch.tensor', (['np_logits'], {'dtype': 'torch.float32', 'requires_grad': '(True)'}), '(np_logits, dtype=torch.float32, requires_grad=True)\n', (1425, 1477), False, 'import torch\n'), ((1501, 1543), 'torch.tensor', 'torch.tensor', (['np_labels'], {'dtype': 'torch.int64'}), '(np_labels, dtype=torch.int64)\n', (1513, 1543), False, 'import torch\n'), ((1567, 1646), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['torch_logits', 'torch_labels'], {'reduction': '"""none"""'}), "(torch_logits, torch_labels, reduction='none')\n", (1600, 1646), False, 'import torch\n'), ((1803, 1839), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (1832, 1839), True, 'import oneflow as flow\n'), ((2031, 2067), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2060, 2067), True, 'import oneflow as flow\n'), ((2354, 2390), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2383, 2390), True, 'import oneflow as flow\n'), ((2509, 2545), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2538, 2545), True, 'import oneflow as flow\n'), ((2617, 
2636), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (2634, 2636), True, 'import oneflow as flow\n'), ((3286, 3305), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (3303, 3305), True, 'import oneflow as flow\n'), ((3335, 3399), 'torch.tensor', 'torch.tensor', (['np_logits'], {'dtype': 'torch.float32', 'requires_grad': '(True)'}), '(np_logits, dtype=torch.float32, requires_grad=True)\n', (3347, 3399), False, 'import torch\n'), ((3423, 3465), 'torch.tensor', 'torch.tensor', (['np_labels'], {'dtype': 'torch.int64'}), '(np_labels, dtype=torch.int64)\n', (3435, 3465), False, 'import torch\n'), ((3489, 3568), 'torch.nn.functional.cross_entropy', 'torch.nn.functional.cross_entropy', (['torch_logits', 'torch_labels'], {'reduction': '"""none"""'}), "(torch_logits, torch_labels, reduction='none')\n", (3522, 3568), False, 'import torch\n'), ((4038, 4074), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (4067, 4074), True, 'import oneflow as flow\n'), ((4237, 4273), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (4266, 4273), True, 'import oneflow as flow\n'), ((4649, 4668), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (4666, 4668), True, 'import oneflow as flow\n'), ((4983, 4996), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4994, 4996), False, 'from collections import OrderedDict\n'), ((5201, 5221), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5211, 5221), False, 'from oneflow.test_utils.test_util import GenArgList, type_name_to_flow_type\n'), ((1207, 1260), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_classes'], {'size': '(batch_size,)'}), '(0, num_classes, size=(batch_size,))\n', (1224, 1260), True, 'import numpy as np\n'), ((1294, 1337), 'numpy.random.random', 'np.random.random', (['(batch_size, num_classes)'], {}), '((batch_size, 
num_classes))\n', (1310, 1337), True, 'import numpy as np\n'), ((1724, 1783), 'oneflow.tensor', 'flow.tensor', (['np_logits'], {'dtype': 'data_type', 'requires_grad': '(True)'}), '(np_logits, dtype=data_type, requires_grad=True)\n', (1735, 1783), True, 'import oneflow as flow\n'), ((1971, 2011), 'oneflow.tensor', 'flow.tensor', (['np_labels'], {'dtype': 'label_type'}), '(np_labels, dtype=label_type)\n', (1982, 2011), True, 'import oneflow as flow\n'), ((3129, 3182), 'numpy.random.randint', 'np.random.randint', (['(0)', 'num_classes'], {'size': '(batch_size,)'}), '(0, num_classes, size=(batch_size,))\n', (3146, 3182), True, 'import numpy as np\n'), ((3216, 3259), 'numpy.random.random', 'np.random.random', (['(batch_size, num_classes)'], {}), '((batch_size, num_classes))\n', (3232, 3259), True, 'import numpy as np\n'), ((3808, 3885), 'oneflow.nn.functional.sparse_softmax_cross_entropy', 'flow.nn.functional.sparse_softmax_cross_entropy', ([], {'labels': 'labels', 'logits': 'logits'}), '(labels=labels, logits=logits)\n', (3855, 3885), True, 'import oneflow as flow\n'), ((3959, 4018), 'oneflow.tensor', 'flow.tensor', (['np_logits'], {'dtype': 'data_type', 'requires_grad': '(True)'}), '(np_logits, dtype=data_type, requires_grad=True)\n', (3970, 4018), True, 'import oneflow as flow\n'), ((4177, 4217), 'oneflow.tensor', 'flow.tensor', (['np_labels'], {'dtype': 'label_type'}), '(np_labels, dtype=label_type)\n', (4188, 4217), True, 'import oneflow as flow\n'), ((4481, 4517), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (4510, 4517), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
@flow.unittest.skip_unless_1n1d()
class TestTripletMarginLoss(flow.unittest.TestCase):
@autotest(n=10)
def test_triplet_marginloss_with_random_data(test_case):
margin = random().to(float)
p = random().to(float)
swap = random_bool()
reduction = oneof("none", "sum", "mean", nothing())
m = torch.nn.TripletMarginLoss(
margin=margin, p=p, swap=swap, reduction=reduction
)
m.train(random())
device = random_device()
m.to(device)
shape = random_tensor(ndim=2, dim0=random(1, 8)).value().shape
anchor = random_pytorch_tensor(len(shape), *shape).to(device)
pos = random_pytorch_tensor(len(shape), *shape).to(device)
neg = random_pytorch_tensor(len(shape), *shape).to(device)
y = m(anchor, pos, neg)
return y
if __name__ == "__main__":
unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((775, 807), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (805, 807), True, 'import oneflow as flow\n'), ((1648, 1663), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1661, 1663), False, 'import unittest\n')] |
# coding=utf-8
import numpy as np
import oneflow.experimental as flow
import oneflow.experimental.nn as nn
# define LeNet module
class LeNet5(nn.Module):
def __init__(self, n_classes):
super(LeNet5, self).__init__()
self.feature_extractor = nn.Sequential(
nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, stride=1),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2),
nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5, stride=1),
nn.Tanh(),
nn.AvgPool2d(kernel_size=2),
nn.Conv2d(in_channels=16, out_channels=120, kernel_size=5, stride=1),
nn.Tanh(),
)
self.classifier = nn.Sequential(
nn.Linear(in_features=120, out_features=84),
nn.Tanh(),
nn.Linear(in_features=84, out_features=n_classes),
)
def forward(self, x):
x = self.feature_extractor(x)
x = flow.flatten(x, 1)
logits = self.classifier(x)
probs = flow.softmax(logits, dim=1)
return logits, probs
# enable eager mode
flow.enable_eager_execution()
# init model
model = LeNet5(10)
criterion = nn.CrossEntropyLoss()
# enable module to use cuda
model.to("cuda")
criterion.to("cuda")
learning_rate = 0.005
optimizer = flow.optim.SGD(model.parameters(), lr=learning_rate, momentum=0.9)
# generate random data and label
train_data = flow.Tensor(
np.random.uniform(size=(30, 1, 32, 32)).astype(np.float32), device="cuda"
)
train_label = flow.Tensor(
np.random.uniform(size=(30)).astype(np.int32), dtype=flow.int32, device="cuda"
)
# run forward, backward and update parameters
logits, probs = model(train_data)
loss = criterion(logits, train_label)
loss.backward()
optimizer.step()
optimizer.zero_grad()
print(loss.numpy())
| [
"oneflow.experimental.flatten",
"oneflow.experimental.nn.Linear",
"oneflow.experimental.softmax",
"oneflow.experimental.nn.Conv2d",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.nn.Tanh",
"oneflow.experimental.nn.AvgPool2d",
"oneflow.experimental.nn.CrossEntropyLoss"
] | [((1100, 1129), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (1127, 1129), True, 'import oneflow.experimental as flow\n'), ((1175, 1196), 'oneflow.experimental.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (1194, 1196), True, 'import oneflow.experimental.nn as nn\n'), ((950, 968), 'oneflow.experimental.flatten', 'flow.flatten', (['x', '(1)'], {}), '(x, 1)\n', (962, 968), True, 'import oneflow.experimental as flow\n'), ((1021, 1048), 'oneflow.experimental.softmax', 'flow.softmax', (['logits'], {'dim': '(1)'}), '(logits, dim=1)\n', (1033, 1048), True, 'import oneflow.experimental as flow\n'), ((289, 354), 'oneflow.experimental.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(6)', 'kernel_size': '(5)', 'stride': '(1)'}), '(in_channels=1, out_channels=6, kernel_size=5, stride=1)\n', (298, 354), True, 'import oneflow.experimental.nn as nn\n'), ((368, 377), 'oneflow.experimental.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (375, 377), True, 'import oneflow.experimental.nn as nn\n'), ((391, 418), 'oneflow.experimental.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (403, 418), True, 'import oneflow.experimental.nn as nn\n'), ((432, 498), 'oneflow.experimental.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(6)', 'out_channels': '(16)', 'kernel_size': '(5)', 'stride': '(1)'}), '(in_channels=6, out_channels=16, kernel_size=5, stride=1)\n', (441, 498), True, 'import oneflow.experimental.nn as nn\n'), ((512, 521), 'oneflow.experimental.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (519, 521), True, 'import oneflow.experimental.nn as nn\n'), ((535, 562), 'oneflow.experimental.nn.AvgPool2d', 'nn.AvgPool2d', ([], {'kernel_size': '(2)'}), '(kernel_size=2)\n', (547, 562), True, 'import oneflow.experimental.nn as nn\n'), ((576, 644), 'oneflow.experimental.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(16)', 'out_channels': '(120)', 'kernel_size': '(5)', 'stride': 
'(1)'}), '(in_channels=16, out_channels=120, kernel_size=5, stride=1)\n', (585, 644), True, 'import oneflow.experimental.nn as nn\n'), ((658, 667), 'oneflow.experimental.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (665, 667), True, 'import oneflow.experimental.nn as nn\n'), ((732, 775), 'oneflow.experimental.nn.Linear', 'nn.Linear', ([], {'in_features': '(120)', 'out_features': '(84)'}), '(in_features=120, out_features=84)\n', (741, 775), True, 'import oneflow.experimental.nn as nn\n'), ((789, 798), 'oneflow.experimental.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (796, 798), True, 'import oneflow.experimental.nn as nn\n'), ((812, 861), 'oneflow.experimental.nn.Linear', 'nn.Linear', ([], {'in_features': '(84)', 'out_features': 'n_classes'}), '(in_features=84, out_features=n_classes)\n', (821, 861), True, 'import oneflow.experimental.nn as nn\n'), ((1430, 1469), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(30, 1, 32, 32)'}), '(size=(30, 1, 32, 32))\n', (1447, 1469), True, 'import numpy as np\n'), ((1537, 1563), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(30)'}), '(size=30)\n', (1554, 1563), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
from scipy.special import erf, erfc, gammaln
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
@flow.unittest.skip_unless_1n2d()
class TestUnaryElementwiseOps(flow.unittest.TestCase):
    """Checks flow.math unary elementwise ops against NumPy/SciPy references.

    Each test builds a single-client global function around exactly one op,
    feeds it random and/or hand-picked inputs (including +/-inf and
    out-of-domain values), and compares the result with the corresponding
    NumPy or SciPy function.  ``equal_nan=True`` is passed wherever
    out-of-domain inputs are expected to produce NaN on both sides.
    """
    def test_abs(test_case):
        """flow.math.abs must match np.absolute exactly (no tolerance)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AbsJob(a: oft.Numpy.Placeholder((5, 2))):
            return flow.math.abs(a)
        x = np.random.rand(5, 2).astype(np.float32)
        y = AbsJob(x).get().numpy()
        test_case.assertTrue(np.array_equal(y, np.absolute(x)))
    def test_acos(test_case):
        """flow.math.acos vs np.arccos on inputs drawn from [0, 1)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AcosJob(a: oft.Numpy.Placeholder((5, 2))):
            return flow.math.acos(a)
        x = np.random.rand(5, 2).astype(np.float32)
        y = AcosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccos(x)))
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_acos_consistent_1n2c(test_case):
        """Same acos check, but run consistently across 2 GPUs on one node."""
        flow.config.gpu_device_num(2)
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AcosJob(a: oft.Numpy.Placeholder((5, 2))):
            return flow.math.acos(a)
        x = np.random.rand(5, 2).astype(np.float32)
        y = AcosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccos(x)))
    def test_acos_cpu(test_case):
        """Same acos check, but pinned to the CPU device via placement scope."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        # NOTE: placement scope is set before the logical view, matching the
        # config order used by the rest of this file's CPU-pinned tests.
        func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AcosJob(a: oft.Numpy.Placeholder((5, 2))):
            return flow.math.acos(a)
        x = np.random.rand(5, 2).astype(np.float32)
        y = AcosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccos(x)))
    def test_acos_double(test_case):
        """Same acos check with a float64 placeholder and float64 input."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AcosJob(a: oft.Numpy.Placeholder((5, 2), dtype=flow.double)):
            return flow.math.acos(a)
        x = np.random.rand(5, 2).astype(np.double)
        y = AcosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccos(x)))
    def test_acosh(test_case):
        """flow.math.acosh vs np.arccosh; values < 1 are out of domain -> NaN."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AcoshJob(a: oft.Numpy.Placeholder((7,))):
            return flow.math.acosh(a)
        x = np.array([-2, -0.5, 1, 1.2, 200, 10000, float("inf")], dtype=np.float32)
        y = AcoshJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccosh(x), equal_nan=True))
        x = np.random.uniform(low=1.0, high=100.0, size=(7,)).astype(np.float32)
        y = AcoshJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arccosh(x), equal_nan=True))
    def test_asin(test_case):
        """flow.math.asin vs np.arcsin on fixed values and a [-1, 1] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AsinJob(a: oft.Numpy.Placeholder((2,))):
            return flow.math.asin(a)
        x = np.array([0.8659266, 0.7068252], dtype=np.float32)
        y = AsinJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arcsin(x), equal_nan=True))
        x = np.random.uniform(low=-1.0, high=1.0, size=(2,)).astype(np.float32)
        y = AsinJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arcsin(x), equal_nan=True))
    def test_asinh(test_case):
        """flow.math.asinh vs np.arcsinh, including +/-inf endpoints."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AsinhJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.asinh(a)
        x = np.array(
            [-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")],
            dtype=np.float32,
        )
        y = AsinhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arcsinh(x), equal_nan=True))
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = AsinhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arcsinh(x), equal_nan=True))
    def test_atan(test_case):
        """flow.math.atan vs np.arctan on fixed values and (-pi/2, pi/2)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AtanJob(a: oft.Numpy.Placeholder((2,))):
            return flow.math.atan(a)
        x = np.array([1.731261, 0.99920404], dtype=np.float32)
        y = AtanJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arctan(x), equal_nan=True))
        pi = 3.14159265357
        x = np.random.uniform(low=-pi / 2, high=pi / 2, size=(2,)).astype(np.float32)
        y = AtanJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arctan(x), equal_nan=True))
    def test_atanh(test_case):
        """flow.math.atanh vs np.arctanh; |x| > 1 is out of domain -> NaN."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def AtanhJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.atanh(a)
        x = np.array(
            [-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")], dtype=np.float32
        )
        y = AtanhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arctanh(x), equal_nan=True))
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = AtanhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.arctanh(x), equal_nan=True))
    def test_ceil(test_case):
        """flow.math.ceil vs np.ceil on a [-10, 10] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def CeilJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.ceil(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = CeilJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.ceil(x), equal_nan=True))
    def test_cos(test_case):
        """flow.math.cos vs np.cos; cos(+/-inf) is NaN on both sides."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def CosJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.cos(a)
        x = np.array(
            [-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")],
            dtype=np.float32,
        )
        y = CosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.cos(x), equal_nan=True))
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = CosJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.cos(x), equal_nan=True))
    def test_cosh(test_case):
        """flow.math.cosh vs np.cosh, including +/-inf endpoints."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def CoshJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.cosh(a)
        x = np.array(
            [-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")], dtype=np.float32
        )
        y = CoshJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.cosh(x), equal_nan=True))
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = CoshJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.cosh(x), equal_nan=True))
    def test_erf(test_case):
        """flow.math.erf vs scipy.special.erf."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def ErfJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.erf(a)
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = ErfJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, erf(x), equal_nan=True))
    def test_erfc(test_case):
        """flow.math.erfc vs scipy.special.erfc."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def ErfcJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.erfc(a)
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = ErfcJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, erfc(x), equal_nan=True))
    def test_exp(test_case):
        """flow.math.exp vs np.exp."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def ExpJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.exp(a)
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = ExpJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.exp(x), equal_nan=True))
    def test_expm1(test_case):
        """flow.math.expm1 vs np.expm1 (exp(x) - 1, accurate near 0)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def Expm1Job(a: oft.Numpy.Placeholder((8,))):
            return flow.math.expm1(a)
        x = np.random.uniform(size=(8,)).astype(np.float32)
        y = Expm1Job(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.expm1(x), equal_nan=True))
    def test_floor(test_case):
        """flow.math.floor vs np.floor on a [-10, 10] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def FloorJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.floor(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = FloorJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.floor(x), equal_nan=True))
    def test_lgamma(test_case):
        """flow.math.lgamma vs scipy.special.gammaln; poles at 0 and -4."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def LgammaJob(a: oft.Numpy.Placeholder((6,))):
            return flow.math.lgamma(a)
        x = np.array([0, 0.5, 1, 4.5, -4, -5.6], dtype=np.float32)
        y = LgammaJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, gammaln(x), equal_nan=True))
    def test_log(test_case):
        """flow.math.log vs np.log; log(0) is -inf on both sides."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def LogJob(a: oft.Numpy.Placeholder((4,))):
            return flow.math.log(a)
        x = np.array([0, 0.5, 1, 5], dtype=np.float32)
        y = LogJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.log(x), equal_nan=True))
    def test_log1p(test_case):
        """flow.math.log1p vs np.log1p (log(1 + x), accurate near 0)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def Log1pJob(a: oft.Numpy.Placeholder((4,))):
            return flow.math.log1p(a)
        x = np.array([0, 0.5, 1, 5], dtype=np.float32)
        y = Log1pJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.log1p(x), equal_nan=True))
    def test_log_sigmoid(test_case):
        """flow.math.log_sigmoid vs -log(1 + exp(-x)), with loose tolerances."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def LogSigmoidJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.log_sigmoid(a)
        x = np.random.uniform(low=-5.0, high=5.0, size=(8,)).astype(np.float32)
        y = LogSigmoidJob(x).get().numpy()
        # Looser rtol/atol than the default: the reference formula is itself
        # a numerically naive expansion of log-sigmoid.
        test_case.assertTrue(
            np.allclose(
                y, -np.log(1 + np.exp(-x)), equal_nan=True, rtol=0.001, atol=1e-05
            )
        )
    def test_negative(test_case):
        """flow.math.negative vs unary minus."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def NegativeJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.negative(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = NegativeJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, -x, equal_nan=True))
    def test_reciprocal(test_case):
        """flow.math.reciprocal vs 1.0 / x."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def ReciprocalJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.reciprocal(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = ReciprocalJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, 1.0 / x, equal_nan=True))
    def test_reciprocal_no_nan(test_case):
        """flow.math.reciprocal_no_nan: like 1/x, but maps x == 0 to 0."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def ReciprocalNoNanJob(a: oft.Numpy.Placeholder((4,))):
            return flow.math.reciprocal_no_nan(a)
        x = np.array([2.0, 0.5, 0, 1], dtype=np.float32)
        # Expected output spells out the zero-input special case explicitly.
        out = np.array([0.5, 2, 0.0, 1.0], dtype=np.float32)
        y = ReciprocalNoNanJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, out, equal_nan=True))
    def test_rint(test_case):
        """flow.math.rint vs np.rint on a [-10, 10] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def RintJob(a: oft.Numpy.Placeholder((9,))):
            return flow.math.rint(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = RintJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.rint(x), equal_nan=True))
    def test_rint_special_value(test_case):
        """rint tie-breaking: halfway values round to even (1.5->2, 2.5->2)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def RintJob(a: oft.Numpy.Placeholder((9,))):
            return flow.math.rint(a)
        x = np.array(
            [0.5000001, -1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.5, 3.5], dtype=np.float32
        )
        out = np.array(
            [1.0, -2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 2.0, 4.0], dtype=np.float32
        )
        y = RintJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, out, equal_nan=True))
    def test_round(test_case):
        """flow.math.round vs np.round on a [-10, 10] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def RoundJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.round(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = RoundJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.round(x), equal_nan=True))
    def test_round_special_value(test_case):
        """round tie-breaking: half-to-even (2.5->2, 1.5->2, -4.5->-4)."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def RoundJob(a: oft.Numpy.Placeholder((5,))):
            return flow.math.round(a)
        x = np.array([0.9, 2.5, 2.3, 1.5, -4.5], dtype=np.float32)
        out = np.array([1.0, 2.0, 2.0, 2.0, -4.0], dtype=np.float32)
        y = RoundJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, out, equal_nan=True))
    def test_rsqrt(test_case):
        """flow.math.rsqrt vs 1/sqrt(x); negative inputs yield NaN on both."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def RsqrtJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.rsqrt(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = RsqrtJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, 1 / np.sqrt(x), equal_nan=True))
    def test_sigmoid_v2(test_case):
        """flow.math.sigmoid_v2 vs 1 / (1 + exp(-x))."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SigmoidJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.sigmoid_v2(a)
        x = np.random.uniform(low=-2.0, high=2.0, size=(8,)).astype(np.float32)
        y = SigmoidJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, 1.0 / (1.0 + np.exp(-x)), equal_nan=True))
    def test_sign(test_case):
        """flow.math.sign vs np.sign on a [-100, 100] sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SignJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.sign(a)
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)
        y = SignJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))
    def test_sign_double(test_case):
        """Same sign check with a float64 placeholder and float64 input."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SignJob(a: oft.Numpy.Placeholder((8,), dtype=flow.double)):
            return flow.math.sign(a)
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.double)
        y = SignJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_sign_double_consistent_1n2c(test_case):
        """Double-precision sign check run consistently across 2 GPUs."""
        flow.config.gpu_device_num(2)
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SignJob(a: oft.Numpy.Placeholder((8,), dtype=flow.double)):
            return flow.math.sign(a)
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.double)
        y = SignJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sign(x), equal_nan=True))
    def test_sin(test_case):
        """flow.math.sin vs np.sin; sin(+/-inf) is NaN on both sides."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SinJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.sin(a)
        x = np.array(
            [-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")], dtype=np.float32
        )
        y = SinJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sin(x), equal_nan=True))
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)
        y = SinJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sin(x), equal_nan=True))
    def test_softplus(test_case):
        """flow.math.softplus vs log(exp(x) + 1), with loose tolerances."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SoftplusJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.softplus(a)
        x = np.random.uniform(low=-10.0, high=10.0, size=(8,)).astype(np.float32)
        y = SoftplusJob(x).get().numpy()
        # Looser rtol/atol: the naive reference formula overflows/loses
        # precision earlier than a fused softplus kernel.
        test_case.assertTrue(
            np.allclose(
                y, np.log(np.exp(x) + 1), equal_nan=True, rtol=0.001, atol=1e-05
            )
        )
    def test_sqrt(test_case):
        """flow.math.sqrt vs np.sqrt on a non-negative sample."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SqrtJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.sqrt(a)
        x = np.random.uniform(low=0.0, high=100.0, size=(8,)).astype(np.float32)
        y = SqrtJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.sqrt(x), equal_nan=True))
    def test_square(test_case):
        """flow.math.square vs x * x."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def SquareJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.square(a)
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)
        y = SquareJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, x * x, equal_nan=True))
    def test_tan(test_case):
        """flow.math.tan vs np.tan; tan(+/-inf) is NaN on both sides."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def TanJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.tan(a)
        x = np.array(
            [-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")],
            dtype=np.float32,
        )
        y = TanJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.tan(x), equal_nan=True))
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)
        y = TanJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.tan(x), equal_nan=True))
    def test_tanh(test_case):
        """flow.math.tanh vs np.tanh; saturates to +/-1 at +/-inf."""
        func_config = flow.FunctionConfig()
        func_config.default_data_type(flow.float)
        func_config.default_logical_view(flow.scope.consistent_view())
        @flow.global_function(function_config=func_config)
        def TanhJob(a: oft.Numpy.Placeholder((8,))):
            return flow.math.tanh(a)
        x = np.array(
            [-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")], dtype=np.float32
        )
        y = TanhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.tanh(x), equal_nan=True))
        x = np.random.uniform(low=-100.0, high=100.0, size=(8,)).astype(np.float32)
        y = TanhJob(x).get().numpy()
        test_case.assertTrue(np.allclose(y, np.tanh(x), equal_nan=True))
# Allow running this test module directly (e.g. `python <module>.py`)
# in addition to discovery by a test runner.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.math.reciprocal_no_nan",
"oneflow.compatible.single_client.math.tanh",
"oneflow.compatible.single_client.math.asinh",
"oneflow.compatible.single_client.math.atan",
"oneflow.compatible.single_client.math.asin",
"oneflow.compatible.single_client.math.expm1",
"oneflow.comp... | [((847, 879), 'oneflow.compatible.single_client.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (877, 879), True, 'from oneflow.compatible import single_client as flow\n'), ((25336, 25351), 'unittest.main', 'unittest.main', ([], {}), '()\n', (25349, 25351), False, 'import unittest\n'), ((986, 1007), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1005, 1007), True, 'from oneflow.compatible import single_client as flow\n'), ((1139, 1188), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1159, 1188), True, 'from oneflow.compatible import single_client as flow\n'), ((1485, 1506), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1504, 1506), True, 'from oneflow.compatible import single_client as flow\n'), ((1638, 1687), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1658, 1687), True, 'from oneflow.compatible import single_client as flow\n'), ((2064, 2093), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (2090, 2093), True, 'from oneflow.compatible import single_client as flow\n'), ((2116, 2137), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2135, 2137), True, 'from oneflow.compatible import single_client as flow\n'), ((2269, 2318), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2289, 2318), True, 'from oneflow.compatible import single_client as flow\n'), ((1951, 1985), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1960, 1985), False, 'import os\n'), ((2617, 
2638), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2636, 2638), True, 'from oneflow.compatible import single_client as flow\n'), ((2850, 2899), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2870, 2899), True, 'from oneflow.compatible import single_client as flow\n'), ((3201, 3222), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3220, 3222), True, 'from oneflow.compatible import single_client as flow\n'), ((3354, 3403), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3374, 3403), True, 'from oneflow.compatible import single_client as flow\n'), ((3717, 3738), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3736, 3738), True, 'from oneflow.compatible import single_client as flow\n'), ((3870, 3919), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3890, 3919), True, 'from oneflow.compatible import single_client as flow\n'), ((4460, 4481), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (4479, 4481), True, 'from oneflow.compatible import single_client as flow\n'), ((4613, 4662), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4633, 4662), True, 'from oneflow.compatible import single_client as flow\n'), ((4766, 4816), 'numpy.array', 'np.array', (['[0.8659266, 0.7068252]'], {'dtype': 'np.float32'}), '([0.8659266, 0.7068252], dtype=np.float32)\n', (4774, 4816), True, 'import numpy as np\n'), ((5175, 5196), 'oneflow.compatible.single_client.FunctionConfig', 
'flow.FunctionConfig', ([], {}), '()\n', (5194, 5196), True, 'from oneflow.compatible import single_client as flow\n'), ((5328, 5377), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (5348, 5377), True, 'from oneflow.compatible import single_client as flow\n'), ((5947, 5968), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5966, 5968), True, 'from oneflow.compatible import single_client as flow\n'), ((6100, 6149), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (6120, 6149), True, 'from oneflow.compatible import single_client as flow\n'), ((6253, 6303), 'numpy.array', 'np.array', (['[1.731261, 0.99920404]'], {'dtype': 'np.float32'}), '([1.731261, 0.99920404], dtype=np.float32)\n', (6261, 6303), True, 'import numpy as np\n'), ((6695, 6716), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6714, 6716), True, 'from oneflow.compatible import single_client as flow\n'), ((6848, 6897), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (6868, 6897), True, 'from oneflow.compatible import single_client as flow\n'), ((7449, 7470), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (7468, 7470), True, 'from oneflow.compatible import single_client as flow\n'), ((7602, 7651), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (7622, 7651), True, 'from oneflow.compatible import single_client as flow\n'), ((7987, 8008), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8006, 8008), True, 'from 
oneflow.compatible import single_client as flow\n'), ((8140, 8189), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (8160, 8189), True, 'from oneflow.compatible import single_client as flow\n'), ((8743, 8764), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (8762, 8764), True, 'from oneflow.compatible import single_client as flow\n'), ((8896, 8945), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (8916, 8945), True, 'from oneflow.compatible import single_client as flow\n'), ((9486, 9507), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (9505, 9507), True, 'from oneflow.compatible import single_client as flow\n'), ((9639, 9688), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (9659, 9688), True, 'from oneflow.compatible import single_client as flow\n'), ((9996, 10017), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10015, 10017), True, 'from oneflow.compatible import single_client as flow\n'), ((10149, 10198), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (10169, 10198), True, 'from oneflow.compatible import single_client as flow\n'), ((10509, 10530), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10528, 10530), True, 'from oneflow.compatible import single_client as flow\n'), ((10662, 10711), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (10682, 10711), True, 'from 
oneflow.compatible import single_client as flow\n'), ((11023, 11044), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (11042, 11044), True, 'from oneflow.compatible import single_client as flow\n'), ((11176, 11225), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (11196, 11225), True, 'from oneflow.compatible import single_client as flow\n'), ((11545, 11566), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (11564, 11566), True, 'from oneflow.compatible import single_client as flow\n'), ((11698, 11747), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (11718, 11747), True, 'from oneflow.compatible import single_client as flow\n'), ((12090, 12111), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (12109, 12111), True, 'from oneflow.compatible import single_client as flow\n'), ((12243, 12292), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (12263, 12292), True, 'from oneflow.compatible import single_client as flow\n'), ((12400, 12454), 'numpy.array', 'np.array', (['[0, 0.5, 1, 4.5, -4, -5.6]'], {'dtype': 'np.float32'}), '([0, 0.5, 1, 4.5, -4, -5.6], dtype=np.float32)\n', (12408, 12454), True, 'import numpy as np\n'), ((12619, 12640), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (12638, 12640), True, 'from oneflow.compatible import single_client as flow\n'), ((12772, 12821), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (12792, 12821), True, 'from oneflow.compatible import 
single_client as flow\n'), ((12923, 12965), 'numpy.array', 'np.array', (['[0, 0.5, 1, 5]'], {'dtype': 'np.float32'}), '([0, 0.5, 1, 5], dtype=np.float32)\n', (12931, 12965), True, 'import numpy as np\n'), ((13128, 13149), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (13147, 13149), True, 'from oneflow.compatible import single_client as flow\n'), ((13281, 13330), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (13301, 13330), True, 'from oneflow.compatible import single_client as flow\n'), ((13436, 13478), 'numpy.array', 'np.array', (['[0, 0.5, 1, 5]'], {'dtype': 'np.float32'}), '([0, 0.5, 1, 5], dtype=np.float32)\n', (13444, 13478), True, 'import numpy as np\n'), ((13651, 13672), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (13670, 13672), True, 'from oneflow.compatible import single_client as flow\n'), ((13804, 13853), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (13824, 13853), True, 'from oneflow.compatible import single_client as flow\n'), ((14300, 14321), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (14319, 14321), True, 'from oneflow.compatible import single_client as flow\n'), ((14453, 14502), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (14473, 14502), True, 'from oneflow.compatible import single_client as flow\n'), ((14849, 14870), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (14868, 14870), True, 'from oneflow.compatible import single_client as flow\n'), ((15002, 15051), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], 
{'function_config': 'func_config'}), '(function_config=func_config)\n', (15022, 15051), True, 'from oneflow.compatible import single_client as flow\n'), ((15416, 15437), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (15435, 15437), True, 'from oneflow.compatible import single_client as flow\n'), ((15569, 15618), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (15589, 15618), True, 'from oneflow.compatible import single_client as flow\n'), ((15746, 15790), 'numpy.array', 'np.array', (['[2.0, 0.5, 0, 1]'], {'dtype': 'np.float32'}), '([2.0, 0.5, 0, 1], dtype=np.float32)\n', (15754, 15790), True, 'import numpy as np\n'), ((15805, 15851), 'numpy.array', 'np.array', (['[0.5, 2, 0.0, 1.0]'], {'dtype': 'np.float32'}), '([0.5, 2, 0.0, 1.0], dtype=np.float32)\n', (15813, 15851), True, 'import numpy as np\n'), ((16019, 16040), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (16038, 16040), True, 'from oneflow.compatible import single_client as flow\n'), ((16172, 16221), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (16192, 16221), True, 'from oneflow.compatible import single_client as flow\n'), ((16572, 16593), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (16591, 16593), True, 'from oneflow.compatible import single_client as flow\n'), ((16725, 16774), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (16745, 16774), True, 'from oneflow.compatible import single_client as flow\n'), ((16878, 16965), 'numpy.array', 'np.array', (['[0.5000001, -1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.5, 3.5]'], {'dtype': 'np.float32'}), '([0.5000001, -1.7, 
-1.5, -0.2, 0.2, 1.5, 1.7, 2.5, 3.5], dtype=np.\n float32)\n', (16886, 16965), True, 'import numpy as np\n'), ((16997, 17073), 'numpy.array', 'np.array', (['[1.0, -2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 2.0, 4.0]'], {'dtype': 'np.float32'}), '([1.0, -2.0, -2.0, -0.0, 0.0, 2.0, 2.0, 2.0, 4.0], dtype=np.float32)\n', (17005, 17073), True, 'import numpy as np\n'), ((17253, 17274), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (17272, 17274), True, 'from oneflow.compatible import single_client as flow\n'), ((17406, 17455), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (17426, 17455), True, 'from oneflow.compatible import single_client as flow\n'), ((17811, 17832), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (17830, 17832), True, 'from oneflow.compatible import single_client as flow\n'), ((17964, 18013), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (17984, 18013), True, 'from oneflow.compatible import single_client as flow\n'), ((18119, 18173), 'numpy.array', 'np.array', (['[0.9, 2.5, 2.3, 1.5, -4.5]'], {'dtype': 'np.float32'}), '([0.9, 2.5, 2.3, 1.5, -4.5], dtype=np.float32)\n', (18127, 18173), True, 'import numpy as np\n'), ((18188, 18242), 'numpy.array', 'np.array', (['[1.0, 2.0, 2.0, 2.0, -4.0]'], {'dtype': 'np.float32'}), '([1.0, 2.0, 2.0, 2.0, -4.0], dtype=np.float32)\n', (18196, 18242), True, 'import numpy as np\n'), ((18401, 18422), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (18420, 18422), True, 'from oneflow.compatible import single_client as flow\n'), ((18554, 18603), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), 
'(function_config=func_config)\n', (18574, 18603), True, 'from oneflow.compatible import single_client as flow\n'), ((18953, 18974), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (18972, 18974), True, 'from oneflow.compatible import single_client as flow\n'), ((19106, 19155), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (19126, 19155), True, 'from oneflow.compatible import single_client as flow\n'), ((19516, 19537), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (19535, 19537), True, 'from oneflow.compatible import single_client as flow\n'), ((19669, 19718), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (19689, 19718), True, 'from oneflow.compatible import single_client as flow\n'), ((20064, 20085), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (20083, 20085), True, 'from oneflow.compatible import single_client as flow\n'), ((20217, 20266), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (20237, 20266), True, 'from oneflow.compatible import single_client as flow\n'), ((20712, 20741), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (20738, 20741), True, 'from oneflow.compatible import single_client as flow\n'), ((20764, 20785), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (20783, 20785), True, 'from oneflow.compatible import single_client as flow\n'), ((20917, 20966), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), 
'(function_config=func_config)\n', (20937, 20966), True, 'from oneflow.compatible import single_client as flow\n'), ((20592, 20626), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (20601, 20626), False, 'import os\n'), ((21322, 21343), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (21341, 21343), True, 'from oneflow.compatible import single_client as flow\n'), ((21475, 21524), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (21495, 21524), True, 'from oneflow.compatible import single_client as flow\n'), ((22090, 22111), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (22109, 22111), True, 'from oneflow.compatible import single_client as flow\n'), ((22243, 22292), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (22263, 22292), True, 'from oneflow.compatible import single_client as flow\n'), ((22728, 22749), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (22747, 22749), True, 'from oneflow.compatible import single_client as flow\n'), ((22881, 22930), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (22901, 22930), True, 'from oneflow.compatible import single_client as flow\n'), ((23268, 23289), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (23287, 23289), True, 'from oneflow.compatible import single_client as flow\n'), ((23421, 23470), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (23441, 23470), True, 'from oneflow.compatible 
import single_client as flow\n'), ((23809, 23830), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (23828, 23830), True, 'from oneflow.compatible import single_client as flow\n'), ((23962, 24011), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (23982, 24011), True, 'from oneflow.compatible import single_client as flow\n'), ((24589, 24610), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (24608, 24610), True, 'from oneflow.compatible import single_client as flow\n'), ((24742, 24791), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (24762, 24791), True, 'from oneflow.compatible import single_client as flow\n'), ((1099, 1127), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1125, 1127), True, 'from oneflow.compatible import single_client as flow\n'), ((1262, 1278), 'oneflow.compatible.single_client.math.abs', 'flow.math.abs', (['a'], {}), '(a)\n', (1275, 1278), True, 'from oneflow.compatible import single_client as flow\n'), ((1598, 1626), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1624, 1626), True, 'from oneflow.compatible import single_client as flow\n'), ((1762, 1779), 'oneflow.compatible.single_client.math.acos', 'flow.math.acos', (['a'], {}), '(a)\n', (1776, 1779), True, 'from oneflow.compatible import single_client as flow\n'), ((2229, 2257), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2255, 2257), True, 'from oneflow.compatible import single_client as flow\n'), ((2393, 2410), 'oneflow.compatible.single_client.math.acos', 'flow.math.acos', (['a'], {}), '(a)\n', (2407, 2410), True, 
'from oneflow.compatible import single_client as flow\n'), ((2733, 2767), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (2753, 2767), True, 'from oneflow.compatible import single_client as flow\n'), ((2810, 2838), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (2836, 2838), True, 'from oneflow.compatible import single_client as flow\n'), ((2974, 2991), 'oneflow.compatible.single_client.math.acos', 'flow.math.acos', (['a'], {}), '(a)\n', (2988, 2991), True, 'from oneflow.compatible import single_client as flow\n'), ((3314, 3342), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3340, 3342), True, 'from oneflow.compatible import single_client as flow\n'), ((3497, 3514), 'oneflow.compatible.single_client.math.acos', 'flow.math.acos', (['a'], {}), '(a)\n', (3511, 3514), True, 'from oneflow.compatible import single_client as flow\n'), ((3830, 3858), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3856, 3858), True, 'from oneflow.compatible import single_client as flow\n'), ((3993, 4011), 'oneflow.compatible.single_client.math.acosh', 'flow.math.acosh', (['a'], {}), '(a)\n', (4008, 4011), True, 'from oneflow.compatible import single_client as flow\n'), ((4573, 4601), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (4599, 4601), True, 'from oneflow.compatible import single_client as flow\n'), ((4735, 4752), 'oneflow.compatible.single_client.math.asin', 'flow.math.asin', (['a'], {}), '(a)\n', (4749, 4752), True, 'from oneflow.compatible import single_client as flow\n'), ((5288, 5316), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (5314, 5316), True, 'from oneflow.compatible import single_client as 
flow\n'), ((5451, 5469), 'oneflow.compatible.single_client.math.asinh', 'flow.math.asinh', (['a'], {}), '(a)\n', (5466, 5469), True, 'from oneflow.compatible import single_client as flow\n'), ((6060, 6088), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (6086, 6088), True, 'from oneflow.compatible import single_client as flow\n'), ((6222, 6239), 'oneflow.compatible.single_client.math.atan', 'flow.math.atan', (['a'], {}), '(a)\n', (6236, 6239), True, 'from oneflow.compatible import single_client as flow\n'), ((6808, 6836), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (6834, 6836), True, 'from oneflow.compatible import single_client as flow\n'), ((6971, 6989), 'oneflow.compatible.single_client.math.atanh', 'flow.math.atanh', (['a'], {}), '(a)\n', (6986, 6989), True, 'from oneflow.compatible import single_client as flow\n'), ((7562, 7590), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (7588, 7590), True, 'from oneflow.compatible import single_client as flow\n'), ((7724, 7741), 'oneflow.compatible.single_client.math.ceil', 'flow.math.ceil', (['a'], {}), '(a)\n', (7738, 7741), True, 'from oneflow.compatible import single_client as flow\n'), ((8100, 8128), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (8126, 8128), True, 'from oneflow.compatible import single_client as flow\n'), ((8261, 8277), 'oneflow.compatible.single_client.math.cos', 'flow.math.cos', (['a'], {}), '(a)\n', (8274, 8277), True, 'from oneflow.compatible import single_client as flow\n'), ((8856, 8884), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (8882, 8884), True, 'from oneflow.compatible import single_client as flow\n'), ((9018, 9035), 'oneflow.compatible.single_client.math.cosh', 'flow.math.cosh', (['a'], 
{}), '(a)\n', (9032, 9035), True, 'from oneflow.compatible import single_client as flow\n'), ((9599, 9627), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (9625, 9627), True, 'from oneflow.compatible import single_client as flow\n'), ((9760, 9776), 'oneflow.compatible.single_client.math.erf', 'flow.math.erf', (['a'], {}), '(a)\n', (9773, 9776), True, 'from oneflow.compatible import single_client as flow\n'), ((10109, 10137), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (10135, 10137), True, 'from oneflow.compatible import single_client as flow\n'), ((10271, 10288), 'oneflow.compatible.single_client.math.erfc', 'flow.math.erfc', (['a'], {}), '(a)\n', (10285, 10288), True, 'from oneflow.compatible import single_client as flow\n'), ((10622, 10650), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (10648, 10650), True, 'from oneflow.compatible import single_client as flow\n'), ((10783, 10799), 'oneflow.compatible.single_client.math.exp', 'flow.math.exp', (['a'], {}), '(a)\n', (10796, 10799), True, 'from oneflow.compatible import single_client as flow\n'), ((11136, 11164), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (11162, 11164), True, 'from oneflow.compatible import single_client as flow\n'), ((11299, 11317), 'oneflow.compatible.single_client.math.expm1', 'flow.math.expm1', (['a'], {}), '(a)\n', (11314, 11317), True, 'from oneflow.compatible import single_client as flow\n'), ((11658, 11686), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (11684, 11686), True, 'from oneflow.compatible import single_client as flow\n'), ((11821, 11839), 'oneflow.compatible.single_client.math.floor', 'flow.math.floor', (['a'], {}), '(a)\n', (11836, 11839), True, 'from oneflow.compatible import 
single_client as flow\n'), ((12203, 12231), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (12229, 12231), True, 'from oneflow.compatible import single_client as flow\n'), ((12367, 12386), 'oneflow.compatible.single_client.math.lgamma', 'flow.math.lgamma', (['a'], {}), '(a)\n', (12383, 12386), True, 'from oneflow.compatible import single_client as flow\n'), ((12732, 12760), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (12758, 12760), True, 'from oneflow.compatible import single_client as flow\n'), ((12893, 12909), 'oneflow.compatible.single_client.math.log', 'flow.math.log', (['a'], {}), '(a)\n', (12906, 12909), True, 'from oneflow.compatible import single_client as flow\n'), ((13241, 13269), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (13267, 13269), True, 'from oneflow.compatible import single_client as flow\n'), ((13404, 13422), 'oneflow.compatible.single_client.math.log1p', 'flow.math.log1p', (['a'], {}), '(a)\n', (13419, 13422), True, 'from oneflow.compatible import single_client as flow\n'), ((13764, 13792), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (13790, 13792), True, 'from oneflow.compatible import single_client as flow\n'), ((13932, 13956), 'oneflow.compatible.single_client.math.log_sigmoid', 'flow.math.log_sigmoid', (['a'], {}), '(a)\n', (13953, 13956), True, 'from oneflow.compatible import single_client as flow\n'), ((14413, 14441), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (14439, 14441), True, 'from oneflow.compatible import single_client as flow\n'), ((14579, 14600), 'oneflow.compatible.single_client.math.negative', 'flow.math.negative', (['a'], {}), '(a)\n', (14597, 14600), True, 'from oneflow.compatible import single_client as flow\n'), 
((14754, 14788), 'numpy.allclose', 'np.allclose', (['y', '(-x)'], {'equal_nan': '(True)'}), '(y, -x, equal_nan=True)\n', (14765, 14788), True, 'import numpy as np\n'), ((14962, 14990), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (14988, 14990), True, 'from oneflow.compatible import single_client as flow\n'), ((15130, 15153), 'oneflow.compatible.single_client.math.reciprocal', 'flow.math.reciprocal', (['a'], {}), '(a)\n', (15150, 15153), True, 'from oneflow.compatible import single_client as flow\n'), ((15309, 15348), 'numpy.allclose', 'np.allclose', (['y', '(1.0 / x)'], {'equal_nan': '(True)'}), '(y, 1.0 / x, equal_nan=True)\n', (15320, 15348), True, 'import numpy as np\n'), ((15529, 15557), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (15555, 15557), True, 'from oneflow.compatible import single_client as flow\n'), ((15702, 15732), 'oneflow.compatible.single_client.math.reciprocal_no_nan', 'flow.math.reciprocal_no_nan', (['a'], {}), '(a)\n', (15729, 15732), True, 'from oneflow.compatible import single_client as flow\n'), ((15929, 15964), 'numpy.allclose', 'np.allclose', (['y', 'out'], {'equal_nan': '(True)'}), '(y, out, equal_nan=True)\n', (15940, 15964), True, 'import numpy as np\n'), ((16132, 16160), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (16158, 16160), True, 'from oneflow.compatible import single_client as flow\n'), ((16294, 16311), 'oneflow.compatible.single_client.math.rint', 'flow.math.rint', (['a'], {}), '(a)\n', (16308, 16311), True, 'from oneflow.compatible import single_client as flow\n'), ((16685, 16713), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (16711, 16713), True, 'from oneflow.compatible import single_client as flow\n'), ((16847, 16864), 'oneflow.compatible.single_client.math.rint', 
'flow.math.rint', (['a'], {}), '(a)\n', (16861, 16864), True, 'from oneflow.compatible import single_client as flow\n'), ((17162, 17197), 'numpy.allclose', 'np.allclose', (['y', 'out'], {'equal_nan': '(True)'}), '(y, out, equal_nan=True)\n', (17173, 17197), True, 'import numpy as np\n'), ((17366, 17394), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (17392, 17394), True, 'from oneflow.compatible import single_client as flow\n'), ((17529, 17547), 'oneflow.compatible.single_client.math.round', 'flow.math.round', (['a'], {}), '(a)\n', (17544, 17547), True, 'from oneflow.compatible import single_client as flow\n'), ((17924, 17952), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (17950, 17952), True, 'from oneflow.compatible import single_client as flow\n'), ((18087, 18105), 'oneflow.compatible.single_client.math.round', 'flow.math.round', (['a'], {}), '(a)\n', (18102, 18105), True, 'from oneflow.compatible import single_client as flow\n'), ((18310, 18345), 'numpy.allclose', 'np.allclose', (['y', 'out'], {'equal_nan': '(True)'}), '(y, out, equal_nan=True)\n', (18321, 18345), True, 'import numpy as np\n'), ((18514, 18542), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (18540, 18542), True, 'from oneflow.compatible import single_client as flow\n'), ((18677, 18695), 'oneflow.compatible.single_client.math.rsqrt', 'flow.math.rsqrt', (['a'], {}), '(a)\n', (18692, 18695), True, 'from oneflow.compatible import single_client as flow\n'), ((19066, 19094), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (19092, 19094), True, 'from oneflow.compatible import single_client as flow\n'), ((19231, 19254), 'oneflow.compatible.single_client.math.sigmoid_v2', 'flow.math.sigmoid_v2', (['a'], {}), '(a)\n', (19251, 19254), True, 'from oneflow.compatible import 
single_client as flow\n'), ((19629, 19657), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (19655, 19657), True, 'from oneflow.compatible import single_client as flow\n'), ((19791, 19808), 'oneflow.compatible.single_client.math.sign', 'flow.math.sign', (['a'], {}), '(a)\n', (19805, 19808), True, 'from oneflow.compatible import single_client as flow\n'), ((20177, 20205), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (20203, 20205), True, 'from oneflow.compatible import single_client as flow\n'), ((20358, 20375), 'oneflow.compatible.single_client.math.sign', 'flow.math.sign', (['a'], {}), '(a)\n', (20372, 20375), True, 'from oneflow.compatible import single_client as flow\n'), ((20877, 20905), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (20903, 20905), True, 'from oneflow.compatible import single_client as flow\n'), ((21058, 21075), 'oneflow.compatible.single_client.math.sign', 'flow.math.sign', (['a'], {}), '(a)\n', (21072, 21075), True, 'from oneflow.compatible import single_client as flow\n'), ((21435, 21463), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (21461, 21463), True, 'from oneflow.compatible import single_client as flow\n'), ((21596, 21612), 'oneflow.compatible.single_client.math.sin', 'flow.math.sin', (['a'], {}), '(a)\n', (21609, 21612), True, 'from oneflow.compatible import single_client as flow\n'), ((22203, 22231), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (22229, 22231), True, 'from oneflow.compatible import single_client as flow\n'), ((22369, 22390), 'oneflow.compatible.single_client.math.softplus', 'flow.math.softplus', (['a'], {}), '(a)\n', (22387, 22390), True, 'from oneflow.compatible import single_client as flow\n'), ((22841, 22869), 
'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (22867, 22869), True, 'from oneflow.compatible import single_client as flow\n'), ((23003, 23020), 'oneflow.compatible.single_client.math.sqrt', 'flow.math.sqrt', (['a'], {}), '(a)\n', (23017, 23020), True, 'from oneflow.compatible import single_client as flow\n'), ((23381, 23409), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (23407, 23409), True, 'from oneflow.compatible import single_client as flow\n'), ((23545, 23564), 'oneflow.compatible.single_client.math.square', 'flow.math.square', (['a'], {}), '(a)\n', (23561, 23564), True, 'from oneflow.compatible import single_client as flow\n'), ((23718, 23755), 'numpy.allclose', 'np.allclose', (['y', '(x * x)'], {'equal_nan': '(True)'}), '(y, x * x, equal_nan=True)\n', (23729, 23755), True, 'import numpy as np\n'), ((23922, 23950), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (23948, 23950), True, 'from oneflow.compatible import single_client as flow\n'), ((24083, 24099), 'oneflow.compatible.single_client.math.tan', 'flow.math.tan', (['a'], {}), '(a)\n', (24096, 24099), True, 'from oneflow.compatible import single_client as flow\n'), ((24702, 24730), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (24728, 24730), True, 'from oneflow.compatible import single_client as flow\n'), ((24864, 24881), 'oneflow.compatible.single_client.math.tanh', 'flow.math.tanh', (['a'], {}), '(a)\n', (24878, 24881), True, 'from oneflow.compatible import single_client as flow\n'), ((1211, 1240), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1232, 1240), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1292, 1312), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], 
{}), '(5, 2)\n', (1306, 1312), True, 'import numpy as np\n'), ((1415, 1429), 'numpy.absolute', 'np.absolute', (['x'], {}), '(x)\n', (1426, 1429), True, 'import numpy as np\n'), ((1711, 1740), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (1732, 1740), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((1793, 1813), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (1807, 1813), True, 'import numpy as np\n'), ((1914, 1926), 'numpy.arccos', 'np.arccos', (['x'], {}), '(x)\n', (1923, 1926), True, 'import numpy as np\n'), ((2342, 2371), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (2363, 2371), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2424, 2444), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (2438, 2444), True, 'import numpy as np\n'), ((2545, 2557), 'numpy.arccos', 'np.arccos', (['x'], {}), '(x)\n', (2554, 2557), True, 'import numpy as np\n'), ((2923, 2952), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {}), '((5, 2))\n', (2944, 2952), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3005, 3025), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3019, 3025), True, 'import numpy as np\n'), ((3126, 3138), 'numpy.arccos', 'np.arccos', (['x'], {}), '(x)\n', (3135, 3138), True, 'import numpy as np\n'), ((3427, 3475), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5, 2)'], {'dtype': 'flow.double'}), '((5, 2), dtype=flow.double)\n', (3448, 3475), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((3528, 3548), 'numpy.random.rand', 'np.random.rand', (['(5)', '(2)'], {}), '(5, 2)\n', (3542, 3548), True, 'import numpy as np\n'), ((3648, 3660), 'numpy.arccos', 'np.arccos', 
(['x'], {}), '(x)\n', (3657, 3660), True, 'import numpy as np\n'), ((3944, 3971), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(7,)'], {}), '((7,))\n', (3965, 3971), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((4180, 4193), 'numpy.arccosh', 'np.arccosh', (['x'], {}), '(x)\n', (4190, 4193), True, 'import numpy as np\n'), ((4224, 4273), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(1.0)', 'high': '(100.0)', 'size': '(7,)'}), '(low=1.0, high=100.0, size=(7,))\n', (4241, 4273), True, 'import numpy as np\n'), ((4375, 4388), 'numpy.arccosh', 'np.arccosh', (['x'], {}), '(x)\n', (4385, 4388), True, 'import numpy as np\n'), ((4686, 4713), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2,)'], {}), '((2,))\n', (4707, 4713), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((4898, 4910), 'numpy.arcsin', 'np.arcsin', (['x'], {}), '(x)\n', (4907, 4910), True, 'import numpy as np\n'), ((4941, 4989), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-1.0)', 'high': '(1.0)', 'size': '(2,)'}), '(low=-1.0, high=1.0, size=(2,))\n', (4958, 4989), True, 'import numpy as np\n'), ((5090, 5102), 'numpy.arcsin', 'np.arcsin', (['x'], {}), '(x)\n', (5099, 5102), True, 'import numpy as np\n'), ((5402, 5429), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (5423, 5429), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((5688, 5701), 'numpy.arcsinh', 'np.arcsinh', (['x'], {}), '(x)\n', (5698, 5701), True, 'import numpy as np\n'), ((5732, 5760), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (5749, 5760), True, 'import numpy as np\n'), ((5862, 5875), 'numpy.arcsinh', 'np.arcsinh', (['x'], {}), '(x)\n', (5872, 5875), True, 'import numpy as np\n'), ((6173, 6200), 
'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(2,)'], {}), '((2,))\n', (6194, 6200), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((6385, 6397), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (6394, 6397), True, 'import numpy as np\n'), ((6455, 6509), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-pi / 2)', 'high': '(pi / 2)', 'size': '(2,)'}), '(low=-pi / 2, high=pi / 2, size=(2,))\n', (6472, 6509), True, 'import numpy as np\n'), ((6610, 6622), 'numpy.arctan', 'np.arctan', (['x'], {}), '(x)\n', (6619, 6622), True, 'import numpy as np\n'), ((6922, 6949), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (6943, 6949), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((7190, 7203), 'numpy.arctanh', 'np.arctanh', (['x'], {}), '(x)\n', (7200, 7203), True, 'import numpy as np\n'), ((7234, 7262), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (7251, 7262), True, 'import numpy as np\n'), ((7364, 7377), 'numpy.arctanh', 'np.arctanh', (['x'], {}), '(x)\n', (7374, 7377), True, 'import numpy as np\n'), ((7675, 7702), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (7696, 7702), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((7755, 7805), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (7772, 7805), True, 'import numpy as np\n'), ((7906, 7916), 'numpy.ceil', 'np.ceil', (['x'], {}), '(x)\n', (7913, 7916), True, 'import numpy as np\n'), ((8212, 8239), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (8233, 8239), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((8494, 8503), 'numpy.cos', 'np.cos', 
(['x'], {}), '(x)\n', (8500, 8503), True, 'import numpy as np\n'), ((8534, 8562), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (8551, 8562), True, 'import numpy as np\n'), ((8662, 8671), 'numpy.cos', 'np.cos', (['x'], {}), '(x)\n', (8668, 8671), True, 'import numpy as np\n'), ((8969, 8996), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (8990, 8996), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((9235, 9245), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (9242, 9245), True, 'import numpy as np\n'), ((9276, 9304), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (9293, 9304), True, 'import numpy as np\n'), ((9405, 9415), 'numpy.cosh', 'np.cosh', (['x'], {}), '(x)\n', (9412, 9415), True, 'import numpy as np\n'), ((9711, 9738), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (9732, 9738), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((9790, 9818), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (9807, 9818), True, 'import numpy as np\n'), ((9918, 9924), 'scipy.special.erf', 'erf', (['x'], {}), '(x)\n', (9921, 9924), False, 'from scipy.special import erf, erfc, gammaln\n'), ((10222, 10249), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (10243, 10249), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((10302, 10330), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (10319, 10330), True, 'import numpy as np\n'), ((10431, 10438), 'scipy.special.erfc', 'erfc', (['x'], {}), '(x)\n', (10435, 10438), False, 'from scipy.special import erf, erfc, gammaln\n'), ((10734, 10761), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 
'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (10755, 10761), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((10813, 10841), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (10830, 10841), True, 'import numpy as np\n'), ((10941, 10950), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (10947, 10950), True, 'import numpy as np\n'), ((11250, 11277), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (11271, 11277), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((11331, 11359), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(8,)'}), '(size=(8,))\n', (11348, 11359), True, 'import numpy as np\n'), ((11461, 11472), 'numpy.expm1', 'np.expm1', (['x'], {}), '(x)\n', (11469, 11472), True, 'import numpy as np\n'), ((11772, 11799), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (11793, 11799), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((11853, 11903), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (11870, 11903), True, 'import numpy as np\n'), ((12005, 12016), 'numpy.floor', 'np.floor', (['x'], {}), '(x)\n', (12013, 12016), True, 'import numpy as np\n'), ((12318, 12345), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(6,)'], {}), '((6,))\n', (12339, 12345), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((12538, 12548), 'scipy.special.gammaln', 'gammaln', (['x'], {}), '(x)\n', (12545, 12548), False, 'from scipy.special import erf, erfc, gammaln\n'), ((12844, 12871), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(4,)'], {}), '((4,))\n', (12865, 12871), True, 'from oneflow.compatible.single_client import typing as 
oft\n'), ((13046, 13055), 'numpy.log', 'np.log', (['x'], {}), '(x)\n', (13052, 13055), True, 'import numpy as np\n'), ((13355, 13382), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(4,)'], {}), '((4,))\n', (13376, 13382), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((13561, 13572), 'numpy.log1p', 'np.log1p', (['x'], {}), '(x)\n', (13569, 13572), True, 'import numpy as np\n'), ((13883, 13910), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (13904, 13910), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((13970, 14018), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-5.0)', 'high': '(5.0)', 'size': '(8,)'}), '(low=-5.0, high=5.0, size=(8,))\n', (13987, 14018), True, 'import numpy as np\n'), ((14530, 14557), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (14551, 14557), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((14614, 14664), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (14631, 14664), True, 'import numpy as np\n'), ((15081, 15108), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (15102, 15108), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((15167, 15217), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (15184, 15217), True, 'import numpy as np\n'), ((15653, 15680), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(4,)'], {}), '((4,))\n', (15674, 15680), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((16245, 16272), 
'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (16266, 16272), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((16325, 16375), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (16342, 16375), True, 'import numpy as np\n'), ((16476, 16486), 'numpy.rint', 'np.rint', (['x'], {}), '(x)\n', (16483, 16486), True, 'import numpy as np\n'), ((16798, 16825), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(9,)'], {}), '((9,))\n', (16819, 16825), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((17480, 17507), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (17501, 17507), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((17561, 17611), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (17578, 17611), True, 'import numpy as np\n'), ((17713, 17724), 'numpy.round', 'np.round', (['x'], {}), '(x)\n', (17721, 17724), True, 'import numpy as np\n'), ((18038, 18065), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(5,)'], {}), '((5,))\n', (18059, 18065), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((18628, 18655), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (18649, 18655), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((18709, 18759), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (18726, 18759), True, 'import numpy as np\n'), ((19182, 19209), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 
'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (19203, 19209), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((19268, 19316), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-2.0)', 'high': '(2.0)', 'size': '(8,)'}), '(low=-2.0, high=2.0, size=(8,))\n', (19285, 19316), True, 'import numpy as np\n'), ((19742, 19769), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (19763, 19769), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((19822, 19874), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (19839, 19874), True, 'import numpy as np\n'), ((19975, 19985), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (19982, 19985), True, 'import numpy as np\n'), ((20290, 20336), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {'dtype': 'flow.double'}), '((8,), dtype=flow.double)\n', (20311, 20336), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((20389, 20441), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (20406, 20441), True, 'import numpy as np\n'), ((20541, 20551), 'numpy.sign', 'np.sign', (['x'], {}), '(x)\n', (20548, 20551), True, 'import numpy as np\n'), ((20990, 21036), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {'dtype': 'flow.double'}), '((8,), dtype=flow.double)\n', (21011, 21036), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((21089, 21141), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (21106, 21141), True, 'import numpy as np\n'), ((21241, 21251), 'numpy.sign', 'np.sign', (['x'], {}), 
'(x)\n', (21248, 21251), True, 'import numpy as np\n'), ((21547, 21574), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (21568, 21574), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((21813, 21822), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (21819, 21822), True, 'import numpy as np\n'), ((21853, 21905), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (21870, 21905), True, 'import numpy as np\n'), ((22005, 22014), 'numpy.sin', 'np.sin', (['x'], {}), '(x)\n', (22011, 22014), True, 'import numpy as np\n'), ((22320, 22347), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (22341, 22347), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((22404, 22454), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-10.0)', 'high': '(10.0)', 'size': '(8,)'}), '(low=-10.0, high=10.0, size=(8,))\n', (22421, 22454), True, 'import numpy as np\n'), ((22954, 22981), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (22975, 22981), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((23034, 23083), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(0.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=0.0, high=100.0, size=(8,))\n', (23051, 23083), True, 'import numpy as np\n'), ((23184, 23194), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (23191, 23194), True, 'import numpy as np\n'), ((23496, 23523), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (23517, 23523), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((23578, 23630), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': 
'(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (23595, 23630), True, 'import numpy as np\n'), ((24034, 24061), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (24055, 24061), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((24316, 24325), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (24322, 24325), True, 'import numpy as np\n'), ((24356, 24408), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (24373, 24408), True, 'import numpy as np\n'), ((24508, 24517), 'numpy.tan', 'np.tan', (['x'], {}), '(x)\n', (24514, 24517), True, 'import numpy as np\n'), ((24815, 24842), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(8,)'], {}), '((8,))\n', (24836, 24842), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((25080, 25090), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (25087, 25090), True, 'import numpy as np\n'), ((25121, 25173), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-100.0)', 'high': '(100.0)', 'size': '(8,)'}), '(low=-100.0, high=100.0, size=(8,))\n', (25138, 25173), True, 'import numpy as np\n'), ((25274, 25284), 'numpy.tanh', 'np.tanh', (['x'], {}), '(x)\n', (25281, 25284), True, 'import numpy as np\n'), ((18865, 18875), 'numpy.sqrt', 'np.sqrt', (['x'], {}), '(x)\n', (18872, 18875), True, 'import numpy as np\n'), ((19433, 19443), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (19439, 19443), True, 'import numpy as np\n'), ((22596, 22605), 'numpy.exp', 'np.exp', (['x'], {}), '(x)\n', (22602, 22605), True, 'import numpy as np\n'), ((14167, 14177), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (14173, 14177), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@flow.unittest.skip_unless_1n1d()
class TestLinspace(flow.unittest.TestCase):
    """Tests for flow.linspace, checked against torch reference values."""

    def _expect_close(test_case, drop_path_rate, depths, expected):
        # Reference values were produced by torch.linspace(0, rate, sum(depths)).
        result = flow.linspace(0, drop_path_rate, sum(depths))
        test_case.assertTrue(np.allclose(result.numpy(), np.array(expected), atol=1e-4))

    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_int_with_random_data(test_case):
        # Random integer endpoints and step count.
        begin = random().to(int)
        stop = begin + random().to(int)
        count = random(0, stop - begin).to(int)
        result = torch.linspace(start=begin, end=stop, steps=count)
        result.to(random_device())
        return result

    @autotest(n=30, auto_backward=False, rtol=1e-5, atol=1e-5, check_graph=True)
    def test_linspace_float_with_random_data(test_case):
        # Random float endpoints with an integer step count.
        begin = random()
        stop = begin + random()
        count = random(0, stop - begin).to(int)
        result = torch.linspace(start=begin, end=stop, steps=count)
        result.to(random_device())
        return result

    def test_global_naive(test_case):
        # A global tensor must carry the requested placement and sbp.
        placement = flow.placement("cpu", ranks=[0])
        sbp = (flow.sbp.broadcast,)
        tensor = flow.linspace(start=0, end=10, steps=2, placement=placement, sbp=sbp)
        test_case.assertEqual(tensor.sbp, sbp)
        test_case.assertEqual(tensor.placement, placement)

    def test_linspace_in_transformer_bug(test_case):
        # Regression cases from transformer drop-path schedules.
        test_case._expect_close(
            0.1,
            [2, 2, 6, 2],
            [0.0000, 0.0091, 0.0182, 0.0273, 0.0364, 0.0455,
             0.0545, 0.0636, 0.0727, 0.0818, 0.0909, 0.1000],
        )
        test_case._expect_close(
            0.2,
            [2, 2, 6, 2],
            [0.0000, 0.0182, 0.0364, 0.0545, 0.0727, 0.0909,
             0.1091, 0.1273, 0.1455, 0.1636, 0.1818, 0.2000],
        )
        test_case._expect_close(
            0.3,
            [2, 2, 18, 2],
            [0.0000, 0.0130, 0.0261, 0.0391, 0.0522, 0.0652,
             0.0783, 0.0913, 0.1043, 0.1174, 0.1304, 0.1435,
             0.1565, 0.1696, 0.1826, 0.1957, 0.2087, 0.2217,
             0.2348, 0.2478, 0.2609, 0.2739, 0.2870, 0.3000],
        )
        test_case._expect_close(
            0.1,
            [2, 2, 18, 2],
            [0.0000, 0.0043, 0.0087, 0.0130, 0.0174, 0.0217,
             0.0261, 0.0304, 0.0348, 0.0391, 0.0435, 0.0478,
             0.0522, 0.0565, 0.0609, 0.0652, 0.0696, 0.0739,
             0.0783, 0.0826, 0.0870, 0.0913, 0.0957, 0.1000],
        )
        test_case._expect_close(
            0.5,
            [2, 2, 18, 2],
            [0.0000, 0.0217, 0.0435, 0.0652, 0.0870, 0.1087,
             0.1304, 0.1522, 0.1739, 0.1957, 0.2174, 0.2391,
             0.2609, 0.2826, 0.3043, 0.3261, 0.3478, 0.3696,
             0.3913, 0.4130, 0.4348, 0.4565, 0.4783, 0.5000],
        )

    def test_linspace_start_equal_end_bug(test_case):
        # start == end must produce a constant sequence, not NaNs.
        result = flow.linspace(0, 0.0, 12).numpy()
        test_case.assertTrue(np.allclose(result, np.zeros(12), atol=1e-4))
# Allow running this test file directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.placement",
"oneflow.linspace"
] | [((848, 880), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (878, 880), True, 'import oneflow as flow\n'), ((6229, 6244), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6242, 6244), False, 'import unittest\n'), ((1770, 1802), 'oneflow.placement', 'flow.placement', (['"""cpu"""'], {'ranks': '[0]'}), "('cpu', ranks=[0])\n", (1784, 1802), True, 'import oneflow as flow\n'), ((1853, 1922), 'oneflow.linspace', 'flow.linspace', ([], {'start': '(0)', 'end': '(10)', 'steps': '(2)', 'placement': 'placement', 'sbp': 'sbp'}), '(start=0, end=10, steps=2, placement=placement, sbp=sbp)\n', (1866, 1922), True, 'import oneflow as flow\n'), ((2225, 2330), 'numpy.array', 'np.array', (['[0.0, 0.0091, 0.0182, 0.0273, 0.0364, 0.0455, 0.0545, 0.0636, 0.0727, \n 0.0818, 0.0909, 0.1]'], {}), '([0.0, 0.0091, 0.0182, 0.0273, 0.0364, 0.0455, 0.0545, 0.0636, \n 0.0727, 0.0818, 0.0909, 0.1])\n', (2233, 2330), True, 'import numpy as np\n'), ((2807, 2912), 'numpy.array', 'np.array', (['[0.0, 0.0182, 0.0364, 0.0545, 0.0727, 0.0909, 0.1091, 0.1273, 0.1455, \n 0.1636, 0.1818, 0.2]'], {}), '([0.0, 0.0182, 0.0364, 0.0545, 0.0727, 0.0909, 0.1091, 0.1273, \n 0.1455, 0.1636, 0.1818, 0.2])\n', (2815, 2912), True, 'import numpy as np\n'), ((3390, 3593), 'numpy.array', 'np.array', (['[0.0, 0.013, 0.0261, 0.0391, 0.0522, 0.0652, 0.0783, 0.0913, 0.1043, 0.1174,\n 0.1304, 0.1435, 0.1565, 0.1696, 0.1826, 0.1957, 0.2087, 0.2217, 0.2348,\n 0.2478, 0.2609, 0.2739, 0.287, 0.3]'], {}), '([0.0, 0.013, 0.0261, 0.0391, 0.0522, 0.0652, 0.0783, 0.0913, \n 0.1043, 0.1174, 0.1304, 0.1435, 0.1565, 0.1696, 0.1826, 0.1957, 0.2087,\n 0.2217, 0.2348, 0.2478, 0.2609, 0.2739, 0.287, 0.3])\n', (3398, 3593), True, 'import numpy as np\n'), ((4273, 4476), 'numpy.array', 'np.array', (['[0.0, 0.0043, 0.0087, 0.013, 0.0174, 0.0217, 0.0261, 0.0304, 0.0348, 0.0391,\n 0.0435, 0.0478, 0.0522, 0.0565, 0.0609, 0.0652, 0.0696, 0.0739, 0.0783,\n 0.0826, 0.087, 0.0913, 0.0957, 0.1]'], {}), 
'([0.0, 0.0043, 0.0087, 0.013, 0.0174, 0.0217, 0.0261, 0.0304, \n 0.0348, 0.0391, 0.0435, 0.0478, 0.0522, 0.0565, 0.0609, 0.0652, 0.0696,\n 0.0739, 0.0783, 0.0826, 0.087, 0.0913, 0.0957, 0.1])\n', (4281, 4476), True, 'import numpy as np\n'), ((5156, 5359), 'numpy.array', 'np.array', (['[0.0, 0.0217, 0.0435, 0.0652, 0.087, 0.1087, 0.1304, 0.1522, 0.1739, 0.1957,\n 0.2174, 0.2391, 0.2609, 0.2826, 0.3043, 0.3261, 0.3478, 0.3696, 0.3913,\n 0.413, 0.4348, 0.4565, 0.4783, 0.5]'], {}), '([0.0, 0.0217, 0.0435, 0.0652, 0.087, 0.1087, 0.1304, 0.1522, \n 0.1739, 0.1957, 0.2174, 0.2391, 0.2609, 0.2826, 0.3043, 0.3261, 0.3478,\n 0.3696, 0.3913, 0.413, 0.4348, 0.4565, 0.4783, 0.5])\n', (5164, 5359), True, 'import numpy as np\n'), ((6022, 6092), 'numpy.array', 'np.array', (['[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]'], {}), '([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0])\n', (6030, 6092), True, 'import numpy as np\n'), ((6147, 6192), 'numpy.allclose', 'np.allclose', (['flow_res', 'torch_res'], {'atol': '(0.0001)'}), '(flow_res, torch_res, atol=0.0001)\n', (6158, 6192), True, 'import numpy as np\n'), ((5967, 5992), 'oneflow.linspace', 'flow.linspace', (['(0)', '(0.0)', '(12)'], {}), '(0, 0.0, 12)\n', (5980, 5992), True, 'import oneflow as flow\n')] |
import numpy as np
import argparse
import cv2
import oneflow as flow
import oneflow.typing as tp
import style_model
def float_list(x):
    """Parse a comma-separated string into a list of floats."""
    return [float(item) for item in x.split(',')]
def load_image(image_path):
    """Read an image file into a contiguous float32 NCHW batch of size one."""
    bgr = cv2.imread(image_path)
    rgb = cv2.cvtColor(bgr, cv2.COLOR_BGR2RGB)
    chw = np.transpose(rgb, (2, 0, 1))
    batched = np.expand_dims(chw, axis=0)
    return np.ascontiguousarray(batched, 'float32')
def recover_image(im):
    """Convert a (possibly batched) CHW RGB float image back to HWC uint8 BGR."""
    hwc = np.transpose(np.squeeze(im), (1, 2, 0))
    bgr = cv2.cvtColor(np.float32(hwc), cv2.COLOR_RGB2BGR)
    return bgr.astype(np.uint8)
def get_predict_config(device_type="gpu", device_num=1):
    """Build a oneflow FunctionConfig for consistent-view prediction.

    Args:
        device_type: device string, e.g. "gpu" or "cpu".
        device_num: number of devices to place the job on.
    """
    config = flow.FunctionConfig()
    config.default_data_type(flow.float32)
    config.default_logical_view(flow.scope.consistent_view())
    placement = flow.scope.placement(device_type, "0:0-{}".format(device_num - 1))
    config.default_placement_scope(placement)
    return config
def main(args):
    """Run neural-style inference on one image and write the stylized result.

    Args:
        args: parsed CLI namespace with input_image_path, output_image_path
            and model_load_dir attributes.
    """
    input_image = load_image(args.input_image_path)
    height = input_image.shape[2]
    width = input_image.shape[3]
    flow.env.init()

    @flow.global_function("predict", get_predict_config())
    def PredictNet(
        image: tp.Numpy.Placeholder((1, 3, height, width), dtype=flow.float32)
    ) -> tp.Numpy:
        # NOTE(review): trainable=True looks unintended for inference — confirm
        # against style_model.styleNet's contract.
        style_out = style_model.styleNet(image, trainable=True)
        return style_out

    flow.load_variables(flow.checkpoint.get(args.model_load_dir))
    import datetime

    start = datetime.datetime.now()
    style_out = PredictNet(input_image)
    elapsed = datetime.datetime.now() - start
    # BUG FIX: timedelta.microseconds only holds the sub-second fraction, so the
    # old `c.microseconds / 1000` under-reported any run longer than one second.
    print(
        "time: %s ms, height: %d, width: %d"
        % (elapsed.total_seconds() * 1000, height, width)
    )
    cv2.imwrite(args.output_image_path, recover_image(style_out))
def get_parser(parser=None):
    """Build (or extend) the argument parser for the neural-style flags.

    Args:
        parser: an existing argparse.ArgumentParser to register the flags on;
            a new parser is created when None.

    Returns:
        The parser with --input_image_path, --output_image_path and
        --model_load_dir registered.
    """
    # BUG FIX: the passed-in parser was previously ignored and always replaced.
    if parser is None:
        parser = argparse.ArgumentParser("flags for neural style")
    parser.add_argument(
        "--input_image_path", type=str, default='test_img/tiger.jpg', help="image path"
    )
    parser.add_argument(
        "--output_image_path", type=str, default='test_img/tiger.jpg', help="image path"
    )
    parser.add_argument(
        "--model_load_dir", type=str, default="", help="model save directory"
    )
    return parser
# Script entry point: parse CLI flags and run the style transfer.
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    main(args)
| [
"oneflow.env.init",
"oneflow.typing.Numpy.Placeholder",
"oneflow.checkpoint.get",
"oneflow.FunctionConfig",
"oneflow.scope.consistent_view"
] | [((217, 239), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (227, 239), False, 'import cv2\n'), ((249, 284), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (261, 284), False, 'import cv2\n'), ((294, 321), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (306, 321), True, 'import numpy as np\n'), ((331, 357), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (345, 357), True, 'import numpy as np\n'), ((369, 404), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (389, 404), True, 'import numpy as np\n'), ((438, 452), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (448, 452), True, 'import numpy as np\n'), ((462, 489), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (474, 489), True, 'import numpy as np\n'), ((654, 675), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (673, 675), True, 'import oneflow as flow\n'), ((1077, 1092), 'oneflow.env.init', 'flow.env.init', ([], {}), '()\n', (1090, 1092), True, 'import oneflow as flow\n'), ((1455, 1478), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1476, 1478), False, 'import datetime\n'), ((1529, 1552), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1550, 1552), False, 'import datetime\n'), ((1769, 1818), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for neural style"""'], {}), "('flags for neural style')\n", (1792, 1818), False, 'import argparse\n'), ((512, 526), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (522, 526), True, 'import numpy as np\n'), ((761, 789), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (787, 789), True, 'import oneflow as flow\n'), ((1288, 1331), 'style_model.styleNet', 'style_model.styleNet', (['image'], {'trainable': '(True)'}), '(image, 
trainable=True)\n', (1308, 1331), False, 'import style_model\n'), ((1384, 1424), 'oneflow.checkpoint.get', 'flow.checkpoint.get', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (1403, 1424), True, 'import oneflow as flow\n'), ((1188, 1251), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, height, width)'], {'dtype': 'flow.float32'}), '((1, 3, height, width), dtype=flow.float32)\n', (1208, 1251), True, 'import oneflow.typing as tp\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
import test_global_storage
from test_util import GenArgList
def compare_with_tensorflow(device_type, params_case, dilations, data_format):
    """Compare oneflow conv2d_transpose forward/backward results with TensorFlow.

    Args:
        device_type: device for the oneflow job; only "gpu" is accepted here.
        params_case: (input_shape, output_shape, padding, strides, kernel_size).
        dilations: dilation rate for the oneflow op (the TF reference uses its
            default dilation).
        data_format: "NCHW" or "NHWC". The TF reference always runs in NHWC,
            so NCHW tensors are transposed before comparison.
    """
    input_shape, output_shape, padding, strides, kernel_size = params_case
    assert data_format in ["NCHW", "NHWC"]
    # Channel axis is 1 for NCHW, 3 for NHWC.
    out_channels = output_shape[1] if data_format == "NCHW" else output_shape[3]
    in_channels = input_shape[1] if data_format == "NCHW" else input_shape[3]
    assert device_type in ["gpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    func_config.train.primary_lr(1e-4)
    func_config.train.model_update_conf(dict(naive_conf={}))
    @flow.global_function(func_config)
    def DeconvJob():
        with flow.scope.placement(device_type, "0:0"):
            x = flow.get_variable(
                "x",
                shape=input_shape,
                dtype=flow.float,
                initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                trainable=True,
            )
            if data_format == "NCHW":
                # NCHW deconv weight layout: (in_channels, out_channels, k, k).
                weight = flow.get_variable(
                    "weight",
                    shape=(in_channels, out_channels, kernel_size, kernel_size),
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                    trainable=True,
                )
            else:
                # NHWC deconv weight layout: (in_channels, k, k, out_channels).
                weight = flow.get_variable(
                    "weight",
                    shape=(in_channels, kernel_size, kernel_size, out_channels),
                    dtype=flow.float,
                    initializer=flow.random_uniform_initializer(minval=-10, maxval=10),
                    trainable=True,
                )
            loss = flow.nn.conv2d_transpose(
                x,
                weight,
                strides=strides,
                output_shape=output_shape,
                dilations=dilations,
                padding=padding,
                data_format=data_format,
            )
            flow.losses.add_loss(loss)
            # Stash the tensors and their gradients so the TF reference below
            # consumes identical inputs and upstream gradients.
            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(weight, test_global_storage.Setter("weight"))
            flow.watch_diff(weight, test_global_storage.Setter("weight_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
            return loss

    # OneFlow
    check_point = flow.train.CheckPoint()
    check_point.init()
    of_out = DeconvJob().get()
    # Tensorflow
    if data_format == "NCHW":
        with tf.GradientTape(persistent=True) as tape:
            # TF runs in NHWC here, so inputs/weights are transposed to match.
            x = tf.Variable(test_global_storage.Get("x").transpose(0, 2, 3, 1))
            output_shape = (
                output_shape[0],
                output_shape[2],
                output_shape[3],
                output_shape[1],
            )
            w = tf.Variable(test_global_storage.Get("weight").transpose(2, 3, 1, 0))
            tf_out = tf.nn.conv2d_transpose(
                x,
                w,
                output_shape=output_shape,
                strides=[1, strides, strides, 1],
                padding=padding,
                data_format="NHWC",
            )
        loss_diff = test_global_storage.Get("loss_diff").transpose(0, 2, 3, 1)
        tf_x_diff = tape.gradient(tf_out, x, loss_diff)
        tf_weight_diff = tape.gradient(tf_out, w, loss_diff)
        # Forward output and both gradients must agree within tolerance.
        assert np.allclose(
            of_out.numpy().transpose(0, 2, 3, 1), tf_out.numpy(), rtol=1e-02, atol=1e-02
        )
        assert np.allclose(
            test_global_storage.Get("x_diff").transpose(0, 2, 3, 1),
            tf_x_diff.numpy(),
            rtol=1e-4,
            atol=1e-4,
        )
        assert np.allclose(
            test_global_storage.Get("weight_diff").transpose(2, 3, 1, 0),
            tf_weight_diff.numpy(),
            rtol=1e-4,
            atol=1e-4,
        )
    else:
        with tf.GradientTape(persistent=True) as tape:
            x = tf.Variable(test_global_storage.Get("x"))
            w = tf.Variable(test_global_storage.Get("weight").transpose(1, 2, 3, 0))
            tf_out = tf.nn.conv2d_transpose(
                x,
                w,
                output_shape=output_shape,
                strides=[1, strides, strides, 1],
                padding=padding,
                data_format="NHWC",
            )
        loss_diff = test_global_storage.Get("loss_diff")
        tf_x_diff = tape.gradient(tf_out, x, loss_diff)
        tf_weight_diff = tape.gradient(tf_out, w, loss_diff)
        assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=1e-02, atol=1e-02), (
            of_out.numpy() - tf_out.numpy()
        )
        assert np.allclose(
            test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-02, atol=1e-02
        )
        assert np.allclose(
            test_global_storage.Get("weight_diff").transpose(1, 2, 3, 0),
            tf_weight_diff.numpy(),
            rtol=1e-2,
            atol=1e-2,
        )
def test_deconv2d_NHWC_1n1c(test_case):
    """Sweep NHWC deconvolution configurations against the TF reference."""
    sweep = OrderedDict()
    sweep["device_type"] = ["gpu"]
    # Each params_case is (input_shape, output_shape, padding, strides, kernel_size).
    sweep["params_case"] = [
        ((32, 3, 3, 4), (32, 3, 3, 8), "SAME", 1, 3),
        ((32, 3, 3, 2), (32, 6, 6, 8), "SAME", 2, 4),
        ((32, 2, 2, 1), (32, 5, 5, 2), "VALID", 2, 2),
        ((32, 2, 2, 16), (32, 8, 8, 4), "VALID", 2, 5),
    ]
    sweep["dilations"] = [1]
    sweep["data_format"] = ["NHWC"]
    for case in GenArgList(sweep):
        compare_with_tensorflow(*case)
def test_deconv2d_NCHW_1n1c(test_case):
    """Sweep NCHW deconvolution configurations against the TF reference."""
    sweep = OrderedDict()
    sweep["device_type"] = ["gpu"]
    # Each params_case is (input_shape, output_shape, padding, strides, kernel_size).
    sweep["params_case"] = [
        ((32, 4, 3, 3), (32, 8, 3, 3), "SAME", 1, 3),
        ((32, 4, 3, 3), (32, 8, 6, 6), "SAME", 2, 5),
        ((32, 1, 2, 2), (32, 2, 5, 5), "VALID", 2, 2),
        ((32, 16, 2, 2), (32, 4, 8, 8), "VALID", 2, 5),
    ]
    sweep["dilations"] = [1]
    sweep["data_format"] = ["NCHW"]
    for case in GenArgList(sweep):
        compare_with_tensorflow(*case)
| [
"oneflow.train.CheckPoint",
"oneflow.clear_default_session",
"oneflow.global_function",
"oneflow.nn.conv2d_transpose",
"oneflow.scope.placement",
"oneflow.losses.add_loss",
"oneflow.random_uniform_initializer",
"oneflow.FunctionConfig"
] | [((1160, 1188), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1186, 1188), True, 'import oneflow as flow\n'), ((1207, 1228), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1226, 1228), True, 'import oneflow as flow\n'), ((1381, 1414), 'oneflow.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (1401, 1414), True, 'import oneflow as flow\n'), ((3275, 3298), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3296, 3298), True, 'import oneflow as flow\n'), ((5917, 5930), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5928, 5930), False, 'from collections import OrderedDict\n'), ((6390, 6410), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6400, 6410), False, 'from test_util import GenArgList\n'), ((6507, 6520), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6518, 6520), False, 'from collections import OrderedDict\n'), ((6980, 7000), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6990, 7000), False, 'from test_util import GenArgList\n'), ((5244, 5280), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (5267, 5280), False, 'import test_global_storage\n'), ((1449, 1489), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1469, 1489), True, 'import oneflow as flow\n'), ((2491, 2643), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['x', 'weight'], {'strides': 'strides', 'output_shape': 'output_shape', 'dilations': 'dilations', 'padding': 'padding', 'data_format': 'data_format'}), '(x, weight, strides=strides, output_shape=\n output_shape, dilations=dilations, padding=padding, data_format=data_format\n )\n', (2515, 2643), True, 'import oneflow as flow\n'), ((2773, 2799), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['loss'], {}), '(loss)\n', (2793, 
2799), True, 'import oneflow as flow\n'), ((3413, 3445), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (3428, 3445), True, 'import tensorflow as tf\n'), ((3816, 3946), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'w'], {'output_shape': 'output_shape', 'strides': '[1, strides, strides, 1]', 'padding': 'padding', 'data_format': '"""NHWC"""'}), "(x, w, output_shape=output_shape, strides=[1, strides,\n strides, 1], padding=padding, data_format='NHWC')\n", (3838, 3946), True, 'import tensorflow as tf\n'), ((4780, 4812), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (4795, 4812), True, 'import tensorflow as tf\n'), ((4986, 5116), 'tensorflow.nn.conv2d_transpose', 'tf.nn.conv2d_transpose', (['x', 'w'], {'output_shape': 'output_shape', 'strides': '[1, strides, strides, 1]', 'padding': 'padding', 'data_format': '"""NHWC"""'}), "(x, w, output_shape=output_shape, strides=[1, strides,\n strides, 1], padding=padding, data_format='NHWC')\n", (5008, 5116), True, 'import tensorflow as tf\n'), ((5579, 5612), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (5602, 5612), False, 'import test_global_storage\n'), ((2827, 2858), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (2853, 2858), False, 'import test_global_storage\n'), ((2891, 2927), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (2917, 2927), False, 'import test_global_storage\n'), ((2960, 2996), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""weight"""'], {}), "('weight')\n", (2986, 2996), False, 'import test_global_storage\n'), ((3034, 3075), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""weight_diff"""'], {}), "('weight_diff')\n", (3060, 3075), False, 'import test_global_storage\n'), ((3106, 3140), 
'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss"""'], {}), "('loss')\n", (3132, 3140), False, 'import test_global_storage\n'), ((3176, 3215), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (3202, 3215), False, 'import test_global_storage\n'), ((4075, 4111), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (4098, 4111), False, 'import test_global_storage\n'), ((4850, 4878), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (4873, 4878), False, 'import test_global_storage\n'), ((1644, 1698), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (1675, 1698), True, 'import oneflow as flow\n'), ((4419, 4452), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (4442, 4452), False, 'import test_global_storage\n'), ((4603, 4641), 'test_global_storage.Get', 'test_global_storage.Get', (['"""weight_diff"""'], {}), "('weight_diff')\n", (4626, 4641), False, 'import test_global_storage\n'), ((5706, 5744), 'test_global_storage.Get', 'test_global_storage.Get', (['"""weight_diff"""'], {}), "('weight_diff')\n", (5729, 5744), False, 'import test_global_storage\n'), ((2009, 2063), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (2040, 2063), True, 'import oneflow as flow\n'), ((2362, 2416), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {'minval': '(-10)', 'maxval': '(10)'}), '(minval=-10, maxval=10)\n', (2393, 2416), True, 'import oneflow as flow\n'), ((3483, 3511), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x"""'], {}), "('x')\n", (3506, 3511), False, 'import test_global_storage\n'), ((3738, 3771), 'test_global_storage.Get', 
'test_global_storage.Get', (['"""weight"""'], {}), "('weight')\n", (3761, 3771), False, 'import test_global_storage\n'), ((4908, 4941), 'test_global_storage.Get', 'test_global_storage.Get', (['"""weight"""'], {}), "('weight')\n", (4931, 4941), False, 'import test_global_storage\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow as flow
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.module as module_util
from oneflow.python.oneflow_export import oneflow_export
from typing import Optional, Sequence, Union
import random
import sys
import traceback
@oneflow_export("data.OFRecordRawDecoder", "data.ofrecord_raw_decoder")
def OFRecordRawDecoder(
    input_blob: remote_blob_util.BlobDef,
    blob_name: str,
    shape: Sequence[int],
    dtype: dtype_util.dtype,
    dim1_varying_length: bool = False,
    auto_zero_padding: bool = False,
    name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Decode the raw field `blob_name` of an OFRecord blob into a tensor blob."""
    if name is None:
        name = id_util.UniqueStr("OFRecordRawDecoder_")
    op = (
        flow.user_op_builder(name)
        .Op("ofrecord_raw_decoder")
        .Input("in", [input_blob])
        .Output("out")
    )
    op = op.Attr("name", blob_name)
    op = op.Attr("shape", shape)
    op = op.Attr("data_type", dtype)
    op = op.Attr("dim1_varying_length", dim1_varying_length)
    op = op.Attr("auto_zero_padding", auto_zero_padding)
    return op.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("data.OFRecordBytesDecoder", "data.ofrecord_bytes_decoder")
def OFRecordBytesDecoder(
    input_blob: remote_blob_util.BlobDef, blob_name: str, name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
    """Decode the field `blob_name` of an OFRecord blob into raw bytes."""
    if name is None:
        name = id_util.UniqueStr("OFRecordBytesDecoder_")
    op = flow.user_op_builder(name).Op("ofrecord_bytes_decoder")
    op = op.Input("in", [input_blob]).Output("out").Attr("name", blob_name)
    return op.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export(
    "data.OFRecordImageDecoderRandomCrop", "data.ofrecord_image_decoder_random_crop"
)
def api_ofrecord_image_decoder_random_crop(
    input_blob: remote_blob_util.BlobDef,
    blob_name: str,
    color_space: str = "BGR",
    num_attempts: int = 10,
    seed: Optional[int] = None,
    random_area: Sequence[float] = [0.08, 1.0],
    random_aspect_ratio: Sequence[float] = [0.75, 1.333333],
    name: str = "OFRecordImageDecoderRandomCrop",
) -> remote_blob_util.BlobDef:
    """Decode images from an OFRecord blob with a random crop.

    Args:
        input_blob (BlobDef): The input Blob
        blob_name (str): The name of the Blob
        color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
        num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
        seed (Optional[int], optional): The random seed. Defaults to None.
        random_area (Sequence[float], optional): The random cropping area. Defaults to [0.08, 1.0].
        random_aspect_ratio (Sequence[float], optional): The random scaled ratio. Defaults to [0.75, 1.333333].
        name (str, optional): The name for the operation. Defaults to "OFRecordImageDecoderRandomCrop".

    Returns:
        BlobDef: The random cropped Blob

    For example:

    .. code-block:: python

        import oneflow as flow
        import oneflow.typing as tp
        from typing import Tuple

        @flow.global_function(type="predict")
        def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
            batch_size = 16
            color_space = "RGB"
            # our ofrecord file path is "./dataset/part-0"
            ofrecord = flow.data.ofrecord_reader(
                "./imgdataset",
                batch_size=batch_size,
                data_part_num=1,
                part_name_suffix_length=-1,
                part_name_prefix='part-',
                random_shuffle=True,
                shuffle_after_epoch=True,
            )
            image = flow.data.OFRecordImageDecoderRandomCrop(
                ofrecord, "encoded", color_space=color_space
            )
            res_image, scale, new_size = flow.image.Resize(
                image, target_size=(224, 224)
            )
            label = flow.data.OFRecordRawDecoder(
                ofrecord, "class/label", shape=(1, ), dtype=flow.int32
            )
            return res_image, label

        if __name__ == "__main__":
            images, labels = ofrecord_reader_job()
            # images.shape (16, 224, 224, 3)
    """
    assert isinstance(name, str)
    if seed is not None:
        assert name is not None

    def make_module():
        # Deferred factory: only invoked when no module named `name` exists yet.
        return OFRecordImageDecoderRandomCropModule(
            blob_name=blob_name,
            color_space=color_space,
            num_attempts=num_attempts,
            random_seed=seed,
            random_area=random_area,
            random_aspect_ratio=random_aspect_ratio,
            name=name,
        )

    module = flow.find_or_create_module(name, make_module)
    return module(input_blob)
class OFRecordImageDecoderRandomCropModule(module_util.Module):
    """Reusable module wrapping the "ofrecord_image_decoder_random_crop" user op.

    The op is built and completed once in ``__init__``; each ``forward`` call
    runs it under a fresh op name (the module's own name on the first call).
    """

    def __init__(
        self,
        blob_name: str,
        color_space: str,
        num_attempts: int,
        random_seed: Optional[int],
        random_area: Sequence[float],
        random_aspect_ratio: Sequence[float],
        name: str,
    ):
        module_util.Module.__init__(self, name)
        seed, has_seed = flow.random.gen_seed(random_seed)
        builder = flow.user_op_module_builder("ofrecord_image_decoder_random_crop")
        builder = builder.InputSize("in", 1).Output("out")
        # Attach every op attribute in a single pass.
        for attr_key, attr_value in (
            ("name", blob_name),
            ("color_space", color_space),
            ("num_attempts", num_attempts),
            ("random_area", random_area),
            ("random_aspect_ratio", random_aspect_ratio),
            ("has_seed", has_seed),
            ("seed", seed),
        ):
            builder = builder.Attr(attr_key, attr_value)
        self.op_module_builder = builder.CheckAndComplete()
        self.op_module_builder.user_op_module.InitOpKernel()

    def forward(self, input: remote_blob_util.BlobDef):
        # Reuse the module name on the first call; later calls get unique names.
        if self.call_seq_no == 0:
            op_name = self.module_name
        else:
            op_name = id_util.UniqueStr("OFRecordImageDecoderRandomCrop_")
        op = self.op_module_builder.OpName(op_name).Input("in", [input])
        return op.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("data.OFRecordImageDecoder", "data.ofrecord_image_decoder")
def OFRecordImageDecoder(
input_blob: remote_blob_util.BlobDef,
blob_name: str,
color_space: str = "BGR",
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator is an image decoder.
Args:
input_blob (BlobDef): The input Blob
blob_name (str): The name of the input Blob
color_space (str, optional): The color space, such as "RGB", "BGR". Defaults to "BGR".
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def image_decoder_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoder(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(224, 224)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
return res_image, label
if __name__ == "__main__":
images, labels = image_decoder_job()
# image.shape (16, 224, 224, 3)
"""
if name is None:
name = id_util.UniqueStr("OFRecordImageDecoder_")
return (
flow.user_op_builder(name)
.Op("ofrecord_image_decoder")
.Input("in", [input_blob])
.Output("out")
.Attr("name", blob_name)
.Attr("color_space", color_space)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("image.Resize", "image.resize", "image_resize")
def api_image_resize(
image: remote_blob_util.BlobDef,
target_size: Union[int, Sequence[int]] = None,
min_size: Optional[int] = None,
max_size: Optional[int] = None,
keep_aspect_ratio: bool = False,
resize_side: str = "shorter",
channels: int = 3,
dtype: Optional[dtype_util.dtype] = None,
interpolation_type: str = "auto",
name: Optional[str] = None,
# deprecated params, reserve for backward compatible
color_space: Optional[str] = None,
interp_type: Optional[str] = None,
resize_shorter: int = 0,
resize_x: int = 0,
resize_y: int = 0,
) -> Union[remote_blob_util.BlobDef, Sequence[remote_blob_util.BlobDef]]:
r"""Resize images to target size.
Args:
image: A `Tensor` consists of images to be resized.
target_size: A list or tuple when `keep_aspect_ratio` is false or an int when `keep_aspect_ratio` is true. When `keep_aspect_ratio` is false, `target_size` has a form of `(target_width, target_height)` that image will resize to. When `keep_aspect_ratio` is true, the longer side or shorter side of the image will be resized to target size.
min_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side` is "longer". If `min_size` is not None, the shorter side must be greater than or equal to `min_size`. Default is None.
max_size: An int, optional. Only works when `keep_aspect_ratio` is true and `resize_side` is "shorter". If `max_size` is not None, the longer side must be less than or equal to `max_size`. Default is None.
keep_aspect_ratio: A bool. If is false, indicate that image will be resized to fixed width and height, otherwise image will be resized keeping aspect ratio.
resize_side: A str of "longer" or "shorter". Only works when `keep_aspect_ratio` is True. If `resize_side` is "longer", the longer side of image will be resized to `target_size`. If `resize_side` is "shorter", the shorter side of image will be resized to `target_size`.
channels: An int. how many channels an image has
dtype: `oneflow.dtype`. Indicate output resized image data type.
interpolation_type: A str of "auto", "bilinear", "nearest_neighbor", "bicubic" or "area". Indicate interpolation method used to resize image.
name: A str, optional. Name for the operation.
color_space: Deprecated, a str of "RGB", "BGR" or "GRAY". Please use `channels` instead.
interp_type: Deprecated, s str of "Linear", "Cubic" or "NN". Please use `interpolation_type` instead.
resize_shorter: Deprecated, a int. Indicate target size that the shorter side of image will resize to. Please use `target_size` and `resize_side` instead.
resize_x: Deprecated, a int. Indicate the target size that the width of image will resize to. Please use `target_size` instead.
resize_y: Deprecated, a int. Indicate the target size that the height of image will resize to. Please use `target_size` instead.
Returns:
Tuple of resized images `Blob`, width and height scales `Blob` and new width and height `Blob`
(new width and height `Blob` will be None when keep_aspect_ratio is false).
If deprecated params are used, a single resized images `Blob` will be returned.
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoderRandomCrop(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(224, 224)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
return res_image, label
if __name__ == "__main__":
images, labels = ofrecord_reader_job()
# image.shape (16, 224, 224, 3)
"""
# process deprecated params
deprecated_param_used = False
if color_space is not None:
print("WARNING: color_space has been deprecated. Please use channels instead.")
print(traceback.format_stack()[-2])
deprecated_param_used = True
assert isinstance(color_space, str)
if color_space.upper() == "RGB" or color_space.upper() == "BGR":
channels = 3
elif color_space.upper() == "GRAY":
channels = 1
else:
raise ValueError("invalid color_space")
if interp_type is not None:
print(
"WARNING: interp_type has been deprecated. Please use interpolation_type instead."
)
print(traceback.format_stack()[-2])
deprecated_param_used = True
assert isinstance(interp_type, str)
if interp_type == "Linear":
interpolation_type = "bilinear"
elif interp_type == "NN":
interpolation_type = "nearest_neighbor"
elif interp_type == "Cubic":
interpolation_type = "bicubic"
else:
raise ValueError("invalid interp_type")
if resize_x > 0 and resize_y > 0:
print(
"WARNING: resize_x and resize_y has been deprecated. Please use target_size instead."
)
print(traceback.format_stack()[-2])
deprecated_param_used = True
target_size = (resize_x, resize_y)
keep_aspect_ratio = False
if resize_shorter > 0:
print(
"WARNING: resize_shorter has been deprecated. Please use target_size instead."
)
print(traceback.format_stack()[-2])
deprecated_param_used = True
target_size = resize_shorter
keep_aspect_ratio = True
resize_side = "shorter"
if name is None:
name = id_util.UniqueStr("ImageResize_")
if keep_aspect_ratio:
if not isinstance(target_size, int):
raise ValueError(
"target_size must be an int when keep_aspect_ratio is True"
)
if min_size is None:
min_size = 0
if max_size is None:
max_size = 0
if resize_side == "shorter":
resize_longer = False
elif resize_side == "longer":
resize_longer = True
else:
raise ValueError('resize_side must be "shorter" or "longer"')
op = (
flow.user_op_builder(name)
.Op("image_resize_keep_aspect_ratio")
.Input("in", [image])
.Output("out")
.Output("size")
.Output("scale")
.Attr("target_size", target_size)
.Attr("min_size", min_size)
.Attr("max_size", max_size)
.Attr("resize_longer", resize_longer)
.Attr("interpolation_type", interpolation_type)
.Build()
)
res_image, new_size, scale = op.InferAndTryRun().RemoteBlobList()
scale = flow.tensor_buffer_to_tensor(
scale, dtype=flow.float32, instance_shape=(2,)
)
new_size = flow.tensor_buffer_to_tensor(
new_size, dtype=flow.int32, instance_shape=(2,)
)
else:
if (
not isinstance(target_size, (list, tuple))
or len(target_size) != 2
or not all(isinstance(size, int) for size in target_size)
):
raise ValueError(
"target_size must be a form like (width, height) when keep_aspect_ratio is False"
)
if dtype is None:
dtype = flow.uint8
target_w, target_h = target_size
op = (
flow.user_op_builder(name)
.Op("image_resize_to_fixed")
.Input("in", [image])
.Output("out")
.Output("scale")
.Attr("target_width", target_w)
.Attr("target_height", target_h)
.Attr("channels", channels)
.Attr("data_type", dtype)
.Attr("interpolation_type", interpolation_type)
.Build()
)
res_image, scale = op.InferAndTryRun().RemoteBlobList()
new_size = None
if deprecated_param_used:
return res_image
return res_image, scale, new_size
@oneflow_export("image.target_resize", "image_target_resize")
def api_image_target_resize(
images: remote_blob_util.BlobDef,
target_size: int,
min_size: Optional[int] = None,
max_size: Optional[int] = None,
resize_side: str = "shorter",
interpolation_type: str = "auto",
name: Optional[str] = None,
) -> Sequence[remote_blob_util.BlobDef]:
"""This operator resizes image to target size.
Args:
images (BlobDef): The input Blob. Its type should be `kTensorBuffer`. More details please refer to the code example.
target_size (int): An int, the target size.
min_size (Optional[int], optional): If `min_size` is not None, the shorter side must be greater than or equal to `min_size`. Default is None. Defaults to None.
max_size (Optional[int], optional): If `max_size` is not None, the longer side must be less than or equal to `max_size`. Defaults to None.
resize_side (str, optional): A str of "longer" or "shorter". Only works when `keep_aspect_ratio` is True. If `resize_side` is "longer", the longer side of image will be resized to `target_size`. If `resize_side` is "shorter", the shorter side of image will be resized to `target_size`. Defaults to "shorter".
interpolation_type (str, optional): A str of "auto", "bilinear", "nearest_neighbor", "bicubic" or "area". Indicate interpolation method used to resize image. Defaults to "auto".
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
Sequence[BlobDef]: A Sequence includes the result Blob.
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
import numpy as np
import cv2
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _of_image_target_resize(images, image_static_shape, target_size, max_size):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_target_resize_job(images_def: tp.ListListNumpy.Placeholder(shape=image_static_shape, dtype=flow.float)
) -> Tuple[tp.ListListNumpy, tp.ListNumpy, tp.ListNumpy]:
# The input Blob type should be "kTensorBuffer"
# So we use oneflow.tensor_list_to_tensor_buffer to convert
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
resized_images_buffer, size, scale = flow.image_target_resize(
images_buffer,
target_size=target_size,
max_size=max_size,
resize_side="shorter",
)
# We convert back to "tensorlist" type
resized_images = flow.tensor_buffer_to_tensor_list(
resized_images_buffer,
shape=(target_size, max_size, image_static_shape[-1]),
dtype=flow.float,
)
return resized_images, size, scale
resized_images, size, scale = image_target_resize_job([images])
resized_image = resized_images[0]
size = size[0]
scale = scale[0]
return resized_images, size, scale
if __name__ == "__main__":
img = _read_images_by_cv(['./img/1.jpg'])
img_shape = _get_images_static_shape(img) # In example is [1, 349, 367, 3]
target_size = 256
max_size = 512
resized_images, size, scale = _of_image_target_resize(img, tuple(img_shape), target_size, max_size)
# Here the shorter side is "349", we resize it to target_size(256)
# The scale is 256 / 349 = 0.73
# The longer side will be resized to 367 * scale = 269
# get the first element from the resized_images (its type is `list.list`)
print(resized_images[0][0].shape) # (1, 256, 269, 3)
"""
if name is None:
name = id_util.UniqueStr("ImageTargetResize_")
res_image, scale, new_size = api_image_resize(
images,
target_size=target_size,
min_size=min_size,
max_size=max_size,
keep_aspect_ratio=True,
resize_side=resize_side,
interpolation_type=interpolation_type,
name=name,
)
return res_image, new_size, scale
@oneflow_export("image.CropMirrorNormalize", "image.crop_mirror_normalize")
def CropMirrorNormalize(
input_blob: remote_blob_util.BlobDef,
mirror_blob: Optional[remote_blob_util.BlobDef] = None,
color_space: str = "BGR",
output_layout: str = "NCHW",
crop_h: int = 0,
crop_w: int = 0,
crop_pos_y: float = 0.5,
crop_pos_x: float = 0.5,
mean: Sequence[float] = [0.0],
std: Sequence[float] = [1.0],
output_dtype: dtype_util.dtype = dtype_util.float,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator performs the cropping, normalization, and horizontal flip for input Blob.
If `crop_h` and `crop_w` are provided, the image cropping position is specified by "crop_pos_y" and "crop_pos_x".
The position is computed as follows:
.. math::
& crop_x = crop\_pos\_x*(Width-crop\_w)
& crop_y = crop\_pos\_y*(Height-crop\_h)
The `Width` and `Height` is the width and height of input Blob.
Args:
input_blob (BlobDef): The input Blob.
mirror_blob (Optional[BlobDef], optional): The operation for horizontal flip, if it is `None`, the operator will not perform the horizontal flip. Defaults to None.
color_space (str, optional): The color space for input Blob. Defaults to "BGR".
output_layout (str, optional): The output format. Defaults to "NCHW".
crop_h (int, optional): The image cropping window height. Defaults to 0.
crop_w (int, optional): The image cropping window width. Defaults to 0.
crop_pos_y (float, optional): The vertical position of the image cropping window, the value range is normalized to (0.0, 1.0). Defaults to 0.5.
crop_pos_x (float, optional): The horizontal position of the image cropping window, the value range is normalized to (0.0, 1.0). Defaults to 0.5.
mean (Sequence[float], optional): The mean value for normalization. Defaults to [0.0].
std (Sequence[float], optional): The standard deviation values for normalization. Defaults to [1.0].
output_dtype (dtype_util.dtype, optional): The datatype of output Blob. Defaults to dtype_util.float.
name (Optional[str], optional): The name for the operation. Defaults to None.
Raises:
NotImplementedError: The data type of input Blob should be `tensor_buffer` or `uint8`
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def crop_mirror_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 1
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoder(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(512, 512)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
rng = flow.random.CoinFlip(batch_size=batch_size)
normal = flow.image.CropMirrorNormalize(
res_image,
mirror_blob=rng,
color_space=color_space,
crop_h= 256,
crop_w= 256,
crop_pos_y=0.5,
crop_pos_x=0.5,
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
output_dtype=flow.float,
)
return normal, label
if __name__ == "__main__":
images, labels = crop_mirror_job()
# images.shape (1, 3, 256, 256)
"""
if name is None:
name = id_util.UniqueStr("CropMirrorNormalize_")
op_type_name = ""
if input_blob.dtype is dtype_util.tensor_buffer:
op_type_name = "crop_mirror_normalize_from_tensorbuffer"
elif input_blob.dtype is dtype_util.uint8:
op_type_name = "crop_mirror_normalize_from_uint8"
else:
print(
"ERROR! oneflow.data.crop_mirror_normalize op",
" NOT support input data type : ",
input_blob.dtype,
)
raise NotImplementedError
op = flow.user_op_builder(name).Op(op_type_name).Input("in", [input_blob])
if mirror_blob is not None:
op = op.Input("mirror", [mirror_blob])
return (
op.Output("out")
.Attr("color_space", color_space)
.Attr("output_layout", output_layout)
.Attr("mean", mean)
.Attr("std", std)
.Attr("crop_h", crop_h)
.Attr("crop_w", crop_w)
.Attr("crop_pos_y", crop_pos_y)
.Attr("crop_pos_x", crop_pos_x)
.Attr("output_dtype", output_dtype)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("image.random_crop", "image_random_crop")
def api_image_random_crop(
input_blob: remote_blob_util.BlobDef,
num_attempts: int = 10,
seed: Optional[int] = None,
random_area: Sequence[float] = None,
random_aspect_ratio: Sequence[float] = None,
name: str = "ImageRandomCrop",
) -> remote_blob_util.BlobDef:
"""This operator crops the input image randomly.
Args:
input_blob (BlobDef): The input Blob.
num_attempts (int, optional): The maximum number of random cropping attempts. Defaults to 10.
seed (Optional[int], optional): The random seed. Defaults to None.
random_area (Sequence[float], optional): The random cropping area. Defaults to None.
random_aspect_ratio (Sequence[float], optional): The random scaled ratio. Defaults to None.
name (str, optional): The name for the operation. Defaults to "ImageRandomCrop".
Returns:
BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
import cv2
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _of_image_random_crop(images, image_static_shape):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_random_crop_job(images_def: tp.ListListNumpy.Placeholder(shape=image_static_shape, dtype=flow.float)
) -> tp.ListListNumpy:
# The input Blob type should be "kTensorBuffer"
# So we use oneflow.tensor_list_to_tensor_buffer to convert
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
# Do the random crop
random_crop_buffer = flow.image.random_crop(
images_buffer,
random_area=[0.15, 0.80],
random_aspect_ratio=[0.75, 1.55],
)
# We convert back to "tensorlist" type
random_crop_images = flow.tensor_buffer_to_tensor_list(
random_crop_buffer,
shape=(image_static_shape[1], image_static_shape[2], image_static_shape[-1]),
dtype=flow.float,
)
return random_crop_images
random_crop_images = image_random_crop_job([images])
return random_crop_images
if __name__ == "__main__":
img = _read_images_by_cv(['./img/1.jpg'])
img_shape = _get_images_static_shape(img) # In example is (1, 234, 346, 3)
random_crop_images = _of_image_random_crop(img, tuple(img_shape))
# random_crop_images.shape is (234, 346, 3)
"""
assert isinstance(name, str)
if seed is not None:
assert name is not None
if random_area is None:
random_area = [0.08, 1.0]
if random_aspect_ratio is None:
random_aspect_ratio = [0.75, 1.333333]
module = flow.find_or_create_module(
name,
lambda: ImageRandomCropModule(
num_attempts=num_attempts,
random_seed=seed,
random_area=random_area,
random_aspect_ratio=random_aspect_ratio,
name=name,
),
)
return module(input_blob)
class ImageRandomCropModule(module_util.Module):
    """Reusable module wrapping the "image_random_crop" user op.

    The op is built and completed once in ``__init__``; each ``forward`` call
    runs it under a fresh op name (the module's own name on the first call).
    """

    def __init__(
        self,
        num_attempts: int,
        random_seed: Optional[int],
        random_area: Sequence[float],
        random_aspect_ratio: Sequence[float],
        name: str,
    ):
        module_util.Module.__init__(self, name)
        seed, has_seed = flow.random.gen_seed(random_seed)
        builder = flow.user_op_module_builder("image_random_crop")
        builder = builder.InputSize("in", 1)
        builder = builder.Output("out")
        builder = builder.Attr("num_attempts", num_attempts)
        builder = builder.Attr("random_area", random_area)
        builder = builder.Attr("random_aspect_ratio", random_aspect_ratio)
        builder = builder.Attr("has_seed", has_seed)
        builder = builder.Attr("seed", seed)
        self.op_module_builder = builder.CheckAndComplete()
        self.op_module_builder.user_op_module.InitOpKernel()

    def forward(self, input: remote_blob_util.BlobDef):
        # First invocation reuses the module's own name; later ones get a
        # unique generated name.
        op_name = (
            self.module_name
            if self.call_seq_no == 0
            else id_util.UniqueStr("ImageRandomCrop_")
        )
        return (
            self.op_module_builder.OpName(op_name)
            .Input("in", [input])
            .Build()
            .InferAndTryRun()
            .SoleOutputBlob()
        )
@oneflow_export("random.CoinFlip", "random.coin_flip")
def api_coin_flip(
batch_size: int = 1,
seed: Optional[int] = None,
probability: float = 0.5,
name: str = "CoinFlip",
) -> remote_blob_util.BlobDef:
"""This operator performs the horizontal flip.
Args:
batch_size (int, optional): The batch size. Defaults to 1.
seed (Optional[int], optional): The random seed. Defaults to None.
probability (float, optional): The flip probability. Defaults to 0.5.
name (str, optional): The name for the operation. Defaults to "CoinFlip".
Returns:
BlobDef: [description]
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def coin_flip_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 1
color_space = "RGB"
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./imgdataset",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
shuffle_after_epoch=True,
)
image = flow.data.OFRecordImageDecoder(
ofrecord, "encoded", color_space=color_space
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(512, 512)
)
label = flow.data.OFRecordRawDecoder(
ofrecord, "class/label", shape=(1, ), dtype=flow.int32
)
coin_flip = flow.random.CoinFlip(
batch_size=batch_size,
probability=0.8
)
normal = flow.image.CropMirrorNormalize(
res_image,
mirror_blob=coin_flip,
color_space=color_space,
crop_h= 256,
crop_w= 256,
crop_pos_y=0.5,
crop_pos_x=0.5,
mean=[123.68, 116.779, 103.939],
std=[58.393, 57.12, 57.375],
output_dtype=flow.float,
)
return normal, label
if __name__ == "__main__":
images, labels = coin_flip_job()
"""
assert isinstance(name, str)
if seed is not None:
assert name is not None
module = flow.find_or_create_module(
name,
lambda: CoinFlipModule(
batch_size=batch_size, probability=probability, random_seed=seed, name=name,
),
)
return module()
class CoinFlipModule(module_util.Module):
    """Module wrapping the "coin_flip" user op.

    The op is built and completed once in ``__init__``; each ``forward`` call
    runs it under a fresh op name and returns its sole output Blob.
    """

    def __init__(
        self,
        # NOTE(review): annotation corrected from ``str`` — the caller
        # (api_coin_flip) passes an int batch size.
        batch_size: int,
        probability: float,
        random_seed: Optional[int],
        name: str,
    ):
        module_util.Module.__init__(self, name)
        # gen_seed yields the seed value and a flag; both are forwarded to
        # the op as the "seed"/"has_seed" attributes.
        seed, has_seed = flow.random.gen_seed(random_seed)
        self.op_module_builder = (
            flow.user_op_module_builder("coin_flip")
            .Output("out")
            .Attr("batch_size", batch_size)
            .Attr("probability", probability)
            .Attr("has_seed", has_seed)
            .Attr("seed", seed)
            .CheckAndComplete()
        )
        self.op_module_builder.user_op_module.InitOpKernel()

    def forward(self):
        # Reuse the module name on the first call; subsequent calls need
        # unique op names.
        if self.call_seq_no == 0:
            name = self.module_name
        else:
            name = id_util.UniqueStr("CoinFlip_")
        return (
            self.op_module_builder.OpName(name)
            .Build()
            .InferAndTryRun()
            .SoleOutputBlob()
        )
@oneflow_export("image.decode", "image_decode")
def image_decode(
images_bytes_buffer: remote_blob_util.BlobDef,
dtype: dtype_util.dtype = dtype_util.uint8,
color_space: str = "BGR",
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator decode the image.
Args:
images_bytes_buffer (BlobDef): The input Blob. Its type should be `kTensorBuffer`. More details please refer to the code example.
dtype (dtype_util.dtype, optional): The data type. Defaults to dtype_util.uint8.
color_space (str, optional): The color space. Defaults to "BGR".
name (Optional[str], optional): The name for the opreation. Defaults to None.
Returns:
BlobDef: The decoded image list.
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
from PIL import Image
def _of_image_decode(images):
image_files = [open(im, "rb") for im in images]
images_bytes = [imf.read() for imf in image_files]
static_shape = (len(images_bytes), max([len(bys) for bys in images_bytes]))
for imf in image_files:
imf.close()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_decode_job(
images_def: tp.ListListNumpy.Placeholder(shape=static_shape, dtype=flow.int8)
)->tp.ListListNumpy:
# convert to tensor buffer
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
decoded_images_buffer = flow.image_decode(images_buffer)
# Remember to set a shape
# convert back to tensor list
return flow.tensor_buffer_to_tensor_list(
decoded_images_buffer, shape=(640, 640, 3), dtype=flow.uint8
)
images_np_arr = [
np.frombuffer(bys, dtype=np.byte).reshape(1, -1) for bys in images_bytes
]
decoded_images = image_decode_job([images_np_arr])
return decoded_images[0]
if __name__ == "__main__":
img = _of_image_decode(['./img/1.jpg'])
print(img[0].shape) # Our image shape is (1, 349, 367, 3)
"""
# TODO: check color_space valiad
if name is None:
name = id_util.UniqueStr("ImageDecode_")
op = (
flow.user_op_builder(name)
.Op("image_decode")
.Input("in", [images_bytes_buffer])
.Output("out")
.Attr("color_space", color_space)
.Attr("data_type", dtype)
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("image.batch_align", "image_batch_align")
def image_batch_align(
images: remote_blob_util.BlobDef,
shape: Sequence[int],
dtype: dtype_util.dtype,
alignment: int,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
r"""This operator aligns the shape for a batch of images.
The aligned shape is computed as:
.. math::
& shape_{width} = int(\frac{(shape_{width}+alignment-1)}{alignment})*alignment
& shape_{height} = int(\frac{(shape_{height}+alignment-1)}{alignment})*alignment
Args:
images (BlobDef): The images.
shape (Sequence[int]): The maximum static shape of input images.
dtype (dtype_util.dtype): The data type.
alignment (int): The align factor.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as tp
def _of_image_batch_align(images, input_shape, output_shape, alignment):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_batch_align_job(
images_def: tp.ListListNumpy.Placeholder(shape=input_shape, dtype=flow.float)
) -> tp.ListNumpy:
# Convert to tensor buffer
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
image = flow.image_batch_align(
images_buffer, shape=output_shape[1:], dtype=flow.float, alignment=alignment
)
return image
image = image_batch_align_job([images])
return image[0]
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
def _roundup(x, n):
# compute the aligned shape
return int((x + n - 1) / n) * n
if __name__ == "__main__":
img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
alignment = 16 # alignment factor
aligned_image_shape = [
img_shape[0],
_roundup(img_shape[1], alignment),
_roundup(img_shape[2], alignment),
img_shape[3],
]
image = _of_image_batch_align(img, tuple(img_shape), aligned_image_shape, alignment)
"""
if name is None:
name = id_util.UniqueStr("ImageBatchAlign_")
op = (
flow.user_op_builder(name)
.Op("image_batch_align")
.Input("in", [images])
.Output("out")
.Attr("shape", shape)
.Attr("data_type", dtype)
.Attr("alignment", alignment)
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("image.normalize", "image_normalize")
def image_normalize(
image: remote_blob_util.BlobDef,
std: Sequence[float],
mean: Sequence[float],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator normalizes the image.
Args:
image (BlobDef): The input image.
std (Sequence[float]): The standard deviation of the images.
mean (Sequence[float]): The mean value of the images.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as tp
def _of_image_normalize(images, image_shape, std, mean):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_normalize_job(
images_def: tp.ListListNumpy.Placeholder(shape=image_shape, dtype=flow.float)
) -> tp.ListListNumpy:
# Convert to tensor buffer
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
# Normalize the imagess
norm_images = flow.image_normalize(images_buffer, std, mean)
# Convert back to tensor list
return flow.tensor_buffer_to_tensor_list(
norm_images, shape=image_shape[1:], dtype=flow.float
)
image_tensor = image_normalize_job([images])
return image_tensor[0]
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
if __name__ == "__main__":
img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
image = _of_image_normalize(img,
tuple(img_shape),
std=(102.9801, 115.9465, 122.7717),
mean=(1.0, 1.0, 1.0))
"""
if name is None:
name = id_util.UniqueStr("ImageNormalize_")
assert isinstance(std, (list, tuple))
assert isinstance(mean, (list, tuple))
op = (
flow.user_op_builder(name)
.Op("image_normalize")
.Input("in", [image])
.Output("out")
.Attr("std", std)
.Attr("mean", mean)
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("image.flip", "image_flip")
def image_flip(
image: remote_blob_util.BlobDef,
flip_code: Union[int, remote_blob_util.BlobDef],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator flips the images.
The flip code corresponds to the different flip mode:
0 (0x00): Non Flip
1 (0x01): Horizontal Flip
16 (0x10): Vertical Flip
17 (0x11): Both Horizontal and Vertical Flip
Args:
image (BlobDef): The input images.
flip_code (Union[int, BlobDef]): The flip code.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import cv2
import numpy as np
import oneflow as flow
import oneflow.typing as tp
def _of_image_flip(images, image_shape, flip_code):
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_flip_job(
images_def: tp.ListListNumpy.Placeholder(shape=image_shape, dtype=flow.float)
) -> tp.ListListNumpy:
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
flip_images = flow.image_flip(images_buffer, flip_code)
return flow.tensor_buffer_to_tensor_list(
flip_images, shape=image_shape[1:], dtype=flow.float
)
image_tensor = image_flip_job([images])
return image_tensor[0]
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
assert image_static_shape[0] == 1, str(image_static_shape)
image_static_shape[0] = len(image_shapes)
return image_static_shape
if __name__ == "__main__":
img = _read_images_by_cv(['./img/1.jpg', './img/2.jpg', './img/3.jpg'])
img_shape = _get_images_static_shape(img) # In example is [3, 349, 367, 3]
image = _of_image_flip(img,
tuple(img_shape),
flip_code=1)
"""
assert isinstance(image, remote_blob_util.BlobDef)
if name is None:
name = id_util.UniqueStr("ImageFlip_")
if not isinstance(flip_code, remote_blob_util.BlobDef):
assert isinstance(flip_code, int)
flip_code = flow.constant(
flip_code,
shape=(image.shape[0],),
dtype=flow.int8,
name="{}_FlipCode_".format(name),
)
else:
assert image.shape[0] == flip_code.shape[0]
op = (
flow.user_op_builder(name)
.Op("image_flip")
.Input("in", [image])
.Input("flip_code", [flip_code])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("detection.object_bbox_flip", "object_bbox_flip")
def object_bbox_flip(
bbox: remote_blob_util.BlobDef,
image_size: remote_blob_util.BlobDef,
flip_code: Union[int, remote_blob_util.BlobDef],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator flips the object bounding box.
The flip code corresponds to the different flip mode:
0 (0x00): Non Flip
1 (0x01): Horizontal Flip
16 (0x10): Vertical Flip
17 (0x11): Both Horizontal and Vertical Flip
Args:
bbox (BlobDef): The bounding box.
image_size (BlobDef): The size of input image.
flip_code (Union[int, BlobDef]): The flip code.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import numpy as np
import oneflow as flow
import oneflow.typing as tp
def _of_object_bbox_flip(bbox_list, image_size, flip_code):
bbox_shape = _get_bbox_static_shape(bbox_list)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def object_bbox_flip_job(
bbox_def: tp.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
image_size_def: tp.ListNumpy.Placeholder(
shape=image_size.shape, dtype=flow.int32
),
) -> tp.ListListNumpy:
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
flip_bbox = flow.object_bbox_flip(bbox_buffer, image_size_def, flip_code)
return flow.tensor_buffer_to_tensor_list(
flip_bbox, shape=bbox_shape[1:], dtype=flow.float
)
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
bbox_tensor = object_bbox_flip_job([input_bbox_list], [image_size])
return bbox_tensor[0]
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
if __name__ == "__main__":
bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
[30.0, 50.0, 70.0, 100.0]]]).astype(np.single) # [x1, y1, x2, y2]
image_size = np.array([[480, 620]]).astype(np.int32)
bbox_flip = _of_object_bbox_flip(bbox,
image_size,
flip_code=1) # Horizontal Flip
print(bbox_flip[0][0])
# [[399. 40. 459. 160.]
# [409. 50. 449. 100.]]
"""
assert isinstance(bbox, remote_blob_util.BlobDef)
assert isinstance(image_size, remote_blob_util.BlobDef)
assert bbox.shape[0] == image_size.shape[0]
if name is None:
name = id_util.UniqueStr("ObjectBboxFlip_")
if not isinstance(flip_code, remote_blob_util.BlobDef):
assert isinstance(flip_code, int)
flip_code = flow.constant(
flip_code,
shape=(bbox.shape[0],),
dtype=flow.int8,
name="{}_FlipCode".format(name),
)
else:
assert bbox.shape[0] == flip_code.shape[0]
op = (
flow.user_op_builder(name)
.Op("object_bbox_flip")
.Input("bbox", [bbox])
.Input("image_size", [image_size])
.Input("flip_code", [flip_code])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("detection.object_bbox_scale", "object_bbox_scale")
def object_bbox_scale(
bbox: remote_blob_util.BlobDef,
scale: remote_blob_util.BlobDef,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator scales the input image and the corresponding bounding box. It returns the scaled bounding box.
Args:
bbox (BlobDef): The bounding box.
scale (BlobDef): The scale factor.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob.
For example:
.. code-block:: python
import numpy as np
import oneflow as flow
import oneflow.typing as tp
import cv2
from typing import Tuple
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return images
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_target_resize_bbox_scale(images, bbox_list, target_size, max_size):
image_shape = _get_images_static_shape(images)
bbox_shape = _get_bbox_static_shape(bbox_list)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def target_resize_bbox_scale_job(
image_def: tp.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
bbox_def: tp.ListListNumpy.Placeholder(
shape=tuple(bbox_shape), dtype=flow.float
),
) -> Tuple[tp.ListListNumpy, tp.ListNumpy]:
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
bbox_buffer = flow.tensor_list_to_tensor_buffer(bbox_def)
scaled_bbox = flow.object_bbox_scale(bbox_buffer, scale)
scaled_bbox_list = flow.tensor_buffer_to_tensor_list(
scaled_bbox, shape=bbox_shape[1:], dtype=flow.float
)
return scaled_bbox_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_bbox_list = [np.expand_dims(bbox, axis=0) for bbox in bbox_list]
output_bbox_list, output_image_size = target_resize_bbox_scale_job(
[input_image_list], [input_bbox_list]
)
return output_bbox_list[0], output_image_size[0]
if __name__ == "__main__":
images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
bbox = np.array([[[20.0, 40.0, 80.0, 160.0],
[30.0, 50.0, 70.0, 100.0]],
[[26.0, 40.0, 86.0, 160.0],
[36.0, 56.0, 76.0, 106.0]]]).astype(np.single) # [x1, y1, x2, y2]
bbox, size = _of_target_resize_bbox_scale(images, bbox, 280, 350)
print(bbox[0])
print(bbox[1])
# [[[ 16.0218 32.09169 64.0872 128.36676 ]
# [ 24.032698 40.114613 56.076298 80.229225]]]
# [[[ 24.186047 37.170418 80. 148.68167 ]
# [ 33.488373 52.038586 70.69768 98.5016 ]]]
"""
assert isinstance(bbox, remote_blob_util.BlobDef)
assert isinstance(scale, remote_blob_util.BlobDef)
assert bbox.shape[0] == scale.shape[0]
if name is None:
name = id_util.UniqueStr("ObjectBboxScale_")
op = (
flow.user_op_builder(name)
.Op("object_bbox_scale")
.Input("bbox", [bbox])
.Input("scale", [scale])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export(
"detection.object_segmentation_polygon_flip", "object_segmentation_polygon_flip"
)
def object_segm_poly_flip(
poly: remote_blob_util.BlobDef,
image_size: remote_blob_util.BlobDef,
flip_code: Union[int, remote_blob_util.BlobDef],
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator flips the segmentation points in image.
The flip code corresponds to the different flip mode:
0 (0x00): Non Flip
1 (0x01): Horizontal Flip
16 (0x10): Vertical Flip
17 (0x11): Both Horizontal and Vertical Flip
Args:
poly (BlobDef): The poly segmentation points.
image_size (BlobDef): The image size.
flip_code (Union[int, BlobDef]): The filp code.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob
For example:
.. code-block:: python
import numpy as np
import oneflow as flow
import oneflow.typing as tp
import cv2
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return [np.expand_dims(image, axis=0) for image in images]
def _of_object_segm_poly_flip(poly_list, image_size, flip_code):
poly_shape = _get_segm_poly_static_shape(poly_list)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def object_segm_poly_flip_job(
poly_def: tp.ListListNumpy.Placeholder(
shape=tuple(poly_shape), dtype=flow.float
),
image_size_def: tp.ListNumpy.Placeholder(
shape=image_size.shape, dtype=flow.int32
),
) -> tp.ListListNumpy:
poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
flip_poly = flow.object_segmentation_polygon_flip(
poly_buffer, image_size_def, flip_code
)
return flow.tensor_buffer_to_tensor_list(
flip_poly, shape=poly_shape[1:], dtype=flow.float
)
input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
poly_tensor = object_segm_poly_flip_job([input_poly_list], [image_size])
return poly_tensor[0]
def _get_segm_poly_static_shape(poly_list):
poly_shapes = [poly.shape for poly in poly_list]
poly_static_shape = np.amax(poly_shapes, axis=0)
assert isinstance(
poly_static_shape, np.ndarray
), "poly_shapes: {}, poly_static_shape: {}".format(
str(poly_shapes), str(poly_static_shape)
)
poly_static_shape = poly_static_shape.tolist()
poly_static_shape.insert(0, len(poly_list))
return poly_static_shape
if __name__ == "__main__":
segm_poly_list = []
segmentations = [[[20.0, 40.0], [80.0, 160.0], [100.0, 210.0]], # Image 1 segmentation point
[[25.0, 45.0], [85.0, 165.0], [105.0, 215.0]]] # Image 2 segmentation point
for segmentation in segmentations:
polygon = []
for seg in segmentation:
polygon.extend(seg)
poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
segm_poly_list.append(poly_array)
image_size = np.array([[480, 620], # Image 1 size
[640, 640]]).astype(np.int32) # Image 2 size
of_segm_poly_list = _of_object_segm_poly_flip(
segm_poly_list, image_size, flip_code=1
) # Horizontal Flip
print(of_segm_poly_list[0])
print(of_segm_poly_list[1])
# of_segm_poly_list[0]
# [[[460. 40.]
# [400. 160.]
# [380. 210.]]]
# of_segm_poly_list[1]
# [[[615. 45.]
# [555. 165.]
# [535. 215.]]]
"""
assert isinstance(poly, remote_blob_util.BlobDef)
assert isinstance(image_size, remote_blob_util.BlobDef)
assert poly.shape[0] == image_size.shape[0]
if name is None:
name = id_util.UniqueStr("ObjectSegmPolyFilp_")
if not isinstance(flip_code, remote_blob_util.BlobDef):
assert isinstance(flip_code, int)
flip_code = flow.constant(
flip_code,
shape=(poly.shape[0],),
dtype=flow.int8,
name="{}_FlipCode".format(name),
)
else:
assert poly.shape[0] == flip_code.shape[0]
op = (
flow.user_op_builder(name)
.Op("object_segmentation_polygon_flip")
.Input("poly", [poly])
.Input("image_size", [image_size])
.Input("flip_code", [flip_code])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export(
"detection.object_segmentation_polygon_scale", "object_segmentation_polygon_scale"
)
def object_segm_poly_scale(
poly: remote_blob_util.BlobDef,
scale: remote_blob_util.BlobDef,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator scales the segmentation points in the images.
Args:
poly (BlobDef): The poly segmentation points.
scale (BlobDef): The image scale.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob.
For example:
.. code-block:: python
import numpy as np
import oneflow as flow
import oneflow.typing as tp
import cv2
from typing import Tuple
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return images
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_segm_poly_static_shape(poly_list):
poly_shapes = [poly.shape for poly in poly_list]
poly_static_shape = np.amax(poly_shapes, axis=0)
assert isinstance(
poly_static_shape, np.ndarray
), "poly_shapes: {}, poly_static_shape: {}".format(
str(poly_shapes), str(poly_static_shape)
)
poly_static_shape = poly_static_shape.tolist()
poly_static_shape.insert(0, len(poly_list))
return poly_static_shape
def _get_bbox_static_shape(bbox_list):
bbox_shapes = [bbox.shape for bbox in bbox_list]
bbox_static_shape = np.amax(bbox_shapes, axis=0)
assert isinstance(
bbox_static_shape, np.ndarray
), "bbox_shapes: {}, bbox_static_shape: {}".format(
str(bbox_shapes), str(bbox_static_shape)
)
bbox_static_shape = bbox_static_shape.tolist()
bbox_static_shape.insert(0, len(bbox_list))
return bbox_static_shape
def _of_object_segm_poly_scale(images, poly_list, target_size, max_size):
image_shape = _get_images_static_shape(images)
print(image_shape)
poly_shape = _get_segm_poly_static_shape(poly_list)
print("Poly shape is ", poly_shape)
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def object_segm_poly_scale_job(
image_def: tp.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
poly_def: tp.ListListNumpy.Placeholder(
shape=tuple(poly_shape), dtype=flow.float
),
) -> Tuple[tp.ListListNumpy, tp.ListNumpy]:
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
scaled_poly = flow.object_segmentation_polygon_scale(poly_buffer, scale)
scaled_poly_list = flow.tensor_buffer_to_tensor_list(
scaled_poly, shape=poly_shape[1:], dtype=flow.float
)
return scaled_poly_list, new_size
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
output_poly_list, output_image_size = object_segm_poly_scale_job(
[input_image_list], [input_poly_list]
)
return output_poly_list[0], output_image_size
if __name__ == "__main__":
images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
segm_poly_list = []
segmentations = [[[20.0, 40.0], [80.0, 160.0], [100.0, 210.0]], # Image 1 segmentation point
[[25.0, 45.0], [85.0, 165.0], [105.0, 215.0]]] # Image 2 segmentation point
for segmentation in segmentations:
polygon = []
for seg in segmentation:
polygon.extend(seg)
poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
segm_poly_list.append(poly_array)
bbox, size = _of_object_segm_poly_scale(images, segm_poly_list, 280, 350)
"""
assert isinstance(poly, remote_blob_util.BlobDef)
assert isinstance(scale, remote_blob_util.BlobDef)
assert poly.shape[0] == scale.shape[0]
if name is None:
name = id_util.UniqueStr("ObjectSegmPolyFilp_")
op = (
flow.user_op_builder(name)
.Op("object_segmentation_polygon_scale")
.Input("poly", [poly])
.Input("scale", [scale])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export(
"detection.object_segmentation_polygon_to_mask",
"object_segmentation_polygon_to_mask",
)
def object_segm_poly_to_mask(
poly: remote_blob_util.BlobDef,
poly_index: remote_blob_util.BlobDef,
image_size: remote_blob_util.BlobDef,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator converts the poly segment points to the segment mask array.
Args:
poly (BlobDef): The poly segment points.
poly_index (BlobDef): The poly segment index.
image_size (BlobDef): The input image size.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob.
.. code-block:: python
import numpy as np
import oneflow as flow
import oneflow.typing as tp
import cv2
from typing import Tuple
def _read_images_by_cv(image_files):
images = [cv2.imread(image_file).astype(np.single) for image_file in image_files]
return images
def _get_images_static_shape(images):
image_shapes = [image.shape for image in images]
image_static_shape = np.amax(image_shapes, axis=0)
assert isinstance(
image_static_shape, np.ndarray
), "image_shapes: {}, image_static_shape: {}".format(
str(image_shapes), str(image_static_shape)
)
image_static_shape = image_static_shape.tolist()
image_static_shape.insert(0, len(image_shapes))
return image_static_shape
def _get_segm_poly_static_shape(poly_list, poly_index_list):
assert len(poly_list) == len(poly_index_list)
num_images = len(poly_list)
max_poly_elems = 0
for poly, poly_index in zip(poly_list, poly_index_list):
assert len(poly.shape) == 2
assert len(poly_index.shape) == 2, str(poly_index.shape)
assert poly.shape[0] == poly_index.shape[0]
assert poly.shape[1] == 2
assert poly_index.shape[1] == 3
max_poly_elems = max(max_poly_elems, poly.shape[0])
return [num_images, max_poly_elems, 2], [num_images, max_poly_elems, 3]
def _segm_poly_to_tensor(img_segm_poly_list):
poly_array_list = []
poly_index_array_list = []
for img_idx, segm_poly_list in enumerate(img_segm_poly_list):
img_poly_elem_list = []
img_poly_index_list = []
for obj_idx, poly_list in enumerate(segm_poly_list):
for poly_idx, poly in enumerate(poly_list):
img_poly_elem_list.extend(poly)
for pt_idx, pt in enumerate(poly):
if pt_idx % 2 == 0:
img_poly_index_list.append([pt_idx / 2, poly_idx, obj_idx])
img_poly_array = np.array(img_poly_elem_list, dtype=np.single).reshape(-1, 2)
assert img_poly_array.size > 0, segm_poly_list
poly_array_list.append(img_poly_array)
img_poly_index_array = np.array(img_poly_index_list, dtype=np.int32)
assert img_poly_index_array.size > 0, segm_poly_list
poly_index_array_list.append(img_poly_index_array)
return poly_array_list, poly_index_array_list
def _of_poly_to_mask_pipline(
images, poly_list, poly_index_list, num_segms_list, target_size, max_size
):
print(len(images))
print(len(poly_list))
assert len(images) == len(poly_list)
assert len(poly_list) == len(poly_index_list)
image_shape = _get_images_static_shape(images)
poly_shape, poly_index_shape = _get_segm_poly_static_shape(
poly_list, poly_index_list
)
max_num_segms = max(num_segms_list)
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_data_type(flow.float)
@flow.global_function(function_config=func_config)
def poly_to_mask_job(
image_def: tp.ListListNumpy.Placeholder(
shape=tuple(image_shape), dtype=flow.float
),
poly_def: tp.ListListNumpy.Placeholder(
shape=tuple(poly_shape), dtype=flow.float
),
poly_index_def: tp.ListListNumpy.Placeholder(
shape=tuple(poly_index_shape), dtype=flow.int32
),
) -> Tuple[tp.ListListNumpy, tp.ListListNumpy]:
images_buffer = flow.tensor_list_to_tensor_buffer(image_def)
resized_images_buffer, new_size, scale = flow.image_target_resize(
images_buffer, target_size=target_size, max_size=max_size
)
poly_buffer = flow.tensor_list_to_tensor_buffer(poly_def)
poly_index_buffer = flow.tensor_list_to_tensor_buffer(poly_index_def)
scaled_poly_buffer = flow.object_segmentation_polygon_scale(poly_buffer, scale)
mask_buffer = flow.object_segmentation_polygon_to_mask(
scaled_poly_buffer, poly_index_buffer, new_size
)
mask_list = flow.tensor_buffer_to_tensor_list(
mask_buffer, shape=(max_num_segms, target_size, max_size), dtype=flow.int8
)
scaled_poly_list = flow.tensor_buffer_to_tensor_list(
scaled_poly_buffer, shape=poly_shape[1:], dtype=flow.float
)
return mask_list, scaled_poly_list
input_image_list = [np.expand_dims(image, axis=0) for image in images]
input_poly_list = [np.expand_dims(poly, axis=0) for poly in poly_list]
input_poly_index_list = [
np.expand_dims(poly_index, axis=0) for poly_index in poly_index_list
]
output_mask_list, output_poly_list = poly_to_mask_job(
[input_image_list], [input_poly_list], [input_poly_index_list]
)
return output_mask_list[0], output_poly_list[0]
if __name__ == "__main__":
images = _read_images_by_cv(['./img/1.jpg', './img/2.jpg'])
segm_poly_list = []
segmentations = [[[20.0, 40.0, 80.0, 160.0, 100.0, 210.0, 120.0, 215.0]], # Image 1 segmentation point
[[24.0, 42.0, 86.0, 168.0, 103.0, 223.0, 125.0, 235.0]]] # Image 2 segmentation point
for segmentation in segmentations:
polygon = []
for seg in segmentation:
polygon.extend(seg)
poly_array = np.array(polygon, dtype=np.single).reshape(-1, 2) # Reshape it
segm_poly_list.append([poly_array])
poly_list, poly_index_list = _segm_poly_to_tensor(segm_poly_list)
num_segms_list = [len(segm_poly_list) for segm_poly_list in segm_poly_list]
target_size = 280
max_size = 350
of_mask_list, of_scaled_poly_list = _of_poly_to_mask_pipline(
images, poly_list, poly_index_list, num_segms_list, target_size, max_size
)
of_mask_list = [
mask_array.reshape(-1, mask_array.shape[-2], mask_array.shape[-1])
for mask_array in of_mask_list
] # reshape it
"""
assert isinstance(poly, remote_blob_util.BlobDef)
assert isinstance(poly_index, remote_blob_util.BlobDef)
assert isinstance(image_size, remote_blob_util.BlobDef)
assert poly.shape[0] == poly_index.shape[0]
assert poly.shape[0] == image_size.shape[0]
if name is None:
name = id_util.UniqueStr("ObjectSegmPolyToMask_")
op = (
flow.user_op_builder(name)
.Op("object_segmentation_polygon_to_mask")
.Input("poly", [poly])
.Input("poly_index", [poly_index])
.Input("image_size", [image_size])
.Output("out")
.Build()
)
return op.InferAndTryRun().SoleOutputBlob()
@oneflow_export("data.coco_reader")
def api_coco_reader(
annotation_file: str,
image_dir: str,
batch_size: int,
shuffle: bool = True,
random_seed: Optional[int] = None,
group_by_aspect_ratio: bool = True,
stride_partition: bool = True,
name: str = None,
) -> remote_blob_util.BlobDef:
assert name is not None
module = flow.find_or_create_module(
name,
lambda: COCOReader(
annotation_file=annotation_file,
image_dir=image_dir,
batch_size=batch_size,
shuffle=shuffle,
random_seed=random_seed,
group_by_aspect_ratio=group_by_aspect_ratio,
stride_partition=stride_partition,
name=name,
),
)
return module()
class COCOReader(module_util.Module):
def __init__(
self,
annotation_file: str,
image_dir: str,
batch_size: int,
shuffle: bool = True,
random_seed: Optional[int] = None,
group_by_aspect_ratio: bool = True,
stride_partition: bool = True,
name: str = None,
):
assert name is not None
if random_seed is None:
random_seed = random.randrange(sys.maxsize)
module_util.Module.__init__(self, name)
self.op_module_builder = (
flow.consistent_user_op_module_builder("COCOReader")
.Output("image")
.Output("image_id")
.Output("image_size")
.Output("gt_bbox")
.Output("gt_label")
.Output("gt_segm")
.Output("gt_segm_index")
.Attr("session_id", flow.current_scope().session_id)
.Attr("annotation_file", annotation_file)
.Attr("image_dir", image_dir)
.Attr("batch_size", batch_size)
.Attr("shuffle_after_epoch", shuffle)
.Attr("random_seed", random_seed)
.Attr("group_by_ratio", group_by_aspect_ratio)
.Attr("stride_partition", stride_partition)
.CheckAndComplete()
)
self.op_module_builder.user_op_module.InitOpKernel()
def forward(self):
if self.call_seq_no == 0:
name = self.module_name
else:
name = id_util.UniqueStr("COCOReader")
return (
self.op_module_builder.OpName(name)
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
@oneflow_export("data.ofrecord_image_classification_reader")
def ofrecord_image_classification_reader(
ofrecord_dir: str,
image_feature_name: str,
label_feature_name: str,
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
random_shuffle: bool = False,
shuffle_buffer_size: int = 1024,
shuffle_after_epoch: bool = False,
color_space: str = "BGR",
decode_buffer_size_per_thread: int = 32,
num_decode_threads_per_machine: Optional[int] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
"""This operator creates a reader for image classification tasks.
Args:
ofrecord_dir (str): The directory of ofrecord file.
image_feature_name (str): The name of the image feature.
label_feature_name (str): The name of the label feature.
batch_size (int, optional): The batch_size. Defaults to 1.
data_part_num (int, optional): The amounts of data part. Defaults to 1.
part_name_prefix (str, optional): The prefix of data part name. Defaults to "part-".
part_name_suffix_length (int, optional): The suffix name of data part name. Defaults to -1.
random_shuffle (bool, optional): Whether to random shuffle the data. Defaults to False.
shuffle_buffer_size (int, optional): The buffer size for shuffle data. Defaults to 1024.
shuffle_after_epoch (bool, optional): Whether to shuffle the data after each epoch. Defaults to False.
color_space (str, optional): The color space. Defaults to "BGR".
decode_buffer_size_per_thread (int, optional): The decode buffer size for per thread. Defaults to 32.
num_decode_threads_per_machine (Optional[int], optional): The amounts of decode threads for each machine. Defaults to None.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
BlobDef: The result Blob.
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def image_classifier_job() -> Tuple[tp.Numpy, tp.Numpy]:
image, label = flow.data.ofrecord_image_classification_reader(
ofrecord_dir="./imgdataset",
image_feature_name="encoded",
label_feature_name="class/label",
batch_size=8,
data_part_num=1,
part_name_prefix="part-",
part_name_suffix_length=-1,
random_shuffle=False,
shuffle_after_epoch=False,
color_space="RGB",
decode_buffer_size_per_thread=16,
)
res_image, scale, new_size = flow.image.Resize(
image, target_size=(224, 224)
)
return res_image, label
if __name__ == "__main__":
images, labels = image_classifier_job()
# images.shape (8, 224, 224, 3)
"""
if name is None:
name = id_util.UniqueStr("OFRecordImageClassificationReader_")
(image, label) = (
flow.user_op_builder(name)
.Op("ofrecord_image_classification_reader")
.Output("image")
.Output("label")
.Attr("data_dir", ofrecord_dir)
.Attr("data_part_num", data_part_num)
.Attr("batch_size", batch_size)
.Attr("part_name_prefix", part_name_prefix)
.Attr("random_shuffle", random_shuffle)
.Attr("shuffle_buffer_size", shuffle_buffer_size)
.Attr("shuffle_after_epoch", shuffle_after_epoch)
.Attr("part_name_suffix_length", part_name_suffix_length)
.Attr("color_space", color_space)
.Attr("image_feature_name", image_feature_name)
.Attr("label_feature_name", label_feature_name)
.Attr("decode_buffer_size_per_thread", decode_buffer_size_per_thread)
.Attr("num_decode_threads_per_machine", num_decode_threads_per_machine or 0)
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
label = flow.tensor_buffer_to_tensor(label, dtype=flow.int32, instance_shape=[1])
label = flow.squeeze(label, axis=[-1])
return image, label
@oneflow_export("data.OneRecDecoder", "data.onerec_decoder")
def OneRecDecoder(
input_blob,
key,
dtype,
shape,
is_dynamic=False,
reshape=None,
batch_padding=None,
name=None,
):
if name is None:
name = id_util.UniqueStr("OneRecDecoder_")
if reshape is not None:
has_reshape = True
else:
has_reshape = False
reshape = shape
if batch_padding is not None:
has_batch_padding = True
else:
has_batch_padding = False
batch_padding = shape
return (
flow.user_op_builder(name)
.Op("onerec_decoder")
.Input("in", [input_blob])
.Output("out")
.Attr("key", key)
.Attr("data_type", dtype)
.Attr("static_shape", shape)
.Attr("is_dynamic", is_dynamic)
.Attr("has_reshape", has_reshape)
.Attr("reshape", reshape)
.Attr("has_batch_padding", has_batch_padding)
.Attr("batch_padding", batch_padding)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.python.framework.module.Module.__init__",
"oneflow.random.gen_seed",
"oneflow.user_op_builder",
"oneflow.user_op_module_builder",
"oneflow.squeeze",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.current_scope",
"oneflow.consistent_user_op_module_builder",
"oneflow.python.framewor... | [((1022, 1092), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.OFRecordRawDecoder"""', '"""data.ofrecord_raw_decoder"""'], {}), "('data.OFRecordRawDecoder', 'data.ofrecord_raw_decoder')\n", (1036, 1092), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1882, 1956), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.OFRecordBytesDecoder"""', '"""data.ofrecord_bytes_decoder"""'], {}), "('data.OFRecordBytesDecoder', 'data.ofrecord_bytes_decoder')\n", (1896, 1956), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2437, 2537), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.OFRecordImageDecoderRandomCrop"""', '"""data.ofrecord_image_decoder_random_crop"""'], {}), "('data.OFRecordImageDecoderRandomCrop',\n 'data.ofrecord_image_decoder_random_crop')\n", (2451, 2537), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6950, 7024), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.OFRecordImageDecoder"""', '"""data.ofrecord_image_decoder"""'], {}), "('data.OFRecordImageDecoder', 'data.ofrecord_image_decoder')\n", (6964, 7024), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9250, 9312), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.Resize"""', '"""image.resize"""', '"""image_resize"""'], {}), "('image.Resize', 'image.resize', 'image_resize')\n", (9264, 9312), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((18137, 18197), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.target_resize"""', '"""image_target_resize"""'], {}), "('image.target_resize', 'image_target_resize')\n", (18151, 18197), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((23432, 23506), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', 
(['"""image.CropMirrorNormalize"""', '"""image.crop_mirror_normalize"""'], {}), "('image.CropMirrorNormalize', 'image.crop_mirror_normalize')\n", (23446, 23506), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((28753, 28809), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.random_crop"""', '"""image_random_crop"""'], {}), "('image.random_crop', 'image_random_crop')\n", (28767, 28809), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((34222, 34275), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random.CoinFlip"""', '"""random.coin_flip"""'], {}), "('random.CoinFlip', 'random.coin_flip')\n", (34236, 34275), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((37949, 37995), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.decode"""', '"""image_decode"""'], {}), "('image.decode', 'image_decode')\n", (37963, 37995), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((40830, 40886), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.batch_align"""', '"""image_batch_align"""'], {}), "('image.batch_align', 'image_batch_align')\n", (40844, 40886), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((44671, 44723), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.normalize"""', '"""image_normalize"""'], {}), "('image.normalize', 'image_normalize')\n", (44685, 44723), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((48126, 48168), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""image.flip"""', '"""image_flip"""'], {}), "('image.flip', 'image_flip')\n", (48140, 48168), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((51711, 51775), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""detection.object_bbox_flip"""', '"""object_bbox_flip"""'], {}), 
"('detection.object_bbox_flip', 'object_bbox_flip')\n", (51725, 51775), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((55874, 55940), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""detection.object_bbox_scale"""', '"""object_bbox_scale"""'], {}), "('detection.object_bbox_scale', 'object_bbox_scale')\n", (55888, 55940), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((60949, 61049), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""detection.object_segmentation_polygon_flip"""', '"""object_segmentation_polygon_flip"""'], {}), "('detection.object_segmentation_polygon_flip',\n 'object_segmentation_polygon_flip')\n", (60963, 61049), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((66138, 66240), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""detection.object_segmentation_polygon_scale"""', '"""object_segmentation_polygon_scale"""'], {}), "('detection.object_segmentation_polygon_scale',\n 'object_segmentation_polygon_scale')\n", (66152, 66240), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((71844, 71950), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""detection.object_segmentation_polygon_to_mask"""', '"""object_segmentation_polygon_to_mask"""'], {}), "('detection.object_segmentation_polygon_to_mask',\n 'object_segmentation_polygon_to_mask')\n", (71858, 71950), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((80137, 80171), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.coco_reader"""'], {}), "('data.coco_reader')\n", (80151, 80171), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((82578, 82637), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ofrecord_image_classification_reader"""'], {}), "('data.ofrecord_image_classification_reader')\n", (82592, 82637), False, 'from 
oneflow.python.oneflow_export import oneflow_export\n'), ((86887, 86946), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.OneRecDecoder"""', '"""data.onerec_decoder"""'], {}), "('data.OneRecDecoder', 'data.onerec_decoder')\n", (86901, 86946), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((86743, 86816), 'oneflow.tensor_buffer_to_tensor', 'flow.tensor_buffer_to_tensor', (['label'], {'dtype': 'flow.int32', 'instance_shape': '[1]'}), '(label, dtype=flow.int32, instance_shape=[1])\n', (86771, 86816), True, 'import oneflow as flow\n'), ((86829, 86859), 'oneflow.squeeze', 'flow.squeeze', (['label'], {'axis': '[-1]'}), '(label, axis=[-1])\n', (86841, 86859), True, 'import oneflow as flow\n'), ((1409, 1449), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecordRawDecoder_"""'], {}), "('OFRecordRawDecoder_')\n", (1426, 1449), True, 'import oneflow.python.framework.id_util as id_util\n'), ((2136, 2178), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecordBytesDecoder_"""'], {}), "('OFRecordBytesDecoder_')\n", (2153, 2178), True, 'import oneflow.python.framework.id_util as id_util\n'), ((5858, 5897), 'oneflow.python.framework.module.Module.__init__', 'module_util.Module.__init__', (['self', 'name'], {}), '(self, name)\n', (5885, 5897), True, 'import oneflow.python.framework.module as module_util\n'), ((5923, 5956), 'oneflow.random.gen_seed', 'flow.random.gen_seed', (['random_seed'], {}), '(random_seed)\n', (5943, 5956), True, 'import oneflow as flow\n'), ((8907, 8949), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecordImageDecoder_"""'], {}), "('OFRecordImageDecoder_')\n", (8924, 8949), True, 'import oneflow.python.framework.id_util as id_util\n'), ((15704, 15737), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageResize_"""'], {}), "('ImageResize_')\n", (15721, 15737), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((16851, 16927), 'oneflow.tensor_buffer_to_tensor', 'flow.tensor_buffer_to_tensor', (['scale'], {'dtype': 'flow.float32', 'instance_shape': '(2,)'}), '(scale, dtype=flow.float32, instance_shape=(2,))\n', (16879, 16927), True, 'import oneflow as flow\n'), ((16969, 17046), 'oneflow.tensor_buffer_to_tensor', 'flow.tensor_buffer_to_tensor', (['new_size'], {'dtype': 'flow.int32', 'instance_shape': '(2,)'}), '(new_size, dtype=flow.int32, instance_shape=(2,))\n', (16997, 17046), True, 'import oneflow as flow\n'), ((23059, 23098), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageTargetResize_"""'], {}), "('ImageTargetResize_')\n", (23076, 23098), True, 'import oneflow.python.framework.id_util as id_util\n'), ((27652, 27693), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""CropMirrorNormalize_"""'], {}), "('CropMirrorNormalize_')\n", (27669, 27693), True, 'import oneflow.python.framework.id_util as id_util\n'), ((33245, 33284), 'oneflow.python.framework.module.Module.__init__', 'module_util.Module.__init__', (['self', 'name'], {}), '(self, name)\n', (33272, 33284), True, 'import oneflow.python.framework.module as module_util\n'), ((33310, 33343), 'oneflow.random.gen_seed', 'flow.random.gen_seed', (['random_seed'], {}), '(random_seed)\n', (33330, 33343), True, 'import oneflow as flow\n'), ((37152, 37191), 'oneflow.python.framework.module.Module.__init__', 'module_util.Module.__init__', (['self', 'name'], {}), '(self, name)\n', (37179, 37191), True, 'import oneflow.python.framework.module as module_util\n'), ((37217, 37250), 'oneflow.random.gen_seed', 'flow.random.gen_seed', (['random_seed'], {}), '(random_seed)\n', (37237, 37250), True, 'import oneflow as flow\n'), ((40504, 40537), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageDecode_"""'], {}), "('ImageDecode_')\n", (40521, 40537), True, 'import oneflow.python.framework.id_util as 
id_util\n'), ((44323, 44360), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageBatchAlign_"""'], {}), "('ImageBatchAlign_')\n", (44340, 44360), True, 'import oneflow.python.framework.id_util as id_util\n'), ((47744, 47780), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageNormalize_"""'], {}), "('ImageNormalize_')\n", (47761, 47780), True, 'import oneflow.python.framework.id_util as id_util\n'), ((51093, 51124), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageFlip_"""'], {}), "('ImageFlip_')\n", (51110, 51124), True, 'import oneflow.python.framework.id_util as id_util\n'), ((55204, 55240), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ObjectBboxFlip_"""'], {}), "('ObjectBboxFlip_')\n", (55221, 55240), True, 'import oneflow.python.framework.id_util as id_util\n'), ((60670, 60707), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ObjectBboxScale_"""'], {}), "('ObjectBboxScale_')\n", (60687, 60707), True, 'import oneflow.python.framework.id_util as id_util\n'), ((65448, 65488), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ObjectSegmPolyFilp_"""'], {}), "('ObjectSegmPolyFilp_')\n", (65465, 65488), True, 'import oneflow.python.framework.id_util as id_util\n'), ((71546, 71586), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ObjectSegmPolyFilp_"""'], {}), "('ObjectSegmPolyFilp_')\n", (71563, 71586), True, 'import oneflow.python.framework.id_util as id_util\n'), ((79782, 79824), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ObjectSegmPolyToMask_"""'], {}), "('ObjectSegmPolyToMask_')\n", (79799, 79824), True, 'import oneflow.python.framework.id_util as id_util\n'), ((81375, 81414), 'oneflow.python.framework.module.Module.__init__', 'module_util.Module.__init__', (['self', 'name'], {}), '(self, name)\n', (81402, 81414), True, 'import 
oneflow.python.framework.module as module_util\n'), ((85715, 85770), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecordImageClassificationReader_"""'], {}), "('OFRecordImageClassificationReader_')\n", (85732, 85770), True, 'import oneflow.python.framework.id_util as id_util\n'), ((87131, 87166), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OneRecDecoder_"""'], {}), "('OneRecDecoder_')\n", (87148, 87166), True, 'import oneflow.python.framework.id_util as id_util\n'), ((6703, 6755), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecordImageDecoderRandomCrop_"""'], {}), "('OFRecordImageDecoderRandomCrop_')\n", (6720, 6755), True, 'import oneflow.python.framework.id_util as id_util\n'), ((33990, 34027), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageRandomCrop_"""'], {}), "('ImageRandomCrop_')\n", (34007, 34027), True, 'import oneflow.python.framework.id_util as id_util\n'), ((37758, 37788), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""CoinFlip_"""'], {}), "('CoinFlip_')\n", (37775, 37788), True, 'import oneflow.python.framework.id_util as id_util\n'), ((81337, 81366), 'random.randrange', 'random.randrange', (['sys.maxsize'], {}), '(sys.maxsize)\n', (81353, 81366), False, 'import random\n'), ((82387, 82418), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""COCOReader"""'], {}), "('COCOReader')\n", (82404, 82418), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14086, 14110), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (14108, 14110), False, 'import traceback\n'), ((14597, 14621), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (14619, 14621), False, 'import traceback\n'), ((15196, 15220), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (15218, 15220), False, 'import traceback\n'), ((15498, 15522), 
'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (15520, 15522), False, 'import traceback\n'), ((28155, 28181), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (28175, 28181), True, 'import oneflow as flow\n'), ((51490, 51516), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (51510, 51516), True, 'import oneflow as flow\n'), ((60728, 60754), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (60748, 60754), True, 'import oneflow as flow\n'), ((71607, 71633), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (71627, 71633), True, 'import oneflow as flow\n'), ((40558, 40584), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (40578, 40584), True, 'import oneflow as flow\n'), ((47887, 47913), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (47907, 47913), True, 'import oneflow as flow\n'), ((55603, 55629), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (55623, 55629), True, 'import oneflow as flow\n'), ((65851, 65877), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (65871, 65877), True, 'import oneflow as flow\n'), ((79845, 79871), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (79865, 79871), True, 'import oneflow as flow\n'), ((37298, 37338), 'oneflow.user_op_module_builder', 'flow.user_op_module_builder', (['"""coin_flip"""'], {}), "('coin_flip')\n", (37325, 37338), True, 'import oneflow as flow\n'), ((44381, 44407), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (44401, 44407), True, 'import oneflow as flow\n'), ((2200, 2226), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (2220, 2226), True, 'import oneflow as flow\n'), ((8971, 8997), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), 
'(name)\n', (8991, 8997), True, 'import oneflow as flow\n'), ((33391, 33439), 'oneflow.user_op_module_builder', 'flow.user_op_module_builder', (['"""image_random_crop"""'], {}), "('image_random_crop')\n", (33418, 33439), True, 'import oneflow as flow\n'), ((81773, 81793), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (81791, 81793), True, 'import oneflow as flow\n'), ((6004, 6069), 'oneflow.user_op_module_builder', 'flow.user_op_module_builder', (['"""ofrecord_image_decoder_random_crop"""'], {}), "('ofrecord_image_decoder_random_crop')\n", (6031, 6069), True, 'import oneflow as flow\n'), ((17535, 17561), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (17555, 17561), True, 'import oneflow as flow\n'), ((1471, 1497), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1491, 1497), True, 'import oneflow as flow\n'), ((16299, 16325), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (16319, 16325), True, 'import oneflow as flow\n'), ((87446, 87472), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (87466, 87472), True, 'import oneflow as flow\n'), ((81462, 81514), 'oneflow.consistent_user_op_module_builder', 'flow.consistent_user_op_module_builder', (['"""COCOReader"""'], {}), "('COCOReader')\n", (81500, 81514), True, 'import oneflow as flow\n'), ((85802, 85828), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (85822, 85828), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
class Sort(Module):
    """Sort a tensor along ``dim`` using the builtin ``arg_sort`` op.

    ``arg_sort`` only sorts along the last axis, so for any other ``dim``
    the input is transposed to move ``dim`` to the last position, sorted,
    and the resulting indices are transposed back.
    """

    def __init__(self, dim: int = -1, descending: bool = False) -> None:
        super().__init__()
        self.dim = dim
        self._argsort_op = (
            flow.builtin_op("arg_sort")
            .Input("in")
            .Output("out")
            .Attr("direction", "DESCENDING" if descending else "ASCENDING")
            .Build()
        )

    def forward(self, input):
        ndim = len(input.shape)
        # Normalize a negative dim into [0, ndim).
        axis = self.dim + ndim if self.dim < 0 else self.dim
        assert 0 <= axis < ndim, "dim out of range"
        if axis == ndim - 1:
            # Fast path: the sort axis is already the last one.
            indices = self._argsort_op(input)[0]
        else:
            perm = get_perm_when_transpose_axis_to_last_dim(ndim, axis)
            transposed = flow.F.transpose(input, perm=perm)
            indices = self._argsort_op(transposed)[0]
            indices = flow.F.transpose(indices, perm=get_inversed_perm(perm))
        return (flow.gather(input, indices, axis), indices)
@register_tensor_op("sort")
def sort_op(input, dim: int = -1, descending: bool = False):
    """Sorts the elements of the input tensor along a given dimension in ascending order by value.

    Args:
        input (oneflow.Tensor): The input Tensor.
        dim (int, optional): dimension to be sorted. Defaults to the last dim (-1).
        descending (bool, optional): controls the sorting order (ascending or descending).

    Returns:
        Tuple(oneflow.Tensor, oneflow.Tensor(dtype=int32)): A tuple of (values, indices),
        where the values are the sorted values and the indices are the indices of the elements
        in the original input tensor.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np
        >>> x = np.array([[1, 3, 8, 7, 2], [1, 9, 4, 3, 2]], dtype=np.float32)
        >>> input = flow.Tensor(x)
        >>> (values, indices) = flow.sort(input)
        >>> values
        tensor([[1., 2., 3., 7., 8.],
                [1., 2., 3., 4., 9.]], dtype=oneflow.float32)
        >>> indices
        tensor([[0, 4, 1, 3, 2],
                [0, 4, 3, 2, 1]], dtype=oneflow.int32)
        >>> (values, indices) = flow.sort(input, descending=True)
        >>> values
        tensor([[8., 7., 3., 2., 1.],
                [9., 4., 3., 2., 1.]], dtype=oneflow.float32)
        >>> indices
        tensor([[2, 3, 1, 4, 0],
                [1, 2, 3, 4, 0]], dtype=oneflow.int32)
        >>> (values, indices) = flow.sort(input, dim=0)
        >>> values
        tensor([[1., 3., 4., 3., 2.],
                [1., 9., 8., 7., 2.]], dtype=oneflow.float32)
        >>> indices
        tensor([[0, 0, 1, 1, 0],
                [1, 1, 0, 0, 1]], dtype=oneflow.int32)

    """
    # Delegate to the Sort module, which handles non-last-dim sorting.
    sorter = Sort(dim=dim, descending=descending)
    return sorter(input)
if __name__ == "__main__":
    import doctest

    # Run the doctests embedded in the docstrings above; raise_on_error
    # aborts on the first failing example instead of summarizing.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim",
"oneflow.F.transpose",
"oneflow.gather",
"oneflow.framework.tensor.register_tensor_op",
"oneflow.ops.transpose_util.get_inversed_perm",
"oneflow.builtin_op"
] | [((1881, 1907), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sort"""'], {}), "('sort')\n", (1899, 1907), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((3744, 3780), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (3759, 3780), False, 'import doctest\n'), ((1585, 1640), 'oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 'get_perm_when_transpose_axis_to_last_dim', (['num_dims', 'dim'], {}), '(num_dims, dim)\n', (1625, 1640), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((1657, 1691), 'oneflow.F.transpose', 'flow.F.transpose', (['input'], {'perm': 'perm'}), '(input, perm=perm)\n', (1673, 1691), True, 'import oneflow as flow\n'), ((1509, 1541), 'oneflow.gather', 'flow.gather', (['input', 'indices', 'dim'], {}), '(input, indices, dim)\n', (1520, 1541), True, 'import oneflow as flow\n'), ((1835, 1867), 'oneflow.gather', 'flow.gather', (['input', 'indices', 'dim'], {}), '(input, indices, dim)\n', (1846, 1867), True, 'import oneflow as flow\n'), ((1790, 1813), 'oneflow.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (1807, 1813), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((1068, 1095), 'oneflow.builtin_op', 'flow.builtin_op', (['"""arg_sort"""'], {}), "('arg_sort')\n", (1083, 1095), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
# Attach the user-facing docstring to oneflow.comm.send.
# Fix: parameter type was written as "Bool"; Python's type is "bool".
add_docstr(
    oneflow.comm.send,
    """Sends a tensor synchronously.

    Args:
        tensor (Tensor): Tensor to send.
        dst (int): Destination rank.
        send_meta (bool): Whether to send meta information (default is True)

    """,
)
# Attach the user-facing docstring to oneflow.comm.recv.
# Fix: the Returns sentence was ungrammatical ("otherwise got data from
# out self without return"); reworded for clarity.
add_docstr(
    oneflow.comm.recv,
    """Receives a tensor synchronously.

    All(send_meta is False) or none of shape, dtype and device should have value.

    Args:
        src (int, optional): Source rank. Will receive from any
            process if unspecified.
        shape (optional): output tensor shape.
        dataType (optional): output tensor data type.
        device (optional): output tensor device.
        out (Tensor, optional): Tensor to fill with received data.

    Returns:
        if out is None, returns the received tensor; otherwise the received data
        is written into out and nothing is returned.
    """,
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 907), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.comm.send', '"""Sends a tensor synchronously.\n\n Args:\n tensor (Tensor): Tensor to send.\n dst (int): Destination rank.\n send_meta (Bool): Whether to send meta information (default is True)\n\n """'], {}), '(oneflow.comm.send,\n """Sends a tensor synchronously.\n\n Args:\n tensor (Tensor): Tensor to send.\n dst (int): Destination rank.\n send_meta (Bool): Whether to send meta information (default is True)\n\n """\n )\n', (670, 907), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((911, 1524), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.comm.recv', '"""Receives a tensor synchronously.\n \n All(send_meta is False) or none of shape, dtype and device should have value.\n\n Args:\n src (int, optional): Source rank. Will receive from any\n process if unspecified.\n shape (optional): output tensor shape.\n dataType (optional): output tensor data type.\n device (optional): output tensor device.\n out (Tensor, optional): Tensor to fill with received data.\n \n Returns:\n if out is None, return received tensor. otherwise got data from out self without return.\n """'], {}), '(oneflow.comm.recv,\n """Receives a tensor synchronously.\n \n All(send_meta is False) or none of shape, dtype and device should have value.\n\n Args:\n src (int, optional): Source rank. Will receive from any\n process if unspecified.\n shape (optional): output tensor shape.\n dataType (optional): output tensor data type.\n device (optional): output tensor device.\n out (Tensor, optional): Tensor to fill with received data.\n \n Returns:\n if out is None, return received tensor. otherwise got data from out self without return.\n """\n )\n', (921, 1524), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from automated_test_util import *
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestSinh(flow.unittest.TestCase):
    """Autotest: flow.sinh must agree with torch.sinh."""

    @autotest()
    def test_flow_sinh_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return torch.sinh(tensor)
@flow.unittest.skip_unless_1n1d()
class TestSin(flow.unittest.TestCase):
    """Autotest: flow.sin must agree with torch.sin."""

    @autotest()
    def test_flow_sin_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return torch.sin(tensor)
def _test_cos(test_case, shape, device):
    # Forward check: flow.cos must match numpy's cos within 1e-5.
    arr = np.random.randn(*shape)
    x = flow.Tensor(arr, dtype=flow.float32, device=flow.device(device))
    flow_result = flow.cos(x)
    np_result = np.cos(x.numpy())
    test_case.assertTrue(np.allclose(flow_result.numpy(), np_result, 1e-05, 1e-05))
def _test_cos_backward(test_case, shape, device):
    # Backward check: d/dx sum(cos(x)) = -sin(x).
    x = flow.Tensor(
        np.random.randn(*shape),
        dtype=flow.float32,
        device=flow.device(device),
        requires_grad=True,
    )
    flow.cos(x).sum().backward()
    expected_grad = -np.sin(x.numpy())
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestCos(flow.unittest.TestCase):
    def test_cos(test_case):
        # Run every cos check over all shape/device combinations.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_cos, _test_cos_backward]
        arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]
        arg_dict["device"] = ["cpu", "cuda"]
        for check_fn, *params in GenArgList(arg_dict):
            check_fn(test_case, *params)
@flow.unittest.skip_unless_1n1d()
class TestLogModule(flow.unittest.TestCase):
    """Autotest: flow.log must agree with torch.log."""

    @autotest()
    def test_log_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return torch.log(tensor)
def _test_std(test_case, shape, device):
    # flow.std along dim=2 must match numpy's std along axis 2.
    arr = np.random.randn(*shape)
    x = flow.Tensor(arr, device=flow.device(device))
    flow_result = flow.std(x, dim=2)
    test_case.assertTrue(
        np.allclose(flow_result.numpy(), np.std(arr, axis=2), 0.0001, 0.0001)
    )
def _test_std_dim1(test_case, shape, device):
    # flow.std along dim=1 must match numpy's std along axis 1.
    arr = np.random.randn(*shape)
    x = flow.Tensor(arr, device=flow.device(device))
    flow_result = flow.std(x, dim=1)
    test_case.assertTrue(
        np.allclose(flow_result.numpy(), np.std(arr, axis=1), 0.0001, 0.0001)
    )
def _test_std_negative_dim(test_case, shape, device):
    """Check Tensor.std over negative dims against numpy.

    Bug fix: the original ignored the ``shape`` parameter and always used
    a hard-coded ``(4, 2, 3, 5)``. The requested shape is now honored;
    every shape the driver passes has at least 3 dims, as required by the
    ``(-2, -1, -3)`` reduction axes.
    """
    np_arr = np.random.randn(*shape)
    input = flow.Tensor(np_arr, device=flow.device(device))
    of_out = input.std(dim=(-2, -1, -3), keepdim=False)
    np_out = np.std(np_arr, axis=(-2, -1, -3))
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 0.0001, 0.0001))
@flow.unittest.skip_unless_1n1d()
class TestStd(flow.unittest.TestCase):
    def test_std(test_case):
        # Exercise every std check over all shape/device combinations.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_std, _test_std_dim1, _test_std_negative_dim]
        arg_dict["shape"] = [(2, 3, 4), (2, 3, 4, 5)]
        arg_dict["device"] = ["cpu", "cuda"]
        for check_fn, *params in GenArgList(arg_dict):
            check_fn(test_case, *params)

    @unittest.skip("std has bug")
    @autotest()
    def test_std_flow_with_random_data(test_case):
        dev = random_device()
        ndims = random().to(int)
        axis = random(low=0, high=ndims).to(int)
        tensor = random_pytorch_tensor(ndim=ndims).to(dev)
        return torch.std(tensor, dim=axis)

    @unittest.skip("std has bug")
    @autotest()
    def test_std_tensor_with_random_data(test_case):
        dev = random_device()
        ndims = random().to(int)
        axis = random(low=0, high=ndims).to(int)
        tensor = random_pytorch_tensor(ndim=ndims).to(dev)
        return tensor.std(dim=axis)
@flow.unittest.skip_unless_1n1d()
class TestSqrt(flow.unittest.TestCase):
    """Autotest: sqrt as both a free function and a tensor method."""

    @autotest()
    def test_sqrt_flow_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return torch.sqrt(tensor)

    @autotest()
    def test_sqrt_tensor_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return tensor.sqrt()
def _test_rsqrt(test_case, shape, device):
    # rsqrt(x) == 1/sqrt(x); abs() keeps the inputs non-negative.
    arr = np.abs(np.random.randn(*shape))
    expected = 1 / np.sqrt(arr)
    x = flow.Tensor(arr, device=flow.device(device))
    test_case.assertTrue(
        np.allclose(x.rsqrt().numpy(), expected, 1e-05, 1e-05, equal_nan=True)
    )
def _test_rsqrt_backward(test_case, shape, device):
    # d/dx x^(-1/2) = -1/2 * x^(-3/2) = -1 / (2 * x * sqrt(x)).
    arr = np.abs(np.random.randn(*shape))
    x = flow.Tensor(arr, device=flow.device(device), requires_grad=True)
    flow.rsqrt(input=x).sum().backward()
    expected_grad = -1 / 2 * 1 / (x.numpy() * np.sqrt(x.numpy()))
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), expected_grad, 1e-05, 1e-05, equal_nan=True)
    )
@flow.unittest.skip_unless_1n1d()
class TestRsqrt(flow.unittest.TestCase):
    def test_rsqrt(test_case):
        # Run forward and backward rsqrt checks on every shape/device.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_rsqrt, _test_rsqrt_backward]
        arg_dict["shape"] = [(2, 3), (2, 3, 4), (2, 3, 4, 5)]
        arg_dict["device"] = ["cpu", "cuda"]
        for check_fn, *params in GenArgList(arg_dict):
            check_fn(test_case, *params)
@flow.unittest.skip_unless_1n1d()
class TestSquare(flow.unittest.TestCase):
    """Autotest: square as both a free function and a tensor method."""

    @autotest()
    def test_square_flow_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return torch.square(tensor)

    @autotest()
    def test_square_tensor_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor().to(dev)
        return tensor.square()
@flow.unittest.skip_unless_1n1d()
class TestPow(flow.unittest.TestCase):
    """Autotest: flow.pow with a random scalar exponent."""

    @autotest()
    def test_flow_pow_with_random_data(test_case):
        dev = random_device()
        base = random_pytorch_tensor().to(dev)
        exponent = random().to(float)
        return torch.pow(base, exponent)
@flow.unittest.skip_unless_1n1d()
class TestAsin(flow.unittest.TestCase):
    """Autotest: asin and its arcsin alias."""

    @autotest()
    def test_flow_asin_with_random_data(test_case):
        dev = random_device()
        # asin domain is [-1, 1]; sample well inside it.
        tensor = random_pytorch_tensor(low=-0.5, high=0.5).to(dev)
        return torch.asin(tensor)

    @autotest()
    def test_flow_arcsin_with_random_data(test_case):
        dev = random_device()
        tensor = random_pytorch_tensor(low=-0.5, high=0.5).to(dev)
        return torch.arcsin(tensor)
@flow.unittest.skip_unless_1n1d()
class TestAsinh(flow.unittest.TestCase):
    """Autotest suites for asinh/arcsinh (defined on all reals)."""

    @autotest()
    def test_flow_asinh_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor().to(dev)
        return torch.asinh(inp)

    @autotest()
    def test_flow_arcsinh_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor().to(dev)
        return torch.arcsinh(inp)
@flow.unittest.skip_unless_1n1d()
class TestTan(flow.unittest.TestCase):
    """Autotest suite for tan."""

    @autotest()
    def test_flow_tan_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor().to(dev)
        return torch.tan(inp)
@flow.unittest.skip_unless_1n1d()
class TestAtan(flow.unittest.TestCase):
    """Autotest suites for atan/arctan/atan2/atanh/arctanh."""

    @autotest()
    def test_flow_atan_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor().to(dev)
        return torch.atan(inp)

    @autotest()
    def test_flow_arctan_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor().to(dev)
        return torch.arctan(inp)

    @autotest()
    def test_flow_atan2_with_random_data(test_case):
        dev = random_device()
        lhs = random_pytorch_tensor(ndim=2, dim1=3).to(dev)
        rhs = random_pytorch_tensor(ndim=2, dim1=3).to(dev)
        return torch.atan2(lhs, rhs)

    @autotest()
    def test_flow_atanh_with_random_data(test_case):
        # atanh is only defined on (-1, 1); sample well inside it.
        dev = random_device()
        inp = random_pytorch_tensor(low=-0.5, high=0.5).to(dev)
        return torch.atanh(inp)

    @autotest()
    def test_flow_arctanh_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor(low=-0.5, high=0.5).to(dev)
        return torch.arctanh(inp)
def _topk_np(input, k, dim: int = None, largest: bool = True, _sorted: bool = True):
in_dims = input.shape
out_dims = list(in_dims)
num_axes = len(input.shape)
if dim < 0:
dim = dim + num_axes
n = in_dims[dim]
if k > n:
k = n
out_dims[dim] = k
out_dims = tuple(out_dims)
prev_dims = 1
next_dims = 1
for i in range(dim):
prev_dims *= in_dims[i]
for i in range(dim + 1, len(in_dims)):
next_dims *= in_dims[i]
input_flat = input.reshape((prev_dims, n, next_dims))
values_ref = np.ndarray(shape=(prev_dims, k, next_dims), dtype=input.dtype)
values_ref.fill(0)
indices_ref = np.ndarray(shape=(prev_dims, k, next_dims), dtype=np.int64)
indices_ref.fill(-1)
for i in range(prev_dims):
for j in range(next_dims):
kv = []
for x in range(n):
val = input_flat[i, x, j]
y = x * next_dims + i * in_dims[dim] * next_dims + j
kv.append((val, x, y))
cnt = 0
for (val, x, y) in sorted(kv, key=lambda x: (x[0], -x[1]), reverse=largest):
values_ref[i, cnt, j] = val
indices_ref[i, cnt, j] = x
cnt += 1
if cnt >= k or cnt >= n:
break
values_ref = values_ref.reshape(out_dims)
indices_ref = indices_ref.reshape(out_dims)
return (values_ref, indices_ref)
def _test_topk_dim_negative(test_case, device):
    """topk along a negative axis must match the numpy reference."""
    tensor = flow.Tensor(
        np.random.randn(2, 6, 5, 7), dtype=flow.float32, device=flow.device(device)
    )
    k, dim = 4, -1
    (of_values, of_indices) = flow.topk(tensor, k=k, dim=dim)
    (np_values, np_indices) = _topk_np(tensor.numpy(), k=k, dim=dim)
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), np_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), np_indices.flatten())
    )
def _test_tensor_topk(test_case, device):
    """Tensor-method form input.topk() must match the numpy reference."""
    tensor = flow.Tensor(
        np.random.randn(2, 6, 5, 7), dtype=flow.float32, device=flow.device(device)
    )
    k, dim = 4, 1
    (of_values, of_indices) = tensor.topk(k=k, dim=dim)
    (np_values, np_indices) = _topk_np(tensor.numpy(), k=k, dim=dim)
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), np_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), np_indices.flatten())
    )
def _test_topk_dim_positive(test_case, device):
    """topk along a positive interior axis must match the numpy reference."""
    tensor = flow.Tensor(
        np.random.randn(2, 6, 5, 7), dtype=flow.float32, device=flow.device(device)
    )
    k, dim = 4, 2
    (of_values, of_indices) = flow.topk(tensor, k=k, dim=dim)
    (np_values, np_indices) = _topk_np(tensor.numpy(), k=k, dim=dim)
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), np_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), np_indices.flatten())
    )
def _test_topk_largest(test_case, device):
    """Check flow.topk(largest=False) (smallest-k) against the numpy reference."""
    input = flow.Tensor(
        np.random.randn(2, 6, 5, 7), dtype=flow.float32, device=flow.device(device)
    )
    dim = 1
    k = 4
    largest = False
    # Pass the flag through instead of hard-coding it twice; previously
    # the local `largest` was assigned but never used.
    (of_values, of_indices) = flow.topk(input, k=k, dim=dim, largest=largest)
    (np_values, np_indices) = _topk_np(input.numpy(), k=k, dim=dim, largest=largest)
    test_case.assertTrue(
        np.array_equal(of_values.numpy().flatten(), np_values.flatten())
    )
    test_case.assertTrue(
        np.array_equal(of_indices.numpy().flatten(), np_indices.flatten())
    )
def _test_topk_original(test_case, device):
    """Sweep shapes/axes/k/order/dtype and compare flow.topk with _topk_np."""
    arg_dict = OrderedDict()
    arg_dict["shape"] = [(10, 10, 200)]
    arg_dict["axis"] = [-2, 0, 2]
    arg_dict["k"] = [1, 50, 200]
    arg_dict["largest"] = [True, False]
    arg_dict["data_type"] = ["float32", "double"]
    rng = np.random.default_rng()
    for (shape, axis, k, largest, data_type) in GenArgList(arg_dict):
        np_type = type_name_to_np_type[data_type]
        random_data = rng.standard_normal(size=shape, dtype=np_type)
        # Resample until every value is unique so the top-k indices have a
        # single correct answer (ties would make the comparison ambiguous).
        while np.unique(random_data).size != random_data.size:
            random_data = rng.standard_normal(size=shape, dtype=np_type)
        input = flow.Tensor(
            random_data,
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        (of_values, of_indices) = flow.topk(input, k=k, dim=axis, largest=largest)
        (np_values, np_indices) = _topk_np(
            input.numpy(), k=k, dim=axis, largest=largest
        )
        test_case.assertTrue(
            np.array_equal(of_values.numpy().flatten(), np_values.flatten())
        )
        test_case.assertTrue(
            np.array_equal(of_indices.numpy().flatten(), np_indices.flatten())
        )
@flow.unittest.skip_unless_1n1d()
class TestPowScalar(flow.unittest.TestCase):
    """Eager pow with a scalar exponent, checked against np.power.

    Renamed from ``TestPow``: a class of that name is defined earlier in
    this file, and this duplicate definition shadowed it at import time,
    so the earlier autotest suite was never collected.
    """

    def test_pow(test_case):
        """Functional form: flow.pow(tensor, scalar)."""
        input = flow.Tensor(np.array([1, 2, 3, 4, 5, 6]), dtype=flow.float32)
        of_out = flow.pow(input, 2.1)
        np_out = np.power(input.numpy(), 2.1)
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))

    def test_pow_tensor_function(test_case):
        """Tensor-method form: tensor.pow(scalar)."""
        input = flow.Tensor(np.array([1, 2, 3, 4, 5, 6]), dtype=flow.float32)
        of_out = input.pow(2.1)
        np_out = np.power(input.numpy(), 2.1)
        test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestTopk(flow.unittest.TestCase):
    """Run every topk helper over both devices."""

    def test_topk(test_case):
        cases = OrderedDict()
        cases["test_fun"] = [
            _test_topk_dim_negative,
            _test_tensor_topk,
            _test_topk_dim_positive,
            _test_topk_largest,
            _test_topk_original,
        ]
        cases["device"] = ["cpu", "cuda"]
        for args in GenArgList(cases):
            args[0](test_case, *args[1:])
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
@flow.unittest.skip_unless_1n1d()
class TestArccosh(flow.unittest.TestCase):
    """Autotest for arccosh; inputs drawn from (2, 3), inside the domain x >= 1."""

    @autotest()
    def test_arccosh_flow_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor(low=2, high=3).to(dev)
        return torch.arccosh(inp)
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
@flow.unittest.skip_unless_1n1d()
class TestAcosh(flow.unittest.TestCase):
    """Autotest for acosh; inputs drawn from (2, 3), inside the domain x >= 1."""

    @autotest()
    def test_acosh_flow_with_random_data(test_case):
        dev = random_device()
        inp = random_pytorch_tensor(low=2, high=3).to(dev)
        return torch.acosh(inp)
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
@flow.unittest.skip_unless_1n1d()
class TestAtan2(flow.unittest.TestCase):
    """Autotest for the two-argument arctangent."""

    @autotest()
    def test_flow_atan2_with_random_data(test_case):
        dev = random_device()
        numerator = random_pytorch_tensor(ndim=1, dim0=1).to(dev)
        denominator = random_pytorch_tensor(ndim=1, dim0=1).to(dev)
        return torch.atan2(numerator, denominator)
def _test_elementwise_minimum(test_case, device):
    """Element-wise minimum of same-shape tensors vs np.minimum."""
    params = OrderedDict()
    params["shape"] = [(10, 10, 200), (3, 12), (12,)]
    params["data_type"] = ["float32", "double"]
    for (shape, data_type) in GenArgList(params):
        lhs = flow.Tensor(
            np.random.randn(*shape),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        rhs = flow.Tensor(
            np.random.randn(*shape),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        of_values = flow.minimum(lhs, rhs)
        np_values = np.minimum(lhs.numpy(), rhs.numpy())
        test_case.assertTrue(
            np.array_equal(of_values.numpy().flatten(), np_values.flatten())
        )
def _test_broadcast_minimum(test_case, device):
    """Broadcasting minimum of differently-shaped tensors vs np.minimum."""
    params = OrderedDict()
    params["shape"] = [[(10, 10, 200), (10, 1, 1)], [(3, 12), (1, 12)]]
    params["data_type"] = ["float32", "double"]
    for (shape, data_type) in GenArgList(params):
        lhs = flow.Tensor(
            np.random.randn(*shape[0]),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        rhs = flow.Tensor(
            np.random.randn(*shape[1]),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        of_values = flow.minimum(lhs, rhs)
        np_values = np.minimum(lhs.numpy(), rhs.numpy())
        test_case.assertTrue(
            np.array_equal(of_values.numpy().flatten(), np_values.flatten())
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
@flow.unittest.skip_unless_1n1d()
class TestMinimum(flow.unittest.TestCase):
    """minimum: eager checks against numpy plus autotest comparisons."""

    def test_minimum(test_case):
        cases = OrderedDict()
        cases["test_fun"] = [
            _test_elementwise_minimum,
            _test_broadcast_minimum,
        ]
        cases["device"] = ["cpu", "cuda"]
        for args in GenArgList(cases):
            args[0](test_case, *args[1:])

    @autotest()
    def test_flow_elementwise_minimum_with_random_data(test_case):
        d0 = random(2, 6)
        d1 = random(2, 6)
        lhs = random_pytorch_tensor(ndim=2, dim0=d0, dim1=d1)
        rhs = random_pytorch_tensor(ndim=2, dim0=d0, dim1=d1)
        return torch.minimum(lhs, rhs)

    @autotest()
    def test_flow_broadcast_minimum_with_random_data(test_case):
        d0 = random(2, 6)
        d1 = random(2, 6)
        d2 = random(2, 6)
        lhs = random_pytorch_tensor(ndim=3, dim0=d0, dim1=1, dim2=1)
        rhs = random_pytorch_tensor(ndim=3, dim0=1, dim1=d1, dim2=d2)
        return torch.minimum(lhs, rhs)
def _test_elementwise_maximum(test_case, device):
    """Element-wise maximum of same-shape tensors vs np.maximum."""
    params = OrderedDict()
    params["shape"] = [(10, 10, 200), (3, 12), (12,)]
    params["data_type"] = ["float32", "double"]
    for (shape, data_type) in GenArgList(params):
        lhs = flow.Tensor(
            np.random.randn(*shape),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        rhs = flow.Tensor(
            np.random.randn(*shape),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        of_values = flow.maximum(lhs, rhs)
        np_values = np.maximum(lhs.numpy(), rhs.numpy())
        test_case.assertTrue(
            np.array_equal(of_values.numpy().flatten(), np_values.flatten())
        )
def _test_broadcast_maximum(test_case, device):
    """Broadcasting maximum of differently-shaped tensors vs np.maximum."""
    params = OrderedDict()
    params["shape"] = [[(10, 10, 200), (10, 1, 1)], [(3, 12), (1, 12)]]
    params["data_type"] = ["float32", "double"]
    for (shape, data_type) in GenArgList(params):
        lhs = flow.Tensor(
            np.random.randn(*shape[0]),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        rhs = flow.Tensor(
            np.random.randn(*shape[1]),
            dtype=type_name_to_flow_type[data_type],
            device=flow.device(device),
        )
        of_values = flow.maximum(lhs, rhs)
        np_values = np.maximum(lhs.numpy(), rhs.numpy())
        test_case.assertTrue(
            np.array_equal(of_values.numpy().flatten(), np_values.flatten())
        )
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
@flow.unittest.skip_unless_1n1d()
class TestMaximum(flow.unittest.TestCase):
    """maximum: eager checks against numpy plus autotest comparisons.

    Fixes vs. the original:
    - added @flow.unittest.skip_unless_1n1d(), matching TestMinimum and
      every other suite in this file;
    - fixed the "mximum" typo in the elementwise test name.
    """

    def test_maximum(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_elementwise_maximum,
            _test_broadcast_maximum,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_flow_elementwise_maximum_with_random_data(test_case):
        k1 = random(2, 6)
        k2 = random(2, 6)
        x = random_pytorch_tensor(ndim=2, dim0=k1, dim1=k2)
        y = random_pytorch_tensor(ndim=2, dim0=k1, dim1=k2)
        return torch.maximum(x, y)

    @autotest()
    def test_flow_broadcast_maximum_with_random_data(test_case):
        k1 = random(2, 6)
        k2 = random(2, 6)
        k3 = random(2, 6)
        x = random_pytorch_tensor(ndim=3, dim0=k1, dim1=1, dim2=1)
        y = random_pytorch_tensor(ndim=3, dim0=1, dim1=k2, dim2=k3)
        return torch.maximum(x, y)
if __name__ == "__main__":  # allow running this test file directly
    unittest.main()
| [
"oneflow.std",
"oneflow.cos",
"oneflow.topk",
"oneflow.rsqrt",
"oneflow.unittest.env.eager_execution_enabled",
"oneflow.maximum",
"oneflow.device",
"oneflow.pow",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.minimum"
] | [((827, 859), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (857, 859), True, 'import oneflow as flow\n'), ((1094, 1126), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1124, 1126), True, 'import oneflow as flow\n'), ((2019, 2051), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2049, 2051), True, 'import oneflow as flow\n'), ((2407, 2439), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2437, 2439), True, 'import oneflow as flow\n'), ((3581, 3613), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3611, 3613), True, 'import oneflow as flow\n'), ((4636, 4668), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4666, 4668), True, 'import oneflow as flow\n'), ((5851, 5883), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (5881, 5883), True, 'import oneflow as flow\n'), ((6247, 6279), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6277, 6279), True, 'import oneflow as flow\n'), ((6713, 6745), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6743, 6745), True, 'import oneflow as flow\n'), ((7011, 7043), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7041, 7043), True, 'import oneflow as flow\n'), ((7510, 7542), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (7540, 7542), True, 'import oneflow as flow\n'), ((7978, 8010), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (8008, 8010), True, 'import oneflow as flow\n'), ((8242, 8274), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (8272, 8274), True, 'import oneflow as flow\n'), ((14209, 
14241), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (14239, 14241), True, 'import oneflow as flow\n'), ((14837, 14869), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (14867, 14869), True, 'import oneflow as flow\n'), ((15430, 15462), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (15460, 15462), True, 'import oneflow as flow\n'), ((15834, 15866), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (15864, 15866), True, 'import oneflow as flow\n'), ((16232, 16264), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (16262, 16264), True, 'import oneflow as flow\n'), ((18349, 18381), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (18379, 18381), True, 'import oneflow as flow\n'), ((1522, 1537), 'oneflow.cos', 'flow.cos', (['input'], {}), '(input)\n', (1530, 1537), True, 'import oneflow as flow\n'), ((1861, 1872), 'oneflow.cos', 'flow.cos', (['x'], {}), '(x)\n', (1869, 1872), True, 'import oneflow as flow\n'), ((2711, 2734), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2726, 2734), True, 'import numpy as np\n'), ((2808, 2830), 'oneflow.std', 'flow.std', (['input'], {'dim': '(2)'}), '(input, dim=2)\n', (2816, 2830), True, 'import oneflow as flow\n'), ((2844, 2866), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(2)'}), '(np_arr, axis=2)\n', (2850, 2866), True, 'import numpy as np\n'), ((3006, 3029), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (3021, 3029), True, 'import numpy as np\n'), ((3103, 3125), 'oneflow.std', 'flow.std', (['input'], {'dim': '(1)'}), '(input, dim=1)\n', (3111, 3125), True, 'import oneflow as flow\n'), ((3139, 3161), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(1)'}), '(np_arr, axis=1)\n', (3145, 3161), True, 'import numpy as np\n'), ((3309, 3336), 
'numpy.random.randn', 'np.random.randn', (['(4)', '(2)', '(3)', '(5)'], {}), '(4, 2, 3, 5)\n', (3324, 3336), True, 'import numpy as np\n'), ((3466, 3499), 'numpy.std', 'np.std', (['np_arr'], {'axis': '(-2, -1, -3)'}), '(np_arr, axis=(-2, -1, -3))\n', (3472, 3499), True, 'import numpy as np\n'), ((3984, 4012), 'unittest.skip', 'unittest.skip', (['"""std has bug"""'], {}), "('std has bug')\n", (3997, 4012), False, 'import unittest\n'), ((4314, 4342), 'unittest.skip', 'unittest.skip', (['"""std has bug"""'], {}), "('std has bug')\n", (4327, 4342), False, 'import unittest\n'), ((5147, 5170), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (5162, 5170), True, 'import numpy as np\n'), ((5184, 5198), 'numpy.abs', 'np.abs', (['np_arr'], {}), '(np_arr)\n', (5190, 5198), True, 'import numpy as np\n'), ((5492, 5515), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (5507, 5515), True, 'import numpy as np\n'), ((5529, 5543), 'numpy.abs', 'np.abs', (['np_arr'], {}), '(np_arr)\n', (5535, 5543), True, 'import numpy as np\n'), ((5628, 5647), 'oneflow.rsqrt', 'flow.rsqrt', ([], {'input': 'x'}), '(input=x)\n', (5638, 5647), True, 'import oneflow as flow\n'), ((9966, 10028), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(prev_dims, k, next_dims)', 'dtype': 'input.dtype'}), '(shape=(prev_dims, k, next_dims), dtype=input.dtype)\n', (9976, 10028), True, 'import numpy as np\n'), ((10070, 10129), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(prev_dims, k, next_dims)', 'dtype': 'np.int64'}), '(shape=(prev_dims, k, next_dims), dtype=np.int64)\n', (10080, 10129), True, 'import numpy as np\n'), ((11059, 11089), 'oneflow.topk', 'flow.topk', (['input'], {'k': 'k', 'dim': 'dim'}), '(input, k=k, dim=dim)\n', (11068, 11089), True, 'import oneflow as flow\n'), ((12103, 12133), 'oneflow.topk', 'flow.topk', (['input'], {'k': 'k', 'dim': 'dim'}), '(input, k=k, dim=dim)\n', (12112, 12133), True, 'import oneflow as flow\n'), ((12646, 12691), 
'oneflow.topk', 'flow.topk', (['input'], {'k': 'k', 'dim': 'dim', 'largest': '(False)'}), '(input, k=k, dim=dim, largest=False)\n', (12655, 12691), True, 'import oneflow as flow\n'), ((13048, 13061), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (13059, 13061), False, 'from collections import OrderedDict\n'), ((13269, 13292), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (13290, 13292), True, 'import numpy as np\n'), ((13341, 13361), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (13351, 13361), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((16648, 16661), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (16659, 16661), False, 'from collections import OrderedDict\n'), ((16798, 16818), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (16808, 16818), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((17460, 17473), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17471, 17473), False, 'from collections import OrderedDict\n'), ((17628, 17648), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (17638, 17648), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((19424, 19437), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (19435, 19437), False, 'from collections import OrderedDict\n'), ((19574, 19594), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (19584, 19594), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((20236, 20249), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20247, 20249), False, 'from collections import OrderedDict\n'), ((20404, 20424), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20414, 20424), False, 'from test_util import GenArgList, type_name_to_flow_type, 
type_name_to_np_type\n'), ((22131, 22146), 'unittest.main', 'unittest.main', ([], {}), '()\n', (22144, 22146), False, 'import unittest\n'), ((1431, 1454), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1446, 1454), True, 'import numpy as np\n'), ((1730, 1753), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (1745, 1753), True, 'import numpy as np\n'), ((2139, 2152), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2150, 2152), False, 'from collections import OrderedDict\n'), ((2342, 2362), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2352, 2362), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((3701, 3714), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3712, 3714), False, 'from collections import OrderedDict\n'), ((3916, 3936), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3926, 3936), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5216, 5231), 'numpy.sqrt', 'np.sqrt', (['np_arr'], {}), '(np_arr)\n', (5223, 5231), True, 'import numpy as np\n'), ((5975, 5988), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5986, 5988), False, 'from collections import OrderedDict\n'), ((6182, 6202), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6192, 6202), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((10924, 10951), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(7)'], {}), '(2, 6, 5, 7)\n', (10939, 10951), True, 'import numpy as np\n'), ((11447, 11474), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(7)'], {}), '(2, 6, 5, 7)\n', (11462, 11474), True, 'import numpy as np\n'), ((11969, 11996), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(7)'], {}), '(2, 6, 5, 7)\n', (11984, 11996), True, 'import numpy as np\n'), 
((12492, 12519), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(7)'], {}), '(2, 6, 5, 7)\n', (12507, 12519), True, 'import numpy as np\n'), ((13809, 13857), 'oneflow.topk', 'flow.topk', (['input'], {'k': 'k', 'dim': 'axis', 'largest': 'largest'}), '(input, k=k, dim=axis, largest=largest)\n', (13818, 13857), True, 'import oneflow as flow\n'), ((14405, 14425), 'oneflow.pow', 'flow.pow', (['input', '(2.1)'], {}), '(input, 2.1)\n', (14413, 14425), True, 'import oneflow as flow\n'), ((14959, 14972), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14970, 14972), False, 'from collections import OrderedDict\n'), ((15250, 15270), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (15260, 15270), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((15340, 15383), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (15381, 15383), True, 'import oneflow as flow\n'), ((15744, 15787), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (15785, 15787), True, 'import oneflow as flow\n'), ((16142, 16185), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (16183, 16185), True, 'import oneflow as flow\n'), ((17182, 17212), 'oneflow.minimum', 'flow.minimum', (['input_x', 'input_y'], {}), '(input_x, input_y)\n', (17194, 17212), True, 'import oneflow as flow\n'), ((18018, 18048), 'oneflow.minimum', 'flow.minimum', (['input_x', 'input_y'], {}), '(input_x, input_y)\n', (18030, 18048), True, 'import oneflow as flow\n'), ((18477, 18490), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18488, 18490), False, 'from collections import OrderedDict\n'), ((18674, 18694), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18684, 18694), False, 'from test_util import GenArgList, 
type_name_to_flow_type, type_name_to_np_type\n'), ((18259, 18302), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (18300, 18302), True, 'import oneflow as flow\n'), ((19958, 19988), 'oneflow.maximum', 'flow.maximum', (['input_x', 'input_y'], {}), '(input_x, input_y)\n', (19970, 19988), True, 'import oneflow as flow\n'), ((20794, 20824), 'oneflow.maximum', 'flow.maximum', (['input_x', 'input_y'], {}), '(input_x, input_y)\n', (20806, 20824), True, 'import oneflow as flow\n'), ((21219, 21232), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21230, 21232), False, 'from collections import OrderedDict\n'), ((21416, 21436), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21426, 21436), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((21035, 21078), 'oneflow.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (21076, 21078), True, 'import oneflow as flow\n'), ((1483, 1502), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1494, 1502), True, 'import oneflow as flow\n'), ((1798, 1817), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1809, 1817), True, 'import oneflow as flow\n'), ((2774, 2793), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2785, 2793), True, 'import oneflow as flow\n'), ((3069, 3088), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (3080, 3088), True, 'import oneflow as flow\n'), ((3376, 3395), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (3387, 3395), True, 'import oneflow as flow\n'), ((5271, 5290), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5282, 5290), True, 'import oneflow as flow\n'), ((5579, 5598), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (5590, 5598), True, 'import oneflow as flow\n'), ((10980, 10999), 
'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (10991, 10999), True, 'import oneflow as flow\n'), ((11503, 11522), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (11514, 11522), True, 'import oneflow as flow\n'), ((12025, 12044), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (12036, 12044), True, 'import oneflow as flow\n'), ((12548, 12567), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (12559, 12567), True, 'import oneflow as flow\n'), ((14338, 14366), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (14346, 14366), True, 'import numpy as np\n'), ((14626, 14654), 'numpy.array', 'np.array', (['[1, 2, 3, 4, 5, 6]'], {}), '([1, 2, 3, 4, 5, 6])\n', (14634, 14654), True, 'import numpy as np\n'), ((16863, 16886), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (16878, 16886), True, 'import numpy as np\n'), ((17034, 17057), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (17049, 17057), True, 'import numpy as np\n'), ((17693, 17719), 'numpy.random.randn', 'np.random.randn', (['*shape[0]'], {}), '(*shape[0])\n', (17708, 17719), True, 'import numpy as np\n'), ((17867, 17893), 'numpy.random.randn', 'np.random.randn', (['*shape[1]'], {}), '(*shape[1])\n', (17882, 17893), True, 'import numpy as np\n'), ((19639, 19662), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (19654, 19662), True, 'import numpy as np\n'), ((19810, 19833), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (19825, 19833), True, 'import numpy as np\n'), ((20469, 20495), 'numpy.random.randn', 'np.random.randn', (['*shape[0]'], {}), '(*shape[0])\n', (20484, 20495), True, 'import numpy as np\n'), ((20643, 20669), 'numpy.random.randn', 'np.random.randn', (['*shape[1]'], {}), '(*shape[1])\n', (20658, 20669), True, 'import numpy as np\n'), ((13496, 13518), 'numpy.unique', 'np.unique', (['random_data'], {}), 
'(random_data)\n', (13505, 13518), True, 'import numpy as np\n'), ((13744, 13763), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (13755, 13763), True, 'import oneflow as flow\n'), ((16960, 16979), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (16971, 16979), True, 'import oneflow as flow\n'), ((17131, 17150), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (17142, 17150), True, 'import oneflow as flow\n'), ((17793, 17812), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (17804, 17812), True, 'import oneflow as flow\n'), ((17967, 17986), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (17978, 17986), True, 'import oneflow as flow\n'), ((19736, 19755), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (19747, 19755), True, 'import oneflow as flow\n'), ((19907, 19926), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (19918, 19926), True, 'import oneflow as flow\n'), ((20569, 20588), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (20580, 20588), True, 'import oneflow as flow\n'), ((20743, 20762), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (20754, 20762), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import math
import oneflow.compatible.single_client as flow
import oneflow.compatible.single_client.typing as oft
# Shared job configuration: every global function below runs in float32.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
# Locate this test's directory so the custom-op module can find its
# op sources that live next to this file.
module_path = os.path.dirname(os.path.abspath(__file__))
print("module_path:", module_path)
print("pwd_path:", os.getcwd())
# Build and load the "user_sigmoid" custom op at import time
# (C++ op definition plus a Python kernel).
user_sigmoid_op = flow.experimental.custom_op_module("user_sigmoid", module_path)
user_sigmoid_op.py_api().cpp_def().py_kernel().build_load()
def numpy_sigmoid(x):
return 1 / (1 + np.exp(-x))
def numpy_sigmoid_grad(y, dy):
    """Sigmoid backward: given y = sigmoid(x), dL/dx = y * (1 - y) * dy."""
    local_grad = y * (1 - y)
    return local_grad * dy
def make_job(input_shape, dtype=flow.float32):
    """Build a global job that applies the built-in flow.math.sigmoid.

    The inner function's name becomes the job name, so it is left as-is.
    """
    @flow.global_function(function_config=func_config)
    def sigmoid_job(x: oft.Numpy.Placeholder(input_shape, dtype=dtype)):
        return flow.math.sigmoid(x)
    return sigmoid_job
def make_grad_job(y_shape, dy_shape, dtype=flow.float32):
    """Build a global job that applies the built-in flow.math.sigmoid_grad.

    Takes the forward output ``y`` and upstream gradient ``dy``.
    """
    @flow.global_function(function_config=func_config)
    def sigmoid_grad_job(
        y: oft.Numpy.Placeholder(y_shape, dtype=dtype),
        dy: oft.Numpy.Placeholder(dy_shape, dtype=dtype),
    ):
        return flow.math.sigmoid_grad(y, dy)
    return sigmoid_grad_job
@flow.unittest.skip_unless_1n1d()
class TestUserSigmoid(flow.unittest.TestCase):
    """Check the custom "user_sigmoid" op against both the built-in
    flow.math.sigmoid jobs and the pure-numpy reference."""

    def test_user_sigmoid(test_case):
        """Forward pass: custom op vs built-in op vs numpy."""
        flow.clear_default_session()
        def make_py_job(input_shape, dtype=flow.float32):
            # Job running the custom op's forward kernel on CPU.
            @flow.global_function(function_config=func_config)
            def sigmoid_py_job(x: oft.Numpy.Placeholder(input_shape, dtype=dtype)):
                with flow.scope.placement("cpu", "0:0"):
                    return user_sigmoid_op.api.user_sigmoid_forward(x)
            return sigmoid_py_job
        x = np.ones((1, 10), dtype=np.float32)
        sig_job = make_job(x.shape)
        py_sig_job = make_py_job(x.shape)
        sig = sig_job(x).get().numpy()
        py_sig = py_sig_job(x).get().numpy()
        numpy_sig = numpy_sigmoid(x)
        print("sig : ", sig)
        print("py_sig : ", py_sig)
        print("numpy_sig : ", numpy_sig)
        test_case.assertTrue(np.allclose(sig, py_sig, rtol=1e-03, atol=1e-05))
        test_case.assertTrue(np.allclose(py_sig, numpy_sig, rtol=1e-03, atol=1e-05))

    def test_user_sigmoid_grad(test_case):
        """Backward pass: custom op vs built-in op vs numpy.

        Fix vs. the original: removed an unused local ``x`` (a leftover
        ``np.ones`` allocation that was never referenced).
        """
        flow.clear_default_session()
        def make_py_grad_job(y_shape, dy_shape, dtype=flow.float32):
            # Job running the custom op's backward kernel on CPU.
            @flow.global_function(function_config=func_config)
            def sigmoid_py_grad_job(
                y: oft.Numpy.Placeholder(y_shape, dtype=dtype),
                dy: oft.Numpy.Placeholder(dy_shape, dtype=dtype),
            ):
                with flow.scope.placement("cpu", "0:0"):
                    return user_sigmoid_op.api.user_sigmoid_backward(y, dy)
            return sigmoid_py_grad_job
        y = 0.5 * np.ones((1, 10), dtype=np.float32)
        dy = 0.2 * np.ones((1, 10), dtype=np.float32)
        sig_grad_job = make_grad_job(y.shape, dy.shape)
        py_sig_grad_job = make_py_grad_job(y.shape, dy.shape)
        sig_grad = sig_grad_job(y, dy).get().numpy()
        py_sig_grad = py_sig_grad_job(y, dy).get().numpy()
        numpy_sig_grad = numpy_sigmoid_grad(y, dy)
        print("sig_grad", sig_grad)
        print("py_sig_grad", py_sig_grad)
        print("numpy_sig_grad", numpy_sig_grad)
        test_case.assertTrue(np.allclose(sig_grad, py_sig_grad, rtol=1e-03, atol=1e-05))
        test_case.assertTrue(
            np.allclose(py_sig_grad, numpy_sig_grad, rtol=1e-03, atol=1e-05)
        )
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.experimental.custom_op_module",
"oneflow.compatible.single_client.math.sigmoid",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.math.sigmoid_grad",
"oneflow.compatible.single_client.unittest.skip_unless_1n1d",
"oneflow.compatible.sin... | [((766, 787), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (785, 787), True, 'import oneflow.compatible.single_client as flow\n'), ((974, 1037), 'oneflow.compatible.single_client.experimental.custom_op_module', 'flow.experimental.custom_op_module', (['"""user_sigmoid"""', 'module_path'], {}), "('user_sigmoid', module_path)\n", (1008, 1037), True, 'import oneflow.compatible.single_client as flow\n'), ((1791, 1823), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1821, 1823), True, 'import oneflow.compatible.single_client as flow\n'), ((861, 886), 'os.path.abspath', 'os.path.abspath', (['__file__'], {}), '(__file__)\n', (876, 886), False, 'import os\n'), ((942, 953), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (951, 953), False, 'import os\n'), ((1269, 1318), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1289, 1318), True, 'import oneflow.compatible.single_client as flow\n'), ((1517, 1566), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1537, 1566), True, 'import oneflow.compatible.single_client as flow\n'), ((4201, 4216), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4214, 4216), False, 'import unittest\n'), ((1407, 1427), 'oneflow.compatible.single_client.math.sigmoid', 'flow.math.sigmoid', (['x'], {}), '(x)\n', (1424, 1427), True, 'import oneflow.compatible.single_client as flow\n'), ((1729, 1758), 'oneflow.compatible.single_client.math.sigmoid_grad', 'flow.math.sigmoid_grad', (['y', 'dy'], {}), '(y, dy)\n', (1751, 1758), True, 'import oneflow.compatible.single_client as flow\n'), ((1917, 1945), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1943, 
1945), True, 'import oneflow.compatible.single_client as flow\n'), ((2328, 2362), 'numpy.ones', 'np.ones', (['(1, 10)'], {'dtype': 'np.float32'}), '((1, 10), dtype=np.float32)\n', (2335, 2362), True, 'import numpy as np\n'), ((2883, 2911), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2909, 2911), True, 'import oneflow.compatible.single_client as flow\n'), ((3413, 3447), 'numpy.ones', 'np.ones', (['(1, 10)'], {'dtype': 'np.float32'}), '((1, 10), dtype=np.float32)\n', (3420, 3447), True, 'import numpy as np\n'), ((1142, 1152), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (1148, 1152), True, 'import numpy as np\n'), ((1342, 1389), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'dtype'}), '(input_shape, dtype=dtype)\n', (1363, 1389), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((1604, 1647), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (1625, 1647), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((1661, 1705), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['dy_shape'], {'dtype': 'dtype'}), '(dy_shape, dtype=dtype)\n', (1682, 1705), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((2018, 2067), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (2038, 2067), True, 'import oneflow.compatible.single_client as flow\n'), ((2696, 2744), 'numpy.allclose', 'np.allclose', (['sig', 'py_sig'], {'rtol': '(0.001)', 'atol': '(1e-05)'}), '(sig, py_sig, rtol=0.001, atol=1e-05)\n', (2707, 2744), True, 'import numpy as np\n'), ((2775, 2829), 'numpy.allclose', 'np.allclose', (['py_sig', 'numpy_sig'], {'rtol': '(0.001)', 'atol': '(1e-05)'}), '(py_sig, 
numpy_sig, rtol=0.001, atol=1e-05)\n', (2786, 2829), True, 'import numpy as np\n'), ((2995, 3044), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3015, 3044), True, 'import oneflow.compatible.single_client as flow\n'), ((3466, 3500), 'numpy.ones', 'np.ones', (['(1, 10)'], {'dtype': 'np.float32'}), '((1, 10), dtype=np.float32)\n', (3473, 3500), True, 'import numpy as np\n'), ((3520, 3554), 'numpy.ones', 'np.ones', (['(1, 10)'], {'dtype': 'np.float32'}), '((1, 10), dtype=np.float32)\n', (3527, 3554), True, 'import numpy as np\n'), ((3991, 4049), 'numpy.allclose', 'np.allclose', (['sig_grad', 'py_sig_grad'], {'rtol': '(0.001)', 'atol': '(1e-05)'}), '(sig_grad, py_sig_grad, rtol=0.001, atol=1e-05)\n', (4002, 4049), True, 'import numpy as np\n'), ((4093, 4157), 'numpy.allclose', 'np.allclose', (['py_sig_grad', 'numpy_sig_grad'], {'rtol': '(0.001)', 'atol': '(1e-05)'}), '(py_sig_grad, numpy_sig_grad, rtol=0.001, atol=1e-05)\n', (4104, 4157), True, 'import numpy as np\n'), ((2102, 2149), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'dtype'}), '(input_shape, dtype=dtype)\n', (2123, 2149), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((2173, 2207), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (2193, 2207), True, 'import oneflow.compatible.single_client as flow\n'), ((3101, 3144), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (3122, 3144), True, 'import oneflow.compatible.single_client.typing as oft\n'), ((3166, 3210), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['dy_shape'], {'dtype': 'dtype'}), '(dy_shape, dtype=dtype)\n', (3187, 3210), True, 'import 
oneflow.compatible.single_client.typing as oft\n'), ((3248, 3282), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (3268, 3282), True, 'import oneflow.compatible.single_client as flow\n')] |
import oneflow as flow
import numpy as np
import time
import argparse
import torch
import models.pytorch_resnet50 as pytorch_resnet50
from models.resnet50 import resnet50
def _parse_args():
parser = argparse.ArgumentParser("flags for compare oneflow and pytorch speed")
return parser.parse_args()
def main(args):
batch_size = 16
image_nd = np.random.rand(batch_size, 3, 224, 224).astype(np.float32)
label_nd = np.array([e for e in range(batch_size)], dtype=np.int32)
res50_module = resnet50()
# set for eval mode
# res50_module.eval()
image = flow.tensor(image_nd)
label = flow.tensor(label_nd)
corss_entropy = flow.nn.CrossEntropyLoss(reduction="mean")
image_gpu = image.to("cuda")
label = label.to("cuda")
res50_module.to("cuda")
corss_entropy.to("cuda")
learning_rate = 0.01
mom = 0.9
of_sgd = flow.optim.SGD(res50_module.parameters(), lr=learning_rate, momentum=mom)
bp_iters = 50
for_time = 0.0
bp_time = 0.0
update_time = 0.0
print("start oneflow training loop....")
start_t = time.time()
for i in range(bp_iters):
s_t = time.time()
logits = res50_module(image_gpu)
loss = corss_entropy(logits, label)
for_time += time.time() - s_t
s_t = time.time()
loss.backward()
bp_time += time.time() - s_t
s_t = time.time()
of_sgd.step()
of_sgd.zero_grad()
update_time += time.time() - s_t
of_loss = loss.numpy()
end_t = time.time()
print("oneflow traning loop avg time : {}".format((end_t - start_t) / bp_iters))
print("forward avg time : {}".format(for_time / bp_iters))
print("backward avg time : {}".format(bp_time / bp_iters))
print("update parameters avg time : {}".format(update_time / bp_iters))
#####################################################################################################
# pytorch resnet50
torch_res50_module = pytorch_resnet50.resnet50()
# set for eval mode
# torch_res50_module.eval()
torch_res50_module.to("cuda")
torch_sgd = torch.optim.SGD(
torch_res50_module.parameters(), lr=learning_rate, momentum=mom
)
image = torch.tensor(image_nd)
image_gpu = image.to("cuda")
corss_entropy = torch.nn.CrossEntropyLoss()
corss_entropy.to("cuda")
label = torch.tensor(label_nd, dtype=torch.long).to("cuda")
for_time = 0.0
bp_time = 0.0
update_time = 0.0
print("start pytorch training loop....")
start_t = time.time()
for i in range(bp_iters):
s_t = time.time()
logits = torch_res50_module(image_gpu)
loss = corss_entropy(logits, label)
for_time += time.time() - s_t
s_t = time.time()
loss.backward()
bp_time += time.time() - s_t
s_t = time.time()
torch_sgd.step()
torch_sgd.zero_grad()
update_time += time.time() - s_t
torch_loss = loss.cpu().detach().numpy()
end_t = time.time()
print("pytorch traning loop avg time : {}".format((end_t - start_t) / bp_iters))
print("forward avg time : {}".format(for_time / bp_iters))
print("backward avg time : {}".format(bp_time / bp_iters))
print("update parameters avg time : {}".format(update_time / bp_iters))
if __name__ == "__main__":
args = _parse_args()
main(args)
| [
"oneflow.tensor",
"oneflow.nn.CrossEntropyLoss"
] | [((207, 277), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for compare oneflow and pytorch speed"""'], {}), "('flags for compare oneflow and pytorch speed')\n", (230, 277), False, 'import argparse\n'), ((514, 524), 'models.resnet50.resnet50', 'resnet50', ([], {}), '()\n', (522, 524), False, 'from models.resnet50 import resnet50\n'), ((587, 608), 'oneflow.tensor', 'flow.tensor', (['image_nd'], {}), '(image_nd)\n', (598, 608), True, 'import oneflow as flow\n'), ((621, 642), 'oneflow.tensor', 'flow.tensor', (['label_nd'], {}), '(label_nd)\n', (632, 642), True, 'import oneflow as flow\n'), ((663, 705), 'oneflow.nn.CrossEntropyLoss', 'flow.nn.CrossEntropyLoss', ([], {'reduction': '"""mean"""'}), "(reduction='mean')\n", (687, 705), True, 'import oneflow as flow\n'), ((1091, 1102), 'time.time', 'time.time', ([], {}), '()\n', (1100, 1102), False, 'import time\n'), ((1527, 1538), 'time.time', 'time.time', ([], {}), '()\n', (1536, 1538), False, 'import time\n'), ((1982, 2009), 'models.pytorch_resnet50.resnet50', 'pytorch_resnet50.resnet50', ([], {}), '()\n', (2007, 2009), True, 'import models.pytorch_resnet50 as pytorch_resnet50\n'), ((2225, 2247), 'torch.tensor', 'torch.tensor', (['image_nd'], {}), '(image_nd)\n', (2237, 2247), False, 'import torch\n'), ((2301, 2328), 'torch.nn.CrossEntropyLoss', 'torch.nn.CrossEntropyLoss', ([], {}), '()\n', (2326, 2328), False, 'import torch\n'), ((2542, 2553), 'time.time', 'time.time', ([], {}), '()\n', (2551, 2553), False, 'import time\n'), ((3008, 3019), 'time.time', 'time.time', ([], {}), '()\n', (3017, 3019), False, 'import time\n'), ((1147, 1158), 'time.time', 'time.time', ([], {}), '()\n', (1156, 1158), False, 'import time\n'), ((1297, 1308), 'time.time', 'time.time', ([], {}), '()\n', (1306, 1308), False, 'import time\n'), ((1385, 1396), 'time.time', 'time.time', ([], {}), '()\n', (1394, 1396), False, 'import time\n'), ((2598, 2609), 'time.time', 'time.time', ([], {}), '()\n', (2607, 2609), False, 'import 
time\n'), ((2754, 2765), 'time.time', 'time.time', ([], {}), '()\n', (2763, 2765), False, 'import time\n'), ((2842, 2853), 'time.time', 'time.time', ([], {}), '()\n', (2851, 2853), False, 'import time\n'), ((363, 402), 'numpy.random.rand', 'np.random.rand', (['batch_size', '(3)', '(224)', '(224)'], {}), '(batch_size, 3, 224, 224)\n', (377, 402), True, 'import numpy as np\n'), ((1264, 1275), 'time.time', 'time.time', ([], {}), '()\n', (1273, 1275), False, 'import time\n'), ((1352, 1363), 'time.time', 'time.time', ([], {}), '()\n', (1361, 1363), False, 'import time\n'), ((1469, 1480), 'time.time', 'time.time', ([], {}), '()\n', (1478, 1480), False, 'import time\n'), ((2370, 2410), 'torch.tensor', 'torch.tensor', (['label_nd'], {'dtype': 'torch.long'}), '(label_nd, dtype=torch.long)\n', (2382, 2410), False, 'import torch\n'), ((2721, 2732), 'time.time', 'time.time', ([], {}), '()\n', (2730, 2732), False, 'import time\n'), ((2809, 2820), 'time.time', 'time.time', ([], {}), '()\n', (2818, 2820), False, 'import time\n'), ((2932, 2943), 'time.time', 'time.time', ([], {}), '()\n', (2941, 2943), False, 'import time\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow.python.framework.op_arg_util as op_arg_util
import oneflow.core.job.placement_pb2 as placement_pb
import oneflow.python.eager.symbol as symbol_util
import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb
import random
class BoxingToMiddle(object):
def __init__(
self,
boxing_method,
get_middle_parallel_desc_symbol,
get_middle_sbp_parallel,
verbose=False,
):
self.boxing_method_ = boxing_method
self.get_middle_op_arg_parallel_attr_ = MiddleOpArgParallelAttr(
get_middle_parallel_desc_symbol, get_middle_sbp_parallel,
)
self.verbose_ = verbose
@property
def boxing_method(self):
return self.boxing_method_
@property
def get_middle_op_arg_parallel_attr(self):
return self.get_middle_op_arg_parallel_attr_
@property
def verbose(self):
return self.verbose_
def MiddleOpArgParallelAttr(get_parallel_desc_symbol, get_sbp_parallel):
def GetOpArgParallelAttr(
builder, produced_blob_object, consumer_op_arg_parallel_attr
):
return op_arg_util.OpArgParallelAttribute(
get_parallel_desc_symbol(
builder, produced_blob_object, consumer_op_arg_parallel_attr
),
get_sbp_parallel(
builder, produced_blob_object, consumer_op_arg_parallel_attr
),
produced_blob_object.op_arg_parallel_attr.opt_mirrored_parallel,
)
return GetOpArgParallelAttr
def ReplaceProducerDeviceTag(new_device_tag):
def Getter(builder, produced_blob_object, consumer_op_arg_parallel_attr):
x_parallel_attr = produced_blob_object.op_arg_parallel_attr
return TryReplaceDeviceTag(
builder, x_parallel_attr.parallel_desc_symbol, new_device_tag
)
return Getter
def ProducerRandomParallelIdPerMachine(device_tag=None):
def Getter(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return RandomParallelIdPerMachine(
produced_blob_object.parallel_desc_symbol,
device_tag=device_tag,
builder=builder,
)
return Getter
def ConsumerRandomParallelIdPerMachine(device_tag=None):
def Getter(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return RandomParallelIdPerMachine(
consumer_op_arg_parallel_attr.parallel_desc_symbol,
device_tag=device_tag,
builder=builder,
)
return Getter
def ProducerParallelDesc(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return produced_blob_object.parallel_desc_symbol
def ConsumerParallelDesc(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return consumer_op_arg_parallel_attr.parallel_desc_symbol
def ReplaceConsumerDeviceTag(new_device_tag):
def Getter(builder, produced_blob_object, consumer_op_arg_parallel_attr):
parallel_desc_sym = consumer_op_arg_parallel_attr.parallel_desc_symbol
return TryReplaceDeviceTag(builder, parallel_desc_sym, new_device_tag)
return Getter
def BroadcastParallel(builder, produced_blob_object, consumer_op_arg_parallel_attr):
sbp_parallel = sbp_parallel_pb.SbpParallel()
sbp_parallel.broadcast_parallel.SetInParent()
return sbp_parallel
def ProducerSbpParallel(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return produced_blob_object.op_arg_parallel_attr.sbp_parallel
def ConsumerSbpParallel(builder, produced_blob_object, consumer_op_arg_parallel_attr):
return consumer_op_arg_parallel_attr.sbp_parallel
def TryReplaceDeviceTag(builder, parallel_desc_symbol, device_tag):
if parallel_desc_symbol.device_tag == device_tag:
return parallel_desc_symbol
else:
return ReplaceDeviceTag(parallel_desc_symbol, device_tag, builder=builder)
def ReplaceDeviceTag(parallel_desc_symbol, device_tag, builder=None):
assert parallel_desc_symbol.device_tag != device_tag
parallel_conf = placement_pb.ParallelConf()
parallel_conf.device_tag = device_tag
for device_name in parallel_desc_symbol.parallel_conf.device_name:
parallel_conf.device_name.append(device_name)
if builder is None:
return symbol_util.ParallelDescSymbol(
parallel_desc_symbol.symbol_id, parallel_conf
)
else:
return builder.GetParallelDescSymbol(parallel_conf)
def RandomParallelIdPerMachine(parallel_desc_symbol, device_tag=None, builder=None):
if device_tag is None:
device_tag = parallel_desc_symbol.parallel_conf.device_tag
assert device_tag is not None
parallel_conf = placement_pb.ParallelConf()
parallel_conf.device_tag = device_tag
for machine_id, dev_ids in parallel_desc_symbol.machine_id2device_id_list.items():
dev_id = dev_ids[random.randint(0, len(dev_ids) - 1)]
parallel_conf.device_name.append("%s:%s" % (machine_id, dev_id))
if builder is None:
return symbol_util.ParallelDescSymbol(
parallel_desc_symbol.symbol_id, parallel_conf
)
else:
return builder.GetParallelDescSymbol(parallel_conf)
| [
"oneflow.core.job.sbp_parallel_pb2.SbpParallel",
"oneflow.core.job.placement_pb2.ParallelConf",
"oneflow.python.eager.symbol.ParallelDescSymbol"
] | [((3854, 3883), 'oneflow.core.job.sbp_parallel_pb2.SbpParallel', 'sbp_parallel_pb.SbpParallel', ([], {}), '()\n', (3881, 3883), True, 'import oneflow.core.job.sbp_parallel_pb2 as sbp_parallel_pb\n'), ((4658, 4685), 'oneflow.core.job.placement_pb2.ParallelConf', 'placement_pb.ParallelConf', ([], {}), '()\n', (4683, 4685), True, 'import oneflow.core.job.placement_pb2 as placement_pb\n'), ((5297, 5324), 'oneflow.core.job.placement_pb2.ParallelConf', 'placement_pb.ParallelConf', ([], {}), '()\n', (5322, 5324), True, 'import oneflow.core.job.placement_pb2 as placement_pb\n'), ((4892, 4969), 'oneflow.python.eager.symbol.ParallelDescSymbol', 'symbol_util.ParallelDescSymbol', (['parallel_desc_symbol.symbol_id', 'parallel_conf'], {}), '(parallel_desc_symbol.symbol_id, parallel_conf)\n', (4922, 4969), True, 'import oneflow.python.eager.symbol as symbol_util\n'), ((5628, 5705), 'oneflow.python.eager.symbol.ParallelDescSymbol', 'symbol_util.ParallelDescSymbol', (['parallel_desc_symbol.symbol_id', 'parallel_conf'], {}), '(parallel_desc_symbol.symbol_id, parallel_conf)\n', (5658, 5705), True, 'import oneflow.python.eager.symbol as symbol_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.device as oneflow_device
import oneflow_api
@oneflow_export("tensor")
class Tensor:
def __init__(
self,
shape,
dtype,
device=None,
requires_grad=False,
retain_grad=False,
placement=None,
sbp=None,
is_consistent=False,
is_lazy=False,
determining_initializer=None,
):
device = device if device is not None else oneflow_api.device("cpu", 0)
self._local_or_consistent_tensor = None
self._undetermined_tensor = UndeterminedTensor(
shape,
dtype,
device=device,
requires_grad=requires_grad,
retain_grad=retain_grad,
placement=placement,
sbp=sbp,
is_consistent=is_consistent,
is_lazy=is_lazy,
)
if determining_initializer is None:
determining_initializer = _default_initializer_for_determining
self._determining_initializer = determining_initializer
@property
def shape(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.shape
else:
return self._undetermined_tensor.shape
@property
def device(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.device
else:
return self._undetermined_tensor.device
@property
def ndim(self):
return len(self.shape)
@property
def is_cuda(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_cuda
else:
return self._undetermined_tensor.is_cuda
@property
def dtype(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.dtype
else:
return self._undetermined_tensor.dtype
@property
def data(self):
TODO()
@property
def grad(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.grad
else:
return None
@property
def grad_fn(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.grad_fn
else:
return None
@property
def requires_grad(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.requires_grad
else:
return self._undetermined_tensor.requires_grad
@property
def is_leaf(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_leaf
else:
return True
def size(self):
return self.shape
def dim(self, idx):
return self.shape[idx]
def ndimension(self):
return self.ndim
def get_device(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.device
else:
return self._undetermined_tensor.device
def nelemenet(self):
prod = 1
for dim in self.shape:
prod *= dim
return prod
def data_ptr(self):
TODO()
def element_size(self):
TODO()
def numpy(self):
TODO()
def tolist(self):
TODO()
def backward(self):
TODO()
def __str__(self):
TODO()
def __repr__(self):
TODO()
def __array__(self):
TODO()
def __sizeof__(self):
TODO()
def __deepcopy__(self):
TODO()
def determine(self, determining_initializer=None):
assert not self.is_determined
if determining_initializer is None:
determining_initializer = self._determining_initializer
self._local_or_consistent_tensor = determining_initializer(
self._undetermined_tensor
)
self._undetermined_tensor = None
@property
def is_determined(self):
if self._local_or_consistent_tensor is not None:
assert self._undetermined_tensor is None
return True
else:
assert self._undetermined_tensor is not None
return False
def set_placement(self, placement):
assert isinstance(placement, oneflow_api.Placement)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
assert self._undetermined_tensor.device is None
self._undetermined_tensor.placement = placement
def set_sbp(self, sbp):
assert isinstance(sbp, oneflow_api.Distribute)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.sbp = sbp
def set_is_consistent(self, is_consistent):
assert isinstance(is_consistent, bool)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.is_consistent = is_consistent
def set_is_lazy(self, is_lazy):
assert isinstance(is_lazy, bool)
assert self._local_or_consistent_tensor is None
assert self._undetermined_tensor is not None
self._undetermined_tensor.is_lazy = is_lazy
@property
def placement(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.placement
else:
return self._undetermined_tensor.placement
@property
def is_lazy(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_lazy
else:
return self._undetermined_tensor.is_lazy
@property
def is_consistent(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.is_consistent
else:
return self._undetermined_tensor.is_consistent
@property
def sbp(self):
if self._local_or_consistent_tensor is not None:
return self._local_or_consistent_tensor.sbp
else:
return self._undetermined_tensor.sbp
class UndeterminedTensor:
def __init__(
self,
shape,
dtype,
device=None,
requires_grad=False,
retain_grad=False,
placement=None,
sbp=None,
is_consistent=False,
is_lazy=False,
):
if not isinstance(shape, oneflow_api.Size):
if not isinstance(shape, tuple):
shape = tuple(shape)
shape = oneflow_api.Size(shape)
device = device if device is not None else oneflow_api.device("cpu", 0)
self.shape = shape
self.dtype = dtype
self.device = device
self.requires_grad = requires_grad
self.retain_grad = retain_grad
self.placement = placement
self.sbp = sbp
self.is_consistent = is_consistent
self.is_lazy = is_lazy
@property
def is_cuda(self):
device_type = None
if self.placement is not None:
device_type = self.placement.device_tag
elif self.device is not None:
device_type = self.device.type
else:
raise ValueError("Neither placement nor device found.")
return device_type == "gpu" or device_type == "cuda"
def _default_initializer_for_determining(undetermined_tensor):
TODO()
| [
"oneflow.python.oneflow_export.oneflow_export"
] | [((726, 750), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""tensor"""'], {}), "('tensor')\n", (740, 750), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1094, 1122), 'oneflow_api.device', 'oneflow_api.device', (['"""cpu"""', '(0)'], {}), "('cpu', 0)\n", (1112, 1122), False, 'import oneflow_api\n'), ((7402, 7425), 'oneflow_api.Size', 'oneflow_api.Size', (['shape'], {}), '(shape)\n', (7418, 7425), False, 'import oneflow_api\n'), ((7477, 7505), 'oneflow_api.device', 'oneflow_api.device', (['"""cpu"""', '(0)'], {}), "('cpu', 0)\n", (7495, 7505), False, 'import oneflow_api\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Callable, Dict, Iterator, List, Union
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client.nn.optimizer.optimizer import (
Optimizer,
ParamGroup,
)
from oneflow.compatible.single_client.nn.parameter import Parameter
class RMSprop(Optimizer):
"""Implements RMSprop algorithm.
oot Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning
rate method. The original slides proposed RMSProp: Slide 29 of
http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .
The original equation is as follows:
.. math::
r(w, t) = \\alpha r(w, t-1) + (1 - \\alpha)(\\nabla Q_{i}(w))^2
W = w - \\frac{\\eta} {\\\\sqrt{r(w,t) + \\epsilon}} \\nabla Q_{i}(w)
The first equation calculates moving average of the squared gradient for
each weight. Then dividing the gradient by :math:`sqrt{v(w,t)}`.
In some cases, adding a momentum term :math: `\\beta` is beneficial.
In our implementation, Nesterov momentum is used:
.. math::
r(w, t) = \\alpha r(w, t-1) + (1 - \\alpha)(\\nabla Q_{i}(w))^2
v(w, t) = \\beta v(w, t-1) + \\frac{\\eta} {\\\\sqrt{r(w,t) +
\\epsilon}} \\nabla Q_{i}(w)
w = w - v(w, t)
if centered is True:
.. math::
r(w, t) = \\alpha r(w, t-1) + (1 - \\alpha)(\\nabla Q_{i}(w))^2
g(w, t) = \\alpha g(w, t-1) + (1 - \\alpha)\\nabla Q_{i}(w)
v(w, t) = \\beta v(w, t-1) + \\frac{\\eta} {\\\\sqrt{r(w,t) - (g(w, t))^2 +
\\epsilon}} \\nabla Q_{i}(w)
w = w - v(w, t)
where, :math:`\\alpha` is a hyperparameter and typical values are 0.99, 0.95
and so on. :math:`\\beta` is the momentum term. :math:`\\epsilon` is a
smoothing term to avoid division by zero, usually set somewhere in range
from 1e-4 to 1e-8.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-2)
momentum (float, optional): momentum factor (default: 0, oneflow not support momenmtum > 0 now!)
alpha (float, optional): smoothing constant (default: 0.99)
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-8)
centered (bool, optional) : if ``True``, compute the centered RMSProp,
the gradient is normalized by an estimation of its variance
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
"""
def __init__(
self,
parameters: Union[Iterator[Parameter], List[Dict]],
lr: float = 0.001,
alpha: float = 0.99,
eps: float = 1e-08,
weight_decay: float = 0,
momentum: float = 0.0,
centered: bool = False,
scale: float = 1.0,
):
super().__init__()
assert lr >= 0.0, f"Invalid learning rate: {lr}"
assert alpha >= 0.0, f"Invalid alpha value: {alpha}"
assert eps >= 0.0, f"Invalid epsilon value: {eps}"
assert weight_decay >= 0.0, f"Invalid weight_decay value: {weight_decay}"
assert scale > 0.0, f"Invalid scale factor: {scale}"
assert momentum == 0.0, "Not support momentum greater than zeros now!"
self._default_options["lr"] = lr
self._default_options["alpha"] = alpha
self._default_options["eps"] = eps
self._default_options["weight_decay"] = weight_decay
self._default_options["centered"] = centered
self._default_options["scale"] = scale
if isinstance(parameters, collections.abc.Iterator):
self.param_groups.append(ParamGroup(parameters, self._default_options))
else:
for param in parameters:
self.param_groups.append(ParamGroup(param, self._default_options))
for param_group in self.param_groups:
for param in param_group.parameters:
assert param.is_leaf, "parameters must be leaf tensor"
self._state[param] = dict()
self._state[param]["square_avg"] = flow.experimental.zeros_like(param)
if param_group["centered"]:
self._state[param]["grad_avg"] = flow.experimental.zeros_like(param)
self._centered_rmsprop = (
flow.stateful_op("rmsprop_update")
.Input("model")
.Input("model_diff")
.Input("mean_square")
.Input("mean_gradient")
.Build()
)
self._rmsprop = (
flow.stateful_op("rmsprop_update")
.Input("model")
.Input("model_diff")
.Input("mean_square")
.Build()
)
    def step(self, closure: Callable = None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.

        Returns:
            The loss returned by ``closure``, or ``None`` when none is given.
        """
        with flow.no_grad():
            loss = None
            if closure is not None:
                loss = closure()
            for param_group in self.param_groups:
                # Per-group hyper-parameters forwarded to the update kernel.
                kwargs = {
                    "learning_rate": param_group["lr"],
                    "scale": param_group["scale"],
                    "epsilon": param_group["eps"],
                    "decay_rate": param_group["alpha"],
                    "weight_decay": param_group["weight_decay"],
                }
                for param in param_group.parameters:
                    if param.grad is None:
                        # Parameter did not participate in the backward pass.
                        continue
                    ms_tensor = self._state[param]["square_avg"]
                    if param_group["centered"]:
                        # Centered variant consumes the running gradient mean
                        # allocated in __init__ as well.
                        mg_tensor = self._state[param]["grad_avg"]
                        flow._C.dispatch_rmsprop_update(
                            self._centered_rmsprop,
                            (param, param.grad, ms_tensor, mg_tensor),
                            centered=True,
                            **kwargs,
                        )
                    else:
                        flow._C.dispatch_rmsprop_update(
                            self._rmsprop, (param, param.grad, ms_tensor), **kwargs
                        )
            # Global step counter, shared across all parameter groups.
            self._state["step"] = self._state["step"] + 1
            return loss
| [
"oneflow.compatible.single_client.stateful_op",
"oneflow.compatible.single_client.no_grad",
"oneflow.compatible.single_client.experimental.zeros_like",
"oneflow.compatible.single_client._C.dispatch_rmsprop_update",
"oneflow.compatible.single_client.nn.optimizer.optimizer.ParamGroup"
] | [((5622, 5636), 'oneflow.compatible.single_client.no_grad', 'flow.no_grad', ([], {}), '()\n', (5634, 5636), True, 'from oneflow.compatible import single_client as flow\n'), ((4316, 4361), 'oneflow.compatible.single_client.nn.optimizer.optimizer.ParamGroup', 'ParamGroup', (['parameters', 'self._default_options'], {}), '(parameters, self._default_options)\n', (4326, 4361), False, 'from oneflow.compatible.single_client.nn.optimizer.optimizer import Optimizer, ParamGroup\n'), ((4758, 4793), 'oneflow.compatible.single_client.experimental.zeros_like', 'flow.experimental.zeros_like', (['param'], {}), '(param)\n', (4786, 4793), True, 'from oneflow.compatible import single_client as flow\n'), ((4455, 4495), 'oneflow.compatible.single_client.nn.optimizer.optimizer.ParamGroup', 'ParamGroup', (['param', 'self._default_options'], {}), '(param, self._default_options)\n', (4465, 4495), False, 'from oneflow.compatible.single_client.nn.optimizer.optimizer import Optimizer, ParamGroup\n'), ((4891, 4926), 'oneflow.compatible.single_client.experimental.zeros_like', 'flow.experimental.zeros_like', (['param'], {}), '(param)\n', (4919, 4926), True, 'from oneflow.compatible import single_client as flow\n'), ((6438, 6565), 'oneflow.compatible.single_client._C.dispatch_rmsprop_update', 'flow._C.dispatch_rmsprop_update', (['self._centered_rmsprop', '(param, param.grad, ms_tensor, mg_tensor)'], {'centered': '(True)'}), '(self._centered_rmsprop, (param, param.grad,\n ms_tensor, mg_tensor), centered=True, **kwargs)\n', (6469, 6565), True, 'from oneflow.compatible import single_client as flow\n'), ((6751, 6843), 'oneflow.compatible.single_client._C.dispatch_rmsprop_update', 'flow._C.dispatch_rmsprop_update', (['self._rmsprop', '(param, param.grad, ms_tensor)'], {}), '(self._rmsprop, (param, param.grad,\n ms_tensor), **kwargs)\n', (6782, 6843), True, 'from oneflow.compatible import single_client as flow\n'), ((5209, 5243), 'oneflow.compatible.single_client.stateful_op', 'flow.stateful_op', 
(['"""rmsprop_update"""'], {}), "('rmsprop_update')\n", (5225, 5243), True, 'from oneflow.compatible import single_client as flow\n'), ((4974, 5008), 'oneflow.compatible.single_client.stateful_op', 'flow.stateful_op', (['"""rmsprop_update"""'], {}), "('rmsprop_update')\n", (4990, 5008), True, 'from oneflow.compatible import single_client as flow\n')] |
import os
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
)
import numpy as np
import time
import oneflow as flow
from oneflow.nn.parallel import DistributedDataParallel as ddp
from config import get_args
from graph import make_train_graph, make_eval_graph
from models.resnet50 import resnet50, Bottleneck
from models.data import make_data_loader
from models.optimizer import make_optimizer
from models.optimizer import make_lr_scheduler
from models.optimizer import make_cross_entropy
from models.accuracy import Accuracy
import utils.logger as log
from utils.stat import CudaUtilMemStat
class Trainer(object):
    """End-to-end ResNet-50 trainer.

    Wires the data loaders, model, optimizer, LR scheduler and logger together
    and runs the train/eval loop in one of three execution modes: plain eager,
    eager + DDP, or nn.Graph (static graph), as selected by the CLI flags.
    """

    def __init__(self):
        args = get_args()
        # Promote every parsed CLI flag to an instance attribute
        # (e.g. self.num_epochs, self.graph, self.ddp, ...).
        for k, v in args.__dict__.items():
            setattr(self, k, v)
        self.rank = flow.env.get_rank()
        self.world_size = flow.env.get_world_size()
        self.cur_epoch = 0
        self.cur_iter = 0
        self.cur_batch = 0
        # Global (consistent) tensors are used when running multi-process
        # without DDP, or always in graph mode.
        self.is_global = (self.world_size > 1 and not self.ddp) or self.graph
        self.is_train = False
        # In graph mode the LR schedule lives inside the graph, so the
        # logger only meters lr in eager mode.
        self.meter_lr = self.graph is False
        self.init_logger()
        flow.boxing.nccl.set_fusion_threshold_mbytes(self.nccl_fusion_threshold_mb)
        flow.boxing.nccl.set_fusion_max_ops_num(self.nccl_fusion_max_ops)
        if self.use_fp16 and self.num_nodes * self.num_devices_per_node > 1:
            flow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce(False)
        self.model = resnet50(
            zero_init_residual=self.zero_init_residual,
            fuse_bn_relu=self.fuse_bn_relu,
            fuse_bn_add_relu=self.fuse_bn_add_relu,
            channel_last=self.channel_last,
        )
        self.init_model()
        self.cross_entropy = make_cross_entropy(args)
        self.train_data_loader = make_data_loader(
            args, "train", self.is_global, self.synthetic_data
        )
        self.val_data_loader = make_data_loader(
            args, "validation", self.is_global, self.synthetic_data
        )
        self.optimizer = make_optimizer(args, self.model)
        self.lr_scheduler = make_lr_scheduler(args, self.optimizer)
        self.acc = Accuracy()
        if self.graph:
            # Static-graph wrappers for training and evaluation.
            self.train_graph = make_train_graph(
                self.model,
                self.cross_entropy,
                self.train_data_loader,
                self.optimizer,
                self.lr_scheduler,
                return_pred_and_label=self.metric_train_acc,
            )
            self.eval_graph = make_eval_graph(self.model, self.val_data_loader)
        if self.gpu_stat_file is not None:
            self.gpu_stat = CudaUtilMemStat(
                f"rank{self.rank}_" + self.gpu_stat_file, only_ordinal=self.rank
            )
        else:
            self.gpu_stat = None

    def init_model(self):
        """Place the model on device(s), initialize or load its parameters,
        and optionally wrap it with DDP / save the initial checkpoint."""
        self.logger.print("***** Model Init *****", print_ranks=[0])
        start_t = time.perf_counter()
        if self.is_global:
            # Broadcast the same weights to every device.
            placement = flow.env.all_device_placement("cuda")
            self.model = self.model.to_global(
                placement=placement, sbp=flow.sbp.broadcast
            )
        else:
            self.model = self.model.to("cuda")
        if self.load_path is None:
            self.legacy_init_parameters()
        else:
            self.load_state_dict()
        if self.ddp:
            self.model = ddp(self.model)
        if self.save_init:
            self.save("init")
        end_t = time.perf_counter()
        # NOTE(review): "escapled" is a typo ("elapsed") baked into the log
        # string; left as-is since this is runtime output text.
        self.logger.print(
            f"***** Model Init Finish, time escapled: {end_t - start_t:.5f} s *****",
            print_ranks=[0],
        )

    def legacy_init_parameters(self):
        """Re-initialize conv/linear weights (kaiming) and zero the last BN
        gamma of each Bottleneck, mimicking the legacy Benchmark init."""
        if not self.legacy_init:
            return
        for m in self.model.modules():
            # NOTE(zwx): legacy BatchNorm initializer in Benchmark seems wrong, so don't follow it
            if isinstance(m, flow.nn.Conv2d):
                flow.nn.init.kaiming_normal_(
                    m.weight, mode="fan_in", nonlinearity="relu"
                )
            elif isinstance(m, flow.nn.Linear):
                flow.nn.init.kaiming_normal_(
                    m.weight, mode="fan_in", nonlinearity="relu"
                )
                flow.nn.init.constant_(m.bias, 0)
            elif isinstance(m, flow.nn.BatchNorm2d):
                flow.nn.init.constant_(m.weight, 1)
                flow.nn.init.constant_(m.bias, 0)
        for m in self.model.modules():
            if isinstance(m, Bottleneck):
                # Zero-init the last BN in each residual branch so blocks
                # start as identity mappings.
                flow.nn.init.constant_(m.bn3.weight, 0)

    def load_state_dict(self):
        """Load model weights from ``self.load_path``.

        In global mode rank 0 reads and broadcasts; in local mode only rank 0
        loads (other ranks return without touching the model).
        """
        self.logger.print(f"Loading model from {self.load_path}", print_ranks=[0])
        if self.is_global:
            state_dict = flow.load(self.load_path, global_src_rank=0)
        elif self.rank == 0:
            state_dict = flow.load(self.load_path)
        else:
            return
        self.model.load_state_dict(state_dict)

    def init_logger(self):
        """Create the metric logger and register all tracked metrics."""
        if self.metric_local:
            # Every rank prints its own (local) metrics.
            print_ranks = list(range(self.world_size))
        else:
            print_ranks = [0]
        self.logger = log.get_logger(self.rank, print_ranks)
        self.logger.register_metric("job", log.IterationMeter(), "[{}]")
        self.logger.register_metric("epoch", log.IterationMeter(), "epoch: {}/{}")
        self.logger.register_metric("iter", log.IterationMeter(), "iter: {}/{}")
        self.logger.register_metric("loss", log.AverageMeter(), "loss: {:.5f}", True)
        if self.meter_lr:
            self.logger.register_metric("lr", log.IterationMeter(), "lr: {:.6f}")
        self.logger.register_metric("top1", log.AverageMeter(), "top1: {:.5f}", True)
        time_meter_str = (
            "throughput: {:.2f}, timestamp: {:.6f}"
            if self.print_timestamp
            else "throughput: {:.2f}"
        )
        self.logger.register_metric(
            "time", log.TimeMeter(self.print_timestamp), time_meter_str, True
        )

    def meter(
        self,
        epoch_pg=None,
        iter_pg=None,
        loss=None,
        lr=None,
        top1=None,
        num_samples=1,
        do_print=False,
    ):
        """Feed one round of metric values into the logger.

        ``epoch_pg``/``iter_pg`` are (current, total) overrides; by default
        the trainer's own progress counters are reported. Prints (and samples
        GPU stats) only when ``do_print`` is True.
        """
        self.logger.meter("job", "train" if self.is_train else "eval")
        self.logger.meter("epoch", epoch_pg or (self.cur_epoch, self.num_epochs))
        self.logger.meter("iter", iter_pg or (self.cur_iter, self.batches_per_epoch))
        if loss is not None:
            self.logger.meter("loss", loss)
        if lr is not None and self.meter_lr:
            self.logger.meter("lr", lr)
        if top1 is not None:
            self.logger.meter("top1", top1)
        self.logger.meter("time", num_samples)
        if do_print:
            self.logger.print_metrics()
            if self.gpu_stat is not None:
                self.gpu_stat.stat()

    def meter_train_iter(self, loss, top1):
        """Meter one training iteration; prints on the configured interval
        and on the last iteration of the epoch."""
        assert self.is_train is True
        lr = None
        if self.meter_lr:
            lr = self.optimizer.param_groups[0]["lr"]
        do_print = (
            self.cur_iter % self.print_interval == 0
            or self.cur_iter == self.batches_per_epoch
        )
        self.meter(
            loss=loss,
            lr=lr,
            top1=top1,
            num_samples=self.train_batch_size,
            do_print=do_print,
        )

    def __call__(self):
        """Entry point: run the full training schedule."""
        self.train()

    def train(self):
        """Run ``num_epochs`` epochs (or until ``total_batches`` is reached),
        evaluating and checkpointing after each epoch."""
        self.logger.metric("time").reset()
        for _ in range(self.num_epochs):
            self.train_one_epoch()
            if self.cur_batch == self.total_batches:
                break
            if not self.skip_eval:
                acc = self.eval()
            else:
                acc = 0
            # Checkpoint directory name encodes epoch and validation accuracy.
            save_dir = f"epoch_{self.cur_epoch}_val_acc_{acc}"
            self.save(save_dir)
            self.cur_epoch += 1
            self.cur_iter = 0

    def train_one_epoch(self):
        """Run one epoch of training iterations (graph or eager) and meter
        loss/accuracy per iteration."""
        self.model.train()
        self.is_train = True
        for _ in range(self.batches_per_epoch):
            if self.graph:
                loss, pred, label = self.train_graph()
            else:
                loss, pred, label = self.train_eager()
            self.cur_iter += 1
            # Convert to local tensors before metering.
            loss = tol(loss, self.metric_local)
            if pred is not None and label is not None:
                pred = tol(pred, self.metric_local)
                label = tol(label, self.metric_local)
                top1_acc = self.acc([pred], [label])
            else:
                top1_acc = 0
            self.meter_train_iter(loss, top1_acc)
            self.cur_batch += 1
            if self.cur_batch == self.total_batches:
                break

    def train_eager(self):
        """One eager-mode training step: forward, backward (with world-size
        gradient rescaling), optimizer and scheduler step."""
        loss, pred, label = self.forward()
        if loss.is_global and self.scale_grad:
            # NOTE(zwx): scale init grad with world_size
            # because global_tensor.mean() include dividor numel * world_size
            loss = loss / self.world_size
            loss.backward()
            for param_group in self.optimizer.param_groups:
                for param in param_group.parameters:
                    param.grad /= self.world_size
        else:
            loss.backward()
            # Rescale the reported loss for consistent metering.
            loss = loss / self.world_size
        self.optimizer.step()
        self.optimizer.zero_grad()
        if self.lr_scheduler:
            self.lr_scheduler.step()
        return loss, pred, label

    def eval(self):
        """Run a full validation pass and return top-1 accuracy."""
        self.model.eval()
        self.is_train = False
        preds, labels = [], []
        for _ in range(self.val_batches_per_epoch):
            if self.graph:
                pred, label = self.eval_graph()
            else:
                pred, label = self.inference()
            preds.append(tton(pred, self.metric_local))
            labels.append(tton(label, self.metric_local))
        top1_acc = calc_acc(preds, labels)
        self.meter(
            iter_pg=(self.val_batches_per_epoch, self.val_batches_per_epoch),
            loss=0.0,
            top1=top1_acc,
            num_samples=self.val_batch_size * self.val_batches_per_epoch,
            do_print=True,
        )
        return top1_acc

    def forward(self):
        """Fetch one training batch and compute the loss (plus softmax
        predictions when training accuracy is being tracked)."""
        image, label = self.train_data_loader()
        image = image.to("cuda")
        label = label.to("cuda")
        logits = self.model(image)
        loss = self.cross_entropy(logits, label)
        if self.metric_train_acc:
            pred = logits.softmax()
            return loss, pred, label
        else:
            return loss, None, None

    def inference(self):
        """Fetch one validation batch and return (softmax predictions, labels)
        without tracking gradients."""
        image, label = self.val_data_loader()
        image = image.to("cuda")
        label = label.to("cuda")
        with flow.no_grad():
            logits = self.model(image)
            pred = logits.softmax()
        return pred, label

    def save(self, subdir):
        """Save the model state dict under ``save_path/subdir``.

        No-op when no save path is configured. In global mode rank 0 gathers
        and writes; in local mode only rank 0 writes.
        """
        if self.save_path is None:
            return
        save_path = os.path.join(self.save_path, subdir)
        self.logger.print(f"Saving model to {save_path}", print_ranks=[0])
        state_dict = self.model.state_dict()
        if self.is_global:
            flow.save(state_dict, save_path, global_dst_rank=0)
        elif self.rank == 0:
            flow.save(state_dict, save_path)
        else:
            return
def tol(tensor, pure_local=True):
    """Convert a (possibly global) tensor to a local tensor.

    A local tensor is returned unchanged. A global tensor is either taken
    apart directly (``pure_local=True``) or first broadcast across ranks and
    then localized, so every rank sees the full value.
    """
    if not tensor.is_global:
        return tensor
    if pure_local:
        return tensor.to_local()
    return tensor.to_global(sbp=flow.sbp.broadcast).to_local()
def tton(tensor, local_only=True):
    """Convert a (possibly global) tensor to a numpy array.

    Local tensors are converted directly. Global tensors are localized first,
    either from the rank-local shard (``local_only=True``) or after a
    broadcast so every rank gets the full array.
    """
    if not tensor.is_global:
        return tensor.numpy()
    if local_only:
        return tensor.to_local().numpy()
    return tensor.to_global(sbp=flow.sbp.broadcast).to_local().numpy()
def calc_acc(preds, labels):
    """Compute top-1 accuracy over batches of class scores.

    Args:
        preds: iterable of (batch, num_classes) score/logit arrays.
        labels: iterable of (batch,) integer label arrays, aligned with preds.

    Returns:
        float: fraction of samples whose argmax matches the label, or 0.0
        when no samples are given (previously raised ZeroDivisionError).
    """
    correct_of = 0.0
    num_samples = 0
    for pred, label in zip(preds, labels):
        # Predicted class index for each sample in the batch.
        clsidxs = np.argmax(pred, axis=1)
        correct_of += (clsidxs == label).sum()
        num_samples += label.size
    if num_samples == 0:
        # Empty input: define accuracy as 0.0 instead of dividing by zero.
        return 0.0
    top1_acc = correct_of / num_samples
    return top1_acc
if __name__ == "__main__":
    # Build the trainer from CLI flags and launch the full training run.
    trainer = Trainer()
    trainer()
| [
"oneflow.load",
"oneflow.env.get_rank",
"oneflow.env.get_world_size",
"oneflow.boxing.nccl.set_fusion_threshold_mbytes",
"oneflow.env.all_device_placement",
"oneflow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce",
"oneflow.nn.parallel.DistributedDataParallel",
"oneflow.no_grad",
"oneflow.save",
... | [((714, 724), 'config.get_args', 'get_args', ([], {}), '()\n', (722, 724), False, 'from config import get_args\n'), ((821, 840), 'oneflow.env.get_rank', 'flow.env.get_rank', ([], {}), '()\n', (838, 840), True, 'import oneflow as flow\n'), ((867, 892), 'oneflow.env.get_world_size', 'flow.env.get_world_size', ([], {}), '()\n', (890, 892), True, 'import oneflow as flow\n'), ((1163, 1238), 'oneflow.boxing.nccl.set_fusion_threshold_mbytes', 'flow.boxing.nccl.set_fusion_threshold_mbytes', (['self.nccl_fusion_threshold_mb'], {}), '(self.nccl_fusion_threshold_mb)\n', (1207, 1238), True, 'import oneflow as flow\n'), ((1247, 1312), 'oneflow.boxing.nccl.set_fusion_max_ops_num', 'flow.boxing.nccl.set_fusion_max_ops_num', (['self.nccl_fusion_max_ops'], {}), '(self.nccl_fusion_max_ops)\n', (1286, 1312), True, 'import oneflow as flow\n'), ((1485, 1651), 'models.resnet50.resnet50', 'resnet50', ([], {'zero_init_residual': 'self.zero_init_residual', 'fuse_bn_relu': 'self.fuse_bn_relu', 'fuse_bn_add_relu': 'self.fuse_bn_add_relu', 'channel_last': 'self.channel_last'}), '(zero_init_residual=self.zero_init_residual, fuse_bn_relu=self.\n fuse_bn_relu, fuse_bn_add_relu=self.fuse_bn_add_relu, channel_last=self\n .channel_last)\n', (1493, 1651), False, 'from models.resnet50 import resnet50, Bottleneck\n'), ((1756, 1780), 'models.optimizer.make_cross_entropy', 'make_cross_entropy', (['args'], {}), '(args)\n', (1774, 1780), False, 'from models.optimizer import make_cross_entropy\n'), ((1815, 1883), 'models.data.make_data_loader', 'make_data_loader', (['args', '"""train"""', 'self.is_global', 'self.synthetic_data'], {}), "(args, 'train', self.is_global, self.synthetic_data)\n", (1831, 1883), False, 'from models.data import make_data_loader\n'), ((1937, 2010), 'models.data.make_data_loader', 'make_data_loader', (['args', '"""validation"""', 'self.is_global', 'self.synthetic_data'], {}), "(args, 'validation', self.is_global, self.synthetic_data)\n", (1953, 2010), False, 'from models.data 
import make_data_loader\n'), ((2059, 2091), 'models.optimizer.make_optimizer', 'make_optimizer', (['args', 'self.model'], {}), '(args, self.model)\n', (2073, 2091), False, 'from models.optimizer import make_optimizer\n'), ((2120, 2159), 'models.optimizer.make_lr_scheduler', 'make_lr_scheduler', (['args', 'self.optimizer'], {}), '(args, self.optimizer)\n', (2137, 2159), False, 'from models.optimizer import make_lr_scheduler\n'), ((2179, 2189), 'models.accuracy.Accuracy', 'Accuracy', ([], {}), '()\n', (2187, 2189), False, 'from models.accuracy import Accuracy\n'), ((2934, 2953), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (2951, 2953), False, 'import time\n'), ((3491, 3510), 'time.perf_counter', 'time.perf_counter', ([], {}), '()\n', (3508, 3510), False, 'import time\n'), ((5141, 5179), 'utils.logger.get_logger', 'log.get_logger', (['self.rank', 'print_ranks'], {}), '(self.rank, print_ranks)\n', (5155, 5179), True, 'import utils.logger as log\n'), ((10881, 10917), 'os.path.join', 'os.path.join', (['self.save_path', 'subdir'], {}), '(self.save_path, subdir)\n', (10893, 10917), False, 'import os\n'), ((11936, 11959), 'numpy.argmax', 'np.argmax', (['pred'], {'axis': '(1)'}), '(pred, axis=1)\n', (11945, 11959), True, 'import numpy as np\n'), ((72, 97), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (87, 97), False, 'import os\n'), ((1402, 1462), 'oneflow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce', 'flow.boxing.nccl.enable_use_buffer_to_fuse_all_reduce', (['(False)'], {}), '(False)\n', (1455, 1462), True, 'import oneflow as flow\n'), ((2245, 2406), 'graph.make_train_graph', 'make_train_graph', (['self.model', 'self.cross_entropy', 'self.train_data_loader', 'self.optimizer', 'self.lr_scheduler'], {'return_pred_and_label': 'self.metric_train_acc'}), '(self.model, self.cross_entropy, self.train_data_loader,\n self.optimizer, self.lr_scheduler, return_pred_and_label=self.\n metric_train_acc)\n', (2261, 2406), False, 'from 
graph import make_train_graph, make_eval_graph\n'), ((2539, 2588), 'graph.make_eval_graph', 'make_eval_graph', (['self.model', 'self.val_data_loader'], {}), '(self.model, self.val_data_loader)\n', (2554, 2588), False, 'from graph import make_train_graph, make_eval_graph\n'), ((2661, 2747), 'utils.stat.CudaUtilMemStat', 'CudaUtilMemStat', (["(f'rank{self.rank}_' + self.gpu_stat_file)"], {'only_ordinal': 'self.rank'}), "(f'rank{self.rank}_' + self.gpu_stat_file, only_ordinal=self\n .rank)\n", (2676, 2747), False, 'from utils.stat import CudaUtilMemStat\n'), ((3006, 3043), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cuda"""'], {}), "('cuda')\n", (3035, 3043), True, 'import oneflow as flow\n'), ((3400, 3415), 'oneflow.nn.parallel.DistributedDataParallel', 'ddp', (['self.model'], {}), '(self.model)\n', (3403, 3415), True, 'from oneflow.nn.parallel import DistributedDataParallel as ddp\n'), ((4755, 4799), 'oneflow.load', 'flow.load', (['self.load_path'], {'global_src_rank': '(0)'}), '(self.load_path, global_src_rank=0)\n', (4764, 4799), True, 'import oneflow as flow\n'), ((5223, 5243), 'utils.logger.IterationMeter', 'log.IterationMeter', ([], {}), '()\n', (5241, 5243), True, 'import utils.logger as log\n'), ((5298, 5318), 'utils.logger.IterationMeter', 'log.IterationMeter', ([], {}), '()\n', (5316, 5318), True, 'import utils.logger as log\n'), ((5380, 5400), 'utils.logger.IterationMeter', 'log.IterationMeter', ([], {}), '()\n', (5398, 5400), True, 'import utils.logger as log\n'), ((5461, 5479), 'utils.logger.AverageMeter', 'log.AverageMeter', ([], {}), '()\n', (5477, 5479), True, 'import utils.logger as log\n'), ((5655, 5673), 'utils.logger.AverageMeter', 'log.AverageMeter', ([], {}), '()\n', (5671, 5673), True, 'import utils.logger as log\n'), ((5917, 5952), 'utils.logger.TimeMeter', 'log.TimeMeter', (['self.print_timestamp'], {}), '(self.print_timestamp)\n', (5930, 5952), True, 'import utils.logger as log\n'), ((10658, 10672), 
'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (10670, 10672), True, 'import oneflow as flow\n'), ((11078, 11129), 'oneflow.save', 'flow.save', (['state_dict', 'save_path'], {'global_dst_rank': '(0)'}), '(state_dict, save_path, global_dst_rank=0)\n', (11087, 11129), True, 'import oneflow as flow\n'), ((3955, 4029), 'oneflow.nn.init.kaiming_normal_', 'flow.nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (3983, 4029), True, 'import oneflow as flow\n'), ((4548, 4587), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['m.bn3.weight', '(0)'], {}), '(m.bn3.weight, 0)\n', (4570, 4587), True, 'import oneflow as flow\n'), ((4854, 4879), 'oneflow.load', 'flow.load', (['self.load_path'], {}), '(self.load_path)\n', (4863, 4879), True, 'import oneflow as flow\n'), ((5575, 5595), 'utils.logger.IterationMeter', 'log.IterationMeter', ([], {}), '()\n', (5593, 5595), True, 'import utils.logger as log\n'), ((11171, 11203), 'oneflow.save', 'flow.save', (['state_dict', 'save_path'], {}), '(state_dict, save_path)\n', (11180, 11203), True, 'import oneflow as flow\n'), ((4132, 4206), 'oneflow.nn.init.kaiming_normal_', 'flow.nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_in"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_in', nonlinearity='relu')\n", (4160, 4206), True, 'import oneflow as flow\n'), ((4261, 4294), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4283, 4294), True, 'import oneflow as flow\n'), ((4364, 4399), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['m.weight', '(1)'], {}), '(m.weight, 1)\n', (4386, 4399), True, 'import oneflow as flow\n'), ((4416, 4449), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (4438, 4449), True, 'import oneflow as flow\n')] |
import oneflow as flow
import os
import numpy as np
import argparse
import shutil
import pickle
from tqdm import tqdm
import json
from model import textCNN
from training import train, _eval
import utils
def _parse_args():
parser = argparse.ArgumentParser("flags for train TextCNN")
parser.add_argument(
"--save_checkpoint_path",
type=str,
default="./checkpoints",
help="save checkpoint root dir",
)
parser.add_argument(
"--save_vocab_path", type=str, default="vocab.pkl", help="save vocab root dir"
)
parser.add_argument(
"--load_checkpoint", type=str, default="", help="load checkpoint"
)
parser.add_argument(
"--dataset_path", type=str, default="./aclImdb", help="dataset path"
)
parser.add_argument(
"--no_cuda", action="store_true", default=False, help="disables CUDA training"
)
# training hyper-parameters
parser.add_argument(
"--learning_rate", type=float, default=1e-3, help="learning rate"
)
parser.add_argument("--epochs", type=int, default=15, help="training epochs")
parser.add_argument(
"--train_batch_size", type=int, default=16, help="train batch size"
)
parser.add_argument("--val_batch_size", type=int, default=16, help="val batch size")
parser.add_argument(
"--word_emb_dim", type=int, default=100, help="dimensions of word embeddings"
)
parser.add_argument(
"--conv_channel_size", type=int, default=64, help="channel size of Conv2d"
)
parser.add_argument(
"--kernel_size",
nargs="+",
type=int,
default=[3, 4, 5],
help="channel size of Conv2d",
)
parser.add_argument("--dropout_rate", type=float, default=0.5, help="dropout rate")
parser.add_argument("--num_class", type=int, default=2, help="number of classes")
parser.add_argument(
"--max_seq_len", type=int, default=200, help="maximum allowed sequence length"
)
return parser.parse_args()
def batch_loader(data, label, batch_size, shuffle=True):
    """Split data/label arrays into aligned lists of flow.long tensor batches.

    Args:
        data: numpy array of samples (first axis is the sample axis).
        label: numpy array of labels, aligned with ``data``.
        batch_size: samples per batch; the final batch may be smaller.
        shuffle: when True, apply one shared random permutation to both arrays
            so data and labels stay aligned.

    Returns:
        (x_batch, y_batch): lists of ``flow.long`` tensors of equal length.
    """
    if shuffle:
        permu = np.random.permutation(len(data))
        data, label = data[permu], label[permu]
    batch_n = len(data) // batch_size
    x_batch = [
        flow.tensor(
            data[i * batch_size : (i * batch_size + batch_size)], dtype=flow.long
        )
        for i in range(batch_n)
    ]
    y_batch = [
        flow.tensor(
            label[i * batch_size : (i * batch_size + batch_size)], dtype=flow.long
        )
        for i in range(batch_n)
    ]
    if batch_size * batch_n < len(data):
        # Tail batch with the remaining samples.
        # BUG FIX: the data tail was sliced with len(label) instead of
        # len(data); use each array's own length so the batches stay aligned.
        x_batch += [
            flow.tensor(data[batch_size * batch_n : len(data)], dtype=flow.long)
        ]
        y_batch += [
            flow.tensor(label[batch_size * batch_n : len(label)], dtype=flow.long)
        ]
    return x_batch, y_batch
def main(args):
    """Build vocabulary, data and model from CLI args and train TextCNN.

    Side effects: writes ``config.json`` and the vocabulary pickle, optionally
    loads a checkpoint, and saves checkpoints during training.
    """
    # Persist the model hyper-parameters so inference can rebuild the model.
    config_dct = {
        "word_emb_dim": args.word_emb_dim,
        "dim_channel": args.conv_channel_size,
        "kernel_wins": args.kernel_size,
        "dropout_rate": args.dropout_rate,
        "num_class": args.num_class,
        "max_seq_len": args.max_seq_len,
    }
    with open("config.json", "w") as f:
        json.dump(config_dct, f)
    device = flow.device("cpu") if args.no_cuda else flow.device("cuda")
    x_train, y_train = utils.load_dataset(os.path.join(args.dataset_path, "train"))
    x_test, y_test = utils.load_dataset(os.path.join(args.dataset_path, "test"))
    # Vocabulary is built over train + test so no token is out-of-vocab here.
    vocab_dct = utils.build_vocab(x_train + x_test)
    with open(args.save_vocab_path, "wb") as f:
        pickle.dump(vocab_dct, f)
    x_train = utils.tensorize_data(x_train, vocab_dct)
    x_test = utils.tensorize_data(x_test, vocab_dct)
    y_train, x_train = np.array(y_train), np.array(x_train)
    y_test, x_test = np.array(y_test), np.array(x_test)
    textcnn = textCNN(
        word_emb_dim=args.word_emb_dim,
        vocab_size=len(vocab_dct),
        dim_channel=args.conv_channel_size,
        kernel_wins=args.kernel_size,
        dropout_rate=args.dropout_rate,
        num_class=args.num_class,
        max_seq_len=args.max_seq_len,
    )
    textcnn.to(device)
    optimizer = flow.optim.Adam(textcnn.parameters(), lr=args.learning_rate)
    loss_func = flow.nn.BCEWithLogitsLoss().to(device)
    # Optionally resume from a previously saved checkpoint.
    if args.load_checkpoint != "":
        textcnn.load_state_dict(flow.load(args.load_checkpoint))
    train(
        model=textcnn,
        device=device,
        train_data=(x_train, y_train),
        dev_data=(x_test, y_test),
        loss_func=loss_func,
        optimizer=optimizer,
        epochs=args.epochs,
        train_batch_size=args.train_batch_size,
        eval_batch_size=args.val_batch_size,
        save_path=args.save_checkpoint_path,
    )
if __name__ == "__main__":
    # Parse CLI flags and start training.
    args = _parse_args()
    main(args)
| [
"oneflow.tensor",
"oneflow.device",
"oneflow.nn.BCEWithLogitsLoss",
"oneflow.load"
] | [((239, 289), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for train TextCNN"""'], {}), "('flags for train TextCNN')\n", (262, 289), False, 'import argparse\n'), ((3489, 3524), 'utils.build_vocab', 'utils.build_vocab', (['(x_train + x_test)'], {}), '(x_train + x_test)\n', (3506, 3524), False, 'import utils\n'), ((3623, 3663), 'utils.tensorize_data', 'utils.tensorize_data', (['x_train', 'vocab_dct'], {}), '(x_train, vocab_dct)\n', (3643, 3663), False, 'import utils\n'), ((3677, 3716), 'utils.tensorize_data', 'utils.tensorize_data', (['x_test', 'vocab_dct'], {}), '(x_test, vocab_dct)\n', (3697, 3716), False, 'import utils\n'), ((4394, 4678), 'training.train', 'train', ([], {'model': 'textcnn', 'device': 'device', 'train_data': '(x_train, y_train)', 'dev_data': '(x_test, y_test)', 'loss_func': 'loss_func', 'optimizer': 'optimizer', 'epochs': 'args.epochs', 'train_batch_size': 'args.train_batch_size', 'eval_batch_size': 'args.val_batch_size', 'save_path': 'args.save_checkpoint_path'}), '(model=textcnn, device=device, train_data=(x_train, y_train), dev_data\n =(x_test, y_test), loss_func=loss_func, optimizer=optimizer, epochs=\n args.epochs, train_batch_size=args.train_batch_size, eval_batch_size=\n args.val_batch_size, save_path=args.save_checkpoint_path)\n', (4399, 4678), False, 'from training import train, _eval\n'), ((2258, 2336), 'oneflow.tensor', 'flow.tensor', (['data[i * batch_size:i * batch_size + batch_size]'], {'dtype': 'flow.long'}), '(data[i * batch_size:i * batch_size + batch_size], dtype=flow.long)\n', (2269, 2336), True, 'import oneflow as flow\n'), ((2425, 2504), 'oneflow.tensor', 'flow.tensor', (['label[i * batch_size:i * batch_size + batch_size]'], {'dtype': 'flow.long'}), '(label[i * batch_size:i * batch_size + batch_size], dtype=flow.long)\n', (2436, 2504), True, 'import oneflow as flow\n'), ((3208, 3232), 'json.dump', 'json.dump', (['config_dct', 'f'], {}), '(config_dct, f)\n', (3217, 3232), False, 'import json\n'), ((3247, 
3265), 'oneflow.device', 'flow.device', (['"""cpu"""'], {}), "('cpu')\n", (3258, 3265), True, 'import oneflow as flow\n'), ((3287, 3306), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (3298, 3306), True, 'import oneflow as flow\n'), ((3350, 3390), 'os.path.join', 'os.path.join', (['args.dataset_path', '"""train"""'], {}), "(args.dataset_path, 'train')\n", (3362, 3390), False, 'import os\n'), ((3432, 3471), 'os.path.join', 'os.path.join', (['args.dataset_path', '"""test"""'], {}), "(args.dataset_path, 'test')\n", (3444, 3471), False, 'import os\n'), ((3582, 3607), 'pickle.dump', 'pickle.dump', (['vocab_dct', 'f'], {}), '(vocab_dct, f)\n', (3593, 3607), False, 'import pickle\n'), ((3741, 3758), 'numpy.array', 'np.array', (['y_train'], {}), '(y_train)\n', (3749, 3758), True, 'import numpy as np\n'), ((3760, 3777), 'numpy.array', 'np.array', (['x_train'], {}), '(x_train)\n', (3768, 3777), True, 'import numpy as np\n'), ((3799, 3815), 'numpy.array', 'np.array', (['y_test'], {}), '(y_test)\n', (3807, 3815), True, 'import numpy as np\n'), ((3817, 3833), 'numpy.array', 'np.array', (['x_test'], {}), '(x_test)\n', (3825, 3833), True, 'import numpy as np\n'), ((4249, 4276), 'oneflow.nn.BCEWithLogitsLoss', 'flow.nn.BCEWithLogitsLoss', ([], {}), '()\n', (4274, 4276), True, 'import oneflow as flow\n'), ((4356, 4387), 'oneflow.load', 'flow.load', (['args.load_checkpoint'], {}), '(args.load_checkpoint)\n', (4365, 4387), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import functools
import os
from typing import Any, Callable, Iterable, List, Optional, Tuple
import torch
import oneflow as flow
import oneflow.support.env_var_util
import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module
__all__ = ["profile", "set_profiler_hook", "profile_dual_object"]
def compose(*fs):
    """Compose callables right-to-left: ``compose(f, g)(x) == f(g(x))``.

    With a single callable, returns it unchanged; with none, raises
    ``TypeError`` (from ``functools.reduce`` on an empty sequence).
    """

    def _pair(outer, inner):
        def composed(*args, **kwargs):
            return outer(inner(*args, **kwargs))

        return composed

    return functools.reduce(_pair, fs)
class ProfResult:
    """Wraps one profiling run's raw profiler object plus its metadata.

    Attribute lookups that miss on this wrapper fall through to the wrapped
    profiler object, so callers can use it as a drop-in for the profiler.
    """

    def __init__(
        self,
        prof,
        num,
        kind,
        device,
        thread_num,
        op_name,
        args_description,
        additional_description=None,
    ):
        # Install the raw profiler and all run metadata in one shot.
        self.__dict__.update(
            prof=prof,
            num=num,
            kind=kind,
            device=device,
            thread_num=thread_num,
            op_name=op_name,
            args_description=args_description,
            additional_description=additional_description,
        )

    def __getattr__(self, attr):
        # Only called for attributes not found on the wrapper itself;
        # delegate those to the wrapped profiler object.
        return getattr(self.prof, attr)
# Number of untimed warm-up iterations executed before profiling starts.
WARMUP_NUM = int(os.getenv("ONEFLOW_PROFILE_WARMUP_NUM", 10))
# Number of timed iterations recorded inside the profiler session.
RUN_NUM = int(os.getenv("ONEFLOW_PROFILE_RUN_NUM", 1000))
# When truthy, print each profiler's summary table to stdout.
PROF_VERBOSE = flow.support.env_var_util.parse_boolean_from_env(
    "ONEFLOW_PROFILE_VERBOSE", False
)
# record_function label wrapping the whole timed loop.
END_TO_END = "end-to-end"
def run_torch(
    op,
    args,
    kwargs,
    device,
    num_threads,
    op_name,
    args_description,
    additional_description=None,
):
    """Profile ``op`` with PyTorch's profiler and wrap the outcome.

    Moves tensor arguments to ``device``, runs WARMUP_NUM untimed iterations,
    then RUN_NUM iterations inside a torch.profiler session (CPU activity for
    "cpu", CUDA activity for "cuda") and returns the result as a ProfResult.
    """
    assert device in ["cpu", "cuda"]
    if device == "cpu":
        torch.set_num_threads(num_threads)
        assert torch.get_num_threads() == num_threads
        activities = [torch.profiler.ProfilerActivity.CPU]
    else:
        activities = [torch.profiler.ProfilerActivity.CUDA]

    def to_dev(x):
        return x.to(device) if isinstance(x, torch.Tensor) else x

    args = [to_dev(arg) for arg in args]
    kwargs = {key: to_dev(val) for key, val in kwargs.items()}
    # Warm up outside the profiler so one-time costs are not recorded.
    for _ in range(WARMUP_NUM):
        op(*args, **kwargs)
    if PROF_VERBOSE:
        print(
            f'PyTorch ({f"CPU, num_threads={num_threads}" if device == "cpu" else "GPU"}):'
        )
    with torch.profiler.profile(activities=activities) as prof:
        with torch.profiler.record_function(END_TO_END):
            for _ in range(RUN_NUM):
                op(*args, **kwargs)
    if PROF_VERBOSE:
        print(prof.key_averages().table(row_limit=10))
    return ProfResult(
        prof,
        RUN_NUM,
        "PyTorch",
        device,
        num_threads,
        op_name,
        args_description,
        additional_description,
    )
def run_flow(
    op,
    args,
    kwargs,
    device,
    num_threads,
    op_name,
    args_description,
    additional_description=None,
):
    """Profile ``op`` with OneFlow's profiler and wrap the outcome.

    Mirror of ``run_torch``: moves tensor arguments to ``device``, runs
    WARMUP_NUM untimed iterations, then RUN_NUM iterations inside a
    flow.profiler session and returns the result as a ProfResult.
    """
    assert device in ["cpu", "cuda"]
    if device == "cpu":
        # NOTE: there is no flow.get_num_threads()
        flow.set_num_threads(num_threads)
        activities = [flow.profiler.ProfilerActivity.CPU]
    else:
        activities = [flow.profiler.ProfilerActivity.CUDA]

    def to_dev(x):
        return x.to(device) if isinstance(x, flow.Tensor) else x

    args = [to_dev(arg) for arg in args]
    kwargs = {key: to_dev(val) for key, val in kwargs.items()}
    # Warm up outside the profiler so one-time costs are not recorded.
    for _ in range(WARMUP_NUM):
        op(*args, **kwargs)
    if PROF_VERBOSE:
        print(
            f'OneFlow ({f"CPU, num_threads={num_threads}" if device == "cpu" else "GPU"}):'
        )
    # Bandwidth recording is only meaningful when CUDA activity is profiled.
    with flow.profiler.profile(
        activities=activities,
        record_bandwidth_for_cuda=flow.profiler.ProfilerActivity.CUDA in activities,
    ) as prof:
        with flow.profiler.record_function(END_TO_END):
            for _ in range(RUN_NUM):
                op(*args, **kwargs)
    if PROF_VERBOSE:
        print(prof.key_averages())
    return ProfResult(
        prof,
        RUN_NUM,
        "OneFlow",
        device,
        num_threads,
        op_name,
        args_description,
        additional_description,
    )
def profile_dual_object(op):
    """Turn a DualObject op into a callable that profiles both backends.

    The returned callable runs the OneFlow flavor first, then the
    PyTorch flavor, each across every configured hardware setting, and
    feeds the collected ProfResult list through the module profiler hook.
    """
    assert isinstance(op, dual_object_module.DualObject)
    torch_op = op.pytorch
    flow_op = op.oneflow

    def profiled_op(*args, **kwargs):
        # Optional marker kwarg, stripped before the op is dispatched.
        additional_description = kwargs.pop("profile_description", None)
        (
            torch_args,
            torch_kwargs,
            flow_args,
            flow_kwargs,
        ) = dual_object_module.get_args(torch_op, *args, **kwargs)
        op_name = dual_object_module.to_string(op)
        args_description = dual_object_module.to_string(*args, **kwargs)
        results = []
        # OneFlow runs come first in the result list, matching the
        # original flow-then-torch ordering.
        for runner, target, call_args, call_kwargs in (
            (run_flow, flow_op, flow_args, flow_kwargs),
            (run_torch, torch_op, torch_args, torch_kwargs),
        ):
            for hardware_info in _hardware_info_list:
                results.append(
                    runner(
                        target,
                        call_args,
                        call_kwargs,
                        *hardware_info,
                        op_name,
                        args_description,
                        additional_description,
                    )
                )
        return _profiler_hook(results)

    return profiled_op
# One hardware configuration to profile on: (device_type, num_threads).
# num_threads is only meaningful for "cpu"; it is None for "cuda".
HardwareInfo = Tuple[str, Optional[int]]  # (device_type, num_threads)
_hardware_info_list: List[HardwareInfo] = [("cpu", 1), ("cuda", None)]
# Hook applied to the collected ProfResult list before profiled_op
# returns it; defaults to the identity function.
_profiler_hook: Callable[[List[ProfResult]], Any] = lambda x: x
def set_hardware_info_list(hardware_info_list: List[HardwareInfo]) -> None:
    """Replace the module-wide list of hardware configs used by profile_dual_object."""
    global _hardware_info_list
    _hardware_info_list = hardware_info_list
def set_profiler_hook(hook: Callable[[List[ProfResult]], Any]) -> None:
    """Install a callback that post-processes the ProfResult list from profiled ops."""
    global _profiler_hook
    _profiler_hook = hook
def profile(op):
    """Decorator factory that marks a function as profiling ``op``.

    While the wrapped function runs, ``op.name`` is pushed onto
    ``dual_object_module.profiled_method_name`` so nested helpers know
    which op is currently being profiled; it is popped again afterwards.
    """

    def deco(f):
        def new_f(*args, **kwargs):
            dual_object_module.profiled_method_name.append(op.name)
            try:
                return f(*args, **kwargs)
            finally:
                # Pop even when f raises; the original version leaked the
                # pushed name onto the stack on any exception.
                dual_object_module.profiled_method_name.pop()

        return new_f

    return deco
| [
"oneflow.test_utils.automated_test_util.torch_flow_dual_object.get_args",
"oneflow.profiler.record_function",
"oneflow.test_utils.automated_test_util.torch_flow_dual_object.profiled_method_name.append",
"oneflow.test_utils.automated_test_util.torch_flow_dual_object.profiled_method_name.pop",
"oneflow.profil... | [((1755, 1841), 'oneflow.support.env_var_util.parse_boolean_from_env', 'flow.support.env_var_util.parse_boolean_from_env', (['"""ONEFLOW_PROFILE_VERBOSE"""', '(False)'], {}), "('ONEFLOW_PROFILE_VERBOSE',\n False)\n", (1803, 1841), True, 'import oneflow as flow\n'), ((1017, 1047), 'functools.reduce', 'functools.reduce', (['compose2', 'fs'], {}), '(compose2, fs)\n', (1033, 1047), False, 'import functools\n'), ((1637, 1680), 'os.getenv', 'os.getenv', (['"""ONEFLOW_PROFILE_WARMUP_NUM"""', '(10)'], {}), "('ONEFLOW_PROFILE_WARMUP_NUM', 10)\n", (1646, 1680), False, 'import os\n'), ((1696, 1738), 'os.getenv', 'os.getenv', (['"""ONEFLOW_PROFILE_RUN_NUM"""', '(1000)'], {}), "('ONEFLOW_PROFILE_RUN_NUM', 1000)\n", (1705, 1738), False, 'import os\n'), ((2086, 2120), 'torch.set_num_threads', 'torch.set_num_threads', (['num_threads'], {}), '(num_threads)\n', (2107, 2120), False, 'import torch\n'), ((2748, 2793), 'torch.profiler.profile', 'torch.profiler.profile', ([], {'activities': 'activities'}), '(activities=activities)\n', (2770, 2793), False, 'import torch\n'), ((3467, 3500), 'oneflow.set_num_threads', 'flow.set_num_threads', (['num_threads'], {}), '(num_threads)\n', (3487, 3500), True, 'import oneflow as flow\n'), ((4071, 4197), 'oneflow.profiler.profile', 'flow.profiler.profile', ([], {'activities': 'activities', 'record_bandwidth_for_cuda': '(flow.profiler.ProfilerActivity.CUDA in activities)'}), '(activities=activities, record_bandwidth_for_cuda=flow\n .profiler.ProfilerActivity.CUDA in activities)\n', (4092, 4197), True, 'import oneflow as flow\n'), ((5114, 5168), 'oneflow.test_utils.automated_test_util.torch_flow_dual_object.get_args', 'dual_object_module.get_args', (['torch_op', '*args'], {}), '(torch_op, *args, **kwargs)\n', (5141, 5168), True, 'import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module\n'), ((5188, 5220), 'oneflow.test_utils.automated_test_util.torch_flow_dual_object.to_string', 
'dual_object_module.to_string', (['op'], {}), '(op)\n', (5216, 5220), True, 'import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module\n'), ((5248, 5293), 'oneflow.test_utils.automated_test_util.torch_flow_dual_object.to_string', 'dual_object_module.to_string', (['*args'], {}), '(*args, **kwargs)\n', (5276, 5293), True, 'import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module\n'), ((2136, 2159), 'torch.get_num_threads', 'torch.get_num_threads', ([], {}), '()\n', (2157, 2159), False, 'import torch\n'), ((2816, 2858), 'torch.profiler.record_function', 'torch.profiler.record_function', (['END_TO_END'], {}), '(END_TO_END)\n', (2846, 2858), False, 'import torch\n'), ((4238, 4279), 'oneflow.profiler.record_function', 'flow.profiler.record_function', (['END_TO_END'], {}), '(END_TO_END)\n', (4267, 4279), True, 'import oneflow as flow\n'), ((6703, 6758), 'oneflow.test_utils.automated_test_util.torch_flow_dual_object.profiled_method_name.append', 'dual_object_module.profiled_method_name.append', (['op.name'], {}), '(op.name)\n', (6749, 6758), True, 'import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module\n'), ((6808, 6853), 'oneflow.test_utils.automated_test_util.torch_flow_dual_object.profiled_method_name.pop', 'dual_object_module.profiled_method_name.pop', ([], {}), '()\n', (6851, 6853), True, 'import oneflow.test_utils.automated_test_util.torch_flow_dual_object as dual_object_module\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.