code stringlengths 118 171k | apis list | extract_api stringlengths 145 164k |
|---|---|---|
# coding=utf-8
# Copyright 2021 The OneFlow Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import oneflow as flow
from oneflow import nn
from libai.utils import distributed as dist
from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity
from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert
from .bert_for_simcse import BertForSimCSE
class Simcse_sup(nn.Module):
    """Supervised SimCSE: a BERT encoder with an MLP projection head.

    Training inputs are expected as consecutive triples
    (anchor, positive, hard negative); evaluation inputs as pairs.
    """
    def __init__(self, cfg):
        super().__init__()
        self.bert = BertForSimCSE(cfg)
        self.mlp = MLPLayer(cfg)
        # One of "cls" / "pooled" / "last-avg" / "first-last-avg"; see pooler().
        self.pooler_type = cfg.pooler_type
        if cfg.pretrained_model_weight is not None:
            load_huggingface_bert(
                self.bert,
                cfg.pretrained_model_weight,
                cfg["hidden_size"],
                cfg["num_attention_heads"],
                cfg["hidden_layers"],
            )
    def pooler(self, inputs, attention_mask):
        """Reduce encoder outputs to one sentence embedding per sample.

        The indexing below suggests inputs[0] = last hidden states,
        inputs[1] = pooled output, inputs[2] = per-layer hidden states —
        TODO confirm against BertForSimCSE's return value.
        """
        if self.pooler_type == "cls":
            # Embedding of the first ([CLS]) token.
            return inputs[0][:, 0]
        elif self.pooler_type == "pooled":
            return inputs[1]
        elif self.pooler_type == "last-avg":
            # Mask-weighted mean over the last hidden layer.
            last_hidden = inputs[0]
            return (last_hidden * attention_mask.unsqueeze(-1)).sum(1) / attention_mask.sum(
                -1
            ).unsqueeze(-1)
        elif self.pooler_type == "first-last-avg":
            # Mask-weighted mean of the average of two hidden layers.
            first_hidden = inputs[2][1]
            last_hidden = inputs[0]
            res = ((first_hidden + last_hidden) / 2.0 * attention_mask.unsqueeze(-1)).sum(
                1
            ) / attention_mask.sum(-1).unsqueeze(-1)
            return res
    def create_use_row(self, labels):
        """Return indices of the first two rows of every consecutive triple
        (anchor and positive); every third row (the hard negative) is dropped."""
        count = 0
        use_row = []
        for row in range(labels.size(0)):
            if count % 2 == 0 and count != 0:
                # Skip this row and restart the keep-2 / skip-1 cycle.
                count = 0
                continue
            use_row.append(row)
            count += 1
        return flow.tensor(use_row, sbp=labels.sbp, placement=labels.placement)
    def forward(self, input_ids, attention_mask, token_type_ids=None, labels=None):
        """Contrastive loss in training mode; pairwise similarity in eval mode.

        Returns {"loss": ...} when training, otherwise {"sim": ..., "labels": labels}
        (the labels argument is passed through unchanged and may be None).
        """
        if self.training:
            # Each sample carries 3 sentences; flatten to (bs * 3, seq_len).
            bs = input_ids.size(0)
            input_ids = input_ids.view(bs * 3, -1)
            attention_mask = attention_mask.view(bs * 3, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            out = self.mlp(out)
            labels = flow.arange(
                out.size(0),
                sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                placement=out.placement,
            )
            use_row = self.create_use_row(labels)
            # Map each kept row to the column index of its paired sentence.
            labels = (use_row - use_row % 3 * 2) + 1
            sim = cosine_similarity(out.unsqueeze(1), out.unsqueeze(0))
            # Mask the diagonal (self-similarity) with a large negative bias.
            sim = (
                sim
                - flow.eye(
                    out.size(0),
                    sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]),
                    placement=out.placement,
                )
                * 1e12
            )
            sim = flow.index_select(sim, dim=0, index=use_row)
            # Temperature scaling (tau = 0.05).
            sim = sim / 0.05
            loss = nn.CrossEntropyLoss()(sim, labels)
            return {"loss": loss}
        else:
            # Each sample carries 2 sentences; flatten to (bs * 2, seq_len).
            bs = input_ids.size(0)
            input_ids = input_ids.view(bs * 2, -1)
            attention_mask = attention_mask.view(bs * 2, -1)
            out = self.bert(input_ids, attention_mask)
            out = self.pooler(out, attention_mask)
            # NOTE(review): the MLP result is discarded here, unlike in the
            # training branch — confirm this is intentional.
            self.mlp(out)
            out = out.view(bs, 2, -1)
            sent1 = out[:, 0]
            sent2 = out[:, 1]
            sim = cosine_similarity(sent1, sent2)
            sim = sim.to_global(sbp=dist.get_nd_sbp([flow.sbp.broadcast, flow.sbp.broadcast]))
            return {"sim": sim.unsqueeze(1), "labels": labels}
| [
"oneflow.nn.CrossEntropyLoss",
"oneflow.tensor",
"oneflow.index_select"
] | [((1058, 1071), 'projects.SimCSE.modeling.model_utils.MLPLayer', 'MLPLayer', (['cfg'], {}), '(cfg)\n', (1066, 1071), False, 'from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity\n'), ((2422, 2486), 'oneflow.tensor', 'flow.tensor', (['use_row'], {'sbp': 'labels.sbp', 'placement': 'labels.placement'}), '(use_row, sbp=labels.sbp, placement=labels.placement)\n', (2433, 2486), True, 'import oneflow as flow\n'), ((1180, 1316), 'projects.SimCSE.utils.load_huggingface_weight.load_huggingface_bert', 'load_huggingface_bert', (['self.bert', 'cfg.pretrained_model_weight', "cfg['hidden_size']", "cfg['num_attention_heads']", "cfg['hidden_layers']"], {}), "(self.bert, cfg.pretrained_model_weight, cfg[\n 'hidden_size'], cfg['num_attention_heads'], cfg['hidden_layers'])\n", (1201, 1316), False, 'from projects.SimCSE.utils.load_huggingface_weight import load_huggingface_bert\n'), ((3557, 3601), 'oneflow.index_select', 'flow.index_select', (['sim'], {'dim': '(0)', 'index': 'use_row'}), '(sim, dim=0, index=use_row)\n', (3574, 3601), True, 'import oneflow as flow\n'), ((4128, 4159), 'projects.SimCSE.modeling.model_utils.cosine_similarity', 'cosine_similarity', (['sent1', 'sent2'], {}), '(sent1, sent2)\n', (4145, 4159), False, 'from projects.SimCSE.modeling.model_utils import MLPLayer, cosine_similarity\n'), ((3650, 3671), 'oneflow.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3669, 3671), False, 'from oneflow import nn\n'), ((2966, 3023), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (2981, 3023), True, 'from libai.utils import distributed as dist\n'), ((4196, 4253), 'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (4211, 4253), True, 'from libai.utils import distributed as dist\n'), ((3380, 3437), 
'libai.utils.distributed.get_nd_sbp', 'dist.get_nd_sbp', (['[flow.sbp.broadcast, flow.sbp.broadcast]'], {}), '([flow.sbp.broadcast, flow.sbp.broadcast])\n', (3395, 3437), True, 'from libai.utils import distributed as dist\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import uuid
from typing import Callable, Optional, Union
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.session_context as session_ctx
import oneflow.python.framework.compile_context as compile_context
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.local_blob as local_blob_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.watcher as watcher_util
import oneflow.python.framework.typing as oft
import oneflow.python.framework.typing_util as oft_util
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.hob as hob
from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.eager as eager_util
import oneflow
import oneflow._oneflow_internal
from oneflow._oneflow_internal import ConsistentBlob, MirroredBlob
import inspect
import numpy as np
@oneflow_export("watch")
def Watch(
blob_watched: oneflow._oneflow_internal.BlobDesc,
handler_or_prompt: Optional[Union[Callable, str]] = None,
) -> None:
r"""Register callback for a blob. The callback function will be called after the computation produce the blob finishes. We can use it to watch the values of Blob.
Args:
blob_watched: a `Blob`
handler_or_prompt: a function has an argument of a `Blob`
For example:
Example 1:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
def watch_handler(y: tp.Numpy):
print("out", y)
@flow.global_function()
def watch_Job() -> None:
init = flow.constant_initializer(2.5)
variable = flow.get_variable(
"variable-weight",
shape=(5, ),
initializer=init,
trainable=True
)
flow.watch(variable, watch_handler)
checkpoint = flow.train.CheckPoint()
checkpoint.init()
watch_Job()
# out [2.5 2.5 2.5 2.5 2.5]
Example 2:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
def watch_handler(y: tp.Numpy):
print("out", y)
@flow.global_function()
def watch_Job(x: tp.Numpy.Placeholder((1, 3, 2, 2))
) -> None:
initializer = flow.truncated_normal(0.1)
conv2d = flow.layers.conv2d(
x,
filters=3,
kernel_size=1,
strides=1,
padding='SAME',
kernel_initializer=initializer,
name="Conv2d"
)
flow.watch(conv2d, watch_handler)
checkpoint = flow.train.CheckPoint()
checkpoint.init()
x = np.ones(shape=(1, 3, 2, 2)).astype(np.float32)
watch_Job(x)
# out [[[[ 0.03757111 0.03757111]
# [ 0.03757111 0.03757111]]
# [[-0.36131713 -0.36131713]
# [-0.36131713 -0.36131713]]
# [[-0.12266113 -0.12266113]
# [-0.12266113 -0.12266113]]]]
"""
api = enable_if.unique([EagerWatch, LazyWatch])
return api(blob_watched, handler_or_prompt)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerWatch(blob_watched, handler_or_prompt=None):
    """Eager mode: materialize the blob locally and invoke the callback now."""
    callback = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
    eager_local_blob = local_blob_util.MakeLocalBlob4EagerBlob(blob_watched)
    transformed = oft_util.TransformWatchedBlob(eager_local_blob, callback)
    callback(transformed)
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def LazyWatch(blob_watched, handler_or_prompt=None):
    """Lazy mode: register a watch op on a consistent or mirrored blob."""
    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
    if isinstance(blob_watched, ConsistentBlob):
        LazyConsistentWatch(blob_watched, handler)
        return
    if not isinstance(blob_watched, MirroredBlob):
        raise NotImplementedError
    # A mirrored blob is watched through one handler per sub consistent blob.
    sub_handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler)
    for sub_blob, sub_handler in zip(
        blob_watched.sub_consistent_blob_list, sub_handlers
    ):
        assert isinstance(sub_blob, ConsistentBlob)
        LazyConsistentWatch(sub_blob, sub_handler)
def LazyConsistentWatch(blob_watched, handler):
    """Add a ForeignWatch op for *blob_watched* and bind *handler* to it.

    Args:
        blob_watched: consistent blob whose value should be observed.
        handler: callable invoked with the watched value once produced.
    """
    # Fix: removed the dead local `device_name` that read
    # blob_watched.parallel_conf.device_name(0) and was never used.
    handler_uuid = str(uuid.uuid1())
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = id_util.UniqueStr("ForeignWatch_")
    # "in" is a Python keyword, so the proto field must be set via setattr.
    setattr(op_conf.foreign_watch_conf, "in", blob_watched.unique_name)
    op_conf.foreign_watch_conf.handler_uuid = handler_uuid
    # The watch op always runs on CPU device 0.
    with oneflow.scope.placement("cpu", "0:0"):
        compile_context.CurJobAddOp(op_conf)
    watcher_util.BindUuidAndHandler(handler_uuid, blob_watched, handler)
@oneflow_export("watch_diff")
def WatchDiff(
blob_watched: oneflow._oneflow_internal.BlobDesc,
handler_or_prompt: Optional[Union[Callable, str]] = None,
) -> None:
r"""Register callback for gradient of a blob. The callback will be called after the computation produce the gradient blob finishes.
Args:
blob_watched: a `Blob`
handler_or_prompt: a function has an argument of a `Blob`
For example:
Example 1:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
BATCH_SIZE = 20
def watch_diff_handler(blob: tp.Numpy):
print("watch_diff_handler:", blob, blob.shape, blob.dtype)
@flow.global_function(type="train")
def train_job(
images: tp.Numpy.Placeholder((BATCH_SIZE, 1, 28, 28), dtype=flow.float),
labels: tp.Numpy.Placeholder((BATCH_SIZE,), dtype=flow.int32),
) -> tp.Numpy:
initializer = flow.truncated_normal(0.1)
with flow.scope.placement("gpu", "0:0"):
reshape = flow.reshape(images, [images.shape[0], -1])
hidden = flow.layers.dense(
reshape,
512,
activation=flow.nn.relu,
kernel_initializer=initializer,
name="hidden",
)
logits = flow.layers.dense(
hidden, 10, kernel_initializer=initializer, name="output"
)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels, logits, name="softmax_loss")
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)
flow.watch_diff(logits, watch_diff_handler)
return loss
if __name__ == "__main__":
checkpoint = flow.train.CheckPoint()
checkpoint.init()
(train_images, train_labels), (test_images, test_labels) = flow.data.load_mnist(
BATCH_SIZE
)
for i, (images, labels) in enumerate(zip(train_images, train_labels)):
loss = train_job(images, labels)
# watch_diff_handler: [[-1.88834548e-01 2.71021971e-03 2.28271242e-02 7.17673637e-03
# 4.10183379e-03 8.93106461e-02 2.23669074e-02 3.86103359e-03
# 3.12465224e-02 5.23346756e-03] .....
Example 2:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
BATCH_SIZE = 20
def watch_diff_handler(blob: tp.Numpy):
print("watch_diff_handler:", blob)
@flow.global_function(type="train")
def watch_matmul_diff_job(
images: tp.Numpy.Placeholder((3, 3), dtype=flow.float),
) -> None:
with flow.scope.placement("cpu", "0:0"):
weight_initializer = flow.constant_initializer(2)
weight_shape = (3, BATCH_SIZE)
weight = flow.get_variable(
"matmultest-weight",
shape=weight_shape,
initializer=weight_initializer)
output = flow.linalg.matmul(images, weight)
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(output)
flow.watch_diff(weight, watch_diff_handler)
if __name__ == "__main__":
check_point = flow.train.CheckPoint()
check_point.init()
x = np.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]]).astype(np.float32)
watch_matmul_diff_job(x)
# watch_diff_handler: [[3. 3. 3.]
# [3. 3. 3.]
# [3. 3. 3.]]
Example 3:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
def watch_diff_handler(blob: tp.Numpy):
print("watch_diff_handler:", blob, blob.shape, blob.dtype)
@flow.global_function(type="train")
def watch_conv_diff_job(
images: tp.Numpy.Placeholder((1, 1, 4, 4), dtype=flow.float),
) -> None:
with flow.scope.placement("gpu", "0:0"):
weight_shape = (1, 1, 3, 3)
weight_initializer = flow.truncated_normal(0.1)
weight = flow.get_variable(
name="conv-weight",
shape=weight_shape,
initializer=weight_initializer
)
output = flow.nn.conv2d(images, weight, strides=1, padding="VALID")
lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(output)
flow.watch_diff(weight, watch_diff_handler)
if __name__ == "__main__":
check_point = flow.train.CheckPoint()
check_point.init()
x = np.array([[[[ 1., 2., 3., 4.],
[ 5., 6., 7., 8.],
[ 9., 10., 11., 12.],
[13., 14., 15., 16.]]]]).astype(np.float32)
watch_conv_diff_job(x)
# watch_diff_handler: [[[[14. 18. 22.]
# [30. 34. 38.]
# [46. 50. 54.]]]]
"""
api = enable_if.unique([EagerWatchDiff, LazyWatchDiff])
return api(blob_watched, handler_or_prompt)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerWatchDiff(blob_watched, handler_or_prompt=None):
    """Eager mode: register a gradient watcher routed through EagerWatch."""
    # Validates the callback/prompt up front (raises on a bad annotation).
    _CheckOrMakeHandler(blob_watched, handler_or_prompt)
    watcher_uuid = str(uuid.uuid1())
    pair = LbiAndDiffWatcherUuidPair()
    # Copy cfg LBI to proto LBI.
    pair.lbi.op_name = blob_watched.lbi.op_name()
    pair.lbi.blob_name = blob_watched.lbi.blob_name()
    pair.watcher_uuid = watcher_uuid
    c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(pair)
    session = session_ctx.GetDefaultSession()
    session.uuid2watch_handler[watcher_uuid] = lambda x: EagerWatch(
        x, handler_or_prompt
    )
@enable_if.condition(hob.in_global_mode & ~hob.eager_execution_enabled)
def LazyWatchDiff(blob_watched, handler_or_prompt=None):
    """Lazy mode: register a gradient watcher on a consistent or mirrored blob."""
    handler = _CheckOrMakeHandler(blob_watched, handler_or_prompt)
    if isinstance(blob_watched, ConsistentBlob):
        LazyConsistentWatchDiff(blob_watched, handler)
        return
    if not isinstance(blob_watched, MirroredBlob):
        raise NotImplementedError
    # A mirrored blob is watched through one handler per sub consistent blob.
    sub_handlers = _MakeSubConsistentBlobHandlers(blob_watched, handler)
    for sub_blob, sub_handler in zip(
        blob_watched.sub_consistent_blob_list, sub_handlers
    ):
        assert isinstance(sub_blob, ConsistentBlob)
        LazyConsistentWatchDiff(sub_blob, sub_handler)
def LazyConsistentWatchDiff(blob_watched, handler):
    """Register *handler* for the gradient of a single consistent blob."""
    watcher_uuid = str(uuid.uuid1())
    pair = LbiAndDiffWatcherUuidPair()
    # Copy cfg LBI to proto LBI.
    pair.lbi.op_name = blob_watched.lbi.op_name()
    pair.lbi.blob_name = blob_watched.lbi.blob_name()
    pair.watcher_uuid = watcher_uuid
    c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair(pair)
    watcher_util.BindUuidAndHandler(watcher_uuid, blob_watched, handler)
def _CheckOrMakeHandler(blob_watched, handler_or_prompt):
    """Return a validated watch callback.

    A callable argument is annotation-checked and returned as-is; anything
    else is treated as an optional prompt and wrapped in a printing handler.
    """
    if callable(handler_or_prompt):
        sig_params = inspect.signature(handler_or_prompt).parameters
        oft_util.CheckWatchCallbackParameterAnnotation(sig_params)
        # The first (and only) parameter's annotation decides the blob view.
        first_annotation = next(iter(sig_params.values())).annotation
        oft_util.CheckWatchedBlobByAnnotation(blob_watched, first_annotation)
        return handler_or_prompt
    prompt = handler_or_prompt
    def Handler(x: GetTypeAnnotation(blob_watched)):
        if prompt is not None:
            print(str(prompt))
        print(x)
    return Handler
def _MakeSubConsistentBlobHandlers(blob_watched, handler):
    """Build one per-parallel-id handler for every sub consistent blob."""
    assert isinstance(blob_watched, MirroredBlob)
    # All wrappers share a single collector that merges the local blobs.
    shared_collector = _MakeHandler4ParallelIdAndLocalBlob(blob_watched, handler)
    wrappers = []
    for parallel_id in range(len(blob_watched.sub_consistent_blob_list)):
        wrappers.append(
            _WrapperHandler4ParallelIdAndLocalBlob(parallel_id, shared_collector)
        )
    return wrappers
def _WrapperHandler4ParallelIdAndLocalBlob(
parallel_id, handler4parallel_id_and_local_blob
):
return lambda local_blob: handler4parallel_id_and_local_blob(
parallel_id, local_blob
)
def _MakeHandler4ParallelIdAndLocalBlob(blob_watched, handler):
    """Build a (parallel_id, local_blob) collector for a mirrored blob.

    The returned closure buffers one local blob per parallel id and, once
    every sub consistent blob has reported, concatenates the buffered blobs
    along axis 0 and invokes *handler* with the combined local blob.
    """
    parallel_id2consistent_local_blob = {}
    len_sub_remote_blobs = len(blob_watched.sub_consistent_blob_list)
    def HandlerParallelIdAndLocalBlob(parallel_id, local_blob):
        assert parallel_id not in parallel_id2consistent_local_blob
        parallel_id2consistent_local_blob[parallel_id] = local_blob
        if len(parallel_id2consistent_local_blob) != len_sub_remote_blobs:
            return
        # BUG FIX: the list must be indexed by the loop variable `i`; the old
        # code indexed by the last caller's `parallel_id`, so every slot held
        # the same blob instead of the blobs in parallel-id order.
        local_blob_list = [
            parallel_id2consistent_local_blob[i]
            for i in range(len_sub_remote_blobs)
        ]
        local_numpy = local_blob_list[0].numpy()
        if len(local_blob_list) > 1:
            print("WARNING: watch return tensor list will concat as axis = 0.")
            local_numpy_list = [x.numpy() for x in local_blob_list]
            local_numpy = np.concatenate(local_numpy_list, axis=0)
        local_blob = local_blob_util.LocalBlob(local_numpy, blob_watched.is_dynamic)
        handler(oft_util.TransformWatchedBlob(local_blob, handler))
    return HandlerParallelIdAndLocalBlob
def GetTypeAnnotation(blob_watched):
    """Choose the watcher-callback annotation type for *blob_watched*."""
    # TODO(chengcheng): oft.Numpy support dynamic
    return oft.ListNumpy if blob_watched.is_dynamic else oft.Numpy
| [
"oneflow.python.framework.session_context.GetDefaultSession",
"oneflow.python.lib.core.enable_if.unique",
"oneflow.python.framework.typing_util.TransformWatchedBlob",
"oneflow.python.framework.id_util.UniqueStr",
"oneflow.python.framework.compile_context.CurJobAddOp",
"oneflow.core.job.lbi_diff_watcher_in... | [((1696, 1719), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""watch"""'], {}), "('watch')\n", (1710, 1719), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((4034, 4103), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (4053, 4103), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((4363, 4433), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (4382, 4433), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((5608, 5636), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""watch_diff"""'], {}), "('watch_diff')\n", (5622, 5636), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11286, 11355), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (11305, 11355), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((12004, 12074), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & ~hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & ~hob.eager_execution_enabled)\n', (12023, 12074), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((3941, 3982), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerWatch, LazyWatch]'], {}), '([EagerWatch, LazyWatch])\n', (3957, 3982), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((4242, 4295), 'oneflow.python.framework.local_blob.MakeLocalBlob4EagerBlob', 'local_blob_util.MakeLocalBlob4EagerBlob', (['blob_watched'], {}), '(blob_watched)\n', 
(4281, 4295), True, 'import oneflow.python.framework.local_blob as local_blob_util\n'), ((5166, 5193), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (5191, 5193), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((5213, 5247), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ForeignWatch_"""'], {}), "('ForeignWatch_')\n", (5230, 5247), True, 'import oneflow.python.framework.id_util as id_util\n'), ((5536, 5604), 'oneflow.python.framework.watcher.BindUuidAndHandler', 'watcher_util.BindUuidAndHandler', (['handler_uuid', 'blob_watched', 'handler'], {}), '(handler_uuid, blob_watched, handler)\n', (5567, 5604), True, 'import oneflow.python.framework.watcher as watcher_util\n'), ((11185, 11234), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerWatchDiff, LazyWatchDiff]'], {}), '([EagerWatchDiff, LazyWatchDiff])\n', (11201, 11234), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((11537, 11564), 'oneflow.core.job.lbi_diff_watcher_info_pb2.LbiAndDiffWatcherUuidPair', 'LbiAndDiffWatcherUuidPair', ([], {}), '()\n', (11562, 11564), False, 'from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair\n'), ((11766, 11842), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', 'c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', (['lbi_and_uuid'], {}), '(lbi_and_uuid)\n', (11828, 11842), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((12828, 12855), 'oneflow.core.job.lbi_diff_watcher_info_pb2.LbiAndDiffWatcherUuidPair', 'LbiAndDiffWatcherUuidPair', ([], {}), '()\n', (12853, 12855), False, 'from oneflow.core.job.lbi_diff_watcher_info_pb2 import LbiAndDiffWatcherUuidPair\n'), ((13057, 13133), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', 'c_api_util.CurJobBuildAndInferCtx_AddLbiAndDiffWatcherUuidPair', 
(['lbi_and_uuid'], {}), '(lbi_and_uuid)\n', (13119, 13133), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((13138, 13206), 'oneflow.python.framework.watcher.BindUuidAndHandler', 'watcher_util.BindUuidAndHandler', (['handler_uuid', 'blob_watched', 'handler'], {}), '(handler_uuid, blob_watched, handler)\n', (13169, 13206), True, 'import oneflow.python.framework.watcher as watcher_util\n'), ((4308, 4358), 'oneflow.python.framework.typing_util.TransformWatchedBlob', 'oft_util.TransformWatchedBlob', (['local_blob', 'handler'], {}), '(local_blob, handler)\n', (4337, 4358), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((5138, 5150), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (5148, 5150), False, 'import uuid\n'), ((5448, 5485), 'oneflow.scope.placement', 'oneflow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (5471, 5485), False, 'import oneflow\n'), ((5495, 5531), 'oneflow.python.framework.compile_context.CurJobAddOp', 'compile_context.CurJobAddOp', (['op_conf'], {}), '(op_conf)\n', (5522, 5531), True, 'import oneflow.python.framework.compile_context as compile_context\n'), ((11504, 11516), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (11514, 11516), False, 'import uuid\n'), ((11868, 11899), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (11897, 11899), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((12795, 12807), 'uuid.uuid1', 'uuid.uuid1', ([], {}), '()\n', (12805, 12807), False, 'import uuid\n'), ((13380, 13438), 'oneflow.python.framework.typing_util.CheckWatchCallbackParameterAnnotation', 'oft_util.CheckWatchCallbackParameterAnnotation', (['parameters'], {}), '(parameters)\n', (13426, 13438), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((13518, 13581), 'oneflow.python.framework.typing_util.CheckWatchedBlobByAnnotation', 'oft_util.CheckWatchedBlobByAnnotation', 
(['blob_watched', 'annotation'], {}), '(blob_watched, annotation)\n', (13555, 13581), True, 'import oneflow.python.framework.typing_util as oft_util\n'), ((15343, 15406), 'oneflow.python.framework.local_blob.LocalBlob', 'local_blob_util.LocalBlob', (['local_numpy', 'blob_watched.is_dynamic'], {}), '(local_numpy, blob_watched.is_dynamic)\n', (15368, 15406), True, 'import oneflow.python.framework.local_blob as local_blob_util\n'), ((13324, 13360), 'inspect.signature', 'inspect.signature', (['handler_or_prompt'], {}), '(handler_or_prompt)\n', (13341, 13360), False, 'import inspect\n'), ((15281, 15321), 'numpy.concatenate', 'np.concatenate', (['local_numpy_list'], {'axis': '(0)'}), '(local_numpy_list, axis=0)\n', (15295, 15321), True, 'import numpy as np\n'), ((15423, 15473), 'oneflow.python.framework.typing_util.TransformWatchedBlob', 'oft_util.TransformWatchedBlob', (['local_blob', 'handler'], {}), '(local_blob, handler)\n', (15452, 15473), True, 'import oneflow.python.framework.typing_util as oft_util\n')] |
# import tensorflow as tf
import oneflow as flow
# from core.resnet_module import make_bottleneck_layer, make_basic_layer
from core.resnet_module import *
import datetime
# global time=0
class HighResolutionModule(object):
    """One multi-resolution stage of HRNet.

    Holds ``num_branches`` parallel sub-networks at different resolutions
    and fuses their feature maps at the end of the stage (1x1 conv + upsample
    for low-to-high, chained stride-2 convs for high-to-low).
    """
    def __init__(self, num_branches, num_in_channels, num_channels, block, num_blocks, fusion_method, multi_scale_output=True, training=None):
        super(HighResolutionModule, self).__init__()
        self.num_branches = num_branches
        self.num_in_channels = num_in_channels
        self.fusion_method = fusion_method
        self.multi_scale_output = multi_scale_output
        self.num_channels = num_channels
        self.block = block
        # block: "BASIC" or "BOTTLENECK" (see __make_one_branch).
        self.num_blocks = num_blocks
        self.training = training
    def get_output_channels(self):
        # Per-branch output channel counts after this stage.
        return self.num_in_channels
    def __make_branches(self, inputs_list, num_channels, block, num_blocks, training):
        # Build the per-resolution residual branches.
        if self.num_branches == 1:
            return self.__make_one_branch(inputs_list[0][0], block, num_blocks[0], num_channels[0], training=training)
        branch = inputs_list
        for i in range(self.num_branches):
            # NOTE(review): only the diagonal entry branch[i][i] is transformed,
            # and inputs_list is mutated in place — confirm callers expect this.
            branch[i][i] = self.__make_one_branch(branch[i][i], block, num_blocks[i], num_channels[i], training=training)
        return branch
    @staticmethod
    def __make_one_branch(inputs, block, num_blocks, num_channels, stride=1, training=None):
        # Dispatch on the block-type string; returns None for any other value.
        if block == "BASIC":
            return make_basic_layer(inputs, filter_num=num_channels, blocks=num_blocks, stride=stride, training=training)
        elif block == "BOTTLENECK":
            return make_bottleneck_layer(inputs, filter_num=num_channels, blocks=num_blocks, stride=stride, training=training)
    def __make_fusion_layers(self, x_list, training):  # x_list is a list of tensors
        # Timestamp suffix keeps generated op names unique across calls.
        time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')
        if self.num_branches == 1:
            return x_list
        num_branches = self.num_branches
        num_inchannels = self.num_in_channels
        fusion_layers = []
        for i in range(self.num_branches if self.multi_scale_output else 1):
            temp_layer = []
            for j in range(self.num_branches):
                if j > i:
                    # Lower-resolution branch j -> branch i:
                    # 1x1 conv + BN, then upsample by 2**(j-i).
                    with flow.scope.namespace('_fuse_layers' + str(i)+str(j)):
                        temp = _conv2d_layer(name="fuse1"+str(time), input=x_list[j], filters=self.num_in_channels[i], kernel_size=(1, 1), strides=1, padding="SAME", use_bias=False)
                        temp = _batch_norm(inputs=temp, momentum=0.1, epsilon=1e-5, training=training)
                        temp = flow.layers.upsample_2d(x=temp, data_format="NHWC", size=(2**(j-i), 2**(j-i)))
                        temp_layer.append(temp)
                elif j == i:
                    temp_layer.append(x_list[j])
                else:
                    # Higher-resolution branch j -> branch i:
                    # i-j chained stride-2 3x3 convs; ReLU on all but the last.
                    down_sample = []
                    for k in range(i - j):
                        if k == i - j - 1:
                            with flow.scope.namespace('_fuse_layers' + str(i) + str(j)):
                                downsample_out_channels = self.num_in_channels[i]
                                temp = _conv2d_layer(name="fuse11"+str(time), input=x_list[j], filters=downsample_out_channels,
                                                     kernel_size=(3, 3), strides=2, padding="SAME",use_bias=False)
                                temp = _batch_norm(inputs=temp, momentum=0.1, epsilon=1e-5, training=training)
                                down_sample.append(temp)
                        else:
                            with flow.scope.namespace('_fuse_layers' + str(i) + str(j)):
                                downsample_out_channels = self.num_in_channels[j]
                                temp = _conv2d_layer(name="fuse11"+str(time), input=x_list[j], filters=downsample_out_channels,
                                                     kernel_size=(3, 3), strides=2, padding="SAME", use_bias=False)
                                temp = _batch_norm(inputs=temp, momentum=0.1, epsilon=1e-5, training=training)
                                temp = flow.nn.relu(temp)
                                down_sample.append(temp)
                    # NOTE(review): a *list* is appended here while the other
                    # entries are tensors; call() later adds result[i][j]
                    # directly to a tensor — verify this path is exercised.
                    temp_layer.append(down_sample)
            fusion_layers.append(temp_layer)
        return fusion_layers
    def call(self, inputs_list, training=None, **kwargs):  # inputs_list is a list
        """Run the branches, fuse their outputs, and return the fused list."""
        if self.num_branches == 1:
            return self.__make_branches(inputs_list, self.num_channels, self.block, self.num_blocks, training=training)
        x_list = self.__make_branches(inputs_list, self.num_channels, self.block, self.num_blocks, training=training)
        x_fusion = []
        result = self.__make_fusion_layers(x_list, training=training)
        for i in range(len(result)):
            y = x_list[0] if i == 0 else result[i][0]
            for j in range(1, self.num_branches):
                if i == j:
                    y = y + x_list[j]
                else:
                    y = y + result[i][j]
            x_fusion.append(flow.nn.relu(y))
        return x_fusion
class StackLayers(object):
    """Apply a fixed sequence of callable layers in order."""
    def __init__(self, layers):
        super(StackLayers, self).__init__()
        self.layers = layers
    def call(self, inputs, training=None, **kwargs):
        """Thread *inputs* through every layer, forwarding the training flag."""
        out = inputs
        for stage in self.layers:
            out = stage(out, training=training)
        return out
class HRNet(object):
def __init__(self, config):
super(HRNet, self).__init__()
self.config_params = config
# self.conv1 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding="same", use_bias=False)
# self.bn1 = tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5)
# self.conv2 = tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), strides=2, padding="same", use_bias=False)
# self.bn2 = tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5)
# self.layer1 = make_bottleneck_layer(filter_num=64, blocks=4)
# self.transition1 = self.__make_transition_layer(previous_branches_num=1,
# previous_channels=[256],
# current_branches_num=self.config_params.get_stage("s2")[1],
# current_channels=self.config_params.get_stage("s2")[0])
# self.stage2 = self.__make_stages("s2", self.config_params.get_stage("s2")[0])
# self.transition2 = self.__make_transition_layer(previous_branches_num=self.config_params.get_stage("s2")[1],
# previous_channels=self.config_params.get_stage("s2")[0],
# current_branches_num=self.config_params.get_stage("s3")[1],
# current_channels=self.config_params.get_stage("s3")[0])
# self.stage3 = self.__make_stages("s3", self.config_params.get_stage("s3")[0])
# self.transition3 = self.__make_transition_layer(previous_branches_num=self.config_params.get_stage("s3")[1],
# previous_channels=self.config_params.get_stage("s3")[0],
# current_branches_num=self.config_params.get_stage("s4")[1],
# current_channels=self.config_params.get_stage("s4")[0])
# self.stage4 = self.__make_stages("s4", self.config_params.get_stage("s4")[0], False)
# self.conv3 = tf.keras.layers.Conv2D(filters=self.config_params.num_of_joints,
# kernel_size=self.config_params.conv3_kernel,
# strides=1,
# padding="same")
# def __choose_config(self, config_name):
# return get_config_params(config_name)
def __make_stages(self, inputs_list, stage_name, in_channels, multi_scale_output=True, training=None):
stage_info = self.config_params.get_stage(stage_name)
channels, num_branches, num_modules, block, num_blocks, fusion_method = stage_info
# fusion = []
x_list = inputs_list
for i in range(num_modules):
if not multi_scale_output and i == num_modules - 1:
reset_multi_scale_output = False
else:
reset_multi_scale_output = True
module_list = HighResolutionModule(num_branches=num_branches,
num_in_channels=in_channels,
num_channels=channels,
block=block,
num_blocks=num_blocks,
fusion_method=fusion_method,
multi_scale_output=reset_multi_scale_output,
training=training)
x_list = module_list.call(x_list, training=training)
return x_list
@staticmethod
def __make_transition_layer(x_list, previous_branches_num, previous_channels, current_branches_num, current_channels, training=None):
transition_layers = []
# transition = x_list
for i in range(current_branches_num):
if i < previous_branches_num:
if current_channels[i] != previous_channels[i]:
temp = _conv2d_layer(name="trans1"+str(i), input=x_list[-1], filters=current_channels[i], kernel_size=(3, 3),
strides=1, padding="SAME", use_bias=False)
temp = _batch_norm(name="bn1"+str(i),inputs=temp, momentum=0.1, epsilon=1e-5, training=training)
temp = flow.nn.relu(temp)
transition_layers.append(temp)
# transition_layers.append(temp)
# transition_layers.append(
# tf.keras.Sequential([
# tf.keras.layers.Conv2D(filters=current_channels[i], kernel_size=(3, 3), strides=1, padding="same", use_bias=False),
# tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5),
# tf.keras.layers.ReLU()
# ])
# )
else:
# transition_layers.append(x)
transition_layers.append(x_list[i])
else:
down_sampling_layers = []
for j in range(i + 1 - previous_branches_num):
in_channels = previous_channels[-1],
out_channels = current_channels[i] if j == i - previous_branches_num else in_channels
with flow.scope.namespace('transition_layers_'+str(j)):
temp = _conv2d_layer(name="fuse11", input=x_list[-1], filters=out_channels,
kernel_size=(3, 3), strides=2, padding="SAME", use_bias=False)
temp = _batch_norm(name="bn1"+str(i),inputs=temp, momentum=0.1, epsilon=1e-5, training=training)
temp = flow.nn.relu(temp)
down_sampling_layers.append(temp)
# down_sampling_layers.append(
# tf.keras.Sequential([
# tf.keras.layers.Conv2D(filters=out_channels, kernel_size=(3, 3), strides=2,
# padding="same", use_bias=False),
# tf.keras.layers.BatchNormalization(momentum=0.1, epsilon=1e-5),
# tf.keras.layers.ReLU()
# ])
# )
transition_layers.append(down_sampling_layers)
return transition_layers
def call(self, inputs, training=None):#mask=None
name = datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S_%f')
x = _conv2d_layer(name="conv1", input=inputs, filters=64,
kernel_size=(3, 3), strides=2, padding="SAME", use_bias=False)
# x = self.conv1(inputs)
x = _batch_norm(inputs=x, momentum=0.1, epsilon=1e-5, name=name+"bn1", training=training)
# x = self.bn1(x, training=training)
x = flow.nn.relu(x)
# x = tf.nn.relu(x)
x = _conv2d_layer(name="conv2", input=x, filters=64,
kernel_size=(3, 3), strides=2, padding="SAME", use_bias=False)
# x = self.conv2(x)
x = _batch_norm(inputs=x, momentum=0.1, epsilon=1e-5, name=name+"bn2", training=training)
# x = self.bn2(x, training=training)
x = flow.nn.relu(x)
# x = tf.nn.relu(x)
print(x)
x = make_bottleneck_layer(x, filter_num=64, blocks=4, training=training)
# x = self.layer1(x, training=training)
feature_list = []
# for i in range(self.config_params.get_stage("s2")[1]):
feature_list.append(self.__make_transition_layer(x_list=[x],
previous_branches_num=1,
previous_channels=[256],
current_branches_num=self.config_params.get_stage("s2")[1],
current_channels=self.config_params.get_stage("s2")[0],
training=training))
# if result[i] is not None:
# feature_list.append(result[i])
# # if self.transition1[i] is not None:
# # feature_list.append(self.transition1[i](x, training=training))
# else:
# feature_list.append(x)
y_list = self.__make_stages(feature_list, "s2", self.config_params.get_stage("s2")[0], training=training)
# y_list = self.stage2(feature_list, training=training)
feature_list = []
# for i in range(self.config_params.get_stage("s3")[1]):
feature_list.append(self.__make_transition_layer(x_list=y_list,
previous_branches_num=self.config_params.get_stage("s2")[1],
previous_channels=self.config_params.get_stage("s2")[0],
current_branches_num=self.config_params.get_stage("s3")[1],
current_channels=self.config_params.get_stage("s3")[0],
training=training))
# if result[i] is not None:
# feature_list.append(result[i])
# if self.transition2[i] is not None:
# feature_list.append(self.transition2[i](y_list[-1], training=training))
# else:
# feature_list.append(y_list[i])
y_list = self.__make_stages(feature_list, "s3", self.config_params.get_stage("s3")[0], training=training)
# y_list = self.stage3(feature_list, training=training)
feature_list = []
# for i in range(self.config_params.get_stage("s4")[1]):
feature_list.append(self.__make_transition_layer(x_list=y_list,
previous_branches_num=self.config_params.get_stage("s3")[1],
previous_channels=self.config_params.get_stage("s3")[0],
current_branches_num=self.config_params.get_stage("s4")[1],
current_channels=self.config_params.get_stage("s4")[0],
training=training))
# if result[i] is not None:
# feature_list.append(result[i])
# for i in range(self.config_params.get_stage("s4")[1]):
# if self.transition3[i] is not None:
# feature_list.append(self.transition3[i](y_list[-1], training=training))
# else:
# feature_list.append(y_list[i])
y_list = self.__make_stages(feature_list, "s4", self.config_params.get_stage("s4")[0], False, training=training)
# y_list = self.stage4(feature_list, training=training)
outputs = _conv2d_layer(name="conv3",
input=y_list[0],
filters=self.config_params.num_of_joints,
kernel_size=self.config_params.conv3_kernel,
strides=1,
padding="SAME")
# outputs = self.conv3(y_list[0])
return outputs
| [
"oneflow.layers.upsample_2d",
"oneflow.nn.relu"
] | [((15402, 15417), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (15414, 15417), True, 'import oneflow as flow\n'), ((15779, 15794), 'oneflow.nn.relu', 'flow.nn.relu', (['x'], {}), '(x)\n', (15791, 15794), True, 'import oneflow as flow\n'), ((2049, 2072), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2070, 2072), False, 'import datetime\n'), ((7939, 7954), 'oneflow.nn.relu', 'flow.nn.relu', (['y'], {}), '(y)\n', (7951, 7954), True, 'import oneflow as flow\n'), ((15002, 15025), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (15023, 15025), False, 'import datetime\n'), ((12821, 12839), 'oneflow.nn.relu', 'flow.nn.relu', (['temp'], {}), '(temp)\n', (12833, 12839), True, 'import oneflow as flow\n'), ((2854, 2944), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', ([], {'x': 'temp', 'data_format': '"""NHWC"""', 'size': '(2 ** (j - i), 2 ** (j - i))'}), "(x=temp, data_format='NHWC', size=(2 ** (j - i), 2 **\n (j - i)))\n", (2877, 2944), True, 'import oneflow as flow\n'), ((14236, 14254), 'oneflow.nn.relu', 'flow.nn.relu', (['temp'], {}), '(temp)\n', (14248, 14254), True, 'import oneflow as flow\n'), ((5635, 5653), 'oneflow.nn.relu', 'flow.nn.relu', (['temp'], {}), '(temp)\n', (5647, 5653), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
from PIL import Image
import oneflow.typing as oft
def _of_image_decode(images):
image_files = [open(im, "rb") for im in images]
images_bytes = [imf.read() for imf in image_files]
static_shape = (len(images_bytes), max([len(bys) for bys in images_bytes]))
for imf in image_files:
imf.close()
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
func_config.default_logical_view(flow.scope.mirrored_view())
@flow.global_function(function_config=func_config)
def image_decode_job(
images_def: oft.ListListNumpy.Placeholder(shape=static_shape, dtype=flow.int8)
):
images_buffer = flow.tensor_list_to_tensor_buffer(images_def)
decoded_images_buffer = flow.image_decode(images_buffer)
return flow.tensor_buffer_to_tensor_list(
decoded_images_buffer, shape=(640, 640, 3), dtype=flow.uint8
)
images_np_arr = [
np.frombuffer(bys, dtype=np.byte).reshape(1, -1) for bys in images_bytes
]
decoded_images = image_decode_job([images_np_arr]).get().numpy_lists()
return decoded_images[0]
def _compare_jpg_decode_with_pil(test_case, images, print_debug_info=False):
r"""
The jpg image's decoded results with opencv and pil image are slightly different,
their green channels have difference of 1.
"""
of_decoded_images = _of_image_decode(images)
pil_images = [Image.open(image) for image in images]
# convert image to BGR
pil_decoded_images = [np.array(image)[:, :, ::-1] for image in pil_images]
for of_decoded_image, pil_decoded_image in zip(
of_decoded_images, pil_decoded_images
):
of_decoded_image = of_decoded_image.squeeze()
test_case.assertTrue(len(of_decoded_image.shape) == 3)
test_case.assertTrue(len(pil_decoded_image.shape) == 3)
diff = of_decoded_image - pil_decoded_image
diff_index = np.where(diff != 0)
diff_abs_values = diff[diff_index]
if print_debug_info:
print("of_decoded_image:\n", of_decoded_image, of_decoded_image.shape)
print("pil_decoded_image:\n", pil_decoded_image, pil_decoded_image.shape)
print("diff_index:\n", diff_index)
print("diff_abs_values:\n", diff_abs_values)
print(
"of_decoded_image diff:\n",
of_decoded_image[diff_index[0], diff_index[1]],
)
print(
"pil_decoded_image diff:\n",
pil_decoded_image[diff_index[0], diff_index[1]],
)
# only green channel has difference of 1
test_case.assertTrue(np.all(diff_index[-1] == 1))
test_case.assertTrue(np.all(diff_abs_values == 1))
def test_image_decode(test_case):
_compare_jpg_decode_with_pil(
test_case,
[
"/dataset/mscoco_2017/val2017/000000000139.jpg",
"/dataset/mscoco_2017/val2017/000000000632.jpg",
],
# True,
)
| [
"oneflow.global_function",
"oneflow.FunctionConfig",
"oneflow.tensor_buffer_to_tensor_list",
"oneflow.typing.ListListNumpy.Placeholder",
"oneflow.clear_default_session",
"oneflow.tensor_list_to_tensor_buffer",
"oneflow.image_decode",
"oneflow.scope.mirrored_view"
] | [((955, 983), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (981, 983), True, 'import oneflow as flow\n'), ((1002, 1023), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1021, 1023), True, 'import oneflow as flow\n'), ((1141, 1190), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1161, 1190), True, 'import oneflow as flow\n'), ((1107, 1133), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1131, 1133), True, 'import oneflow as flow\n'), ((1335, 1380), 'oneflow.tensor_list_to_tensor_buffer', 'flow.tensor_list_to_tensor_buffer', (['images_def'], {}), '(images_def)\n', (1368, 1380), True, 'import oneflow as flow\n'), ((1413, 1445), 'oneflow.image_decode', 'flow.image_decode', (['images_buffer'], {}), '(images_buffer)\n', (1430, 1445), True, 'import oneflow as flow\n'), ((1461, 1561), 'oneflow.tensor_buffer_to_tensor_list', 'flow.tensor_buffer_to_tensor_list', (['decoded_images_buffer'], {'shape': '(640, 640, 3)', 'dtype': 'flow.uint8'}), '(decoded_images_buffer, shape=(640, 640, 3\n ), dtype=flow.uint8)\n', (1494, 1561), True, 'import oneflow as flow\n'), ((2089, 2106), 'PIL.Image.open', 'Image.open', (['image'], {}), '(image)\n', (2099, 2106), False, 'from PIL import Image\n'), ((2595, 2614), 'numpy.where', 'np.where', (['(diff != 0)'], {}), '(diff != 0)\n', (2603, 2614), True, 'import numpy as np\n'), ((1237, 1303), 'oneflow.typing.ListListNumpy.Placeholder', 'oft.ListListNumpy.Placeholder', ([], {'shape': 'static_shape', 'dtype': 'flow.int8'}), '(shape=static_shape, dtype=flow.int8)\n', (1266, 1303), True, 'import oneflow.typing as oft\n'), ((2181, 2196), 'numpy.array', 'np.array', (['image'], {}), '(image)\n', (2189, 2196), True, 'import numpy as np\n'), ((3324, 3351), 'numpy.all', 'np.all', (['(diff_index[-1] == 1)'], {}), '(diff_index[-1] == 1)\n', (3330, 3351), True, 'import numpy as np\n'), 
((3382, 3410), 'numpy.all', 'np.all', (['(diff_abs_values == 1)'], {}), '(diff_abs_values == 1)\n', (3388, 3410), True, 'import numpy as np\n'), ((1610, 1643), 'numpy.frombuffer', 'np.frombuffer', (['bys'], {'dtype': 'np.byte'}), '(bys, dtype=np.byte)\n', (1623, 1643), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
from datetime import datetime
import numpy
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
_DATA_DIR = "/dataset/PNGS/PNG299/of_record_repeated"
_EVAL_DIR = _DATA_DIR
_TRAIN_DIR = _DATA_DIR
_MODEL_LOAD = "/dataset/PNGS/cnns_model_for_test/inceptionv3/models/of_model"
_MODEL_SAVE_DIR = "./model_save-{}".format(
str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
)
NODE_LIST = "192.168.1.12,192.168.1.14"
class DLNetSpec(object):
def __init__(self, enable_auto_mixed_precision):
self.batch_size = 8
self.data_part_num = 32
self.eval_dir = _DATA_DIR
self.train_dir = _DATA_DIR
self.model_save_dir = _MODEL_SAVE_DIR
self.model_load_dir = _MODEL_LOAD
self.num_nodes = 1
self.gpu_num_per_node = 1
self.iter_num = 10
self.enable_auto_mixed_precision = enable_auto_mixed_precision
parser = argparse.ArgumentParser(description="flags for multi-node and resource")
parser.add_argument("-g", "--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("-i", "--iter_num", type=int, default=10, required=False)
parser.add_argument("-b", "--batch_size", type=int, default=8, required=False)
parser.add_argument(
"-m", "--multinode", default=False, action="store_true", required=False
)
parser.add_argument("-n", "--node_list", type=str, default=NODE_LIST, required=False)
parser.add_argument(
"-s", "--skip_scp_binary", default=False, action="store_true", required=False
)
parser.add_argument(
"-c",
"--scp_binary_without_uuid",
default=False,
action="store_true",
required=False,
)
parser.add_argument(
"-r", "--remote_by_hand", default=False, action="store_true", required=False
)
parser.add_argument("-e", "--eval_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument("-t", "--train_dir", type=str, default=_DATA_DIR, required=False)
parser.add_argument(
"-load", "--model_load_dir", type=str, default=_MODEL_LOAD, required=False
)
parser.add_argument(
"-save", "--model_save_dir", type=str, default=_MODEL_SAVE_DIR, required=False
)
parser.add_argument("-dn", "--data_part_num", type=int, default=32, required=False)
def _conv2d_layer(
name,
input,
filters,
kernel_size=3,
strides=1,
padding="SAME",
data_format="NCHW",
dilation_rate=1,
activation=op_conf_util.kSigmoid,
use_bias=True,
weight_initializer=flow.random_uniform_initializer(),
bias_initializer=flow.constant_initializer(),
):
if isinstance(kernel_size, int):
kernel_size = (kernel_size, kernel_size)
else:
kernel_size = tuple(kernel_size)
weight_shape = (filters, input.shape[1]) + kernel_size
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
dtype=input.dtype,
initializer=weight_initializer,
)
output = flow.nn.conv2d(
input, weight, strides, padding, None, data_format, dilation_rate, name=name
)
if use_bias:
bias = flow.get_variable(
name + "-bias",
shape=(filters,),
dtype=input.dtype,
initializer=bias_initializer,
)
output = flow.nn.bias_add(output, bias, data_format)
if activation is not None:
if activation == op_conf_util.kRelu:
output = flow.math.relu(output)
elif activation == op_conf_util.kSigmoid:
output = flow.math.sigmoid(output)
else:
raise NotImplementedError
return output
def _data_load_layer(args, data_dir):
node_num = args.num_nodes
total_batch_size = args.batch_size * args.gpu_num_per_node * node_num
rgb_mean = [123.68, 116.78, 103.94]
ofrecord = flow.data.ofrecord_reader(
data_dir,
batch_size=total_batch_size,
data_part_num=args.data_part_num,
name="decode",
)
image = flow.data.ofrecord_image_decoder(ofrecord, "encoded", color_space="RGB")
label = flow.data.ofrecord_raw_decoder(
ofrecord, "class/label", shape=(), dtype=flow.int32
)
rsz = flow.image.resize(image, resize_x=299, resize_y=299, color_space="RGB")
normal = flow.image.crop_mirror_normalize(
rsz,
color_space="RGB",
output_layout="NCHW",
mean=rgb_mean,
output_dtype=flow.float,
)
return (normal, label)
def InceptionA(in_blob, index):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
with flow.scope.namespace("branch5x5"):
branch5x5_1 = _conv2d_layer(
"conv0", in_blob, filters=48, kernel_size=1, strides=1, padding="SAME"
)
branch5x5_2 = _conv2d_layer(
"conv1",
branch5x5_1,
filters=64,
kernel_size=5,
strides=1,
padding="SAME",
)
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
branch3x3dbl_2 = _conv2d_layer(
"conv1",
branch3x3dbl_1,
filters=96,
kernel_size=3,
strides=1,
padding="SAME",
)
branch3x3dbl_3 = _conv2d_layer(
"conv2",
branch3x3dbl_2,
filters=96,
kernel_size=3,
strides=1,
padding="SAME",
)
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
strides=1,
padding="SAME",
data_format="NCHW",
name="pool",
)
branch_pool_2 = _conv2d_layer(
"conv",
branch_pool_1,
filters=32 if index == 0 else 64,
kernel_size=1,
strides=1,
padding="SAME",
)
inceptionA_bn = []
inceptionA_bn.append(branch1x1)
inceptionA_bn.append(branch5x5_2)
inceptionA_bn.append(branch3x3dbl_3)
inceptionA_bn.append(branch_pool_2)
mixed_concat = flow.concat(values=inceptionA_bn, axis=1, name="concat")
return mixed_concat
def InceptionB(in_blob, index):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=3, strides=2, padding="VALID"
)
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=64, kernel_size=1, strides=1, padding="SAME"
)
branch3x3dbl_2 = _conv2d_layer(
"conv1",
branch3x3dbl_1,
filters=96,
kernel_size=3,
strides=1,
padding="SAME",
)
branch3x3dbl_3 = _conv2d_layer(
"conv2",
branch3x3dbl_2,
filters=96,
kernel_size=3,
strides=2,
padding="VALID",
)
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
strides=2,
padding="VALID",
data_format="NCHW",
name="pool0",
)
inceptionB_bn = []
inceptionB_bn.append(branch3x3)
inceptionB_bn.append(branch3x3dbl_3)
inceptionB_bn.append(branch_pool)
mixed_concat = flow.concat(values=inceptionB_bn, axis=1, name="concat")
return mixed_concat
def InceptionC(in_blob, index, filters):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
with flow.scope.namespace("branch7x7"):
branch7x7_1 = _conv2d_layer(
"conv0",
in_blob,
filters=filters,
kernel_size=1,
strides=1,
padding="SAME",
)
branch7x7_2 = _conv2d_layer(
"conv1",
branch7x7_1,
filters=filters,
kernel_size=[1, 7],
strides=1,
padding="SAME",
)
branch7x7_3 = _conv2d_layer(
"conv2",
branch7x7_2,
filters=192,
kernel_size=[7, 1],
strides=[1, 1],
padding="SAME",
)
with flow.scope.namespace("branch7x7dbl"):
branch7x7dbl_1 = _conv2d_layer(
"conv0",
in_blob,
filters=filters,
kernel_size=1,
strides=1,
padding="SAME",
)
branch7x7dbl_2 = _conv2d_layer(
"conv1",
branch7x7dbl_1,
filters=filters,
kernel_size=[7, 1],
strides=1,
padding="SAME",
)
branch7x7dbl_3 = _conv2d_layer(
"conv2",
branch7x7dbl_2,
filters=filters,
kernel_size=[1, 7],
strides=1,
padding="SAME",
)
branch7x7dbl_4 = _conv2d_layer(
"conv3",
branch7x7dbl_3,
filters=filters,
kernel_size=[7, 1],
strides=1,
padding="SAME",
)
branch7x7dbl_5 = _conv2d_layer(
"conv4",
branch7x7dbl_4,
filters=192,
kernel_size=[1, 7],
strides=1,
padding="SAME",
)
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
strides=1,
padding="SAME",
data_format="NCHW",
name="pool",
)
branch_pool_2 = _conv2d_layer(
"conv",
branch_pool_1,
filters=192,
kernel_size=[1, 1],
strides=1,
padding="SAME",
)
inceptionC_bn = []
inceptionC_bn.append(branch1x1)
inceptionC_bn.append(branch7x7_3)
inceptionC_bn.append(branch7x7dbl_5)
inceptionC_bn.append(branch_pool_2)
mixed_concat = flow.concat(values=inceptionC_bn, axis=1, name="concat")
return mixed_concat
def InceptionD(in_blob, index):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
branch3x3_2 = _conv2d_layer(
"conv1",
branch3x3_1,
filters=320,
kernel_size=3,
strides=2,
padding="VALID",
)
with flow.scope.namespace("branch7x7x3"):
branch7x7x3_1 = _conv2d_layer(
"conv0", in_blob, filters=192, kernel_size=1, strides=1, padding="SAME"
)
branch7x7x3_2 = _conv2d_layer(
"conv1",
branch7x7x3_1,
filters=192,
kernel_size=[1, 7],
strides=1,
padding="SAME",
)
branch7x7x3_3 = _conv2d_layer(
"conv2",
branch7x7x3_2,
filters=192,
kernel_size=[7, 1],
strides=1,
padding="SAME",
)
branch7x7x3_4 = _conv2d_layer(
"conv3",
branch7x7x3_3,
filters=192,
kernel_size=3,
strides=2,
padding="VALID",
)
with flow.scope.namespace("branch_pool"):
branch_pool = flow.nn.max_pool2d(
in_blob,
ksize=3,
strides=2,
padding="VALID",
data_format="NCHW",
name="pool",
)
inceptionD_bn = []
inceptionD_bn.append(branch3x3_2)
inceptionD_bn.append(branch7x7x3_4)
inceptionD_bn.append(branch_pool)
mixed_concat = flow.concat(values=inceptionD_bn, axis=1, name="concat")
return mixed_concat
def InceptionE(in_blob, index):
with flow.scope.namespace("mixed_{}".format(index)):
with flow.scope.namespace("branch1x1"):
branch1x1 = _conv2d_layer(
"conv0", in_blob, filters=320, kernel_size=1, strides=1, padding="SAME"
)
with flow.scope.namespace("branch3x3"):
branch3x3_1 = _conv2d_layer(
"conv0", in_blob, filters=384, kernel_size=1, strides=1, padding="SAME"
)
branch3x3_2 = _conv2d_layer(
"conv1",
branch3x3_1,
filters=384,
kernel_size=[1, 3],
strides=1,
padding="SAME",
)
branch3x3_3 = _conv2d_layer(
"conv2",
branch3x3_1,
filters=384,
kernel_size=[3, 1],
strides=[1, 1],
padding="SAME",
)
inceptionE_1_bn = []
inceptionE_1_bn.append(branch3x3_2)
inceptionE_1_bn.append(branch3x3_3)
concat_branch3x3 = flow.concat(
values=inceptionE_1_bn, axis=1, name="concat"
)
with flow.scope.namespace("branch3x3dbl"):
branch3x3dbl_1 = _conv2d_layer(
"conv0", in_blob, filters=448, kernel_size=1, strides=1, padding="SAME"
)
branch3x3dbl_2 = _conv2d_layer(
"conv1",
branch3x3dbl_1,
filters=384,
kernel_size=3,
strides=1,
padding="SAME",
)
branch3x3dbl_3 = _conv2d_layer(
"conv2",
branch3x3dbl_2,
filters=384,
kernel_size=[1, 3],
strides=1,
padding="SAME",
)
branch3x3dbl_4 = _conv2d_layer(
"conv3",
branch3x3dbl_2,
filters=384,
kernel_size=[3, 1],
strides=1,
padding="SAME",
)
inceptionE_2_bn = []
inceptionE_2_bn.append(branch3x3dbl_3)
inceptionE_2_bn.append(branch3x3dbl_4)
concat_branch3x3dbl = flow.concat(
values=inceptionE_2_bn, axis=1, name="concat"
)
with flow.scope.namespace("branch_pool"):
branch_pool_1 = flow.nn.avg_pool2d(
in_blob,
ksize=3,
strides=1,
padding="SAME",
data_format="NCHW",
name="pool",
)
branch_pool_2 = _conv2d_layer(
"conv",
branch_pool_1,
filters=192,
kernel_size=[1, 1],
strides=1,
padding="SAME",
)
inceptionE_total_bn = []
inceptionE_total_bn.append(branch1x1)
inceptionE_total_bn.append(concat_branch3x3)
inceptionE_total_bn.append(concat_branch3x3dbl)
inceptionE_total_bn.append(branch_pool_2)
concat_total = flow.concat(values=inceptionE_total_bn, axis=1, name="concat")
return concat_total
def InceptionV3(images, labels, trainable=True):
conv0 = _conv2d_layer(
"conv0", images, filters=32, kernel_size=3, strides=2, padding="VALID"
)
conv1 = _conv2d_layer(
"conv1", conv0, filters=32, kernel_size=3, strides=1, padding="VALID"
)
conv2 = _conv2d_layer(
"conv2", conv1, filters=64, kernel_size=3, strides=1, padding="SAME"
)
pool1 = flow.nn.max_pool2d(
conv2, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool1"
)
conv3 = _conv2d_layer(
"conv3", pool1, filters=80, kernel_size=1, strides=1, padding="VALID"
)
conv4 = _conv2d_layer(
"conv4", conv3, filters=192, kernel_size=3, strides=1, padding="VALID"
)
pool2 = flow.nn.max_pool2d(
conv4, ksize=3, strides=2, padding="VALID", data_format="NCHW", name="pool2"
)
mixed_0 = InceptionA(pool2, 0)
mixed_1 = InceptionA(mixed_0, 1)
mixed_2 = InceptionA(mixed_1, 2)
mixed_3 = InceptionB(mixed_2, 3)
mixed_4 = InceptionC(mixed_3, 4, 128)
mixed_5 = InceptionC(mixed_4, 5, 160)
mixed_6 = InceptionC(mixed_5, 6, 160)
mixed_7 = InceptionC(mixed_6, 7, 192)
mixed_8 = InceptionD(mixed_7, 8)
mixed_9 = InceptionE(mixed_8, 9)
mixed_10 = InceptionE(mixed_9, 10)
pool3 = flow.nn.avg_pool2d(
mixed_10, ksize=8, strides=1, padding="VALID", data_format="NCHW", name="pool3"
)
with flow.scope.namespace("logits"):
pool3 = flow.reshape(pool3, [pool3.shape[0], -1])
weight = flow.get_variable(
"fc1-weight",
shape=(pool3.shape[1], 1001),
dtype=flow.float,
initializer=flow.truncated_normal(0.816496580927726),
model_name="weight",
)
bias = flow.get_variable(
"fc1-bias",
shape=(1001,),
dtype=flow.float,
initializer=flow.constant_initializer(),
model_name="bias",
)
fc1 = flow.matmul(pool3, weight)
fc1 = flow.nn.bias_add(fc1, bias)
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
labels=labels, logits=fc1, name="softmax_loss"
)
return loss
def main(args):
flow.config.machine_num(args.num_nodes)
flow.config.gpu_device_num(args.gpu_num_per_node)
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.consistent_view())
func_config.default_data_type(flow.float)
func_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)
@flow.global_function(type="train", function_config=func_config)
def TrainNet():
(images, labels) = _data_load_layer(args, args.train_dir)
loss = InceptionV3(images, labels)
flow.optimizer.SGD(
flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
).minimize(loss)
return loss
check_point = flow.train.CheckPoint()
if not args.model_load_dir:
check_point.init()
else:
check_point.load(args.model_load_dir)
num_nodes = args.num_nodes
print(
"Traning inceptionv3: num_gpu_per_node = {}, num_nodes = {}.".format(
args.gpu_num_per_node, num_nodes
)
)
print("{:>12} {:>12} {:>12}".format("iter", "loss type", "loss value"))
loss = []
for i in range(args.iter_num):
train_loss = TrainNet().get().mean()
loss.append(train_loss)
fmt_str = "{:>12} {:>12} {:>12.6f}"
print(fmt_str.format(i, "train loss:", train_loss))
if (i + 1) % 100 == 0:
check_point.save(_MODEL_SAVE_DIR + str(i))
loss_file = "{}n{}c.npy".format(
str(num_nodes), str(args.gpu_num_per_node * num_nodes)
)
loss_path = "./of_loss/inceptionv3"
if not os.path.exists(loss_path):
os.makedirs(loss_path)
numpy.save(os.path.join(loss_path, loss_file), loss)
if __name__ == "__main__":
args = parser.parse_args()
if args.multinode:
flow.env.ctrl_port(12138)
nodes = []
for n in args.node_list.strip().split(","):
addr_dict = {}
addr_dict["addr"] = n
nodes.append(addr_dict)
flow.env.machine(nodes)
if args.remote_by_hand is False:
if args.scp_binary_without_uuid:
flow.deprecated.init_worker(scp_binary=True, use_uuid=False)
elif args.skip_scp_binary:
flow.deprecated.init_worker(scp_binary=False, use_uuid=False)
else:
flow.deprecated.init_worker(scp_binary=True, use_uuid=True)
main(args)
if (
args.multinode
and args.skip_scp_binary is False
and (args.scp_binary_without_uuid is False)
):
flow.deprecated.delete_worker()
| [
"oneflow.config.machine_num",
"oneflow.matmul",
"oneflow.scope.consistent_view",
"oneflow.image.crop_mirror_normalize",
"oneflow.constant_initializer",
"oneflow.env.ctrl_port",
"oneflow.nn.avg_pool2d",
"oneflow.deprecated.delete_worker",
"oneflow.nn.conv2d",
"oneflow.truncated_normal",
"oneflow.... | [((1526, 1598), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for multi-node and resource"""'}), "(description='flags for multi-node and resource')\n", (1549, 1598), False, 'import argparse\n'), ((3062, 3095), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (3093, 3095), True, 'import oneflow as flow\n'), ((3118, 3145), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {}), '()\n', (3143, 3145), True, 'import oneflow as flow\n'), ((3359, 3469), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer'}), "(name + '-weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer)\n", (3376, 3469), True, 'import oneflow as flow\n'), ((3518, 3614), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'None', 'data_format', 'dilation_rate'], {'name': 'name'}), '(input, weight, strides, padding, None, data_format,\n dilation_rate, name=name)\n', (3532, 3614), True, 'import oneflow as flow\n'), ((4364, 4481), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['data_dir'], {'batch_size': 'total_batch_size', 'data_part_num': 'args.data_part_num', 'name': '"""decode"""'}), "(data_dir, batch_size=total_batch_size,\n data_part_num=args.data_part_num, name='decode')\n", (4389, 4481), True, 'import oneflow as flow\n'), ((4529, 4601), 'oneflow.data.ofrecord_image_decoder', 'flow.data.ofrecord_image_decoder', (['ofrecord', '"""encoded"""'], {'color_space': '"""RGB"""'}), "(ofrecord, 'encoded', color_space='RGB')\n", (4561, 4601), True, 'import oneflow as flow\n'), ((4614, 4702), 'oneflow.data.ofrecord_raw_decoder', 'flow.data.ofrecord_raw_decoder', (['ofrecord', '"""class/label"""'], {'shape': '()', 'dtype': 'flow.int32'}), "(ofrecord, 'class/label', shape=(), dtype=\n flow.int32)\n", (4644, 4702), True, 'import 
oneflow as flow\n'), ((4722, 4793), 'oneflow.image.resize', 'flow.image.resize', (['image'], {'resize_x': '(299)', 'resize_y': '(299)', 'color_space': '"""RGB"""'}), "(image, resize_x=299, resize_y=299, color_space='RGB')\n", (4739, 4793), True, 'import oneflow as flow\n'), ((4807, 4930), 'oneflow.image.crop_mirror_normalize', 'flow.image.crop_mirror_normalize', (['rsz'], {'color_space': '"""RGB"""', 'output_layout': '"""NCHW"""', 'mean': 'rgb_mean', 'output_dtype': 'flow.float'}), "(rsz, color_space='RGB', output_layout=\n 'NCHW', mean=rgb_mean, output_dtype=flow.float)\n", (4839, 4930), True, 'import oneflow as flow\n'), ((17381, 17482), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv2'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool1"""'}), "(conv2, ksize=3, strides=2, padding='VALID', data_format=\n 'NCHW', name='pool1')\n", (17399, 17482), True, 'import oneflow as flow\n'), ((17727, 17828), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv4'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool2"""'}), "(conv4, ksize=3, strides=2, padding='VALID', data_format=\n 'NCHW', name='pool2')\n", (17745, 17828), True, 'import oneflow as flow\n'), ((18277, 18380), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['mixed_10'], {'ksize': '(8)', 'strides': '(1)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool3"""'}), "(mixed_10, ksize=8, strides=1, padding='VALID',\n data_format='NCHW', name='pool3')\n", (18295, 18380), True, 'import oneflow as flow\n'), ((19036, 19136), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'fc1', 'name': '"""softmax_loss"""'}), "(labels=labels, logits=fc1,\n name='softmax_loss')\n", (19084, 19136), True, 'import oneflow as flow\n'), ((19185, 19224), 'oneflow.config.machine_num', 
'flow.config.machine_num', (['args.num_nodes'], {}), '(args.num_nodes)\n', (19208, 19224), True, 'import oneflow as flow\n'), ((19229, 19278), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (19255, 19278), True, 'import oneflow as flow\n'), ((19297, 19318), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (19316, 19318), True, 'import oneflow as flow\n'), ((19516, 19579), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (19536, 19579), True, 'import oneflow as flow\n'), ((19881, 19904), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (19902, 19904), True, 'import oneflow as flow\n'), ((3657, 3761), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-bias')"], {'shape': '(filters,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer'}), "(name + '-bias', shape=(filters,), dtype=input.dtype,\n initializer=bias_initializer)\n", (3674, 3761), True, 'import oneflow as flow\n'), ((3834, 3877), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (3850, 3877), True, 'import oneflow as flow\n'), ((7117, 7173), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionA_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionA_bn, axis=1, name='concat')\n", (7128, 7173), True, 'import oneflow as flow\n'), ((8605, 8661), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionB_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionB_bn, axis=1, name='concat')\n", (8616, 8661), True, 'import oneflow as flow\n'), ((11719, 11775), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionC_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionC_bn, axis=1, name='concat')\n", (11730, 11775), True, 'import oneflow as flow\n'), ((13676, 
13732), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionD_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionD_bn, axis=1, name='concat')\n", (13687, 13732), True, 'import oneflow as flow\n'), ((16898, 16960), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_total_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_total_bn, axis=1, name='concat')\n", (16909, 16960), True, 'import oneflow as flow\n'), ((18400, 18430), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""logits"""'], {}), "('logits')\n", (18420, 18430), True, 'import oneflow as flow\n'), ((18448, 18489), 'oneflow.reshape', 'flow.reshape', (['pool3', '[pool3.shape[0], -1]'], {}), '(pool3, [pool3.shape[0], -1])\n', (18460, 18489), True, 'import oneflow as flow\n'), ((18956, 18982), 'oneflow.matmul', 'flow.matmul', (['pool3', 'weight'], {}), '(pool3, weight)\n', (18967, 18982), True, 'import oneflow as flow\n'), ((18997, 19024), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['fc1', 'bias'], {}), '(fc1, bias)\n', (19013, 19024), True, 'import oneflow as flow\n'), ((19356, 19384), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (19382, 19384), True, 'import oneflow as flow\n'), ((20754, 20779), 'os.path.exists', 'os.path.exists', (['loss_path'], {}), '(loss_path)\n', (20768, 20779), False, 'import os\n'), ((20789, 20811), 'os.makedirs', 'os.makedirs', (['loss_path'], {}), '(loss_path)\n', (20800, 20811), False, 'import os\n'), ((20827, 20861), 'os.path.join', 'os.path.join', (['loss_path', 'loss_file'], {}), '(loss_path, loss_file)\n', (20839, 20861), False, 'import os\n'), ((20960, 20985), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['(12138)'], {}), '(12138)\n', (20978, 20985), True, 'import oneflow as flow\n'), ((21162, 21185), 'oneflow.env.machine', 'flow.env.machine', (['nodes'], {}), '(nodes)\n', (21178, 21185), True, 'import oneflow as flow\n'), ((21716, 21747), 'oneflow.deprecated.delete_worker', 
'flow.deprecated.delete_worker', ([], {}), '()\n', (21745, 21747), True, 'import oneflow as flow\n'), ((3975, 3997), 'oneflow.math.relu', 'flow.math.relu', (['output'], {}), '(output)\n', (3989, 3997), True, 'import oneflow as flow\n'), ((5104, 5137), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch1x1"""'], {}), "('branch1x1')\n", (5124, 5137), True, 'import oneflow as flow\n'), ((5292, 5325), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch5x5"""'], {}), "('branch5x5')\n", (5312, 5325), True, 'import oneflow as flow\n'), ((5709, 5745), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (5729, 5745), True, 'import oneflow as flow\n'), ((6371, 6406), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (6391, 6406), True, 'import oneflow as flow\n'), ((6436, 6537), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (6454, 6537), True, 'import oneflow as flow\n'), ((7302, 7335), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (7322, 7335), True, 'import oneflow as flow\n'), ((7492, 7528), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (7512, 7528), True, 'import oneflow as flow\n'), ((8155, 8190), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (8175, 8190), True, 'import oneflow as flow\n'), ((8218, 8320), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool0"""'}), "(in_blob, ksize=3, strides=2, padding='VALID',\n data_format='NCHW', name='pool0')\n", (8236, 
8320), True, 'import oneflow as flow\n'), ((8799, 8832), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch1x1"""'], {}), "('branch1x1')\n", (8819, 8832), True, 'import oneflow as flow\n'), ((8988, 9021), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7"""'], {}), "('branch7x7')\n", (9008, 9021), True, 'import oneflow as flow\n'), ((9739, 9775), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7dbl"""'], {}), "('branch7x7dbl')\n", (9759, 9775), True, 'import oneflow as flow\n'), ((10989, 11024), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (11009, 11024), True, 'import oneflow as flow\n'), ((11054, 11155), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (11072, 11155), True, 'import oneflow as flow\n'), ((11904, 11937), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (11924, 11937), True, 'import oneflow as flow\n'), ((12324, 12359), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch7x7x3"""'], {}), "('branch7x7x3')\n", (12344, 12359), True, 'import oneflow as flow\n'), ((13226, 13261), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (13246, 13261), True, 'import oneflow as flow\n'), ((13289, 13390), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(2)', 'padding': '"""VALID"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=2, padding='VALID',\n data_format='NCHW', name='pool')\n", (13307, 13390), True, 'import oneflow as flow\n'), ((13861, 13894), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch1x1"""'], {}), "('branch1x1')\n", (13881, 13894), True, 
'import oneflow as flow\n'), ((14050, 14083), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3"""'], {}), "('branch3x3')\n", (14070, 14083), True, 'import oneflow as flow\n'), ((14859, 14917), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_1_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_1_bn, axis=1, name='concat')\n", (14870, 14917), True, 'import oneflow as flow\n'), ((14961, 14997), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch3x3dbl"""'], {}), "('branch3x3dbl')\n", (14981, 14997), True, 'import oneflow as flow\n'), ((16026, 16084), 'oneflow.concat', 'flow.concat', ([], {'values': 'inceptionE_2_bn', 'axis': '(1)', 'name': '"""concat"""'}), "(values=inceptionE_2_bn, axis=1, name='concat')\n", (16037, 16084), True, 'import oneflow as flow\n'), ((16128, 16163), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""branch_pool"""'], {}), "('branch_pool')\n", (16148, 16163), True, 'import oneflow as flow\n'), ((16193, 16294), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['in_blob'], {'ksize': '(3)', 'strides': '(1)', 'padding': '"""SAME"""', 'data_format': '"""NCHW"""', 'name': '"""pool"""'}), "(in_blob, ksize=3, strides=1, padding='SAME', data_format\n ='NCHW', name='pool')\n", (16211, 16294), True, 'import oneflow as flow\n'), ((971, 985), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (983, 985), False, 'from datetime import datetime\n'), ((4069, 4094), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['output'], {}), '(output)\n', (4086, 4094), True, 'import oneflow as flow\n'), ((18648, 18688), 'oneflow.truncated_normal', 'flow.truncated_normal', (['(0.816496580927726)'], {}), '(0.816496580927726)\n', (18669, 18688), True, 'import oneflow as flow\n'), ((18872, 18899), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {}), '()\n', (18897, 18899), True, 'import oneflow as flow\n'), ((21288, 21348), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', 
([], {'scp_binary': '(True)', 'use_uuid': '(False)'}), '(scp_binary=True, use_uuid=False)\n', (21315, 21348), True, 'import oneflow as flow\n'), ((19749, 19804), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (19790, 19804), True, 'import oneflow as flow\n'), ((21404, 21465), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', ([], {'scp_binary': '(False)', 'use_uuid': '(False)'}), '(scp_binary=False, use_uuid=False)\n', (21431, 21465), True, 'import oneflow as flow\n'), ((21500, 21559), 'oneflow.deprecated.init_worker', 'flow.deprecated.init_worker', ([], {'scp_binary': '(True)', 'use_uuid': '(True)'}), '(scp_binary=True, use_uuid=True)\n', (21527, 21559), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import imp
import os
import sys
import numpy
import oneflow
from absl import app, flags
FLAGS = flags.FLAGS
flags.DEFINE_string("python_bin", "python3", "python binary program name or filepath.")
flags.DEFINE_boolean(
"enable_auto_mixed_precision",
False,
"automatically change float net to mixed precision net",
)
class TestNetMixin:
"""
Base Tester
"""
def setUp(self):
self.net = ""
self.tf_loss_dir = ""
self.of_loss_dir = ""
self.num_iter = 10
self.set_params()
oneflow.clear_default_session()
def set_params(self):
pass
def assert_tolerance_4_mixed_precision(self):
raise AssertionError
def run_net(self, num_gpu_per_node, num_node=1, node_list=""):
net_modudle = _Import(self.net)
spec = net_modudle.DLNetSpec(FLAGS.enable_auto_mixed_precision)
spec.num_nodes = num_node
spec.gpu_num_per_node = num_gpu_per_node
net_modudle.main(spec)
return
if num_node > 1:
os.system(
"{} {}.py -g {} -m -n {}".format(
FLAGS.python_bin, self.net, num_gpu_per_node, node_list
)
)
else:
os.system(
"{} {}.py -g {}".format(FLAGS.python_bin, self.net, num_gpu_per_node)
)
def load_tf_loss(self):
tf_loss = numpy.load(os.path.join(self.tf_loss_dir, "1n1c.npy"))
return tf_loss[0 : self.num_iter]
def load_of_loss(self, test_type):
path = os.path.join(self.of_loss_dir, test_type + ".npy")
if os.path.exists(path):
of_loss = numpy.load(path)
else:
of_loss = numpy.zeros(self.num_iter)
return of_loss[0 : self.num_iter]
def print_and_check_result(self, result_name):
loss_dict = {}
loss_dict["tensorflow"] = self.load_tf_loss()
loss_dict["oneflow"] = self.load_of_loss(result_name)
print("==".ljust(64, "="))
print(" ".ljust(2, " ") + self.net + " loss report")
print("==".ljust(64, "="))
fmt_str = "{:>6} {:>12} {:>12}"
print(fmt_str.format("iter", "tensorflow", "oneflow-" + result_name))
for i in range(self.num_iter):
fmt_str = "{:>6} {:>12.6f} {:>12.6f}"
print(
fmt_str.format(i, loss_dict["tensorflow"][i], loss_dict["oneflow"][i])
)
if FLAGS.enable_auto_mixed_precision:
tolerance = self.assert_tolerance_4_mixed_precision()
rtol = tolerance["rtol"]
atol = tolerance["atol"]
print(
"assert tolerance for mixed_precision are: rtol", rtol, ", atol", atol
)
self.assertTrue(
numpy.allclose(
loss_dict["tensorflow"], loss_dict["oneflow"], rtol=rtol, atol=atol
)
)
else:
self.assertTrue(
numpy.allclose(loss_dict["tensorflow"], loss_dict["oneflow"])
)
class TestAlexNetMixin(TestNetMixin):
"""
AlexNet Tester
"""
def set_params(self):
self.net = "alexnet"
self.tf_loss_dir = os.path.join(
"/dataset/PNGS/cnns_model_for_test/tf_loss", self.net
)
self.of_loss_dir = os.path.join("./of_loss", self.net)
def assert_tolerance_4_mixed_precision(self):
return {"rtol": 1e-5, "atol": 1e-2}
class TestResNet50Mixin(TestNetMixin):
"""
AlexNet Tester
"""
def set_params(self):
self.net = "resnet50"
self.tf_loss_dir = os.path.join(
"/dataset/PNGS/cnns_model_for_test/tf_loss", self.net
)
self.of_loss_dir = os.path.join("./of_loss", self.net)
def assert_tolerance_4_mixed_precision(self):
return {"rtol": 1e-8, "atol": 1e-5}
class TestVgg16Mixin(TestNetMixin):
"""
Vgg16 Tester
"""
def set_params(self):
self.net = "vgg16"
self.tf_loss_dir = os.path.join(
"/dataset/PNGS/cnns_model_for_test/tf_loss", self.net
)
self.of_loss_dir = os.path.join("./of_loss", self.net)
def assert_tolerance_4_mixed_precision(self):
return {"rtol": 1e-4, "atol": 1e-1} # big tolerance due to running ci on 1080ti
class TestInceptionV3Mixin(TestNetMixin):
"""
InceptionV3 Tester
"""
def set_params(self):
self.net = "inceptionv3"
self.tf_loss_dir = os.path.join(
"/dataset/PNGS/cnns_model_for_test/tf_loss", self.net
)
self.of_loss_dir = os.path.join("./of_loss", self.net)
def assert_tolerance_4_mixed_precision(self):
return {"rtol": 1e-5, "atol": 1e-2}
def _Import(name, globals=None, locals=None, fromlist=None):
# Fast path: see if the module has already been imported.
try:
return sys.modules[name]
except KeyError:
pass
# If any of the following calls raises an exception,
# there's a problem we can't handle -- let the caller handle it.
fp, pathname, description = imp.find_module(name)
try:
return imp.load_module(name, fp, pathname, description)
finally:
# Since we may exit via an exception, close fp explicitly.
if fp:
fp.close()
| [
"oneflow.clear_default_session"
] | [((701, 792), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""python_bin"""', '"""python3"""', '"""python binary program name or filepath."""'], {}), "('python_bin', 'python3',\n 'python binary program name or filepath.')\n", (720, 792), False, 'from absl import app, flags\n'), ((789, 908), 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""enable_auto_mixed_precision"""', '(False)', '"""automatically change float net to mixed precision net"""'], {}), "('enable_auto_mixed_precision', False,\n 'automatically change float net to mixed precision net')\n", (809, 908), False, 'from absl import app, flags\n'), ((5674, 5695), 'imp.find_module', 'imp.find_module', (['name'], {}), '(name)\n', (5689, 5695), False, 'import imp\n'), ((1137, 1168), 'oneflow.clear_default_session', 'oneflow.clear_default_session', ([], {}), '()\n', (1166, 1168), False, 'import oneflow\n'), ((2140, 2190), 'os.path.join', 'os.path.join', (['self.of_loss_dir', "(test_type + '.npy')"], {}), "(self.of_loss_dir, test_type + '.npy')\n", (2152, 2190), False, 'import os\n'), ((2202, 2222), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (2216, 2222), False, 'import os\n'), ((3800, 3867), 'os.path.join', 'os.path.join', (['"""/dataset/PNGS/cnns_model_for_test/tf_loss"""', 'self.net'], {}), "('/dataset/PNGS/cnns_model_for_test/tf_loss', self.net)\n", (3812, 3867), False, 'import os\n'), ((3917, 3952), 'os.path.join', 'os.path.join', (['"""./of_loss"""', 'self.net'], {}), "('./of_loss', self.net)\n", (3929, 3952), False, 'import os\n'), ((4206, 4273), 'os.path.join', 'os.path.join', (['"""/dataset/PNGS/cnns_model_for_test/tf_loss"""', 'self.net'], {}), "('/dataset/PNGS/cnns_model_for_test/tf_loss', self.net)\n", (4218, 4273), False, 'import os\n'), ((4323, 4358), 'os.path.join', 'os.path.join', (['"""./of_loss"""', 'self.net'], {}), "('./of_loss', self.net)\n", (4335, 4358), False, 'import os\n'), ((4604, 4671), 'os.path.join', 'os.path.join', 
(['"""/dataset/PNGS/cnns_model_for_test/tf_loss"""', 'self.net'], {}), "('/dataset/PNGS/cnns_model_for_test/tf_loss', self.net)\n", (4616, 4671), False, 'import os\n'), ((4721, 4756), 'os.path.join', 'os.path.join', (['"""./of_loss"""', 'self.net'], {}), "('./of_loss', self.net)\n", (4733, 4756), False, 'import os\n'), ((5065, 5132), 'os.path.join', 'os.path.join', (['"""/dataset/PNGS/cnns_model_for_test/tf_loss"""', 'self.net'], {}), "('/dataset/PNGS/cnns_model_for_test/tf_loss', self.net)\n", (5077, 5132), False, 'import os\n'), ((5182, 5217), 'os.path.join', 'os.path.join', (['"""./of_loss"""', 'self.net'], {}), "('./of_loss', self.net)\n", (5194, 5217), False, 'import os\n'), ((5721, 5769), 'imp.load_module', 'imp.load_module', (['name', 'fp', 'pathname', 'description'], {}), '(name, fp, pathname, description)\n', (5736, 5769), False, 'import imp\n'), ((1999, 2041), 'os.path.join', 'os.path.join', (['self.tf_loss_dir', '"""1n1c.npy"""'], {}), "(self.tf_loss_dir, '1n1c.npy')\n", (2011, 2041), False, 'import os\n'), ((2246, 2262), 'numpy.load', 'numpy.load', (['path'], {}), '(path)\n', (2256, 2262), False, 'import numpy\n'), ((2299, 2325), 'numpy.zeros', 'numpy.zeros', (['self.num_iter'], {}), '(self.num_iter)\n', (2310, 2325), False, 'import numpy\n'), ((3373, 3460), 'numpy.allclose', 'numpy.allclose', (["loss_dict['tensorflow']", "loss_dict['oneflow']"], {'rtol': 'rtol', 'atol': 'atol'}), "(loss_dict['tensorflow'], loss_dict['oneflow'], rtol=rtol,\n atol=atol)\n", (3387, 3460), False, 'import numpy\n'), ((3568, 3629), 'numpy.allclose', 'numpy.allclose', (["loss_dict['tensorflow']", "loss_dict['oneflow']"], {}), "(loss_dict['tensorflow'], loss_dict['oneflow'])\n", (3582, 3629), False, 'import numpy\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import tempfile
import os
import numpy as np
from oneflow.test_utils.test_util import GenArgDict
from optimizer_test_util import clip_grad_norm_np
import oneflow as flow
from oneflow.nn.parameter import Parameter
def compare_with_numpy_adam(
test_case,
weight_decay,
scale,
learning_rate,
train_iters,
do_bias_correction,
beta1,
beta2,
):
num_rows = 500
embedding_size = 128
model_shape = (num_rows, embedding_size)
line_size = embedding_size * 3
num_valid_seq = np.random.randint(1, num_rows, (train_iters))
skip_if_seq = [np.random.randint(2) for i in range(train_iters)]
random_grad_seq = []
for _ in range(train_iters):
random_grad_seq.append(np.random.uniform(size=model_shape).astype(np.float32))
init_value = np.random.uniform(size=(num_rows, line_size)).astype(np.float32)
down_scale_by = 10
epsilon = 1e-5
def adam_by_oneflow():
unique_embeddings_tensor = flow.tensor(init_value, requires_grad=False).to(
"cuda"
)
lr_tensor = flow.tensor(
np.array(learning_rate).reshape(1,).astype(np.float32)
).to("cuda")
down_scale_by_tensor = flow.tensor(
np.array(down_scale_by).astype(np.float32)
).to("cuda")
def train_one_iter(
num_valid,
unique_embeddings,
embedding_grad,
skip_if,
bias_correction1,
bias_correction2,
):
return flow._C.one_embedding_adam_update(
num_valid,
unique_embeddings,
embedding_grad,
lr_tensor,
down_scale_by_tensor,
skip_if,
bias_correction1,
bias_correction2,
scale,
weight_decay,
beta1,
beta2,
epsilon,
do_bias_correction,
)
for i in range(1, train_iters):
num_valid_tensor = flow.tensor(
np.array(num_valid_seq[i]).reshape(1,).astype(np.int32)
).to("cuda")
grad_tensor = flow.tensor(random_grad_seq[i]).to("cuda")
skip_if_tensor = flow.tensor(
np.array(skip_if_seq[i]).reshape(1,).astype(np.int64)
).to("cuda")
if do_bias_correction:
bias_correction1 = 1.0 - np.power(beta1, i)
bias_correction2 = 1.0 - np.power(beta2, i)
bias_correction1_tensor = flow.tensor(
np.array(bias_correction1).reshape(1,).astype(np.float32)
).to("cuda")
bias_correction2_tensor = flow.tensor(
np.array(bias_correction2).reshape(1,).astype(np.float32)
).to("cuda")
else:
bias_correction1_tensor = None
bias_correction2_tensor = None
updated_tensor = train_one_iter(
num_valid_tensor,
unique_embeddings_tensor,
grad_tensor,
skip_if_tensor,
bias_correction1_tensor,
bias_correction2_tensor,
)
unique_embeddings_tensor[0 : num_valid_seq[i]] = updated_tensor[
0 : num_valid_seq[i]
]
return unique_embeddings_tensor
def adam_by_numpy():
x = init_value[:, 0:embedding_size]
m = init_value[:, embedding_size : 2 * embedding_size]
v = init_value[:, 2 * embedding_size : 3 * embedding_size]
def np_train_one_iter(step, num_valid, grad, model, state_m, state_v):
grad[0:num_valid] = grad[0:num_valid] * (scale / down_scale_by)
bias_correction1 = 1.0
bias_correction2 = 1.0
if do_bias_correction:
bias_correction1 = 1.0 - np.power(beta1, step)
bias_correction2 = 1.0 - np.power(beta2, step)
state_m[0:num_valid] = (
beta1 * state_m[0:num_valid] + (1 - beta1) * grad[0:num_valid]
)
state_v[0:num_valid] = (
beta2 * state_v[0:num_valid]
+ (1 - beta2) * grad[0:num_valid] * grad[0:num_valid]
)
denom = np.sqrt(state_v[0:num_valid]) / np.sqrt(bias_correction2) + epsilon
model[0:num_valid] = (
model[0:num_valid]
- ((learning_rate / bias_correction1) * state_m[0:num_valid] / denom)
- learning_rate * weight_decay * model[0:num_valid]
)
return (model, state_m, state_v)
for i in range(1, train_iters): # if step = 0, bias_correction2 is 0
if skip_if_seq[i] > 0:
pass
else:
(x, m, v) = np_train_one_iter(
i, int(num_valid_seq[i]), random_grad_seq[i], x, m, v
)
return x, m, v
oneflow_res = adam_by_oneflow().numpy()
of_model = oneflow_res[:, 0:embedding_size]
of_m = oneflow_res[:, embedding_size : 2 * embedding_size]
of_v = oneflow_res[:, 2 * embedding_size : 3 * embedding_size]
np_model, np_m, np_v = adam_by_numpy()
test_case.assertTrue(
np.allclose(of_model.flatten(), np_model.flatten(), rtol=0.001, atol=0.001)
)
test_case.assertTrue(
np.allclose(of_m.flatten(), np_m.flatten(), rtol=0.001, atol=0.001)
)
test_case.assertTrue(
np.allclose(of_v.flatten(), np_v.flatten(), rtol=0.001, atol=0.001)
)
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestOptimizers(flow.unittest.TestCase):
def test_one_embedding_adam(test_case):
arg_dict = OrderedDict()
arg_dict["weight_decay"] = [0, 0.1]
arg_dict["scale"] = [1, 0.1]
arg_dict["learning_rate"] = [1, 1.5]
arg_dict["train_iters"] = [10]
arg_dict["do_bias_correction"] = [True, False]
arg_dict["beta1"] = [0.9, 0.8]
arg_dict["beta2"] = [0.9, 0.8]
for arg in GenArgDict(arg_dict):
compare_with_numpy_adam(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow._C.one_embedding_adam_update",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor",
"oneflow.test_utils.test_util.GenArgDict"
] | [((6290, 6322), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (6320, 6322), True, 'import oneflow as flow\n'), ((1163, 1206), 'numpy.random.randint', 'np.random.randint', (['(1)', 'num_rows', 'train_iters'], {}), '(1, num_rows, train_iters)\n', (1180, 1206), True, 'import numpy as np\n'), ((6230, 6264), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (6239, 6264), False, 'import os\n'), ((6873, 6888), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6886, 6888), False, 'import unittest\n'), ((1228, 1248), 'numpy.random.randint', 'np.random.randint', (['(2)'], {}), '(2)\n', (1245, 1248), True, 'import numpy as np\n'), ((6432, 6445), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6443, 6445), False, 'from collections import OrderedDict\n'), ((6764, 6784), 'oneflow.test_utils.test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (6774, 6784), False, 'from oneflow.test_utils.test_util import GenArgDict\n'), ((1442, 1487), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(num_rows, line_size)'}), '(size=(num_rows, line_size))\n', (1459, 1487), True, 'import numpy as np\n'), ((2154, 2387), 'oneflow._C.one_embedding_adam_update', 'flow._C.one_embedding_adam_update', (['num_valid', 'unique_embeddings', 'embedding_grad', 'lr_tensor', 'down_scale_by_tensor', 'skip_if', 'bias_correction1', 'bias_correction2', 'scale', 'weight_decay', 'beta1', 'beta2', 'epsilon', 'do_bias_correction'], {}), '(num_valid, unique_embeddings,\n embedding_grad, lr_tensor, down_scale_by_tensor, skip_if,\n bias_correction1, bias_correction2, scale, weight_decay, beta1, beta2,\n epsilon, do_bias_correction)\n', (2187, 2387), True, 'import oneflow as flow\n'), ((1613, 1657), 'oneflow.tensor', 'flow.tensor', (['init_value'], {'requires_grad': '(False)'}), '(init_value, requires_grad=False)\n', (1624, 1657), True, 'import oneflow as flow\n'), ((1368, 1403), 
'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'model_shape'}), '(size=model_shape)\n', (1385, 1403), True, 'import numpy as np\n'), ((2823, 2854), 'oneflow.tensor', 'flow.tensor', (['random_grad_seq[i]'], {}), '(random_grad_seq[i])\n', (2834, 2854), True, 'import oneflow as flow\n'), ((3079, 3097), 'numpy.power', 'np.power', (['beta1', 'i'], {}), '(beta1, i)\n', (3087, 3097), True, 'import numpy as np\n'), ((3139, 3157), 'numpy.power', 'np.power', (['beta2', 'i'], {}), '(beta2, i)\n', (3147, 3157), True, 'import numpy as np\n'), ((4544, 4565), 'numpy.power', 'np.power', (['beta1', 'step'], {}), '(beta1, step)\n', (4552, 4565), True, 'import numpy as np\n'), ((4607, 4628), 'numpy.power', 'np.power', (['beta2', 'step'], {}), '(beta2, step)\n', (4615, 4628), True, 'import numpy as np\n'), ((4946, 4975), 'numpy.sqrt', 'np.sqrt', (['state_v[0:num_valid]'], {}), '(state_v[0:num_valid])\n', (4953, 4975), True, 'import numpy as np\n'), ((4978, 5003), 'numpy.sqrt', 'np.sqrt', (['bias_correction2'], {}), '(bias_correction2)\n', (4985, 5003), True, 'import numpy as np\n'), ((1868, 1891), 'numpy.array', 'np.array', (['down_scale_by'], {}), '(down_scale_by)\n', (1876, 1891), True, 'import numpy as np\n'), ((1736, 1759), 'numpy.array', 'np.array', (['learning_rate'], {}), '(learning_rate)\n', (1744, 1759), True, 'import numpy as np\n'), ((2716, 2742), 'numpy.array', 'np.array', (['num_valid_seq[i]'], {}), '(num_valid_seq[i])\n', (2724, 2742), True, 'import numpy as np\n'), ((2924, 2948), 'numpy.array', 'np.array', (['skip_if_seq[i]'], {}), '(skip_if_seq[i])\n', (2932, 2948), True, 'import numpy as np\n'), ((3233, 3259), 'numpy.array', 'np.array', (['bias_correction1'], {}), '(bias_correction1)\n', (3241, 3259), True, 'import numpy as np\n'), ((3395, 3421), 'numpy.array', 'np.array', (['bias_correction2'], {}), '(bias_correction2)\n', (3403, 3421), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import Args, GenArgDict
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_data_type(flow.float)
def test_repeat_acc(test_case, device_type, shape, dtype, acc_num):
flow.clear_default_session()
if flow.eager_execution_enabled():
return
@flow.global_function(function_config=func_config)
def RepeatAccJob(a: oft.Numpy.Placeholder(shape)):
if dtype == "float16":
return flow.cast(
flow.acc(flow.repeat(flow.cast(a, flow.float16), acc_num), acc_num),
flow.float,
)
else:
return flow.acc(flow.repeat(a, acc_num), acc_num)
x = np.random.rand(*shape).astype(np.float32)
y = RepeatAccJob(x).get().numpy()
z = x * acc_num
if dtype == "float16":
z = x.astype(np.float16) * acc_num
z = z.astype(np.float32)
test_case.assertTrue(np.allclose(y, z, rtol=1e-05, atol=1e-05))
@flow.unittest.skip_unless_1n1d()
class TestRepeatAcc(flow.unittest.TestCase):
def test_case(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu", "cpu"]
arg_dict["shape"] = [(1024, 1024, 4)]
arg_dict["dtype"] = ["float16", "float32", "double"]
arg_dict["acc_num"] = [5]
for arg in GenArgDict(arg_dict):
if arg["device_type"] == "cpu" and arg["dtype"] == "float16":
continue
test_repeat_acc(test_case, **arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.repeat",
"oneflow.compatible.single_client.typing.Numpy.Placeholder",
"oneflow.compatible.single_client.cast",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_client.global_function",
... | [((879, 900), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (898, 900), True, 'from oneflow.compatible import single_client as flow\n'), ((1819, 1851), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1849, 1851), True, 'from oneflow.compatible import single_client as flow\n'), ((934, 960), 'oneflow.compatible.single_client.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (958, 960), True, 'from oneflow.compatible import single_client as flow\n'), ((1078, 1106), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1104, 1106), True, 'from oneflow.compatible import single_client as flow\n'), ((1114, 1144), 'oneflow.compatible.single_client.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (1142, 1144), True, 'from oneflow.compatible import single_client as flow\n'), ((1167, 1216), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1187, 1216), True, 'from oneflow.compatible import single_client as flow\n'), ((2369, 2384), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2382, 2384), False, 'import unittest\n'), ((1773, 1814), 'numpy.allclose', 'np.allclose', (['y', 'z'], {'rtol': '(1e-05)', 'atol': '(1e-05)'}), '(y, z, rtol=1e-05, atol=1e-05)\n', (1784, 1814), True, 'import numpy as np\n'), ((1946, 1959), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1957, 1959), False, 'from collections import OrderedDict\n'), ((2169, 2189), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (2179, 2189), False, 'from test_util import Args, GenArgDict\n'), ((1241, 1269), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['shape'], {}), '(shape)\n', (1262, 1269), True, 'from 
oneflow.compatible.single_client import typing as oft\n'), ((1545, 1567), 'numpy.random.rand', 'np.random.rand', (['*shape'], {}), '(*shape)\n', (1559, 1567), True, 'import numpy as np\n'), ((1502, 1525), 'oneflow.compatible.single_client.repeat', 'flow.repeat', (['a', 'acc_num'], {}), '(a, acc_num)\n', (1513, 1525), True, 'from oneflow.compatible import single_client as flow\n'), ((1370, 1396), 'oneflow.compatible.single_client.cast', 'flow.cast', (['a', 'flow.float16'], {}), '(a, flow.float16)\n', (1379, 1396), True, 'from oneflow.compatible import single_client as flow\n')] |
import oneflow as flow
import oneflow.typing as tp
import numpy as np
def _get_regularizer(model_name):
#all decay
return flow.regularizers.l2(0.00004)
def _get_initializer(model_name):
if model_name == "weight":
return flow.variance_scaling_initializer(2.0, mode="fan_in", distribution="random_normal", data_format="NCHW")
elif model_name == "bias":
return flow.zeros_initializer()
elif model_name == "gamma":
return flow.ones_initializer()
elif model_name == "beta":
return flow.zeros_initializer()
elif model_name == "dense_weight":
return flow.random_normal_initializer(0, 0.01)
def _conv2d(name, x, filters, kernel_size, strides, num_group, padding="SAME", data_format="NCHW"):
assert data_format=="NCHW", "Mobilenet does not support channel_last mode."
weight_initializer = _get_initializer("weight")
weight_regularizer=_get_regularizer("beta")
shape = (filters, x.shape[1] // num_group, kernel_size, kernel_size)
weight = flow.get_variable(
name + "-weight",
shape=shape,
dtype=x.dtype,
initializer=weight_initializer,
regularizer=weight_regularizer,
model_name="--weight",
trainable=True,
)
return flow.nn.conv2d(x, weight, strides, padding, data_format, name=name, groups=num_group)
def _batch_norms(name, x, axis, momentum, epsilon, center=True, scale=True, trainable=True):
return flow.layers.batch_normalization(
name=name,
inputs=x,
axis=axis,
momentum=momentum,
epsilon=epsilon,
center=center,
scale=scale,
beta_initializer = _get_initializer("beta"),
gamma_initializer = _get_initializer("gamma"),
beta_regularizer = _get_regularizer("beta"),
gamma_regularizer = _get_regularizer("gamma"),
trainable=trainable
)
def hswish(x):
out = x * flow.nn.relu6(x + 3) / 6
return out
def hsigmoid(x):
out = flow.nn.relu6(x + 3) / 6
return out
def SeModule(name, x, channel, reduction=4):
N, C, H, W = x.shape
y = flow.nn.avg_pool2d(x, ksize=[H, W], strides=None, padding="SAME")
y = flow.flatten(y, start_dim=1, end_dim=-1)
y = flow.layers.dense(
y,
units=channel // reduction,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name=name+"dense1a",
)
y = flow.math.relu(y)
y = flow.layers.dense(
y,
units=channel,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name=name+"dense2",
)
y = hsigmoid(y)
y = flow.expand_dims(input=y, axis=2)
y = flow.expand_dims(input=y, axis=3)
y_expand = flow.broadcast_like(y, x, broadcast_axes=(2, 3))
out = x * y_expand
return out
def small_unit(name, x, kernel_size=1, num_filter=1, strides=1, padding="SAME", num_group=1, data_format="NCHW", act=None):
conv = _conv2d(name=name+"-small_unit", x=x, filters=num_filter, kernel_size=kernel_size, strides=strides,
num_group=num_group, padding=padding, data_format=data_format)
bn = _batch_norms(name+"-small_unit_bn", conv, axis=1, momentum=0.9, epsilon=1e-5)
if act == "_relu":
return flow.math.relu(bn)
elif act == "_hswish":
return hswish(bn)
else:
return bn
def mnv3_unit(name, x, kernel_size, expansion, num_filter, shortcut, strides, act, sechannel, data_format="NCHW"):
# num_exp_filter = int(round(num_in_filter * expansion_factor))
y = small_unit(name+"-mnv3_unit1", x, kernel_size=1, num_filter=expansion, strides=1, padding="VALID", num_group=1, act=act)
y = small_unit(name+"-mnv3_unit2", y, kernel_size=kernel_size, num_filter=expansion, strides=strides,
padding=([0, 0, kernel_size//2, kernel_size//2]), num_group=expansion, act=act)
out = small_unit(name+"-mnv3_unit3", y, kernel_size=1, num_filter=num_filter, strides=1, padding="VALID", num_group=1, act=None)
if sechannel != None:
out = SeModule(name+"-semodule", out, sechannel)
if shortcut:
_x = small_unit(name+"-mnv3_unit_shortcut", x, kernel_size=1, num_filter=num_filter, strides=1, padding="VALID", num_group=1, act=None)
return out
def MobileNetV3_Large(x, data_format="NCHW", num_class=1000):
layer1 = small_unit("large-layer1", x, num_filter=16, kernel_size=3, strides=2, padding="SAME", num_group=1, data_format=data_format, act="_hswish")
layerneck = mnv3_unit("large-neck1", layer1, 3, 16, 16, False, 1, "_relu", None)
layerneck = mnv3_unit("large-neck2", layerneck, 3, 64, 24, False, 2, "_relu", None)
layerneck = mnv3_unit("large-neck3", layerneck, 3, 72, 24, False, 1, "_relu", None)
layerneck = mnv3_unit("large-neck4", layerneck, 5, 72, 40, False, 2, "_relu", 40)
layerneck = mnv3_unit("large-neck5", layerneck, 5, 120, 40, False, 1, "_relu", 40)
layerneck = mnv3_unit("large-neck6", layerneck, 5, 120, 40, False, 1, "_relu", 40)
layerneck = mnv3_unit("large-neck7", layerneck, 3, 240, 80, False, 2, "_hswish", None)
layerneck = mnv3_unit("large-neck8", layerneck, 3, 200, 80, False, 1, "_hswish", None)
layerneck = mnv3_unit("large-neck9", layerneck, 3, 184, 80, False, 1, "_hswish", None)
layerneck = mnv3_unit("large-neck10", layerneck, 3, 184, 80, False, 1, "_hswish", None)
layerneck = mnv3_unit("large-neck11", layerneck, 3, 480, 112, True, 1, "_hswish", 112)
layerneck = mnv3_unit("large-neck12", layerneck, 3, 672, 112, False, 1, "_hswish", 112)
layerneck = mnv3_unit("large-neck13", layerneck, 5, 672, 160, True, 1, "_hswish", 160)
layerneck = mnv3_unit("large-neck14", layerneck, 5, 672, 160, False, 2, "_hswish", 160)
layerneck = mnv3_unit("large-neck15", layerneck, 3, 960, 160, False, 1, "_hswish", 160)
layer2 = small_unit("large-layer2", layerneck, num_filter=960, act="_hswish", padding="VALID") # number > 1 exists
layer_avg = flow.nn.avg_pool2d(layer2, ksize=[layer2.shape[2], layer2.shape[3]], strides=None, padding="VALID")
layer_view = flow.reshape(layer_avg, (layer_avg.shape[0], -1))
dense3 = flow.layers.dense(
layer_view,
units=1280,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name="dense3-large",
)
bn3 = _batch_norms("bn3-large", dense3, axis=1, momentum=0.9, epsilon=1e-5)
hs3 = hswish(bn3)
dense4 = flow.layers.dense(
hs3,
units=num_class,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name="dense4-large",
)
return dense4
def MobileNetV3_Small(x, data_format="NCHW", num_class=1000):
layer1 = small_unit("small-layer1", x, num_filter=16, kernel_size=3, strides=2, padding="SAME", num_group=1, data_format=data_format, act="_hswish")
layerneck = mnv3_unit("small-neck1", layer1, 3, 16, 16, True, 1, "_relu", 16)
layerneck = mnv3_unit("small-neck2", layerneck, 3, 72, 24, False, 2, "_relu", None)
layerneck = mnv3_unit("small-neck3", layerneck, 3, 88, 24, False, 1, "_relu", None)
layerneck = mnv3_unit("small-neck4", layerneck, 5, 96, 40, True, 2, "_hswish", 40)
layerneck = mnv3_unit("small-neck5", layerneck, 5, 240, 40, True, 1, "_hswish", 40)
layerneck = mnv3_unit("small-neck6", layerneck, 5, 240, 40, True, 1, "_hswish", 40)
layerneck = mnv3_unit("small-neck7", layerneck, 5, 120, 48, True, 1, "_hswish", 48)
layerneck = mnv3_unit("small-neck8", layerneck, 5, 144, 48, True, 1, "_hswish", 48)
layerneck = mnv3_unit("small-neck9", layerneck, 5, 288, 96, True, 2, "_hswish", 96)
layerneck = mnv3_unit("small-neck10", layerneck, 5, 576, 96, True, 1, "_hswish", 96)
layerneck = mnv3_unit("small-neck11", layerneck, 5, 576, 96, True, 1, "_hswish", 96)
layer2 = small_unit("small-layer2", layerneck, num_filter=576, act="_hswish", padding="VALID")
layer_avg = flow.nn.avg_pool2d(layer2, ksize=[layer2.shape[2], layer2.shape[3]], strides=None, padding="VALID") # review it
layer_view = flow.reshape(layer_avg, (layer_avg.shape[0], -1)) # review it
dense3 = flow.layers.dense(
layer_view,
units=1280,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name="dense3-large",
)
bn3 = _batch_norms("bn3-large", dense3, axis=1, momentum=0.9, epsilon=1e-5)
hs3 = hswish(bn3)
dense4 = flow.layers.dense(
hs3,
units=num_class,
use_bias=False,
kernel_initializer=_get_initializer("dense_weight"),
bias_initializer=_get_initializer("bias"),
kernel_regularizer=_get_regularizer("dense_weight"),
bias_regularizer=_get_regularizer("bias"),
name="dense4-large",
)
return dense4
def Mobilenet_Large(input_data, args, trainable=True, training=True, num_classes=1000, prefix = ""):
assert args.channel_last==False, "Mobilenet does not support channel_last mode, set channel_last=False will be right!"
data_format="NHWC" if args.channel_last else "NCHW"
out = MobileNetV3_Large(input_data, data_format=data_format, num_class=num_classes)
return out
def Mobilenet_Small(input_data, args, trainable=True, training=True, num_classes=1000, prefix = ""):
assert args.channel_last==False, "Mobilenet does not support channel_last mode, set channel_last=False will be right!"
data_format="NHWC" if args.channel_last else "NCHW"
out = MobileNetV3_Small(input_data, data_format=data_format, num_class=num_classes)
return out
| [
"oneflow.expand_dims",
"oneflow.variance_scaling_initializer",
"oneflow.nn.conv2d",
"oneflow.ones_initializer",
"oneflow.nn.relu6",
"oneflow.random_normal_initializer",
"oneflow.get_variable",
"oneflow.broadcast_like",
"oneflow.reshape",
"oneflow.zeros_initializer",
"oneflow.nn.avg_pool2d",
"o... | [((131, 158), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['(4e-05)'], {}), '(4e-05)\n', (151, 158), True, 'import oneflow as flow\n'), ((1021, 1197), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'shape', 'dtype': 'x.dtype', 'initializer': 'weight_initializer', 'regularizer': 'weight_regularizer', 'model_name': '"""--weight"""', 'trainable': '(True)'}), "(name + '-weight', shape=shape, dtype=x.dtype, initializer\n =weight_initializer, regularizer=weight_regularizer, model_name=\n '--weight', trainable=True)\n", (1038, 1197), True, 'import oneflow as flow\n'), ((1262, 1352), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['x', 'weight', 'strides', 'padding', 'data_format'], {'name': 'name', 'groups': 'num_group'}), '(x, weight, strides, padding, data_format, name=name, groups=\n num_group)\n', (1276, 1352), True, 'import oneflow as flow\n'), ((2107, 2172), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['x'], {'ksize': '[H, W]', 'strides': 'None', 'padding': '"""SAME"""'}), "(x, ksize=[H, W], strides=None, padding='SAME')\n", (2125, 2172), True, 'import oneflow as flow\n'), ((2181, 2221), 'oneflow.flatten', 'flow.flatten', (['y'], {'start_dim': '(1)', 'end_dim': '(-1)'}), '(y, start_dim=1, end_dim=-1)\n', (2193, 2221), True, 'import oneflow as flow\n'), ((2593, 2610), 'oneflow.math.relu', 'flow.math.relu', (['y'], {}), '(y)\n', (2607, 2610), True, 'import oneflow as flow\n'), ((2988, 3021), 'oneflow.expand_dims', 'flow.expand_dims', ([], {'input': 'y', 'axis': '(2)'}), '(input=y, axis=2)\n', (3004, 3021), True, 'import oneflow as flow\n'), ((3030, 3063), 'oneflow.expand_dims', 'flow.expand_dims', ([], {'input': 'y', 'axis': '(3)'}), '(input=y, axis=3)\n', (3046, 3063), True, 'import oneflow as flow\n'), ((3079, 3127), 'oneflow.broadcast_like', 'flow.broadcast_like', (['y', 'x'], {'broadcast_axes': '(2, 3)'}), '(y, x, broadcast_axes=(2, 3))\n', (3098, 3127), True, 'import oneflow as flow\n'), ((6305, 6408), 
'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['layer2'], {'ksize': '[layer2.shape[2], layer2.shape[3]]', 'strides': 'None', 'padding': '"""VALID"""'}), "(layer2, ksize=[layer2.shape[2], layer2.shape[3]],\n strides=None, padding='VALID')\n", (6323, 6408), True, 'import oneflow as flow\n'), ((6422, 6471), 'oneflow.reshape', 'flow.reshape', (['layer_avg', '(layer_avg.shape[0], -1)'], {}), '(layer_avg, (layer_avg.shape[0], -1))\n', (6434, 6471), True, 'import oneflow as flow\n'), ((8605, 8708), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['layer2'], {'ksize': '[layer2.shape[2], layer2.shape[3]]', 'strides': 'None', 'padding': '"""VALID"""'}), "(layer2, ksize=[layer2.shape[2], layer2.shape[3]],\n strides=None, padding='VALID')\n", (8623, 8708), True, 'import oneflow as flow\n'), ((8735, 8784), 'oneflow.reshape', 'flow.reshape', (['layer_avg', '(layer_avg.shape[0], -1)'], {}), '(layer_avg, (layer_avg.shape[0], -1))\n', (8747, 8784), True, 'import oneflow as flow\n'), ((242, 350), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2.0)'], {'mode': '"""fan_in"""', 'distribution': '"""random_normal"""', 'data_format': '"""NCHW"""'}), "(2.0, mode='fan_in', distribution=\n 'random_normal', data_format='NCHW')\n", (275, 350), True, 'import oneflow as flow\n'), ((1987, 2007), 'oneflow.nn.relu6', 'flow.nn.relu6', (['(x + 3)'], {}), '(x + 3)\n', (2000, 2007), True, 'import oneflow as flow\n'), ((3602, 3620), 'oneflow.math.relu', 'flow.math.relu', (['bn'], {}), '(bn)\n', (3616, 3620), True, 'import oneflow as flow\n'), ((392, 416), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (414, 416), True, 'import oneflow as flow\n'), ((1919, 1939), 'oneflow.nn.relu6', 'flow.nn.relu6', (['(x + 3)'], {}), '(x + 3)\n', (1932, 1939), True, 'import oneflow as flow\n'), ((464, 487), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (485, 487), True, 'import oneflow as flow\n'), ((534, 558), 
'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (556, 558), True, 'import oneflow as flow\n'), ((613, 652), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', (['(0)', '(0.01)'], {}), '(0, 0.01)\n', (643, 652), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import traceback
from google.protobuf import text_format
import oneflow._oneflow_internal
import oneflow.core.record.record_pb2 as record_util
import oneflow.framework.local_blob as local_blob_util
import oneflow.framework.ofblob as ofblob
import oneflow.framework.remote_blob as remote_blob_util
import oneflow.framework.session_context as session_ctx
import oneflow.framework.typing_util as oft_util
def BindUuidAndHandler(uuid, blob_watched, handler):
assert isinstance(blob_watched, oneflow._oneflow_internal.ConsistentBlob)
session_ctx.GetDefaultSession().uuid2watch_handler[uuid] = (blob_watched, handler)
class _Watcher(oneflow._oneflow_internal.ForeignWatcher):
def __init__(self):
oneflow._oneflow_internal.ForeignWatcher.__init__(self)
def Call(self, handler_uuid, of_blob_ptr):
try:
_WatcherHandler(handler_uuid, of_blob_ptr)
except Exception as e:
print(traceback.format_exc())
raise e
def _WatcherHandler(handler_uuid, of_blob_ptr):
uuid2handler = session_ctx.GetDefaultSession().uuid2watch_handler
assert handler_uuid in uuid2handler
(blob_watched, handler) = uuid2handler[handler_uuid]
assert callable(handler)
ndarray = ofblob.OfBlob(of_blob_ptr).CopyToNdarray()
local_blob = local_blob_util.LocalBlob(ndarray, blob_watched.is_dynamic)
handler(oft_util.TransformWatchedBlob(local_blob, handler))
_global_watcher = _Watcher()
| [
"oneflow.framework.ofblob.OfBlob",
"oneflow.framework.session_context.GetDefaultSession",
"oneflow.framework.local_blob.LocalBlob",
"oneflow.framework.typing_util.TransformWatchedBlob"
] | [((1891, 1950), 'oneflow.framework.local_blob.LocalBlob', 'local_blob_util.LocalBlob', (['ndarray', 'blob_watched.is_dynamic'], {}), '(ndarray, blob_watched.is_dynamic)\n', (1916, 1950), True, 'import oneflow.framework.local_blob as local_blob_util\n'), ((1640, 1671), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1669, 1671), True, 'import oneflow.framework.session_context as session_ctx\n'), ((1963, 2013), 'oneflow.framework.typing_util.TransformWatchedBlob', 'oft_util.TransformWatchedBlob', (['local_blob', 'handler'], {}), '(local_blob, handler)\n', (1992, 2013), True, 'import oneflow.framework.typing_util as oft_util\n'), ((1131, 1162), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1160, 1162), True, 'import oneflow.framework.session_context as session_ctx\n'), ((1831, 1857), 'oneflow.framework.ofblob.OfBlob', 'ofblob.OfBlob', (['of_blob_ptr'], {}), '(of_blob_ptr)\n', (1844, 1857), True, 'import oneflow.framework.ofblob as ofblob\n'), ((1527, 1549), 'traceback.format_exc', 'traceback.format_exc', ([], {}), '()\n', (1547, 1549), False, 'import traceback\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.clamp,
"""
Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return
a resulting tensor:
.. math::
y_i = \\begin{cases}
\\text{min} & \\text{if } x_i < \\text{min} \\\\
x_i & \\text{if } \\text{min} \\leq x_i \\leq \\text{max} \\\\
\\text{max} & \\text{if } x_i > \\text{max}
\\end{cases}
If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min`
and :attr:`max` must be real numbers, otherwise they should be integers.
Args:
input (Tensor): the input tensor.
min (Number): lower-bound of the range to be clamped to. Defaults to None.
max (Number): upper-bound of the range to be clamped to. Defaults to None.
out (Tensor, optional): the output tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> arr = np.array([0.2, 0.6, -1.5, -0.3])
>>> input = flow.Tensor(arr)
>>> output = flow.clamp(input, min=-0.5, max=0.5)
>>> output
tensor([ 0.2000, 0.5000, -0.5000, -0.3000], dtype=oneflow.float32)
>>> arr = np.array([0.2, 0.6, -1.5, -0.3])
>>> input = flow.Tensor(arr)
>>> output = flow.clamp(input, min=None, max=0.5)
>>> output
tensor([ 0.2000, 0.5000, -1.5000, -0.3000], dtype=oneflow.float32)
>>> arr = np.array([0.2, 0.6, -1.5, -0.3])
>>> input = flow.Tensor(arr)
>>> output = flow.clamp(input, min=-0.5, max=None)
>>> output
tensor([ 0.2000, 0.6000, -0.5000, -0.3000], dtype=oneflow.float32)
""",
)
add_docstr(
oneflow.clip,
"""
Alias for :func:`oneflow.clamp`.
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2358), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.clamp', '"""\n Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return\n a resulting tensor:\n\n .. math::\n y_i = \\\\begin{cases}\n \\\\text{min} & \\\\text{if } x_i < \\\\text{min} \\\\\\\\\n x_i & \\\\text{if } \\\\text{min} \\\\leq x_i \\\\leq \\\\text{max} \\\\\\\\\n \\\\text{max} & \\\\text{if } x_i > \\\\text{max}\n \\\\end{cases}\n\n If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min`\n and :attr:`max` must be real numbers, otherwise they should be integers.\n\n Args:\n input (Tensor): the input tensor.\n min (Number): lower-bound of the range to be clamped to. Defaults to None.\n max (Number): upper-bound of the range to be clamped to. Defaults to None.\n out (Tensor, optional): the output tensor.\n\n For example:\n\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=-0.5, max=0.5)\n >>> output\n tensor([ 0.2000, 0.5000, -0.5000, -0.3000], dtype=oneflow.float32)\n\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=None, max=0.5)\n >>> output\n tensor([ 0.2000, 0.5000, -1.5000, -0.3000], dtype=oneflow.float32)\n\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=-0.5, max=None)\n >>> output\n tensor([ 0.2000, 0.6000, -0.5000, -0.3000], dtype=oneflow.float32)\n\n """'], {}), '(oneflow.clamp,\n """\n Clamp all elements in :attr:`input` into the range `[` :attr:`min`, :attr:`max` `]` and return\n a resulting tensor:\n\n .. 
math::\n y_i = \\\\begin{cases}\n \\\\text{min} & \\\\text{if } x_i < \\\\text{min} \\\\\\\\\n x_i & \\\\text{if } \\\\text{min} \\\\leq x_i \\\\leq \\\\text{max} \\\\\\\\\n \\\\text{max} & \\\\text{if } x_i > \\\\text{max}\n \\\\end{cases}\n\n If :attr:`input` is of type `FloatTensor` or `DoubleTensor`, args :attr:`min`\n and :attr:`max` must be real numbers, otherwise they should be integers.\n\n Args:\n input (Tensor): the input tensor.\n min (Number): lower-bound of the range to be clamped to. Defaults to None.\n max (Number): upper-bound of the range to be clamped to. Defaults to None.\n out (Tensor, optional): the output tensor.\n\n For example:\n\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=-0.5, max=0.5)\n >>> output\n tensor([ 0.2000, 0.5000, -0.5000, -0.3000], dtype=oneflow.float32)\n\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=None, max=0.5)\n >>> output\n tensor([ 0.2000, 0.5000, -1.5000, -0.3000], dtype=oneflow.float32)\n\n >>> arr = np.array([0.2, 0.6, -1.5, -0.3])\n >>> input = flow.Tensor(arr)\n >>> output = flow.clamp(input, min=-0.5, max=None)\n >>> output\n tensor([ 0.2000, 0.6000, -0.5000, -0.3000], dtype=oneflow.float32)\n\n """\n )\n', (670, 2358), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2362, 2437), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.clip', '"""\n Alias for :func:`oneflow.clamp`. \n """'], {}), '(oneflow.clip, """\n Alias for :func:`oneflow.clamp`. \n """)\n', (2372, 2437), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.nonzero,
"""nonzero(input, *, out=None, as_tuple=False) -> Tensor or tuple of Tensors
.. note::
When :attr:`as_tuple` is ``False`` (default): returns a
2-D tensor where each row is the index for a nonzero value.
When :attr:`as_tuple` is ``True``: returns a tuple of 1-D
index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``
gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor
contains nonzero indices for a certain dimension.
See below for more details on the two behaviors.
**When** :attr:`as_tuple` **is** ``False`` **(default)**:
Returns a tensor containing the indices of all non-zero elements of
:attr:`input`. Each row in the result contains the indices of a non-zero
element in :attr:`input`. The result is sorted lexicographically, with
the last index changing the fastest (C-style).
If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor
:attr:`out` is of size :math:`(z \\times n)`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
**When** :attr:`as_tuple` **is** ``True``:
Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,
each containing the indices (in that dimension) of all non-zero elements of
:attr:`input` .
If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`
tensors of size :math:`z`, where :math:`z` is the total number of
non-zero elements in the :attr:`input` tensor.
As a special case, when :attr:`input` has zero dimensions and a nonzero scalar
value, it is treated as a one-dimensional tensor with one element.
Args:
input(Tensor): the input tensor.
Keyword args:
out (Tensor, optional): the output tensor containing indices
Returns:
Tensor or tuple of Tensors: If :attr:`as_tuple` is ``False``, the output
tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for
each dimension, containing the indices of each nonzero element along that
dimension.
Example::
>>> import oneflow as flow
>>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]))
tensor([[0],
[1],
[2],
[4]], dtype=oneflow.int32)
>>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]))
tensor([[0, 0],
[1, 1],
[2, 2],
[3, 3]], dtype=oneflow.int32)
>>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]), as_tuple=True)
(tensor([0, 1, 2, 4], dtype=oneflow.int32),)
>>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],
... [0.0, 0.4, 0.0, 0.0],
... [0.0, 0.0, 1.2, 0.0],
... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)
(tensor([0, 1, 2, 3], dtype=oneflow.int32), tensor([0, 1, 2, 3], dtype=oneflow.int32))
>>> flow.nonzero(flow.tensor(5), as_tuple=True)
(tensor([0], dtype=oneflow.int32),)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 4033), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.nonzero', '"""nonzero(input, *, out=None, as_tuple=False) -> Tensor or tuple of Tensors\n\n .. note::\n When :attr:`as_tuple` is ``False`` (default): returns a\n 2-D tensor where each row is the index for a nonzero value.\n\n When :attr:`as_tuple` is ``True``: returns a tuple of 1-D\n index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``\n gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor\n contains nonzero indices for a certain dimension.\n\n See below for more details on the two behaviors.\n\n **When** :attr:`as_tuple` **is** ``False`` **(default)**:\n\n Returns a tensor containing the indices of all non-zero elements of\n :attr:`input`. Each row in the result contains the indices of a non-zero\n element in :attr:`input`. The result is sorted lexicographically, with\n the last index changing the fastest (C-style).\n\n If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor\n :attr:`out` is of size :math:`(z \\\\times n)`, where :math:`z` is the total number of\n non-zero elements in the :attr:`input` tensor.\n\n **When** :attr:`as_tuple` **is** ``True``:\n\n Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,\n each containing the indices (in that dimension) of all non-zero elements of\n :attr:`input` .\n\n If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`\n tensors of size :math:`z`, where :math:`z` is the total number of\n non-zero elements in the :attr:`input` tensor.\n\n As a special case, when :attr:`input` has zero dimensions and a nonzero scalar\n value, it is treated as a one-dimensional tensor with one element.\n\n Args:\n input(Tensor): the input tensor.\n\n Keyword args:\n out (Tensor, optional): the output tensor containing indices\n\n Returns:\n Tensor or tuple of Tensors: If :attr:`as_tuple` is ``False``, the output\n tensor 
containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for\n each dimension, containing the indices of each nonzero element along that\n dimension.\n\n Example::\n\n >>> import oneflow as flow\n >>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]))\n tensor([[0],\n [1],\n [2],\n [4]], dtype=oneflow.int32)\n >>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],\n ... [0.0, 0.4, 0.0, 0.0],\n ... [0.0, 0.0, 1.2, 0.0],\n ... [0.0, 0.0, 0.0,-0.4]]))\n tensor([[0, 0],\n [1, 1],\n [2, 2],\n [3, 3]], dtype=oneflow.int32)\n >>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]), as_tuple=True)\n (tensor([0, 1, 2, 4], dtype=oneflow.int32),)\n >>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],\n ... [0.0, 0.4, 0.0, 0.0],\n ... [0.0, 0.0, 1.2, 0.0],\n ... [0.0, 0.0, 0.0,-0.4]]), as_tuple=True)\n (tensor([0, 1, 2, 3], dtype=oneflow.int32), tensor([0, 1, 2, 3], dtype=oneflow.int32))\n >>> flow.nonzero(flow.tensor(5), as_tuple=True)\n (tensor([0], dtype=oneflow.int32),)\n\n """'], {}), '(oneflow.nonzero,\n """nonzero(input, *, out=None, as_tuple=False) -> Tensor or tuple of Tensors\n\n .. note::\n When :attr:`as_tuple` is ``False`` (default): returns a\n 2-D tensor where each row is the index for a nonzero value.\n\n When :attr:`as_tuple` is ``True``: returns a tuple of 1-D\n index tensors, allowing for advanced indexing, so ``x[x.nonzero(as_tuple=True)]``\n gives all nonzero values of tensor ``x``. Of the returned tuple, each index tensor\n contains nonzero indices for a certain dimension.\n\n See below for more details on the two behaviors.\n\n **When** :attr:`as_tuple` **is** ``False`` **(default)**:\n\n Returns a tensor containing the indices of all non-zero elements of\n :attr:`input`. Each row in the result contains the indices of a non-zero\n element in :attr:`input`. 
The result is sorted lexicographically, with\n the last index changing the fastest (C-style).\n\n If :attr:`input` has :math:`n` dimensions, then the resulting indices tensor\n :attr:`out` is of size :math:`(z \\\\times n)`, where :math:`z` is the total number of\n non-zero elements in the :attr:`input` tensor.\n\n **When** :attr:`as_tuple` **is** ``True``:\n\n Returns a tuple of 1-D tensors, one for each dimension in :attr:`input`,\n each containing the indices (in that dimension) of all non-zero elements of\n :attr:`input` .\n\n If :attr:`input` has :math:`n` dimensions, then the resulting tuple contains :math:`n`\n tensors of size :math:`z`, where :math:`z` is the total number of\n non-zero elements in the :attr:`input` tensor.\n\n As a special case, when :attr:`input` has zero dimensions and a nonzero scalar\n value, it is treated as a one-dimensional tensor with one element.\n\n Args:\n input(Tensor): the input tensor.\n\n Keyword args:\n out (Tensor, optional): the output tensor containing indices\n\n Returns:\n Tensor or tuple of Tensors: If :attr:`as_tuple` is ``False``, the output\n tensor containing indices. If :attr:`as_tuple` is ``True``, one 1-D tensor for\n each dimension, containing the indices of each nonzero element along that\n dimension.\n\n Example::\n\n >>> import oneflow as flow\n >>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]))\n tensor([[0],\n [1],\n [2],\n [4]], dtype=oneflow.int32)\n >>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],\n ... [0.0, 0.4, 0.0, 0.0],\n ... [0.0, 0.0, 1.2, 0.0],\n ... [0.0, 0.0, 0.0,-0.4]]))\n tensor([[0, 0],\n [1, 1],\n [2, 2],\n [3, 3]], dtype=oneflow.int32)\n >>> flow.nonzero(flow.tensor([1, 1, 1, 0, 1]), as_tuple=True)\n (tensor([0, 1, 2, 4], dtype=oneflow.int32),)\n >>> flow.nonzero(flow.tensor([[0.6, 0.0, 0.0, 0.0],\n ... [0.0, 0.4, 0.0, 0.0],\n ... [0.0, 0.0, 1.2, 0.0],\n ... 
[0.0, 0.0, 0.0,-0.4]]), as_tuple=True)\n (tensor([0, 1, 2, 3], dtype=oneflow.int32), tensor([0, 1, 2, 3], dtype=oneflow.int32))\n >>> flow.nonzero(flow.tensor(5), as_tuple=True)\n (tensor([0], dtype=oneflow.int32),)\n\n """\n )\n', (670, 4033), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestConstantModule(flow.unittest.TestCase):
    """Verify flow.ones / flow.zeros / *_like against their numpy counterparts."""

    def test_ones(test_case):
        # Tuple shape, plain int shape, and an explicit floating dtype.
        shape = (1, 2, 3, 4)
        out = flow.ones(shape)
        test_case.assertTrue(np.array_equal(np.ones(shape), out.numpy()))
        out_int_shape = flow.ones(10)
        test_case.assertTrue(np.array_equal(np.ones(10), out_int_shape.numpy()))
        out_f64 = flow.ones(10, dtype=flow.float64)
        test_case.assertTrue(
            np.array_equal(np.ones(10, dtype=np.float64), out_f64.numpy())
        )

    def test_zeros(test_case):
        # Same three cases as test_ones, with flow.int for the dtype variant.
        shape = (3, 2, 5, 1)
        out = flow.zeros(shape)
        test_case.assertTrue(np.array_equal(np.zeros(shape), out.numpy()))
        out_int_shape = flow.zeros(10)
        test_case.assertTrue(np.array_equal(np.zeros(10), out_int_shape.numpy()))
        out_int_dtype = flow.zeros(10, dtype=flow.int)
        test_case.assertTrue(
            np.array_equal(np.zeros(10, dtype=int), out_int_dtype.numpy())
        )

    def test_ones_like(test_case):
        # Shape/dtype of the source tensor should be mirrored for both dtypes.
        for np_dtype in (np.float64, int):
            src = flow.Tensor(np.ones([2, 4], dtype=np_dtype))
            test_case.assertTrue(
                np.array_equal(
                    np.ones_like(src.numpy()), flow.ones_like(src).numpy()
                )
            )

    def test_zeros_like(test_case):
        for np_dtype in (np.float64, int):
            src = flow.Tensor(np.ones([2, 4], dtype=np_dtype))
            test_case.assertTrue(
                np.array_equal(
                    np.zeros_like(src.numpy()), flow.zeros_like(src).numpy()
                )
            )


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.ones_like",
"oneflow.experimental.zeros_like",
"oneflow.experimental.ones",
"oneflow.experimental.zeros"
] | [((2433, 2448), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2446, 2448), False, 'import unittest\n'), ((901, 918), 'oneflow.experimental.ones', 'flow.ones', (['shape1'], {}), '(shape1)\n', (910, 918), True, 'import oneflow.experimental as flow\n'), ((1006, 1019), 'oneflow.experimental.ones', 'flow.ones', (['(10)'], {}), '(10)\n', (1015, 1019), True, 'import oneflow.experimental as flow\n'), ((1104, 1137), 'oneflow.experimental.ones', 'flow.ones', (['(10)'], {'dtype': 'flow.float64'}), '(10, dtype=flow.float64)\n', (1113, 1137), True, 'import oneflow.experimental as flow\n'), ((1299, 1316), 'oneflow.experimental.zeros', 'flow.zeros', (['shape'], {}), '(shape)\n', (1309, 1316), True, 'import oneflow.experimental as flow\n'), ((1404, 1418), 'oneflow.experimental.zeros', 'flow.zeros', (['(10)'], {}), '(10)\n', (1414, 1418), True, 'import oneflow.experimental as flow\n'), ((1504, 1534), 'oneflow.experimental.zeros', 'flow.zeros', (['(10)'], {'dtype': 'flow.int'}), '(10, dtype=flow.int)\n', (1514, 1534), True, 'import oneflow.experimental as flow\n'), ((690, 733), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (731, 733), True, 'import oneflow.experimental as flow\n'), ((1677, 1710), 'numpy.ones', 'np.ones', (['[2, 4]'], {'dtype': 'np.float64'}), '([2, 4], dtype=np.float64)\n', (1684, 1710), True, 'import numpy as np\n'), ((1857, 1883), 'numpy.ones', 'np.ones', (['[2, 4]'], {'dtype': 'int'}), '([2, 4], dtype=int)\n', (1864, 1883), True, 'import numpy as np\n'), ((2067, 2100), 'numpy.ones', 'np.ones', (['[2, 4]'], {'dtype': 'np.float64'}), '([2, 4], dtype=np.float64)\n', (2074, 2100), True, 'import numpy as np\n'), ((2249, 2275), 'numpy.ones', 'np.ones', (['[2, 4]'], {'dtype': 'int'}), '([2, 4], dtype=int)\n', (2256, 2275), True, 'import numpy as np\n'), ((963, 978), 'numpy.ones', 'np.ones', (['shape1'], {}), '(shape1)\n', (970, 978), True, 'import numpy as np\n'), ((1064, 1075), 
'numpy.ones', 'np.ones', (['(10)'], {}), '(10)\n', (1071, 1075), True, 'import numpy as np\n'), ((1182, 1211), 'numpy.ones', 'np.ones', (['(10)'], {'dtype': 'np.float64'}), '(10, dtype=np.float64)\n', (1189, 1211), True, 'import numpy as np\n'), ((1361, 1376), 'numpy.zeros', 'np.zeros', (['shape'], {}), '(shape)\n', (1369, 1376), True, 'import numpy as np\n'), ((1463, 1475), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1471, 1475), True, 'import numpy as np\n'), ((1579, 1602), 'numpy.zeros', 'np.zeros', (['(10)'], {'dtype': 'int'}), '(10, dtype=int)\n', (1587, 1602), True, 'import numpy as np\n'), ((1794, 1811), 'oneflow.experimental.ones_like', 'flow.ones_like', (['x'], {}), '(x)\n', (1808, 1811), True, 'import oneflow.experimental as flow\n'), ((1968, 1986), 'oneflow.experimental.ones_like', 'flow.ones_like', (['x2'], {}), '(x2)\n', (1982, 1986), True, 'import oneflow.experimental as flow\n'), ((2185, 2203), 'oneflow.experimental.zeros_like', 'flow.zeros_like', (['x'], {}), '(x)\n', (2200, 2203), True, 'import oneflow.experimental as flow\n'), ((2361, 2380), 'oneflow.experimental.zeros_like', 'flow.zeros_like', (['x2'], {}), '(x2)\n', (2376, 2380), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestCosineSimilarity(flow.unittest.TestCase):
    """Error-path checks for flow.nn.functional.cosine_similarity."""

    def test_cosine_similarity_not_floating_type(test_case):
        # Integer inputs must be rejected with a dtype error.
        a = flow.randn(2, 5).to(flow.int32)
        b = flow.randn(2, 5).to(flow.int32)
        with test_case.assertRaises(RuntimeError) as ctx:
            flow.nn.functional.cosine_similarity(a, b, dim=1)
        test_case.assertIn(
            "expected common dtype to be floating point, yet common dtype is oneflow.int32",
            str(ctx.exception),
        )

    def test_cosine_similarity_broadcast(test_case):
        # Mismatched non-singleton dims must raise a broadcast error.
        a = flow.randn(2, 5)
        b = flow.randn(2, 4)
        with test_case.assertRaises(RuntimeError) as ctx:
            flow.nn.functional.cosine_similarity(a, b, dim=1)
        test_case.assertIn(
            "The size of tensor a (5) must match the size of tensor b (4) at non-singleton dimension 1",
            str(ctx.exception),
        )


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.nn.functional.cosine_similarity",
"oneflow.randn",
"oneflow.unittest.skip_unless_1n1d"
] | [((657, 689), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (687, 689), True, 'import oneflow as flow\n'), ((1632, 1647), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1645, 1647), False, 'import unittest\n'), ((1249, 1265), 'oneflow.randn', 'flow.randn', (['(2)', '(5)'], {}), '(2, 5)\n', (1259, 1265), True, 'import oneflow as flow\n'), ((1278, 1294), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {}), '(2, 4)\n', (1288, 1294), True, 'import oneflow as flow\n'), ((967, 1016), 'oneflow.nn.functional.cosine_similarity', 'flow.nn.functional.cosine_similarity', (['x', 'y'], {'dim': '(1)'}), '(x, y, dim=1)\n', (1003, 1016), True, 'import oneflow as flow\n'), ((1371, 1420), 'oneflow.nn.functional.cosine_similarity', 'flow.nn.functional.cosine_similarity', (['x', 'y'], {'dim': '(1)'}), '(x, y, dim=1)\n', (1407, 1420), True, 'import oneflow as flow\n'), ((815, 831), 'oneflow.randn', 'flow.randn', (['(2)', '(5)'], {}), '(2, 5)\n', (825, 831), True, 'import oneflow as flow\n'), ((859, 875), 'oneflow.randn', 'flow.randn', (['(2)', '(5)'], {}), '(2, 5)\n', (869, 875), True, 'import oneflow as flow\n')] |
from logging import log
import math
import random
from typing import Optional, Tuple
import oneflow as flow
import oneflow.nn as nn
from oneflow.nn import CrossEntropyLoss, MSELoss
from .bert import Bert
from .bart_utils import (
shift_tokens_right,
_make_causal_mask,
_expand_mask,
init_weights,
tensor_unique, # for tensor.unique
)
# Name -> activation callable lookup used by the decoder layers (keyed by the
# `activation` config string). Entries commented out below are not implemented
# in this port and would need their own definitions before being enabled.
ACT2FN = {
    "relu": flow.nn.functional.relu,
    # "silu": silu,
    # "swish": silu,
    "gelu": flow.nn.functional.gelu,
    "tanh": flow.nn.functional.tanh,
    # "gelu_new": gelu_new,
    # "gelu_fast": gelu_fast,
    # "quick_gelu": quick_gelu,
    # "mish": mish,
    # "linear": linear_act,
    "sigmoid": flow.nn.functional.sigmoid,
}
class BartLearnedPositionalEmbedding(nn.Embedding):
    """Learned positional embedding with Bart's legacy 2-slot offset.

    Bart reserves two extra embedding rows (a historical padding hack), so the
    table is allocated with ``num_embeddings + offset`` rows and every position
    id is shifted by ``offset`` before lookup.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # Bart is set up so that if padding_idx is specified then offset the
        # embedding ids by 2 and adjust num_embeddings appropriately.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(self, input_ids_shape: flow.Size, past_key_values_length: int = 0):
        """Return position embeddings for an input of shape [bsz x seqlen]."""
        seq_len = input_ids_shape[1]
        # Positions continue from the cached prefix length during decoding.
        position_ids = flow.arange(
            past_key_values_length,
            past_key_values_length + seq_len,
            dtype=flow.long,
            device=self.weight.device,
        )
        return super().forward(position_ids + self.offset)
class BartAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper.

    Serves as both decoder self-attention (with incremental key/value caching)
    and encoder-decoder cross-attention, selected per call by the arguments
    passed to :meth:`forward`.
    """

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
    ):
        """Build the q/k/v/output projections.

        Args:
            embed_dim: model width; must be divisible by ``num_heads``.
            num_heads: number of attention heads.
            dropout: probability applied to attention weights (training only).
            is_decoder: when True, key/value states are cached and returned.
            bias: whether the four linear projections carry bias terms.
        """
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert (
            self.head_dim * num_heads == self.embed_dim
        ), f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`: {num_heads})."
        # Standard 1/sqrt(head_dim) scaling, pre-applied to the query states.
        self.scaling = self.head_dim ** -0.5
        self.is_decoder = is_decoder
        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def _shape(self, tensor: flow.Tensor, seq_len: int, bsz: int):
        # (bsz, seq, embed) -> (bsz, num_heads, seq, head_dim)
        return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

    def forward(
        self,
        hidden_states: flow.Tensor,
        key_value_states: Optional[flow.Tensor] = None,
        past_key_value: Optional[Tuple[flow.Tensor]] = None,
        attention_mask: Optional[flow.Tensor] = None,
        layer_head_mask: Optional[flow.Tensor] = None,
        output_attentions: bool = False,
    ) -> Tuple[flow.Tensor, Optional[flow.Tensor], Optional[Tuple[flow.Tensor]]]:
        """Input shape: Batch x Time x Channel.

        Returns (attn_output, attn_weights_reshaped, past_key_value), where
        attn_weights_reshaped is None unless ``output_attentions`` is True and
        past_key_value is the (key, value) cache when ``is_decoder`` is True.
        """
        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None
        bsz, tgt_len, embed_dim = hidden_states.size()
        # get query proj (scaling folded in here so softmax input is q*k/sqrt(d))
        query_states = self.q_proj(hidden_states) * self.scaling
        # get key, value proj
        if is_cross_attention and past_key_value is not None:
            # reuse k,v, cross_attentions (encoder states never change per step)
            key_states = past_key_value[0]
            value_states = past_key_value[1]
        elif is_cross_attention:
            # cross_attentions: project the encoder hidden states
            key_states = self._shape(self.k_proj(key_value_states), -1, bsz)
            value_states = self._shape(self.v_proj(key_value_states), -1, bsz)
        elif past_key_value is not None:
            # reuse k, v, self_attention: append this step's k/v to the cache
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
            key_states = flow.cat([past_key_value[0], key_states], dim=2)
            value_states = flow.cat([past_key_value[1], value_states], dim=2)
        else:
            # self_attention, no cache
            key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
            value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
        if self.is_decoder:
            # if cross_attention save Tuple(flow.Tensor, flow.Tensor) of all cross attention key/value_states.
            # Further calls to cross_attention layer can then reuse all cross-attention
            # key/value_states (first "if" case)
            # if uni-directional self-attention (decoder) save Tuple(flow.Tensor, flow.Tensor) of
            # all previous decoder key/value_states. Further calls to uni-directional self-attention
            # can concat previous decoder key/value_states to current projected key/value_states (third "elif" case)
            # if encoder bi-directional self-attention `past_key_value` is always `None`
            past_key_value = (key_states, value_states)
        # Fold batch and heads together for the batched matmuls below.
        proj_shape = (bsz * self.num_heads, -1, self.head_dim)
        query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
        key_states = key_states.view(*proj_shape)
        value_states = value_states.view(*proj_shape)
        src_len = key_states.size(1)
        attn_weights = flow.bmm(query_states, key_states.transpose(1, 2))
        assert attn_weights.size() == (
            bsz * self.num_heads,
            tgt_len,
            src_len,
        ), f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is {attn_weights.size()}"
        if attention_mask is not None:
            assert attention_mask.size() == (
                bsz,
                1,
                tgt_len,
                src_len,
            ), f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
            # Additive mask applied before softmax (broadcast over heads).
            attn_weights = (
                attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
                + attention_mask
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        attn_weights = attn_weights.softmax(dim=-1)
        if layer_head_mask is not None:
            assert layer_head_mask.size() == (
                self.num_heads,
            ), f"Head mask for a single layer should be of size {(self.num_heads,)}, but is {layer_head_mask.size()}"
            # Per-head multiplicative mask, applied after softmax.
            attn_weights = layer_head_mask.view(1, -1, 1, 1) * attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
        if output_attentions:
            # this operation is a bit akward, but it's required to
            # make sure that attn_weights keeps its gradient.
            # In order to do so, attn_weights have to reshaped
            # twice and have to be reused in the following
            attn_weights_reshaped = attn_weights.view(
                bsz, self.num_heads, tgt_len, src_len
            )
            attn_weights = attn_weights_reshaped.view(
                bsz * self.num_heads, tgt_len, src_len
            )
        else:
            attn_weights_reshaped = None
        # with mpu.get_cuda_rng_tracker().fork():
        #     prob = self.dropout if self.training else 0
        #     attn_probs = flow.F.dropout(attn_weights, p=prob)
        #     attn_output = flow.bmm(attn_probs, value_states)
        # Dropout is gated on self.training manually instead of passing a
        # training flag to the functional call.
        if self.training:
            attn_weights = flow.nn.functional.dropout(attn_weights, p=self.dropout)
        attn_output = flow.bmm(attn_weights, value_states)
        assert attn_output.size() == (
            bsz * self.num_heads,
            tgt_len,
            self.head_dim,
        ), f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is {attn_output.size()}"
        # Re-merge heads: (bsz*heads, tgt, head_dim) -> (bsz, tgt, embed_dim).
        attn_output = (
            attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
            .transpose(1, 2)
            .reshape(bsz, tgt_len, embed_dim)
        )
        attn_output = self.out_proj(attn_output)
        return attn_output, attn_weights_reshaped, past_key_value
class BartDecoderLayer(nn.Module):
    """One Bart decoder layer: self-attention, cross-attention, feed-forward.

    Post-LayerNorm variant: each sub-block is residual-added first, then
    normalized.
    """

    def __init__(
        self,
        d_model: int = 1024,
        num_heads: int = 16,
        ffn_dim: int = 4096,
        activation: str = "gelu",
        attn_dropout: float = 0.0,
        hidden_dropout: float = 0.0,
        act_dropout: float = 0.0,
    ):
        """Args:
            d_model: hidden size of the layer.
            num_heads: attention heads for both self- and cross-attention.
            ffn_dim: inner size of the feed-forward block.
            activation: key into ACT2FN selecting the FFN activation.
            attn_dropout: dropout on attention weights.
            hidden_dropout: dropout after each sub-block output.
            act_dropout: dropout after the FFN activation.
        """
        super(BartDecoderLayer, self).__init__()
        self.embed_dim = d_model
        self.self_attn = BartAttention(
            embed_dim=self.embed_dim,
            num_heads=num_heads,
            dropout=attn_dropout,
            is_decoder=True,
        )
        self.dropout = hidden_dropout
        self.activation_fn = ACT2FN[activation]
        self.activation_dropout = act_dropout
        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.encoder_attn = BartAttention(
            self.embed_dim, num_heads, dropout=attn_dropout, is_decoder=True,
        )
        self.encoder_attn_layer_norm = nn.LayerNorm(self.embed_dim)
        self.fc1 = nn.Linear(self.embed_dim, ffn_dim)
        self.fc2 = nn.Linear(ffn_dim, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: flow.Tensor,
        attention_mask: Optional[flow.Tensor] = None,
        encoder_hidden_states: Optional[flow.Tensor] = None,
        encoder_attention_mask: Optional[flow.Tensor] = None,
        layer_head_mask: Optional[flow.Tensor] = None,
        encoder_layer_head_mask: Optional[flow.Tensor] = None,
        past_key_value: Optional[Tuple[flow.Tensor]] = None,
        output_attentions: Optional[bool] = False,
        use_cache: Optional[bool] = True,
    ):
        """Run the layer; returns a tuple starting with hidden_states,
        optionally followed by (self_attn_weights, cross_attn_weights) when
        ``output_attentions`` and by the present key/value cache when
        ``use_cache``. Cross-attention runs only when
        ``encoder_hidden_states`` is provided.
        """
        residual = hidden_states
        # Self Attention
        # decoder uni-directional self-attention cached key/values tuple is at positions 1,2
        self_attn_past_key_value = (
            past_key_value[:2] if past_key_value is not None else None
        )
        # add present self-attn cache to positions 1,2 of present_key_value tuple
        hidden_states, self_attn_weights, present_key_value = self.self_attn(
            hidden_states,
            None,
            self_attn_past_key_value,
            attention_mask,
            layer_head_mask,
            output_attentions,
        )
        if self.training:
            hidden_states = flow.nn.functional.dropout(hidden_states, p=self.dropout)
        hidden_states = residual + hidden_states
        hidden_states = self.self_attn_layer_norm(hidden_states)
        # Cross-Attention Block
        cross_attn_present_key_value = None
        cross_attn_weights = None
        if encoder_hidden_states is not None:
            residual = hidden_states
            # cross_attn cached key/values tuple is at positions 3,4 of present_key_value tuple
            cross_attn_past_key_value = (
                past_key_value[-2:] if past_key_value is not None else None
            )
            (
                hidden_states,
                cross_attn_weights,
                cross_attn_present_key_value,
            ) = self.encoder_attn(
                hidden_states,
                encoder_hidden_states,
                cross_attn_past_key_value,
                encoder_attention_mask,
                encoder_layer_head_mask,
                output_attentions,
            )
            if self.training:
                hidden_states = flow.nn.functional.dropout(hidden_states, p=self.dropout)
            hidden_states = residual + hidden_states
            hidden_states = self.encoder_attn_layer_norm(hidden_states)
            # add cross-attn to positions 3,4 of present_key_value tuple
            present_key_value = present_key_value + cross_attn_present_key_value
        # Fully Connected
        residual = hidden_states
        hidden_states = self.activation_fn(self.fc1(hidden_states))
        if self.training:
            hidden_states = flow.nn.functional.dropout(hidden_states, p=self.activation_dropout)
        hidden_states = self.fc2(hidden_states)
        if self.training:
            hidden_states = flow.nn.functional.dropout(hidden_states, p=self.dropout)
        hidden_states = residual + hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        outputs = (hidden_states,)
        if output_attentions:
            outputs += (self_attn_weights, cross_attn_weights)
        if use_cache:
            outputs += (present_key_value,)
        return outputs
class BartDecoder(nn.Module):
    """
    Transformer decoder consisting of *num_layers* layers. Each layer is a :class:`BartDecoderLayer`
    Args:
        embed_tokens (flow.nn.Embedding): output embedding; when given, shared
            with the caller (e.g. tied to the encoder embedding), otherwise a
            fresh embedding table is created.
    """

    def __init__(
        self,
        d_model: int = 1024,
        vocab_size: int = 50265,
        num_layers: int = 12,
        decoder_attn_heads: int = 16,
        decoder_ffn_dim: int = 4096,
        max_position_embeddings: int = 1024,
        activation="gelu",
        pad_token_id: int = 1,
        attn_dropout: float = 0.0,
        hidden_dropout: float = 0.0,
        act_dropout=0.0,
        decoder_layerdrop: float = 0.0,
        scale_embedding: bool = False,
        embed_tokens: Optional[nn.Embedding] = None,
    ):
        super(BartDecoder, self).__init__()
        self.dropout = hidden_dropout
        self.layerdrop = decoder_layerdrop
        self.padding_idx = pad_token_id
        self.max_target_positions = max_position_embeddings
        # Optional sqrt(d_model) input scaling, as in the original Transformer.
        self.embed_scale = math.sqrt(d_model) if scale_embedding else 1.0
        self.num_layers = num_layers
        if embed_tokens is not None:
            self.embed_tokens = embed_tokens
        else:
            self.embed_tokens = nn.Embedding(vocab_size, d_model, self.padding_idx)
        self.embed_positions = BartLearnedPositionalEmbedding(
            max_position_embeddings, d_model
        )
        self.layers = nn.ModuleList(
            [
                BartDecoderLayer(
                    d_model,
                    decoder_attn_heads,
                    decoder_ffn_dim,
                    activation,
                    attn_dropout,
                    hidden_dropout,
                    act_dropout,
                )
                for _ in range(num_layers)
            ]
        )
        self.layernorm_embedding = nn.LayerNorm(d_model)
        self.init_weights()

    def init_weights(self):
        # Applies the shared weight-init function to every submodule.
        self.apply(init_weights)

    def _prepare_decoder_attention_mask(
        self, attention_mask, input_shape, inputs_embeds, past_key_values_length
    ):
        """Combine the causal mask with the optional padding mask.

        Returns None when the target length is 1 and no padding mask is given
        (single-step decoding needs no causal mask).
        """
        # create causal mask
        # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
        combined_attention_mask = None
        if input_shape[-1] > 1:
            combined_attention_mask = _make_causal_mask(
                input_shape,
                inputs_embeds.dtype,
                inputs_embeds.device,
                past_key_values_length=past_key_values_length,
            )
        if attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            expanded_attn_mask = _expand_mask(
                attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
            combined_attention_mask = (
                expanded_attn_mask
                if combined_attention_mask is None
                else expanded_attn_mask + combined_attention_mask
            )
        return combined_attention_mask

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        head_mask=None,
        encoder_head_mask=None,
        past_key_values=None,
        inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Decode; returns a 5-tuple
        (last_hidden_state, next_cache, all_hidden_states, all_self_attns,
        all_cross_attentions), with None entries for outputs not requested.
        """
        # retrieve input_ids and inputs_embeds
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError(
                "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
            )
        elif input_ids is not None:
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError(
                "You have to specify either decoder_input_ids or decoder_inputs_embeds"
            )
        # past_key_values_length: how many positions are already cached
        past_key_values_length = (
            past_key_values[0][0].shape[2] if past_key_values is not None else 0
        )
        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input_ids) * self.embed_scale
        attention_mask = self._prepare_decoder_attention_mask(
            attention_mask, input_shape, inputs_embeds, past_key_values_length
        )
        # expand encoder attention mask
        if encoder_hidden_states is not None and encoder_attention_mask is not None:
            # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
            encoder_attention_mask = _expand_mask(
                encoder_attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
            )
        # embed positions (offset by the cached prefix length)
        positions = self.embed_positions(input_shape, past_key_values_length)
        hidden_states = inputs_embeds + positions
        hidden_states = self.layernorm_embedding(hidden_states)
        if self.training:
            hidden_states = flow.nn.functional.dropout(hidden_states, p=self.dropout)
        # decoder layers
        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = (
            () if (output_attentions and encoder_hidden_states is not None) else None
        )
        next_decoder_cache = () if use_cache else None
        # check if head_mask has a correct number of layers specified if desired
        if head_mask is not None:
            assert head_mask.size()[0] == (
                len(self.layers)
            ), f"The head_mask should be specified for {len(self.layers)} layers, but it is for {head_mask.size()[0]}."
        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://arxiv.org/abs/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            dropout_probability = random.uniform(0, 1)
            # Randomly skip whole layers during training (LayerDrop).
            if self.training and (dropout_probability < self.layerdrop):
                continue
            past_key_value = (
                past_key_values[idx] if past_key_values is not None else None
            )
            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask,
                encoder_hidden_states,
                encoder_attention_mask,
                (head_mask[idx] if head_mask is not None else None),
                (encoder_head_mask[idx] if encoder_head_mask is not None else None),
                past_key_value,
                output_attentions,
                use_cache,
            )
            hidden_states = layer_outputs[0]
            if use_cache:
                # Cache position depends on whether attentions were also returned.
                next_decoder_cache += (layer_outputs[3 if output_attentions else 1],)
            if output_attentions:
                all_self_attns += (layer_outputs[1],)
                if encoder_hidden_states is not None:
                    all_cross_attentions += (layer_outputs[2],)
        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)
        next_cache = next_decoder_cache if use_cache else None
        # last_hidden_state, past_key_value, hidden_states, attentions, cross_attentions
        return (
            hidden_states,
            next_cache,
            all_hidden_states,
            all_self_attns,
            all_cross_attentions,
        )
class CPT(nn.Module):
    """CPT encoder-decoder: a Bert encoder paired with a Bart-style decoder.

    The token embedding table is shared between encoder and decoder. The
    decoder cross-attends to one of the encoder's intermediate hidden states
    (see the indexing in :meth:`forward`).
    """

    def __init__(
        self,
        d_model: int = 1024,
        vocab_size: int = 50265,
        num_encoder_layers: int = 12,
        num_decoder_layers: int = 12,
        encoder_attn_heads: int = 16,
        decoder_attn_heads: int = 16,
        encoder_ffn_dim: int = 4096,
        decoder_ffn_dim: int = 4096,
        max_position_embeddings: int = 1024,
        activation="gelu",
        pad_token_id: int = 1,
        attn_dropout: float = 0.0,
        hidden_dropout: float = 0.0,
        act_dropout=0.0,
        decoder_layerdrop: float = 0.0,
        scale_embedding: bool = False,
        decoder_start_token_id=2,
        encoder_layernorm_eps=1e-12,
    ):
        super(CPT, self).__init__()
        self.encoder = Bert(
            vocab_size=vocab_size,
            hidden_size=d_model,
            num_layers=num_encoder_layers,
            nheads=encoder_attn_heads,
            intermediate_size=encoder_ffn_dim,
            # NOTE(review): act_dropout is passed for BOTH encoder dropouts,
            # so the hidden_dropout/attn_dropout arguments do not reach the
            # encoder — confirm this is intentional.
            hidden_dropout=act_dropout,
            attn_dropout=act_dropout,
            add_pooling_layer=False,
            layer_norm_eps=encoder_layernorm_eps,
        )
        # Token embedding table shared with the decoder.
        self.shared = self.encoder.get_input_embeddings()
        self.decoder = BartDecoder(
            d_model,
            vocab_size,
            num_decoder_layers,
            decoder_attn_heads,
            decoder_ffn_dim,
            max_position_embeddings,
            activation,
            pad_token_id,
            attn_dropout,
            hidden_dropout,
            act_dropout,
            decoder_layerdrop,
            scale_embedding,
            self.shared,
        )
        self.d_model = d_model
        self.vocab_size = vocab_size
        self.num_decoder_layers = num_decoder_layers
        self.num_encoder_layers = num_encoder_layers
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.init_weights()

    def init_weights(self):
        # Applies the shared weight-init function to every submodule.
        self.apply(init_weights)

    def get_input_embeddings(self):
        return self.shared

    def set_input_embeddings(self, value):
        # Keep encoder and decoder pointing at the same (new) table.
        self.shared = value
        self.encoder.embed_tokens = self.shared
        self.decoder.embed_tokens = self.shared

    def get_encoder(self):
        # Wraps the encoder so that output_hidden_states is always forced on;
        # forward() below needs the intermediate hidden states.
        class _Encoder(flow.nn.Module):
            def __init__(self, encoder):
                super().__init__()
                self.encoder = encoder

            def forward(self, *args, **kwargs):
                kwargs["output_hidden_states"] = True
                return self.encoder(*args, **kwargs)

        return _Encoder(self.encoder)

    def get_decoder(self):
        return self.decoder

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        decoder_input_ids=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        encoder_outputs=None,
        past_key_values=None,
        inputs_embeds=None,
        decoder_inputs_embeds=None,
        use_cache=None,
        output_attentions=None,
        output_hidden_states=None,
    ):
        """Run encoder (unless ``encoder_outputs`` is supplied) then decoder.

        Returns the decoder output tuple concatenated with the encoder output
        tuple (see the trailing comments for the field order).
        """
        # different to other models, Bart automatically creates decoder_input_ids from
        # input_ids if no decoder_input_ids are provided
        if decoder_input_ids is None and decoder_inputs_embeds is None:
            decoder_input_ids = shift_tokens_right(
                input_ids, self.pad_token_id, self.decoder_start_token_id
            )
        if encoder_outputs is None:
            encoder_outputs = self.encoder(
                input_ids,
                attention_mask,
                flow.ones_like(input_ids),
                None,
                head_mask,
                inputs_embeds,
                None,
                None,
                None,
                None,
                output_attentions,
                True,
            )
            # last_hidden_states, hidden_states, attentions
            encoder_outputs = (
                encoder_outputs[0],
                encoder_outputs[3],
                encoder_outputs[4],
            )
        # If the user passed a tuple for encoder_outputs
        elif isinstance(encoder_outputs, (tuple, list)):
            encoder_outputs = (
                encoder_outputs[0],
                encoder_outputs[1] if len(encoder_outputs) > 1 else None,
                encoder_outputs[2] if len(encoder_outputs) > 2 else None,
            )
        if isinstance(encoder_outputs, (flow.Tensor)):
            encoder_hidden_states = encoder_outputs
            encoder_outputs = (encoder_outputs,)
        else:
            # Cross-attend to the hidden state num_decoder_layers+1 from the
            # top of the encoder stack (CPT's asymmetric design), when the
            # per-layer hidden states are available.
            encoder_hidden_states = (
                encoder_outputs[1][-self.num_decoder_layers - 1]
                if encoder_outputs[1] is not None
                else None
            )
        # decoder outputs consists of (dec_features, past_key_value, dec_hidden, dec_attn)
        decoder_outputs = self.decoder(
            decoder_input_ids,
            decoder_attention_mask,
            encoder_hidden_states,
            attention_mask,
            decoder_head_mask,
            head_mask,
            past_key_values,
            decoder_inputs_embeds,
            use_cache,
            output_attentions,
            output_hidden_states,
        )
        # last_hidden_state, past_key_values, decoder_hidden_states, decoder_attentions, cross_attentnions
        # encoder_last_hidden_state, encoder_hidden_states, encoder_attentions
        return decoder_outputs + encoder_outputs
class BartDecoderWrapper(nn.Module):
"""
This wrapper class is a helper class to correctly load pretrained checkpoints when the causal language model is
used in combination with the :class:`~transformers.EncoderDecoderModel` framework.
"""
def __init__(
self,
d_model: int = 1024,
vocab_size: int = 50265,
num_layers: int = 12,
decoder_attn_heads: int = 16,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
embed_tokens: Optional[nn.Embedding] = None,
):
super(BartDecoderWrapper, self).__init__()
self.decoder = BartDecoder(
d_model,
vocab_size,
num_layers,
decoder_attn_heads,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
embed_tokens,
)
def forward(self, *args, **kwargs):
return self.decoder(*args, **kwargs)
class BartClassificationHead(nn.Module):
"""Head for sentence-level classification tasks."""
def __init__(
self, input_dim: int, inner_dim: int, num_classes: int, pooler_dropout: float
):
super().__init__()
self.dense = nn.Linear(input_dim, inner_dim)
self.dropout = nn.Dropout(p=pooler_dropout)
self.out_proj = nn.Linear(inner_dim, num_classes)
def forward(self, hidden_states: flow.Tensor):
hidden_states = self.dropout(hidden_states)
hidden_states = self.dense(hidden_states)
hidden_states = flow.tanh(hidden_states)
hidden_states = self.dropout(hidden_states)
hidden_states = self.out_proj(hidden_states)
return hidden_states
class CPTForCausalLM(nn.Module):
def __init__(
self,
d_model: int = 1024,
vocab_size: int = 50265,
num_layers: int = 12,
decoder_attn_heads: int = 16,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
embed_tokens: Optional[nn.Embedding] = None,
):
super(CPTForCausalLM, self).__init__()
self.model = BartDecoderWrapper(
d_model,
vocab_size,
num_layers,
decoder_attn_heads,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
embed_tokens,
)
self.lm_head = nn.Linear(d_model, vocab_size, bias=False)
self.vocab_size = vocab_size
self.init_weights()
def init_weights(self):
self.apply(init_weights)
def get_input_embeddings(self):
return self.model.decoder.embed_tokens
def set_input_embeddings(self, value):
self.model.decoder.embed_tokens = value
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def set_decoder(self, decoder):
self.model.decoder = decoder
def get_decoder(self):
return self.model.decoder
def forward(
self,
input_ids=None,
attention_mask=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
head_mask=None,
encoder_head_mask=None,
past_key_values=None,
inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
# decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
outputs = self.model.decoder(
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
head_mask,
encoder_head_mask,
past_key_values,
inputs_embeds,
use_cache,
output_attentions,
output_hidden_states,
)
logits = self.lm_head(outputs[0])
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.vocab_size), labels.view(-1))
return (loss, logits) + outputs[1:]
def prepare_inputs_for_generation(
self, input_ids, past=None, attention_mask=None, use_cache=None, **kwargs
):
# if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly
if attention_mask is None:
attention_mask = input_ids.new_ones(input_ids.shape)
if past:
input_ids = input_ids[:, -1:]
# first step, decoder_cached_states are empty
return {
"input_ids": input_ids, # encoder_outputs is defined. input_ids not needed
"attention_mask": attention_mask,
"past_key_values": past,
"use_cache": use_cache,
}
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
reordered_past += (
tuple(
past_state.index_select(0, beam_idx) for past_state in layer_past
),
)
return reordered_past
class CPTForMaskedLM(nn.Module):
def __init__(
self,
cls_mode: int = 2,
d_model: int = 1024,
vocab_size: int = 50265,
num_encoder_layers: int = 12,
num_decoder_layers: int = 12,
encoder_attn_heads: int = 16,
decoder_attn_heads: int = 16,
encoder_ffn_dim: int = 4096,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
decoder_start_token_id=2,
):
super(CPTForMaskedLM, self).__init__()
self.model = CPT(
d_model,
vocab_size,
num_encoder_layers,
num_decoder_layers,
encoder_attn_heads,
decoder_attn_heads,
encoder_ffn_dim,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
decoder_start_token_id,
)
self.cls_mode = cls_mode
self.register_buffer(
"final_logits_bias", flow.zeros((1, self.model.shared.num_embeddings))
)
self.lm_head = nn.Linear(d_model, self.model.shared.num_embeddings, bias=False)
self.init_weights()
def init_weights(self):
self.apply(init_weights)
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
head_mask,
decoder_head_mask,
encoder_outputs,
None,
inputs_embeds,
decoder_inputs_embeds,
use_cache,
output_attentions,
output_hidden_states,
)
hidden_states = outputs[0]
enc_hidden_states = outputs[5]
dec_logits = self.lm_head(hidden_states) + self.final_logits_bias
enc_logits = self.lm_head(enc_hidden_states) + self.final_logits_bias
return (enc_logits, dec_logits) + outputs[1:]
class CPTForConditionalGeneration(nn.Module):
def __init__(
self,
d_model: int = 1024,
vocab_size: int = 50265,
num_encoder_layers: int = 12,
num_decoder_layers: int = 12,
encoder_attn_heads: int = 16,
decoder_attn_heads: int = 16,
encoder_ffn_dim: int = 4096,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
decoder_start_token_id=2,
):
super(CPTForConditionalGeneration, self).__init__()
self.model = CPT(
d_model,
vocab_size,
num_encoder_layers,
num_decoder_layers,
encoder_attn_heads,
decoder_attn_heads,
encoder_ffn_dim,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
decoder_start_token_id,
)
self.register_buffer(
"final_logits_bias", flow.zeros((1, self.model.shared.num_embeddings))
)
self.lm_head = nn.Linear(d_model, self.model.shared.num_embeddings, bias=False)
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.vocab_size = vocab_size
self.init_weights()
def init_weights(self):
self.apply(init_weights)
def get_encoder(self):
return self.model.get_encoder()
def get_decoder(self):
return self.model.get_decoder()
def resize_token_embeddings(self, new_num_tokens: int) -> nn.Embedding:
new_embeddings = super().resize_token_embeddings(new_num_tokens)
self._resize_final_logits_bias(new_num_tokens)
return new_embeddings
def _resize_final_logits_bias(self, new_num_tokens: int) -> None:
old_num_tokens = self.final_logits_bias.shape[-1]
if new_num_tokens <= old_num_tokens:
new_bias = self.final_logits_bias[:, :new_num_tokens]
else:
extra_bias = flow.zeros(
(1, new_num_tokens - old_num_tokens),
device=self.final_logits_bias.device,
)
new_bias = flow.cat([self.final_logits_bias, extra_bias], dim=1)
self.register_buffer("final_logits_bias", new_bias)
def get_output_embeddings(self):
return self.lm_head
def set_output_embeddings(self, new_embeddings):
self.lm_head = new_embeddings
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
past_key_values=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
if labels is not None:
if decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
labels, self.pad_token_id, self.decoder_start_token_id
)
outputs = self.model(
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
head_mask,
decoder_head_mask,
encoder_outputs,
past_key_values,
inputs_embeds,
decoder_inputs_embeds,
use_cache,
output_attentions,
output_hidden_states,
)
lm_logits = self.lm_head(outputs[0]) + self.final_logits_bias
masked_lm_loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
masked_lm_loss = loss_fct(
lm_logits.view(-1, self.vocab_size), labels.view(-1)
)
# loss, logits, past_key_values, decoder_hidden_states, decoder_attentions, cross_attentions
# encoder_last_hidden_state, encoder_hidden_states, encoder_attentions
return (masked_lm_loss, lm_logits,) + outputs[1:]
def prepare_inputs_for_generation(
self,
decoder_input_ids,
past=None,
attention_mask=None,
head_mask=None,
use_cache=None,
encoder_outputs=None,
):
# cut decoder_input_ids if past is used
if past is not None:
decoder_input_ids = decoder_input_ids[:, -1:]
return {
"input_ids": None, # encoder_outputs is defined. input_ids not needed
"encoder_outputs": encoder_outputs,
"past_key_values": past,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"head_mask": head_mask,
# change this to avoid caching (presumably for debugging)
"use_cache": use_cache,
}
@staticmethod
def _expand_inputs_for_generation(
input_ids: flow.Tensor,
expand_size: int = 1,
is_encoder_decoder: bool = False,
attention_mask: flow.Tensor = None,
encoder_outputs=None,
**model_kwargs,
):
expanded_return_idx = (
flow.arange(input_ids.shape[0])
.view(-1, 1)
.repeat(1, expand_size)
.view(-1)
.to(input_ids.device)
)
input_ids = input_ids.index_select(0, expanded_return_idx)
if "token_type_ids" in model_kwargs:
token_type_ids = model_kwargs["token_type_ids"]
model_kwargs["token_type_ids"] = token_type_ids.index_select(
0, expanded_return_idx
)
if attention_mask is not None:
model_kwargs["attention_mask"] = attention_mask.index_select(
0, expanded_return_idx
)
if is_encoder_decoder:
assert encoder_outputs is not None
device = encoder_outputs.last_hidden_state.device
encoder_outputs["hidden_states"] = tuple(
h.index_select(0, expanded_return_idx.to(device))
for h in encoder_outputs["hidden_states"]
)
model_kwargs["encoder_outputs"] = encoder_outputs
return input_ids, model_kwargs
def prepare_decoder_input_ids_from_labels(self, labels: flow.Tensor):
return shift_tokens_right(
labels, self.pad_token_id, self.decoder_start_token_id
)
@staticmethod
def _reorder_cache(past, beam_idx):
reordered_past = ()
for layer_past in past:
# cached cross_attention states don't have to be reordered -> they are always the same
reordered_past += (
tuple(
past_state.index_select(0, beam_idx)
for past_state in layer_past[:2]
)
+ layer_past[2:],
)
return reordered_past
class CPTForSequenceClassification(nn.Module):
def __init__(
self,
cls_mode: int = 2,
num_labels: int = 2,
d_model: int = 1024,
vocab_size: int = 50265,
num_encoder_layers: int = 12,
num_decoder_layers: int = 12,
encoder_attn_heads: int = 16,
decoder_attn_heads: int = 16,
encoder_ffn_dim: int = 4096,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
classifier_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
decoder_start_token_id=2,
eos_token_id=2,
):
super(CPTForSequenceClassification, self).__init__()
self.model = CPT(
d_model,
vocab_size,
num_encoder_layers,
num_decoder_layers,
encoder_attn_heads,
decoder_attn_heads,
encoder_ffn_dim,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
decoder_start_token_id,
)
# Encoder for classification
if cls_mode == 1:
cls_dim = d_model
# Decoder for classification
elif cls_mode == 2:
cls_dim = d_model
# Both encoder & decoder for classification
elif cls_mode == 3:
cls_dim = d_model * 2
else:
raise NotImplementedError
self.cls_head = BartClassificationHead(
cls_dim, cls_dim, num_labels, classifier_dropout
)
init_weights(self.cls_head.dense)
init_weights(self.cls_head.out_proj)
self.cls_mode = cls_mode
self.num_labels = num_labels
self.eos_token_id = eos_token_id
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
labels=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
labels (:obj:`flow.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
num_labels - 1]`. If :obj:`num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
if labels is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
head_mask,
decoder_head_mask,
encoder_outputs,
None,
inputs_embeds,
decoder_inputs_embeds,
use_cache,
output_attentions,
output_hidden_states,
)
hidden_states = outputs[0]
enc_hidden_states = outputs[5]
enc_rep = enc_hidden_states[:, 0]
eos_mask = input_ids.eq(self.eos_token_id).to(flow.int32)
# flow.unique(eos_mask.sum(1))
if tensor_unique(eos_mask.sum(1)).shape[0] > 1:
raise ValueError("All examples must have the same number of <eos> tokens.")
dec_rep = hidden_states[eos_mask, :].view(
hidden_states.size(0), -1, hidden_states.size(-1)
)[:, -1, :]
if self.cls_mode == 1:
logits = self.cls_head(enc_rep)
elif self.cls_mode == 2:
logits = self.cls_head(dec_rep)
elif self.cls_mode == 3:
rep = flow.cat([enc_rep, dec_rep], dim=-1)
logits = self.cls_head(rep)
else:
raise NotImplementedError
loss = None
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
# loss, logits, past_key_values, decoder_hidden_states, decoder_attentions, cross_attentions,
# encoder_last_hidden_states, encoder_hidden_states, encoder_attentions
return (loss, logits) + outputs[1:]
class CPTForQuestionAnswering(nn.Module):
def __init__(
self,
cls_mode=3,
d_model: int = 1024,
vocab_size: int = 50265,
num_encoder_layers: int = 12,
num_decoder_layers: int = 12,
encoder_attn_heads: int = 16,
decoder_attn_heads: int = 16,
encoder_ffn_dim: int = 4096,
decoder_ffn_dim: int = 4096,
max_position_embeddings: int = 1024,
activation="gelu",
pad_token_id: int = 1,
attn_dropout: float = 0.0,
hidden_dropout: float = 0.0,
act_dropout=0.0,
decoder_layerdrop: float = 0.0,
scale_embedding: bool = False,
decoder_start_token_id=2,
):
super(CPTForQuestionAnswering, self).__init__()
self.num_labels = 2
self.model = CPT(
d_model,
vocab_size,
num_encoder_layers,
num_decoder_layers,
encoder_attn_heads,
decoder_attn_heads,
encoder_ffn_dim,
decoder_ffn_dim,
max_position_embeddings,
activation,
pad_token_id,
attn_dropout,
hidden_dropout,
act_dropout,
decoder_layerdrop,
scale_embedding,
decoder_start_token_id,
)
# Encoder for classification.
if cls_mode == 1:
cls_dim = d_model
# Decoder for classification.
elif cls_mode == 2:
cls_dim = d_model
# Both encoder & decoder for classification.'
elif cls_mode == 3:
cls_dim = d_model * 2
else:
raise NotImplementedError
self.qa_outputs = nn.Linear(cls_dim, self.num_labels)
init_weights(self.qa_outputs)
self.cls_mode = cls_mode
def forward(
self,
input_ids=None,
attention_mask=None,
decoder_input_ids=None,
decoder_attention_mask=None,
head_mask=None,
decoder_head_mask=None,
encoder_outputs=None,
start_positions=None,
end_positions=None,
inputs_embeds=None,
decoder_inputs_embeds=None,
use_cache=None,
output_attentions=None,
output_hidden_states=None,
):
r"""
start_positions (:obj:`flow.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
end_positions (:obj:`flow.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
are not taken into account for computing the loss.
"""
if start_positions is not None and end_positions is not None:
use_cache = False
if input_ids is None and inputs_embeds is not None:
raise NotImplementedError(
f"Passing input embeddings is currently not supported for {self.__class__.__name__}"
)
outputs = self.model(
input_ids,
attention_mask,
decoder_input_ids,
decoder_attention_mask,
head_mask,
decoder_head_mask,
encoder_outputs,
None,
inputs_embeds,
decoder_inputs_embeds,
use_cache,
output_attentions,
output_hidden_states,
)
hidden_states = outputs[0]
enc_hidden_states = outputs[0]
if self.cls_mode == 1:
logits = self.qa_outputs(enc_hidden_states)
elif self.cls_mode == 2:
logits = self.qa_outputs(hidden_states)
elif self.cls_mode == 3:
rep = flow.cat([enc_hidden_states, hidden_states], dim=-1)
logits = self.qa_outputs(rep)
else:
raise NotImplementedError
start_logits, end_logits = logits.split(1, dim=-1)
start_logits = start_logits.squeeze(-1)
end_logits = end_logits.squeeze(-1)
total_loss = None
if start_positions is not None and end_positions is not None:
# If we are on multi-GPU, split add a dimension
if len(start_positions.size()) > 1:
start_positions = start_positions.squeeze(-1)
if len(end_positions.size()) > 1:
end_positions = end_positions.squeeze(-1)
# sometimes the start/end positions are outside our model inputs, we ignore these terms
ignored_index = start_logits.size(1)
start_positions = start_positions.clamp(0, ignored_index)
start_positions = end_positions.clamp(0, ignored_index)
loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
start_loss = loss_fct(start_logits, start_positions)
end_loss = loss_fct(end_logits, end_positions)
total_loss = (start_loss + end_loss) / 2
# total_loss, start_logits, end_logits, past_key_values,
# decoder_hidden_states, decoder_attentions, cross_attentions,
# encoder_last_hidden_states, encoder_hidden_states, encoder_attentions
return (total_loss, start_logits, end_logits) + outputs[1:]
| [
"oneflow.arange",
"oneflow.cat",
"oneflow.nn.LayerNorm",
"oneflow.nn.functional.dropout",
"oneflow.nn.CrossEntropyLoss",
"oneflow.zeros",
"oneflow.nn.Dropout",
"oneflow.bmm",
"oneflow.nn.Embedding",
"oneflow.tanh",
"oneflow.nn.Linear",
"oneflow.ones_like"
] | [((1401, 1519), 'oneflow.arange', 'flow.arange', (['past_key_values_length', '(past_key_values_length + seq_len)'], {'dtype': 'flow.long', 'device': 'self.weight.device'}), '(past_key_values_length, past_key_values_length + seq_len, dtype\n =flow.long, device=self.weight.device)\n', (1412, 1519), True, 'import oneflow as flow\n'), ((2389, 2431), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2398, 2431), True, 'import oneflow.nn as nn\n'), ((2454, 2496), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2463, 2496), True, 'import oneflow.nn as nn\n'), ((2519, 2561), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2528, 2561), True, 'import oneflow.nn as nn\n'), ((2586, 2628), 'oneflow.nn.Linear', 'nn.Linear', (['embed_dim', 'embed_dim'], {'bias': 'bias'}), '(embed_dim, embed_dim, bias=bias)\n', (2595, 2628), True, 'import oneflow.nn as nn\n'), ((7918, 7954), 'oneflow.bmm', 'flow.bmm', (['attn_weights', 'value_states'], {}), '(attn_weights, value_states)\n', (7926, 7954), True, 'import oneflow as flow\n'), ((9240, 9268), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['self.embed_dim'], {}), '(self.embed_dim)\n', (9252, 9268), True, 'import oneflow.nn as nn\n'), ((9439, 9467), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['self.embed_dim'], {}), '(self.embed_dim)\n', (9451, 9467), True, 'import oneflow.nn as nn\n'), ((9487, 9521), 'oneflow.nn.Linear', 'nn.Linear', (['self.embed_dim', 'ffn_dim'], {}), '(self.embed_dim, ffn_dim)\n', (9496, 9521), True, 'import oneflow.nn as nn\n'), ((9541, 9575), 'oneflow.nn.Linear', 'nn.Linear', (['ffn_dim', 'self.embed_dim'], {}), '(ffn_dim, self.embed_dim)\n', (9550, 9575), True, 'import oneflow.nn as nn\n'), ((9608, 9636), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['self.embed_dim'], {}), '(self.embed_dim)\n', (9620, 
9636), True, 'import oneflow.nn as nn\n'), ((14808, 14829), 'oneflow.nn.LayerNorm', 'nn.LayerNorm', (['d_model'], {}), '(d_model)\n', (14820, 14829), True, 'import oneflow.nn as nn\n'), ((27615, 27646), 'oneflow.nn.Linear', 'nn.Linear', (['input_dim', 'inner_dim'], {}), '(input_dim, inner_dim)\n', (27624, 27646), True, 'import oneflow.nn as nn\n'), ((27670, 27698), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'pooler_dropout'}), '(p=pooler_dropout)\n', (27680, 27698), True, 'import oneflow.nn as nn\n'), ((27723, 27756), 'oneflow.nn.Linear', 'nn.Linear', (['inner_dim', 'num_classes'], {}), '(inner_dim, num_classes)\n', (27732, 27756), True, 'import oneflow.nn as nn\n'), ((27935, 27959), 'oneflow.tanh', 'flow.tanh', (['hidden_states'], {}), '(hidden_states)\n', (27944, 27959), True, 'import oneflow as flow\n'), ((29172, 29214), 'oneflow.nn.Linear', 'nn.Linear', (['d_model', 'vocab_size'], {'bias': '(False)'}), '(d_model, vocab_size, bias=False)\n', (29181, 29214), True, 'import oneflow.nn as nn\n'), ((33396, 33460), 'oneflow.nn.Linear', 'nn.Linear', (['d_model', 'self.model.shared.num_embeddings'], {'bias': '(False)'}), '(d_model, self.model.shared.num_embeddings, bias=False)\n', (33405, 33460), True, 'import oneflow.nn as nn\n'), ((36285, 36349), 'oneflow.nn.Linear', 'nn.Linear', (['d_model', 'self.model.shared.num_embeddings'], {'bias': '(False)'}), '(d_model, self.model.shared.num_embeddings, bias=False)\n', (36294, 36349), True, 'import oneflow.nn as nn\n'), ((48550, 48585), 'oneflow.nn.Linear', 'nn.Linear', (['cls_dim', 'self.num_labels'], {}), '(cls_dim, self.num_labels)\n', (48559, 48585), True, 'import oneflow.nn as nn\n'), ((7839, 7895), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['attn_weights'], {'p': 'self.dropout'}), '(attn_weights, p=self.dropout)\n', (7865, 7895), True, 'import oneflow as flow\n'), ((10826, 10883), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['hidden_states'], {'p': 'self.dropout'}), 
'(hidden_states, p=self.dropout)\n', (10852, 10883), True, 'import oneflow as flow\n'), ((12408, 12476), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['hidden_states'], {'p': 'self.activation_dropout'}), '(hidden_states, p=self.activation_dropout)\n', (12434, 12476), True, 'import oneflow as flow\n'), ((12579, 12636), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['hidden_states'], {'p': 'self.dropout'}), '(hidden_states, p=self.dropout)\n', (12605, 12636), True, 'import oneflow as flow\n'), ((13978, 13996), 'math.sqrt', 'math.sqrt', (['d_model'], {}), '(d_model)\n', (13987, 13996), False, 'import math\n'), ((14191, 14242), 'oneflow.nn.Embedding', 'nn.Embedding', (['vocab_size', 'd_model', 'self.padding_idx'], {}), '(vocab_size, d_model, self.padding_idx)\n', (14203, 14242), True, 'import oneflow.nn as nn\n'), ((17991, 18048), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['hidden_states'], {'p': 'self.dropout'}), '(hidden_states, p=self.dropout)\n', (18017, 18048), True, 'import oneflow as flow\n'), ((18962, 18982), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (18976, 18982), False, 'import random\n'), ((30780, 30798), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (30796, 30798), False, 'from oneflow.nn import CrossEntropyLoss, MSELoss\n'), ((33313, 33362), 'oneflow.zeros', 'flow.zeros', (['(1, self.model.shared.num_embeddings)'], {}), '((1, self.model.shared.num_embeddings))\n', (33323, 33362), True, 'import oneflow as flow\n'), ((36202, 36251), 'oneflow.zeros', 'flow.zeros', (['(1, self.model.shared.num_embeddings)'], {}), '((1, self.model.shared.num_embeddings))\n', (36212, 36251), True, 'import oneflow as flow\n'), ((37232, 37323), 'oneflow.zeros', 'flow.zeros', (['(1, new_num_tokens - old_num_tokens)'], {'device': 'self.final_logits_bias.device'}), '((1, new_num_tokens - old_num_tokens), device=self.\n final_logits_bias.device)\n', (37242, 37323), True, 
'import oneflow as flow\n'), ((37389, 37442), 'oneflow.cat', 'flow.cat', (['[self.final_logits_bias, extra_bias]'], {'dim': '(1)'}), '([self.final_logits_bias, extra_bias], dim=1)\n', (37397, 37442), True, 'import oneflow as flow\n'), ((38914, 38932), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (38930, 38932), False, 'from oneflow.nn import CrossEntropyLoss, MSELoss\n'), ((46520, 46538), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {}), '()\n', (46536, 46538), False, 'from oneflow.nn import CrossEntropyLoss, MSELoss\n'), ((51920, 51964), 'oneflow.nn.CrossEntropyLoss', 'CrossEntropyLoss', ([], {'ignore_index': 'ignored_index'}), '(ignore_index=ignored_index)\n', (51936, 51964), False, 'from oneflow.nn import CrossEntropyLoss, MSELoss\n'), ((11888, 11945), 'oneflow.nn.functional.dropout', 'flow.nn.functional.dropout', (['hidden_states'], {'p': 'self.dropout'}), '(hidden_states, p=self.dropout)\n', (11914, 11945), True, 'import oneflow as flow\n'), ((24092, 24117), 'oneflow.ones_like', 'flow.ones_like', (['input_ids'], {}), '(input_ids)\n', (24106, 24117), True, 'import oneflow as flow\n'), ((4271, 4319), 'oneflow.cat', 'flow.cat', (['[past_key_value[0], key_states]'], {'dim': '(2)'}), '([past_key_value[0], key_states], dim=2)\n', (4279, 4319), True, 'import oneflow as flow\n'), ((4347, 4397), 'oneflow.cat', 'flow.cat', (['[past_key_value[1], value_states]'], {'dim': '(2)'}), '([past_key_value[1], value_states], dim=2)\n', (4355, 4397), True, 'import oneflow as flow\n'), ((46316, 46352), 'oneflow.cat', 'flow.cat', (['[enc_rep, dec_rep]'], {'dim': '(-1)'}), '([enc_rep, dec_rep], dim=-1)\n', (46324, 46352), True, 'import oneflow as flow\n'), ((50939, 50991), 'oneflow.cat', 'flow.cat', (['[enc_hidden_states, hidden_states]'], {'dim': '(-1)'}), '([enc_hidden_states, hidden_states], dim=-1)\n', (50947, 50991), True, 'import oneflow as flow\n'), ((40390, 40421), 'oneflow.arange', 'flow.arange', (['input_ids.shape[0]'], {}), 
'(input_ids.shape[0])\n', (40401, 40421), True, 'import oneflow as flow\n')] |
from typing import Optional, Dict
import oneflow as flow
from . import RearrangeMixin, ReduceMixin
from ._einmix import _EinmixMixin
__author__ = '<NAME> & <NAME>'
class Rearrange(RearrangeMixin, flow.nn.Module):
def forward(self, input):
return self._apply_recipe(input)
class Reduce(ReduceMixin, flow.nn.Module):
def forward(self, input):
return self._apply_recipe(input)
class EinMix(_EinmixMixin, flow.nn.Module):
def _create_parameters(self, weight_shape, weight_bound, bias_shape, bias_bound):
self.weight = flow.nn.Parameter(flow.zeros(weight_shape).uniform_(-weight_bound, weight_bound),
requires_grad=True)
if bias_shape is not None:
self.bias = flow.nn.Parameter(flow.zeros(bias_shape).uniform_(-bias_bound, bias_bound),
requires_grad=True)
else:
self.bias = None
def _create_rearrange_layers(self,
pre_reshape_pattern: Optional[str],
pre_reshape_lengths: Optional[Dict],
post_reshape_pattern: Optional[str],
post_reshape_lengths: Optional[Dict],
):
self.pre_rearrange = None
if pre_reshape_pattern is not None:
self.pre_rearrange = Rearrange(pre_reshape_pattern, **pre_reshape_lengths)
self.post_rearrange = None
if post_reshape_pattern is not None:
self.post_rearrange = Rearrange(post_reshape_pattern, **post_reshape_lengths)
def forward(self, input):
if self.pre_rearrange is not None:
input = self.pre_rearrange(input)
result = flow.einsum(self.einsum_pattern, input, self.weight)
if self.bias is not None:
result += self.bias
if self.post_rearrange is not None:
result = self.post_rearrange(result)
return result
| [
"oneflow.einsum",
"oneflow.zeros"
] | [((1772, 1824), 'oneflow.einsum', 'flow.einsum', (['self.einsum_pattern', 'input', 'self.weight'], {}), '(self.einsum_pattern, input, self.weight)\n', (1783, 1824), True, 'import oneflow as flow\n'), ((577, 601), 'oneflow.zeros', 'flow.zeros', (['weight_shape'], {}), '(weight_shape)\n', (587, 601), True, 'import oneflow as flow\n'), ((779, 801), 'oneflow.zeros', 'flow.zeros', (['bias_shape'], {}), '(bias_shape)\n', (789, 801), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible import single_client as flow
def __add__(self, rhs):
return flow.math.add(self, rhs)
def __radd__(self, lhs):
return flow.math.add(lhs, self)
def __sub__(self, rhs):
return flow.math.subtract(self, rhs)
def __rsub__(self, lhs):
return flow.math.subtract(lhs, self)
def __mul__(self, rhs):
return flow.math.multiply(self, rhs)
def __rmul__(self, lhs):
return flow.math.multiply(lhs, self)
def __truediv__(self, rhs):
return flow.math.divide(self, rhs)
def __rtruediv__(self, lhs):
return flow.math.divide(lhs, self)
def __div__(self, rhs):
return flow.math.divide(self, rhs)
def __mod__(self, rhs):
return flow.math.mod(self, rhs)
def __eq__(self, rhs):
return flow.math.equal(self, rhs)
def __ne__(self, rhs):
return flow.math.not_equal(self, rhs)
def __lt__(self, rhs):
return flow.math.less(self, rhs)
def __le__(self, rhs):
return flow.math.less_equal(self, rhs)
def __gt__(self, rhs):
return flow.math.greater(self, rhs)
def __ge__(self, rhs):
return flow.math.greater_equal(self, rhs)
def RegisterBlobOperatorTraitMethod(blob_class):
    """Install arithmetic/comparison magic methods on ``blob_class``.

    Binds the module-level operator helpers (each delegating to a
    ``flow.math`` element-wise op) so blob instances support infix
    operators such as ``a + b`` and ``a < b``.
    """
    operator_impls = (
        __add__, __radd__,
        __sub__, __rsub__,
        __mul__, __rmul__,
        __truediv__, __rtruediv__,
        __div__, __mod__,
        __eq__, __ne__,
        __lt__, __le__, __gt__, __ge__,
    )
    # setattr with the helper's own __name__ is equivalent to the explicit
    # ``blob_class.__add__ = __add__`` assignments it replaces.
    for impl in operator_impls:
        setattr(blob_class, impl.__name__, impl)
| [
"oneflow.compatible.single_client.math.less_equal",
"oneflow.compatible.single_client.math.divide",
"oneflow.compatible.single_client.math.less",
"oneflow.compatible.single_client.math.equal",
"oneflow.compatible.single_client.math.greater",
"oneflow.compatible.single_client.math.mod",
"oneflow.compatib... | [((680, 704), 'oneflow.compatible.single_client.math.add', 'flow.math.add', (['self', 'rhs'], {}), '(self, rhs)\n', (693, 704), True, 'from oneflow.compatible import single_client as flow\n'), ((743, 767), 'oneflow.compatible.single_client.math.add', 'flow.math.add', (['lhs', 'self'], {}), '(lhs, self)\n', (756, 767), True, 'from oneflow.compatible import single_client as flow\n'), ((805, 834), 'oneflow.compatible.single_client.math.subtract', 'flow.math.subtract', (['self', 'rhs'], {}), '(self, rhs)\n', (823, 834), True, 'from oneflow.compatible import single_client as flow\n'), ((873, 902), 'oneflow.compatible.single_client.math.subtract', 'flow.math.subtract', (['lhs', 'self'], {}), '(lhs, self)\n', (891, 902), True, 'from oneflow.compatible import single_client as flow\n'), ((940, 969), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['self', 'rhs'], {}), '(self, rhs)\n', (958, 969), True, 'from oneflow.compatible import single_client as flow\n'), ((1008, 1037), 'oneflow.compatible.single_client.math.multiply', 'flow.math.multiply', (['lhs', 'self'], {}), '(lhs, self)\n', (1026, 1037), True, 'from oneflow.compatible import single_client as flow\n'), ((1079, 1106), 'oneflow.compatible.single_client.math.divide', 'flow.math.divide', (['self', 'rhs'], {}), '(self, rhs)\n', (1095, 1106), True, 'from oneflow.compatible import single_client as flow\n'), ((1149, 1176), 'oneflow.compatible.single_client.math.divide', 'flow.math.divide', (['lhs', 'self'], {}), '(lhs, self)\n', (1165, 1176), True, 'from oneflow.compatible import single_client as flow\n'), ((1214, 1241), 'oneflow.compatible.single_client.math.divide', 'flow.math.divide', (['self', 'rhs'], {}), '(self, rhs)\n', (1230, 1241), True, 'from oneflow.compatible import single_client as flow\n'), ((1279, 1303), 'oneflow.compatible.single_client.math.mod', 'flow.math.mod', (['self', 'rhs'], {}), '(self, rhs)\n', (1292, 1303), True, 'from oneflow.compatible import 
single_client as flow\n'), ((1340, 1366), 'oneflow.compatible.single_client.math.equal', 'flow.math.equal', (['self', 'rhs'], {}), '(self, rhs)\n', (1355, 1366), True, 'from oneflow.compatible import single_client as flow\n'), ((1403, 1433), 'oneflow.compatible.single_client.math.not_equal', 'flow.math.not_equal', (['self', 'rhs'], {}), '(self, rhs)\n', (1422, 1433), True, 'from oneflow.compatible import single_client as flow\n'), ((1470, 1495), 'oneflow.compatible.single_client.math.less', 'flow.math.less', (['self', 'rhs'], {}), '(self, rhs)\n', (1484, 1495), True, 'from oneflow.compatible import single_client as flow\n'), ((1532, 1563), 'oneflow.compatible.single_client.math.less_equal', 'flow.math.less_equal', (['self', 'rhs'], {}), '(self, rhs)\n', (1552, 1563), True, 'from oneflow.compatible import single_client as flow\n'), ((1600, 1628), 'oneflow.compatible.single_client.math.greater', 'flow.math.greater', (['self', 'rhs'], {}), '(self, rhs)\n', (1617, 1628), True, 'from oneflow.compatible import single_client as flow\n'), ((1665, 1699), 'oneflow.compatible.single_client.math.greater_equal', 'flow.math.greater_equal', (['self', 'rhs'], {}), '(self, rhs)\n', (1688, 1699), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.one_hot,
r"""
one_hot(input, num_classes=-1, on_value=1, off_value=0)
This operator generates a onehot Tensor from input Tensor.
If input Tensor's rank is `N`, the corresponding onehot Tensor's rank is `N+1`.
Flow.one_hot is aligned with tf.one_hot operator. If you want to use torch version, you can turn on_value is set to 1, off_value is set to 0.
Args:
input (Tensor): The input Tensor.
num_classes (int): The length of onehot Tensor.
on_value (Union[int, float], optional): The fill value when `x[i] == i`. Defaults to 1.
off_value (Union[int, float], optional): The fill value when `x[i] != i`. Defaults to 0.
Note:
The data type of input blob should be `int32` or `int64`.
Returns:
oneflow.Tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> input=flow.Tensor(np.array([0, 3, 1, 2]).astype(np.int32), dtype=flow.int64)
>>> out = flow._C.one_hot(input, num_classes=5)
>>> out
tensor([[1, 0, 0, 0, 0],
[0, 0, 0, 1, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0]], dtype=oneflow.int64)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1922), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.one_hot', '"""\n one_hot(input, num_classes=-1, on_value=1, off_value=0)\n This operator generates a onehot Tensor from input Tensor.\n\n If input Tensor\'s rank is `N`, the corresponding onehot Tensor\'s rank is `N+1`.\n\n Flow.one_hot is aligned with tf.one_hot operator. If you want to use torch version, you can turn on_value is set to 1, off_value is set to 0.\n\n Args:\n input (Tensor): The input Tensor.\n num_classes (int): The length of onehot Tensor.\n on_value (Union[int, float], optional): The fill value when `x[i] == i`. Defaults to 1.\n off_value (Union[int, float], optional): The fill value when `x[i] != i`. Defaults to 0.\n Note:\n\n The data type of input blob should be `int32` or `int64`.\n\n Returns:\n oneflow.Tensor.\n \n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> input=flow.Tensor(np.array([0, 3, 1, 2]).astype(np.int32), dtype=flow.int64)\n >>> out = flow._C.one_hot(input, num_classes=5)\n >>> out\n tensor([[1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0]], dtype=oneflow.int64)\n \n """'], {}), '(oneflow._C.one_hot,\n """\n one_hot(input, num_classes=-1, on_value=1, off_value=0)\n This operator generates a onehot Tensor from input Tensor.\n\n If input Tensor\'s rank is `N`, the corresponding onehot Tensor\'s rank is `N+1`.\n\n Flow.one_hot is aligned with tf.one_hot operator. If you want to use torch version, you can turn on_value is set to 1, off_value is set to 0.\n\n Args:\n input (Tensor): The input Tensor.\n num_classes (int): The length of onehot Tensor.\n on_value (Union[int, float], optional): The fill value when `x[i] == i`. Defaults to 1.\n off_value (Union[int, float], optional): The fill value when `x[i] != i`. Defaults to 0.\n Note:\n\n The data type of input blob should be `int32` or `int64`.\n\n Returns:\n oneflow.Tensor.\n \n For example:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n\n >>> input=flow.Tensor(np.array([0, 3, 1, 2]).astype(np.int32), dtype=flow.int64)\n >>> out = flow._C.one_hot(input, num_classes=5)\n >>> out\n tensor([[1, 0, 0, 0, 0],\n [0, 0, 0, 1, 0],\n [0, 1, 0, 0, 0],\n [0, 0, 1, 0, 0]], dtype=oneflow.int64)\n \n """\n )\n', (670, 1922), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.F.conv1d,
r"""
conv1d(input, weight, bias=None, stride=[1], padding=[0], dilation=[1], groups=1) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv1d.html?highlight=conv1d
Applies a 1D convolution over an input signal composed of several input
planes.
See :class:`~oneflow.nn.Conv1d` for details and output shape.
Args:
input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in_channels} , iW)`
weight: quantized filters of shape :math:`(\text{out_channels} , \frac{\text{in_channels}}{\text{groups}} , iW)`
bias: **non-quantized** bias tensor of shape :math:`(\text{out_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sW,)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padW,)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dW,)`. Default: 1
groups: split input into groups, :math:`\text{in_channels}` should be divisible by the
number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
For examples:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> import oneflow.nn as nn
>>> input = flow.Tensor(np.random.randn(33, 16, 30))
>>> filters = flow.Tensor(np.random.randn(20, 16, 5))
>>> out = nn.functional.conv1d(input, filters,stride=[1], padding=[0], dilation=[1])
""",
)
add_docstr(
oneflow.F.conv2d,
r"""
conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv2d.html?highlight=conv2d
Applies a 2D convolution over an input image composed of several input
planes.
See :class:`~oneflow.nn.Conv2d` for details and output shape.
Args:
input: quantized input tensor of shape :math:`(\text{minibatch} , \text{in_channels} , iH , iW)`
weight: quantized filters of shape :math:`(\text{out_channels} , \frac{\text{in_channels}}{\text{groups}} , kH , kW)`
bias: **non-quantized** bias tensor of shape :math:`(\text{out_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in_channels}` should be divisible by the
number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
""",
)
add_docstr(
oneflow.F.conv3d,
r"""
conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv3d.html?highlight=conv3d
Applies a 3D convolution over an input image composed of several input
planes.
See :class:`~oneflow.nn.Conv3d` for details and output shape.
Args:
input: quantized input tensor of shape
:math:`(\text{minibatch} , \text{in_channels} , iD , iH , iW)`
weight: quantized filters of shape
:math:`(\text{out_channels} , \frac{\text{in_channels}}{\text{groups}} , kD , kH , kW)`
bias: **non-quantized** bias tensor of shape
:math:`(\text{out_channels})`. The tensor type must be `torch.float`.
stride: the stride of the convolving kernel. Can be a single number or a
tuple `(sD, sH, sW)`. Default: 1
padding: implicit paddings on both sides of the input. Can be a
single number or a tuple `(padD, padH, padW)`. Default: 0
dilation: the spacing between kernel elements. Can be a single number or
a tuple `(dD, dH, dW)`. Default: 1
groups: split input into groups, :math:`\text{in_channels}` should be
divisible by the number of groups. Default: 1
padding_mode: the padding mode to use. Only "zeros" is supported for
quantized convolution at the moment. Default: "zeros"
scale: quantization scale for the output. Default: 1.0
zero_point: quantization zero_point for the output. Default: 0
dtype: quantization data type to use. Default: ``torch.quint8``
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2674), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.conv1d', '"""\n conv1d(input, weight, bias=None, stride=[1], padding=[0], dilation=[1], groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv1d.html?highlight=conv1d\n\n Applies a 1D convolution over an input signal composed of several input\n planes.\n\n See :class:`~oneflow.nn.Conv1d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iW)`\n weight: quantized filters of shape :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , iW)`\n bias: **non-quantized** bias tensor of shape :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sW,)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padW,)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dW,)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. Default: ``torch.quint8``\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> import oneflow.nn as nn\n \n >>> input = flow.Tensor(np.random.randn(33, 16, 30))\n >>> filters = flow.Tensor(np.random.randn(20, 16, 5))\n >>> out = nn.functional.conv1d(input, filters,stride=[1], padding=[0], dilation=[1])\n """'], {}), '(oneflow.F.conv1d,\n """\n conv1d(input, weight, bias=None, stride=[1], padding=[0], dilation=[1], groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv1d.html?highlight=conv1d\n\n Applies a 1D convolution over an input signal composed of several input\n planes.\n\n See :class:`~oneflow.nn.Conv1d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iW)`\n weight: quantized filters of shape :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , iW)`\n bias: **non-quantized** bias tensor of shape :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sW,)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padW,)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dW,)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. Default: ``torch.quint8``\n\n For examples:\n\n .. 
code-block:: python\n\n >>> import oneflow as flow\n >>> import numpy as np\n >>> import oneflow.nn as nn\n \n >>> input = flow.Tensor(np.random.randn(33, 16, 30))\n >>> filters = flow.Tensor(np.random.randn(20, 16, 5))\n >>> out = nn.functional.conv1d(input, filters,stride=[1], padding=[0], dilation=[1])\n """\n )\n', (670, 2674), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2670, 4333), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.conv2d', '"""\n conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv2d.html?highlight=conv2d\n\n Applies a 2D convolution over an input image composed of several input\n planes.\n\n See :class:`~oneflow.nn.Conv2d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iH , iW)`\n weight: quantized filters of shape :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , kH , kW)`\n bias: **non-quantized** bias tensor of shape :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sH, sW)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padH, padW)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. 
Default: ``torch.quint8``\n \n \n """'], {}), '(oneflow.F.conv2d,\n """\n conv2d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv2d.html?highlight=conv2d\n\n Applies a 2D convolution over an input image composed of several input\n planes.\n\n See :class:`~oneflow.nn.Conv2d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iH , iW)`\n weight: quantized filters of shape :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , kH , kW)`\n bias: **non-quantized** bias tensor of shape :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sH, sW)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padH, padW)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be divisible by the\n number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. 
Default: ``torch.quint8``\n \n \n """\n )\n', (2680, 4333), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4329, 6052), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.F.conv3d', '"""\n conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv3d.html?highlight=conv3d\n\n Applies a 3D convolution over an input image composed of several input\n planes.\n\n\n See :class:`~oneflow.nn.Conv3d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape\n :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iD , iH , iW)`\n weight: quantized filters of shape\n :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , kD , kH , kW)`\n bias: **non-quantized** bias tensor of shape\n :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sD, sH, sW)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padD, padH, padW)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dD, dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be\n divisible by the number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for\n quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. 
Default: ``torch.quint8``\n \n """'], {}), '(oneflow.F.conv3d,\n """\n conv3d(input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1) -> Tensor\n\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.functional.conv3d.html?highlight=conv3d\n\n Applies a 3D convolution over an input image composed of several input\n planes.\n\n\n See :class:`~oneflow.nn.Conv3d` for details and output shape.\n\n Args:\n input: quantized input tensor of shape\n :math:`(\\\\text{minibatch} , \\\\text{in_channels} , iD , iH , iW)`\n weight: quantized filters of shape\n :math:`(\\\\text{out_channels} , \\\\frac{\\\\text{in_channels}}{\\\\text{groups}} , kD , kH , kW)`\n bias: **non-quantized** bias tensor of shape\n :math:`(\\\\text{out_channels})`. The tensor type must be `torch.float`.\n stride: the stride of the convolving kernel. Can be a single number or a\n tuple `(sD, sH, sW)`. Default: 1\n padding: implicit paddings on both sides of the input. Can be a\n single number or a tuple `(padD, padH, padW)`. Default: 0\n dilation: the spacing between kernel elements. Can be a single number or\n a tuple `(dD, dH, dW)`. Default: 1\n groups: split input into groups, :math:`\\\\text{in_channels}` should be\n divisible by the number of groups. Default: 1\n padding_mode: the padding mode to use. Only "zeros" is supported for\n quantized convolution at the moment. Default: "zeros"\n scale: quantization scale for the output. Default: 1.0\n zero_point: quantization zero_point for the output. Default: 0\n dtype: quantization data type to use. Default: ``torch.quint8``\n \n """\n )\n', (4339, 6052), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _count(shape, begin_axis, end_axis):
cnt = 1
for i in range(begin_axis, end_axis):
cnt *= shape[i]
return cnt
def _l2_norm_numpy(x, dim, epsilon=1e-12):
square_x_sum_shape = list(x.shape)
square_x_sum_shape[dim] = 1
c = x.shape[dim]
n = int(x.size / c)
d = _count(x.shape, dim + 1, len(x.shape))
square_x_sum = np.zeros(square_x_sum_shape)
square_x_sum_flatten = square_x_sum.reshape(-1)
in_flatten = x.reshape(-1)
out = np.zeros(x.size)
for i in range(0, n):
offset = int(int((i / d)) * d * c + (i % d))
for j in range(0, c):
item = in_flatten[offset + j * d]
square_x_sum_flatten[i] = square_x_sum_flatten[i] + item * item
norm = np.sqrt(np.maximum(square_x_sum_flatten[i], epsilon))
for j in range(0, c):
index = offset + j * d
out[index] = in_flatten[index] / norm
square_x_sum = square_x_sum_flatten.reshape(square_x_sum.shape)
out = out.reshape(x.shape)
return out, square_x_sum
def _l2_norm_backward_np(dy, y, square_x_sum, dim, epsilon=1e-12):
c = dy.shape[dim]
n = int(dy.size / c)
d = _count(dy.shape, dim + 1, len(y.shape))
dx = np.zeros(dy.shape).reshape(-1)
dy_flatten = dy.reshape(-1)
y_flatten = y.reshape(-1)
square_x_sum_flatten = square_x_sum.reshape(-1)
for i in range(0, n):
norm = np.sqrt(np.maximum(square_x_sum_flatten[i], epsilon))
offset = int(int(int((i / d)) * d * c) + (i % d))
if square_x_sum_flatten[i] >= epsilon:
y_dy_inner_prod = 0
for j in range(0, c):
index = offset + j * d
y_dy_inner_prod = y_dy_inner_prod + dy_flatten[index] * y_flatten[index]
for j in range(0, c):
index = offset + j * d
dx[index] = (1 / norm) * (
dy_flatten[index] - y_dy_inner_prod * y_flatten[index]
)
else:
for j in range(0, c):
index = offset + j * d
dx[index] = (1 / norm) * dy_flatten[index]
return dx.reshape(y.shape)
def _test_l2_normalize(test_case, device, dim, shape):
    """Compare flow's l2_normalize forward/backward against the NumPy reference."""
    x_np = np.random.randn(*shape)
    expected_out, sq_sum = _l2_norm_numpy(x_np, dim)
    # Upstream gradient of sum() is all-ones, so this is the expected x.grad.
    expected_grad = _l2_norm_backward_np(
        np.ones(expected_out.shape), expected_out, sq_sum, dim
    )
    x = flow.tensor(
        x_np, dtype=flow.float32, requires_grad=True, device=flow.device(device)
    )
    out = flow.nn.functional.l2_normalize(x, dim)
    test_case.assertTrue(np.allclose(out.numpy(), expected_out, 1e-4, 1e-4))
    out.sum().backward()
    test_case.assertTrue(np.allclose(x.grad.numpy(), expected_grad, 1e-4, 1e-4))
@flow.unittest.skip_unless_1n1d()
class TestL2Normalize(flow.unittest.TestCase):
    def test_l2_normalize(test_case):
        """Run the reference comparison over every (device, dim) combination."""
        # Keyword order is preserved by OrderedDict, matching the original
        # insertion order expected by GenArgList.
        arg_dict = OrderedDict(
            test_fun=[_test_l2_normalize],
            device=["cpu", "cuda"],
            dim=[0, 1, 2, 3],
            shape=[(10, 10, 20, 30)],
        )
        for args in GenArgList(arg_dict):
            fn, *rest = args
            fn(test_case, *rest)
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.functional.l2_normalize",
"oneflow.device"
] | [((3493, 3525), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3523, 3525), True, 'import oneflow as flow\n'), ((1109, 1137), 'numpy.zeros', 'np.zeros', (['square_x_sum_shape'], {}), '(square_x_sum_shape)\n', (1117, 1137), True, 'import numpy as np\n'), ((1232, 1248), 'numpy.zeros', 'np.zeros', (['x.size'], {}), '(x.size)\n', (1240, 1248), True, 'import numpy as np\n'), ((2965, 2988), 'numpy.random.randn', 'np.random.randn', (['*shape'], {}), '(*shape)\n', (2980, 2988), True, 'import numpy as np\n'), ((3172, 3218), 'oneflow.nn.functional.l2_normalize', 'flow.nn.functional.l2_normalize', (['of_input', 'dim'], {}), '(of_input, dim)\n', (3203, 3218), True, 'import oneflow as flow\n'), ((3987, 4002), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4000, 4002), False, 'import unittest\n'), ((3363, 3384), 'numpy.ones', 'np.ones', (['np_out.shape'], {}), '(np_out.shape)\n', (3370, 3384), True, 'import numpy as np\n'), ((3630, 3643), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3641, 3643), False, 'from collections import OrderedDict\n'), ((3892, 3912), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3902, 3912), False, 'from test_util import GenArgList\n'), ((1505, 1549), 'numpy.maximum', 'np.maximum', (['square_x_sum_flatten[i]', 'epsilon'], {}), '(square_x_sum_flatten[i], epsilon)\n', (1515, 1549), True, 'import numpy as np\n'), ((1969, 1987), 'numpy.zeros', 'np.zeros', (['dy.shape'], {}), '(dy.shape)\n', (1977, 1987), True, 'import numpy as np\n'), ((2164, 2208), 'numpy.maximum', 'np.maximum', (['square_x_sum_flatten[i]', 'epsilon'], {}), '(square_x_sum_flatten[i], epsilon)\n', (2174, 2208), True, 'import numpy as np\n'), ((3133, 3152), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (3144, 3152), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestFlattenModule(flow.unittest.TestCase):
    def test_flatten(test_case):
        """Flatten via the Module, the functional API and the tensor method."""
        x = flow.Tensor(32, 2, 5, 5)
        flow.nn.init.uniform_(x)

        def check(flattened, expected_shape):
            # Each variant must only reshape — element order is unchanged.
            test_case.assertTrue(flattened.shape == flow.Size(expected_shape))
            test_case.assertTrue(
                np.array_equal(flattened.numpy().flatten(), x.numpy().flatten())
            )

        check(flow.nn.Flatten()(x), (32, 50))
        check(flow.flatten(x, start_dim=2), (32, 2, 25))
        check(x.flatten(start_dim=1), (32, 50))
        check(x.flatten(start_dim=1, end_dim=2), (32, 10, 5))
        check(flow.flatten(x), (1600,))
# Allow running this test module directly with `python <file>`.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.flatten",
"oneflow.experimental.Tensor",
"oneflow.experimental.nn.Flatten",
"oneflow.experimental.nn.init.uniform_",
"oneflow.experimental.unittest.env.eager_execution_enabled",
"oneflow.experimental.Size"
] | [((1922, 1937), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1935, 1937), False, 'import unittest\n'), ((873, 890), 'oneflow.experimental.nn.Flatten', 'flow.nn.Flatten', ([], {}), '()\n', (888, 890), True, 'import oneflow.experimental as flow\n'), ((903, 927), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(32)', '(2)', '(5)', '(5)'], {}), '(32, 2, 5, 5)\n', (914, 927), True, 'import oneflow.experimental as flow\n'), ((936, 960), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x'], {}), '(x)\n', (957, 960), True, 'import oneflow.experimental as flow\n'), ((1140, 1168), 'oneflow.experimental.flatten', 'flow.flatten', (['x'], {'start_dim': '(2)'}), '(x, start_dim=2)\n', (1152, 1168), True, 'import oneflow.experimental as flow\n'), ((1724, 1739), 'oneflow.experimental.flatten', 'flow.flatten', (['x'], {}), '(x)\n', (1736, 1739), True, 'import oneflow.experimental as flow\n'), ((690, 733), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (731, 733), True, 'import oneflow.experimental as flow\n'), ((1018, 1037), 'oneflow.experimental.Size', 'flow.Size', (['(32, 50)'], {}), '((32, 50))\n', (1027, 1037), True, 'import oneflow.experimental as flow\n'), ((1210, 1232), 'oneflow.experimental.Size', 'flow.Size', (['(32, 2, 25)'], {}), '((32, 2, 25))\n', (1219, 1232), True, 'import oneflow.experimental as flow\n'), ((1400, 1419), 'oneflow.experimental.Size', 'flow.Size', (['(32, 50)'], {}), '((32, 50))\n', (1409, 1419), True, 'import oneflow.experimental as flow\n'), ((1598, 1620), 'oneflow.experimental.Size', 'flow.Size', (['(32, 10, 5)'], {}), '((32, 10, 5))\n', (1607, 1620), True, 'import oneflow.experimental as flow\n'), ((1781, 1799), 'oneflow.experimental.Size', 'flow.Size', (['(1600,)'], {}), '((1600,))\n', (1790, 1799), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from collections import OrderedDict
from typing import Union
import oneflow._oneflow_internal
import oneflow.python.framework.tensor_tuple_util as tensor_tuple_util
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.nn.module import Module
from oneflow.python.framework.tensor import Tensor
from oneflow.python.nn.parameter import Parameter
from oneflow.python.nn.optimizer.optimizer import Optimizer
from oneflow.python.framework.function_util import FunctionConfig
@oneflow_export("nn.Graph", "nn.graph.Graph")
@experimental_api
class Graph(object):
    """Base class of a static computation graph.

    Subclass it, assign ``Module`` s as attributes (they are wrapped into
    :class:`Block` s) and override :meth:`build` to describe the computation.
    """
    # Per-subclass instance counter used to generate unique graph names.
    _child_init_cnt = dict()
    def __init__(self):
        self.config = GraphConfig()
        self._generate_name()
        # Underlying C++ graph object, keyed by the generated unique name.
        self._c_nn_graph = oneflow._oneflow_internal.NNGraph(self._name)
        self._blocks = OrderedDict()
        self._optimizers = OrderedDict()
        self._is_compiled = False
        self._state_tensortuple = None
    @property
    def name(self):
        """Unique, auto-generated name of this graph."""
        return self._name
    @property
    def training(self):
        """Whether the graph's config is set up for training."""
        return self.config.training
    def build(self, *args):
        """Describe the computation; must be overridden by subclasses."""
        raise NotImplementedError()
    def add_optimizer(
        self,
        name: str,
        optimizer: Optimizer = None,
        lr_scheduler=None,
        grad_clipping_conf=None,
        weight_decay_conf=None,
    ):
        """Register an optimizer (plus optional scheduler / gradient clipping /
        weight decay settings) under ``name``."""
        # BUGFIX: the original code called ``self.OptimizerConfig(...)``.
        # ``OptimizerConfig`` is a module-level class, not a Graph attribute,
        # so the lookup fell through __getattr__ and raised AttributeError;
        # it also dropped the required ``name`` argument.  Call the
        # module-level class and forward ``name`` explicitly.
        self._optimizers[name] = OptimizerConfig(
            name, optimizer, lr_scheduler, grad_clipping_conf, weight_decay_conf
        )
    def _generate_name(self):
        # Name pattern: "<SubclassName>_<per-class counter>" keeps names unique.
        child_name = self.__class__.__name__
        if Graph._child_init_cnt.get(child_name) is None:
            Graph._child_init_cnt[child_name] = 0
        self._name = child_name + "_" + str(Graph._child_init_cnt[child_name])
        Graph._child_init_cnt[child_name] += 1
    def _named_state(self):
        # Yield (dotted_name, tensor) for every parameter and buffer of every
        # wrapped module.
        for _, b in self._blocks.items():
            prefix = b.name + "."
            p_gen = b.origin.named_parameters()
            for n, p in p_gen:
                yield prefix + n, p
            b_gen = b.origin.named_buffers()
            for n, b in b_gen:
                yield prefix + n, b
    def _compile(self):
        assert not self._is_compiled, (
            "nn.Graph " + self._name + " has already been compiled."
        )
        # Snapshot all state tensors into a TensorTuple for the C++ runtime.
        self._state_tensortuple = tensor_tuple_util.convert_to_tensor_tuple(
            tuple(t for _, t in self._named_state())
        )
        # TODO(xuxiaoyu)
        # sess = session_ctx.GetDefaultSession()
        # sess.TryInit()
        # do job compile
        self._is_compiled = True
    def _launch(self):
        # TODO(xuxiaoyu)
        # return self._c_nn_graph.run()
        ...
    def __call__(self, *args):
        # TODO(xuxiaoyu)
        # if not self._is_compiled:
        #     self._compile()
        # return self._launch()
        ...
    def _add_block(self, name: str, module: Module = None) -> None:
        r"""Adds a module to the current graph as a block.

        The block can be accessed as an attribute using the given name.

        Args:
            name (string): name of the child block. The child block can be
                accessed from this graph using the given name
            module (Module): child module to be added to the graph.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(type(module)))
        elif not isinstance(name, str):
            raise TypeError("module name should be a string. Got {}".format(type(name)))
        elif hasattr(self, name) and name not in self._blocks:
            raise KeyError("attribute '{}' already exists".format(name))
        elif "." in name:
            raise KeyError('module name can\'t contain ".", got: {}'.format(name))
        elif name == "":
            raise KeyError('module name can\'t be empty string ""')
        self._blocks[name] = Block(self._name + ".", name, module)
    def __setattr__(self, name: str, value=None):
        # Modules are wrapped into Blocks; Optimizers must go through
        # add_optimizer(); everything else is stored normally.
        if isinstance(value, Module):
            self._add_block(name, value)
        elif isinstance(value, Optimizer):
            raise AttributeError(
                "'{}' object are not allowed to set Optimizer attribute named '{}', \
                 please use add_optimizer(...) instead.".format(
                    type(self).__name__, name
                )
            )
        else:
            object.__setattr__(self, name, value)
    def __getattr__(self, name: str):
        # Registered blocks shadow plain instance attributes.
        if "_blocks" in self.__dict__:
            if name in self._blocks:
                return self._blocks[name]
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, name)
        )
    def __repr__(self):
        lines = None
        if len(self._blocks) > 0:
            child_lines = []
            for n, m in self._blocks.items():
                mod_str = repr(m)
                mod_str = _add_indent(mod_str, 2)
                child_lines.append(mod_str)
            lines = child_lines
        main_str = "(" + self._name + ":" + self.__class__.__name__ + ":GRAPH): ("
        if lines is not None:
            main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str
class BlockType:
    """String tags describing what kind of object a :class:`Block` wraps."""
    NONE = "NONE"
    MODULE = "MODULE"
    PARAMETER = "PARAMETER"
    BUFFER = "BUFFER"
@oneflow_export("nn.graph.Block")
@experimental_api
class Block(object):
    """Graph-side wrapper of a ``Module``, ``Parameter`` or ``Tensor``.

    Records the dotted name of the wrapped object and, for Modules,
    recursively wraps children/parameters/buffers into sub-Blocks.
    """
    def __init__(
        self,
        prefix: str = "",
        name: str = "",
        value: Union[Module, Parameter, Tensor] = None,
    ):
        assert not isinstance(value, Block)
        self._name = name
        self._name_prefix = prefix
        self._type = BlockType.NONE
        self._origin = value
        self._config = BlockConfig()
        if isinstance(value, Module):
            self._type = BlockType.MODULE
            self._modules = OrderedDict()
            self._parameters = OrderedDict()
            self._buffers = OrderedDict()
            # Wrap direct (non-recursive) children, parameters and buffers so
            # the Block tree mirrors the Module tree one level at a time.
            for n, m in list(value.named_children()):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, m))
            for n, p in list(value.named_parameters("", False)):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, p))
            for n, b in list(value.named_buffers("", False)):
                self.__setattr__(n, Block(self._name_prefix + self._name + ".", n, b))
        elif isinstance(value, Parameter):
            # NOTE(review): Parameter is tested before Tensor, presumably
            # because a Parameter would also satisfy the Tensor check -- confirm.
            self._type = BlockType.PARAMETER
        elif isinstance(value, Tensor):
            self._type = BlockType.BUFFER
        else:
            raise NotImplementedError()
    @property
    def name(self):
        # Leaf name, without the dotted prefix.
        return self._name
    @property
    def name_prefix(self):
        return self._name_prefix
    @property
    def type(self):
        # One of the BlockType string tags.
        return self._type
    @property
    def origin(self):
        # The wrapped Module/Parameter/Tensor itself.
        return self._origin
    def __call__(self, *args):
        assert self._type == BlockType.MODULE
        # TODO(): with oneflow_c_api.set_scope(self.config_):
        return self._origin.__class__.__call__(self, *args)
    def forward(self, *args):
        assert self._type == BlockType.MODULE
        # Runs the wrapped module's forward with this Block standing in as self.
        return self._origin.__class__.forward(self, *args)
    def __setattr__(self, name: str, value=None) -> None:
        # Non-Block values are stored directly; Blocks are routed into the
        # matching container, rejecting duplicate names across all containers.
        if value is None or not isinstance(value, Block):
            self.__dict__[name] = value
        else:
            dicts_or_sets = (
                self.__dict__,
                self._modules,
                self._parameters,
                self._buffers,
            )
            for d in dicts_or_sets:
                if name in d:
                    raise AttributeError(
                        "'{}' object has duplicated attribute named '{}'".format(
                            self._name, name
                        )
                    )
            if value.type == BlockType.MODULE:
                self._modules[name] = value
            elif value.type == BlockType.PARAMETER:
                self._parameters[name] = value
            elif value.type == BlockType.BUFFER:
                self._buffers[name] = value
            else:
                raise AttributeError(
                    "'{}' object are not allowed to set attribute named '{}'".format(
                        type(self).__name__, name
                    )
                )
    def __getattr__(self, name: str):
        # Lookup order: instance dict, then (for modules) sub-blocks,
        # parameters and buffers, finally the wrapped object's own dict.
        if name in self.__dict__:
            return self.__dict__[name]
        if self._type == BlockType.MODULE:
            if "_modules" in self.__dict__:
                modules = self.__dict__["_modules"]
                if name in modules:
                    return modules[name]
            if "_parameters" in self.__dict__:
                _parameters = self.__dict__["_parameters"]
                if name in _parameters:
                    # TODO(): return block when need config
                    # return _parameters[name]
                    return _parameters[name].origin
            if "_buffers" in self.__dict__:
                _buffers = self.__dict__["_buffers"]
                if name in _buffers:
                    # TODO(): return block when need config
                    # return _buffers[name]
                    return _buffers[name].origin
            if name in self._origin.__dict__:
                return self._origin.__dict__[name]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, name)
        )
    def __repr__(self):
        lines = None
        if self._type == BlockType.MODULE:
            child_lines = []
            def _append_child(d):
                for _, n in d.items():
                    n_str = repr(n)
                    n_str = _add_indent(n_str, 2)
                    child_lines.append(n_str)
            _append_child(self._modules)
            _append_child(self._parameters)
            _append_child(self._buffers)
            if len(child_lines) > 0:
                lines = child_lines
        main_str = (
            "("
            + self._name
            + ":"
            + self._origin.__class__.__name__
            + ":"
            + self._type
            + "): ("
        )
        if lines is not None:
            main_str += "\n  " + "\n  ".join(lines) + "\n"
        main_str += ")"
        return main_str
@oneflow_export("nn.graph.GraphConfig")
@experimental_api
class GraphConfig(FunctionConfig):
    """Configuration of a ``nn.Graph``; a fresh graph defaults to predict mode."""
    def __init__(self):
        super().__init__()
        # New graphs start out configured for inference, not training.
        self._train(False)
    @property
    def proto(self):
        """The underlying job configuration protobuf."""
        return self.function_desc.job_config_proto
    @property
    def training(self):
        """True in train mode, False in predict mode; raises if neither is set."""
        job_conf = self.function_desc.job_config_proto
        if job_conf.has_train_conf():
            return True
        if job_conf.has_predict_conf():
            return False
        raise NotImplementedError
    def _train(self, mode: bool = True):
        # Flip the job between train and predict configuration.
        job_conf = self.function_desc.job_config_proto
        if mode:
            job_conf.mutable_train_conf()
        else:
            job_conf.mutable_predict_conf()
@oneflow_export("nn.graph.BlockConfig")
@experimental_api
class BlockConfig(object):
    """Placeholder for per-Block configuration (not yet implemented)."""
    def __init__(self):
        # TODO(xuxiaoyu): implement config for block
        # support generating Scope Object
        pass
@oneflow_export("nn.graph.OptimizerConfig")
@experimental_api
class OptimizerConfig(object):
    """Plain record bundling an optimizer with its scheduler, gradient
    clipping and weight decay settings."""
    def __init__(
        self,
        name: str,
        optimizer: Optimizer = None,
        lr_scheduler=None,
        grad_clipping_conf=None,
        weight_decay_conf=None,
    ):
        # Every constructor argument is stored under the same attribute name.
        self.name = name
        self.optimizer = optimizer
        self.lr_scheduler = lr_scheduler
        self.grad_clipping_conf = grad_clipping_conf
        self.weight_decay_conf = weight_decay_conf
def _add_indent(in_s, num_spaces):
s = in_s.split("\n")
if len(s) == 1:
return in_s
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
| [
"oneflow.python.oneflow_export.oneflow_export"
] | [((1144, 1188), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Graph"""', '"""nn.graph.Graph"""'], {}), "('nn.Graph', 'nn.graph.Graph')\n", (1158, 1188), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((6071, 6103), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.graph.Block"""'], {}), "('nn.graph.Block')\n", (6085, 6103), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((11115, 11153), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.graph.GraphConfig"""'], {}), "('nn.graph.GraphConfig')\n", (11129, 11153), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((11842, 11880), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.graph.BlockConfig"""'], {}), "('nn.graph.BlockConfig')\n", (11856, 11880), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((12061, 12103), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.graph.OptimizerConfig"""'], {}), "('nn.graph.OptimizerConfig')\n", (12075, 12103), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1444, 1457), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1455, 1457), False, 'from collections import OrderedDict\n'), ((1485, 1498), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1496, 1498), False, 'from collections import OrderedDict\n'), ((6604, 6617), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6615, 6617), False, 'from collections import OrderedDict\n'), ((6649, 6662), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6660, 6662), False, 'from collections import OrderedDict\n'), ((6691, 6704), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (6702, 6704), False, 'from collections import OrderedDict\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import collections
from typing import Optional, Sequence, Union
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
from oneflow.nn.modules.utils import _check_axis
from oneflow.ops.transpose_util import (
get_inversed_perm,
get_perm_when_transpose_axis_to_last_dim,
)
def asin_op(input):
    """
    Returns a new tensor with the arcsine of the elements of :attr:`input`.
    .. math::
        \\text{out}_{i} = \\sin^{-1}(\\text{input}_{i})
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([-0.5, 0.8, 1.0, -0.8]), dtype=flow.float32)
        >>> output = flow.asin(input)
        >>> output.shape
        oneflow.Size([4])
        >>> output
        tensor([-0.5236, 0.9273, 1.5708, -0.9273], dtype=oneflow.float32)
        >>> input1 = flow.tensor(np.array([[0.8, 1.0], [-0.6, -1.0]]), dtype=flow.float32)
        >>> output1 = input1.asin()
        >>> output1.shape
        oneflow.Size([2, 2])
        >>> output1
        tensor([[ 0.9273, 1.5708],
                [-0.6435, -1.5708]], dtype=oneflow.float32)
    """
    # Thin functional wrapper over the C++ kernel.
    return flow._C.asin(input)
def arcsin_op(input):
    """
    Alias for :func:`oneflow.asin`
    """
    # NumPy-style alias; forwards to the same C++ kernel as ``asin``.
    return flow._C.asin(input)
def asinh_op(input):
    """
    Returns a new tensor with the inverse hyperbolic sine of the elements of :attr:`input`.
    .. math::
        \\text{out}_{i} = \\sinh^{-1}(\\text{input}_{i})
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([2, 3, 4]), dtype=flow.float32)
        >>> output = flow.asinh(input)
        >>> output.shape
        oneflow.Size([3])
        >>> output
        tensor([1.4436, 1.8184, 2.0947], dtype=oneflow.float32)
        >>> input1 = flow.tensor(np.array([[-1, 0, -0.4], [5, 7, 0.8]]), dtype=flow.float32)
        >>> output1 = input1.asinh()
        >>> output1.shape
        oneflow.Size([2, 3])
        >>> output1
        tensor([[-0.8814, 0.0000, -0.3900],
                [ 2.3124, 2.6441, 0.7327]], dtype=oneflow.float32)
    """
    # Thin functional wrapper over the C++ kernel.
    return flow._C.asinh(input)
def arcsinh_op(input):
    """
    Alias for :func:`oneflow.asinh`
    """
    # NumPy-style alias; forwards to the same C++ kernel as ``asinh``.
    return flow._C.asinh(input)
@register_tensor_op("asinh")
def asinh_op_tensor(input):
    """
    See :func:`oneflow.asinh`
    """
    # Registered as the ``Tensor.asinh`` method.
    return flow._C.asinh(input)
@register_tensor_op("sin_")
def inplace_sin_op_tensor(input):
    """
    In-place version of :func:`oneflow.sin`
    """
    # Registered as ``Tensor.sin_``; mutates ``input`` in place
    # (trailing-underscore convention).
    return flow._C.sin_(input)
def atan_op(input):
    """
    Returns a new tensor with the arctangent of the elements of :attr:`input`.
    .. math::
        \\text{out}_{i} = \\tan^{-1}(\\text{input}_{i})
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([0.5, 0.6, 0.7]), dtype=flow.float32)
        >>> output = flow.atan(input)
        >>> output.shape
        oneflow.Size([3])
    """
    # Thin functional wrapper over the C++ kernel.
    return flow._C.atan(input)
def arctan_op(input):
    """
    Alias for :func:`oneflow.atan`
    """
    # NumPy-style alias; forwards to the same C++ kernel as ``atan``.
    return flow._C.atan(input)
def fmod_op(input, other):
    """
    fmod(input, other, *, out=None) -> Tensor
    Computes the element-wise remainder of division.
    The dividend and divisor may contain both for integer and floating point
    numbers. The remainder has the same sign as the dividend :attr:`input`.
    Supports broadcasting to a common shape, integer and float inputs.
    Args:
        input (Tensor): the dividend
        other (Tensor or Scalar): the divisor
    Keyword args:
        out (Tensor, optional): the output tensor.
    Example::
        >>> import oneflow as flow
        >>> flow.fmod(flow.tensor([-3., -2, -1, 1, 2, 3]), 2.)
        tensor([-1., -0., -1., 1., 0., 1.], dtype=oneflow.float32)
        >>> flow.fmod(flow.tensor([1, 2, 3, 4, 5.]), 1.5)
        tensor([1.0000, 0.5000, 0.0000, 1.0000, 0.5000], dtype=oneflow.float32)
        >>> flow.fmod(flow.tensor([1, 2, 3, 4., -5]), flow.tensor([4, 2, 1, 3., 1]))
        tensor([1., 0., 0., 1., -0.], dtype=oneflow.float32)
    """
    # Forwards directly to the C++ functional API.
    return flow._C.fmod(input, other)
def addmm(x, mat1, mat2, alpha=1, beta=1):
    """Compute ``beta * x + alpha * (mat1 @ mat2)``.

    Args:
        x: tensor added to the scaled matrix product.
        mat1: left operand of the matrix product.
        mat2: right operand of the matrix product.
        alpha: scaling factor of ``mat1 @ mat2``.
        beta: scaling factor of ``x``.

    Raises:
        ValueError: if any input has more than 2 dimensions.
    """
    if len(x.shape) > 2 or len(mat1.shape) > 2 or len(mat2.shape) > 2:
        # Fixed typo in the user-facing message ("matrixes" -> "matrices").
        raise ValueError("input matrices shape can not be greater than 2")
    # The guard above exits, so no ``else`` is needed before the happy path.
    return flow.mul(x, beta) + flow.mul(flow._C.matmul(mat1, mat2), alpha)
def addmm_op(input, mat1, mat2, alpha=1, beta=1):
    """addmm(beta=1, input, alpha=1, mat1, mat2, out=None) -> Tensor
    Performs a matrix multiplication of the matrices :attr:`mat1` and :attr:`mat2`.
    The matrix :attr:`input` is added to the final result.
    If :attr:`mat1` is a :math:`(n \\times m)` tensor, :attr:`mat2` is a
    :math:`(m \\times p)` tensor, then :attr:`input` must be
    broadcastable with a :math:`(n \\times p)` tensor
    and :attr:`out` will be a :math:`(n \\times p)` tensor.
    :attr:`alpha` and :attr:`beta` are scaling factors on matrix-vector product between
    :attr:`mat1` and :attr:`mat2` and the added matrix :attr:`input` respectively.
    .. math::
        \\text{out} = \\beta\\ \\text{input} + \\alpha\\ (\\text{mat1}_i \\mathbin{@} \\text{mat2}_i)
    For inputs of type `float` or `double`, arguments :attr:`beta` and
    :attr:`alpha` must be real numbers, otherwise they should be integers.
    Args:
        beta (Number, optional): multiplier for :attr:`input` (:math:`\\beta`)
        input (Tensor): matrix to be added
        alpha (Number, optional): multiplier for :math:`mat1 @ mat2` (:math:`\\alpha`)
        mat1 (Tensor): the first matrix to be multiplied
        mat2 (Tensor): the second matrix to be multiplied
        out (Tensor, optional): the output tensor.
    For example:
    >>> import numpy as np
    >>> import oneflow as flow
    >>> input = flow.tensor(np.array([[1,2,4],[5,11,9.1]]))
    >>> mat1 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5]]))
    >>> mat2 = flow.tensor(np.array([[7.3,1.9,7.3],[10.2,1,5.5],[3.7,2.2,8.1]]))
    >>> output = flow.addmm(input, mat1, mat2)
    >>> output
    tensor([[100.6800, 33.8300, 126.8700],
            [110.0100, 43.4800, 133.6100]], dtype=oneflow.float64)
    >>> output.shape
    oneflow.Size([2, 3])
    >>> input2 = flow.tensor(np.array([1.7]))
    >>> mat1 = flow.tensor(np.array([[1,2],[5,9.1],[7.7,1.4]]))
    >>> mat2 = flow.tensor(np.array([[1,2,3.7],[5,9.1,6.8]]))
    >>> output2 = flow.addmm(input2, mat1, mat2, alpha=1, beta=2)
    >>> output2
    tensor([[14.4000, 23.6000, 20.7000],
            [53.9000, 96.2100, 83.7800],
            [18.1000, 31.5400, 41.4100]], dtype=oneflow.float64)
    >>> output2.shape
    oneflow.Size([3, 3])
    """
    # Delegates to the module-level ``addmm`` helper, which validates ranks.
    return addmm(input, mat1, mat2, alpha, beta)
class Topk(Module):
    """Return the ``k`` largest (or smallest, with ``largest=False``) elements
    of the input and their indices along ``dim`` (last dim when None)."""
    def __init__(
        self, k, dim: int = None, largest: bool = True, sorted: bool = True
    ) -> None:
        super().__init__()
        self.k = k
        self.sorted = sorted
        self.dim = dim
        self.largest = largest
    def forward(self, input):
        # Resolve the dimension into a local variable instead of mutating
        # ``self.dim`` (the original wrote ``self.dim = -1`` inside forward,
        # silently changing module state on every call); also use ``is None``
        # rather than ``== None``.
        dim = -1 if self.dim is None else self.dim
        num_axes = len(input.shape)
        axis = dim if dim >= 0 else dim + num_axes
        assert 0 <= axis < num_axes, "axis out of range"
        if axis == num_axes - 1:
            if self.largest:
                indices = flow._C.top_k(input, self.k)
            else:
                # Smallest-k is implemented as top-k of the negated input.
                neg_input = flow.mul(input, -1)
                indices = flow._C.top_k(neg_input, self.k)
            return (flow.gather(input, axis, indices), indices)
        else:
            # Move ``axis`` to the last dim, take top-k there, then restore
            # the original axis order on the index tensor.
            perm = get_perm_when_transpose_axis_to_last_dim(num_axes, axis)
            x = flow._C.transpose(input, perm=perm)
            if self.largest:
                indices = flow._C.top_k(x, self.k)
            else:
                neg_input = flow.mul(x, -1)
                indices = flow._C.top_k(neg_input, self.k)
            indices = flow._C.transpose(indices, perm=get_inversed_perm(perm))
            return (flow.gather(input, axis, indices), indices)
def topk_op(input, k, dim: int = None, largest: bool = True, sorted: bool = True):
    """Functional entry point; see :class:`Topk` for parameter semantics."""
    return Topk(k=k, dim=dim, largest=largest, sorted=sorted)(input)
if __name__ == "__main__":
    import doctest
    # Execute the ``>>>`` examples embedded in the docstrings above.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim",
"oneflow._C.transpose",
"oneflow._C.matmul",
"oneflow._C.top_k",
"oneflow.ops.transpose_util.get_inversed_perm",
"oneflow._C.asin",
"oneflow._C.fmod",
"oneflow.gather",
"oneflow.framework.tensor.register_tensor_op",
"oneflow._C... | [((3076, 3103), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""asinh"""'], {}), "('asinh')\n", (3094, 3103), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((3214, 3240), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sin_"""'], {}), "('sin_')\n", (3232, 3240), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((1864, 1883), 'oneflow._C.asin', 'flow._C.asin', (['input'], {}), '(input)\n', (1876, 1883), True, 'import oneflow as flow\n'), ((1973, 1992), 'oneflow._C.asin', 'flow._C.asin', (['input'], {}), '(input)\n', (1985, 1992), True, 'import oneflow as flow\n'), ((2940, 2960), 'oneflow._C.asinh', 'flow._C.asinh', (['input'], {}), '(input)\n', (2953, 2960), True, 'import oneflow as flow\n'), ((3052, 3072), 'oneflow._C.asinh', 'flow._C.asinh', (['input'], {}), '(input)\n', (3065, 3072), True, 'import oneflow as flow\n'), ((3190, 3210), 'oneflow._C.asinh', 'flow._C.asinh', (['input'], {}), '(input)\n', (3203, 3210), True, 'import oneflow as flow\n'), ((3351, 3370), 'oneflow._C.sin_', 'flow._C.sin_', (['input'], {}), '(input)\n', (3363, 3370), True, 'import oneflow as flow\n'), ((3917, 3936), 'oneflow._C.atan', 'flow._C.atan', (['input'], {}), '(input)\n', (3929, 3936), True, 'import oneflow as flow\n'), ((4028, 4047), 'oneflow._C.atan', 'flow._C.atan', (['input'], {}), '(input)\n', (4040, 4047), True, 'import oneflow as flow\n'), ((5064, 5090), 'oneflow._C.fmod', 'flow._C.fmod', (['input', 'other'], {}), '(input, other)\n', (5076, 5090), True, 'import oneflow as flow\n'), ((9308, 9344), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (9323, 9344), False, 'import doctest\n'), ((5307, 5324), 'oneflow.mul', 'flow.mul', (['x', 'beta'], {}), '(x, beta)\n', (5315, 5324), True, 'import oneflow as flow\n'), ((8648, 8704), 'oneflow.ops.transpose_util.get_perm_when_transpose_axis_to_last_dim', 
'get_perm_when_transpose_axis_to_last_dim', (['num_axes', 'axis'], {}), '(num_axes, axis)\n', (8688, 8704), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n'), ((8721, 8756), 'oneflow._C.transpose', 'flow._C.transpose', (['input'], {'perm': 'perm'}), '(input, perm=perm)\n', (8738, 8756), True, 'import oneflow as flow\n'), ((5336, 5362), 'oneflow._C.matmul', 'flow._C.matmul', (['mat1', 'mat2'], {}), '(mat1, mat2)\n', (5350, 5362), True, 'import oneflow as flow\n'), ((8397, 8425), 'oneflow._C.top_k', 'flow._C.top_k', (['input', 'self.k'], {}), '(input, self.k)\n', (8410, 8425), True, 'import oneflow as flow\n'), ((8472, 8491), 'oneflow.mul', 'flow.mul', (['input', '(-1)'], {}), '(input, -1)\n', (8480, 8491), True, 'import oneflow as flow\n'), ((8518, 8550), 'oneflow._C.top_k', 'flow._C.top_k', (['neg_input', 'self.k'], {}), '(neg_input, self.k)\n', (8531, 8550), True, 'import oneflow as flow\n'), ((8571, 8604), 'oneflow.gather', 'flow.gather', (['input', 'axis', 'indices'], {}), '(input, axis, indices)\n', (8582, 8604), True, 'import oneflow as flow\n'), ((8812, 8836), 'oneflow._C.top_k', 'flow._C.top_k', (['x', 'self.k'], {}), '(x, self.k)\n', (8825, 8836), True, 'import oneflow as flow\n'), ((8883, 8898), 'oneflow.mul', 'flow.mul', (['x', '(-1)'], {}), '(x, -1)\n', (8891, 8898), True, 'import oneflow as flow\n'), ((8925, 8957), 'oneflow._C.top_k', 'flow._C.top_k', (['neg_input', 'self.k'], {}), '(neg_input, self.k)\n', (8938, 8957), True, 'import oneflow as flow\n'), ((9057, 9090), 'oneflow.gather', 'flow.gather', (['input', 'axis', 'indices'], {}), '(input, axis, indices)\n', (9068, 9090), True, 'import oneflow as flow\n'), ((9012, 9035), 'oneflow.ops.transpose_util.get_inversed_perm', 'get_inversed_perm', (['perm'], {}), '(perm)\n', (9029, 9035), False, 'from oneflow.ops.transpose_util import get_inversed_perm, get_perm_when_transpose_axis_to_last_dim\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
class Argwhere(Module):
    """Module that returns the coordinates of non-zero elements of a tensor."""
    def __init__(self, dtype) -> None:
        super().__init__()
        # Index dtype defaults to int32 when not supplied (``is None`` instead
        # of the original ``== None`` comparison).
        if dtype is None:
            dtype = flow.int32
        self._op = (
            flow.builtin_op("argwhere")
            .Input("input")
            .Output("output")
            .Output("output_size")
            .Attr("dtype", dtype)
            .Build()
        )
    def forward(self, x):
        # Run the op ONCE and unpack both outputs; the original code invoked
        # ``self._op(x)`` twice (once for the size, once for the result),
        # doubling the work.
        res, size = self._op(x)
        # Trim the padded output down to the actual number of non-zero rows.
        slice_tup_list = [[0, int(size.numpy()), 1]]
        return flow.experimental.slice(res, slice_tup_list=slice_tup_list)
@oneflow_export("argwhere")
@experimental_api
def argwhere_op(x, dtype: Optional[flow.dtype] = None):
    """This operator finds the indices of input Tensor `x` elements that are non-zero.
    It returns a list in which each element is a coordinate that points to a non-zero element in the condition.
    Args:
        x (oneflow.Tensor): The input Tensor.
        dtype (Optional[flow.dtype], optional): The data type of output. Defaults to None.
    Returns:
        oneflow.Tensor: The result Tensor.
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow.experimental as flow
        >>> flow.enable_eager_execution()
        >>> x = np.array([[0, 1, 0],
        ...            [2, 0, 2]]).astype(np.float32)
        >>> input = flow.Tensor(x)
        >>> output = flow.argwhere(input)
        >>> output
        tensor([[0, 1],
                [1, 0],
                [1, 2]], dtype=oneflow.int32)
    """
    # Builds a fresh Argwhere module per call; dtype defaults inside the module.
    return Argwhere(dtype=dtype)(x)
@register_tensor_op("argwhere")
@experimental_api
def argwhere_tebsor_op(x, dtype: Optional[flow.dtype] = None):
    """
    argwhere() -> Tensor
    See :func:`oneflow.experimental.argwhere`
    """
    # NOTE(review): the function name misspells "tensor" as "tebsor"; renaming
    # would break importers, and users call the registered tensor-op name
    # ("argwhere") anyway, so it is left as-is.
    return Argwhere(dtype=dtype)(x)
if __name__ == "__main__":
    import doctest
    # Execute the ``>>>`` examples embedded in the docstrings above.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.builtin_op",
"oneflow.experimental.slice",
"oneflow.python.oneflow_export.oneflow_export"
] | [((1409, 1435), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""argwhere"""'], {}), "('argwhere')\n", (1423, 1435), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((2423, 2453), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""argwhere"""'], {}), "('argwhere')\n", (2441, 2453), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((2714, 2750), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2729, 2750), False, 'import doctest\n'), ((1346, 1405), 'oneflow.experimental.slice', 'flow.experimental.slice', (['res'], {'slice_tup_list': 'slice_tup_list'}), '(res, slice_tup_list=slice_tup_list)\n', (1369, 1405), True, 'import oneflow as flow\n'), ((1006, 1033), 'oneflow.builtin_op', 'flow.builtin_op', (['"""argwhere"""'], {}), "('argwhere')\n", (1021, 1033), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import numpy as np
import pandas as pd
from datetime import datetime
import oneflow as flow
def InitNodes(args):
    """Configure the multi-machine oneflow environment.

    Does nothing unless ``args.num_nodes > 1``; otherwise registers the
    control port and the address list of the participating machines.
    """
    # Guard clause: single-node runs need no environment setup.
    if args.num_nodes <= 1:
        return
    assert args.num_nodes <= len(args.node_ips)
    flow.env.ctrl_port(args.ctrl_port)
    # One {"addr": ip} entry per participating machine.
    nodes = [{"addr": ip} for ip in args.node_ips[: args.num_nodes]]
    flow.env.machine(nodes)
class Snapshot(object):
    """Thin wrapper around ``flow.train.CheckPoint`` handling load/init/save."""
    def __init__(self, model_save_dir, model_load_dir):
        """Load weights from ``model_load_dir`` when given; otherwise randomly
        initialize and immediately save an ``initial_model`` snapshot."""
        self._model_save_dir = model_save_dir
        self._check_point = flow.train.CheckPoint()
        if model_load_dir:
            assert os.path.isdir(model_load_dir)
            print("Restoring model from {}.".format(model_load_dir))
            self._check_point.load(model_load_dir)
        else:
            self._check_point.init()
            self.save('initial_model')
            print("Init model on demand.")
    def save(self, name):
        """Save the model under ``<model_save_dir>/snapshot_<name>``."""
        snapshot_save_path = os.path.join(self._model_save_dir, "snapshot_{}".format(name))
        # exist_ok=True replaces the original check-then-create sequence,
        # closing its TOCTOU race without changing observable behavior.
        os.makedirs(snapshot_save_path, exist_ok=True)
        print("Saving model to {}.".format(snapshot_save_path))
        self._check_point.save(snapshot_save_path)
class StopWatch(object):
    """Minimal wall-clock timer with lap ("split") support."""
    def __init__(self):
        pass
    def start(self):
        """Begin timing; also resets the split marker."""
        self.start_time = time.time()
        self.last_split = self.start_time
    def split(self):
        """Return seconds elapsed since the previous split (or since start)."""
        prev, self.last_split = self.last_split, time.time()
        return self.last_split - prev
    def stop(self):
        """Record the stop timestamp used by :meth:`duration`."""
        self.stop_time = time.time()
    def duration(self):
        """Total seconds between :meth:`start` and :meth:`stop`."""
        return self.stop_time - self.start_time
def match_top_k(predictions, labels, top_k=1):
    """Count rows whose true label is among the ``top_k`` highest scores.

    Args:
        predictions: 2-D scores object exposing ``.numpy()`` (rows = samples).
        labels: 1-D array-like of true class indices.
        top_k: how many top entries per row to consider a hit.

    Returns:
        ``(num_matched, batch_size)``.
    """
    scores = predictions.numpy()
    # Unordered indices of the top_k entries of each row.
    top_indices = np.argpartition(scores, -top_k)[:, -top_k:]
    hits = (top_indices == labels.reshape((-1, 1))).any(axis=1)
    return hits.sum(), hits.shape[0]
class Metric(object):
    """Accumulates accuracy and throughput over ``calculate_batches`` steps
    and prints one progress line per window; built as a callback factory."""
    def __init__(self, desc='train', calculate_batches=-1, batch_size=256, top_k=5,
                 prediction_key='predictions', label_key='labels', loss_key=None):
        # NOTE(review): ``batch_size`` is accepted but never used in this class.
        self.desc = desc
        self.calculate_batches = calculate_batches
        self.top_k = top_k
        self.prediction_key = prediction_key
        self.label_key = label_key
        self.loss_key = loss_key
        # The report format depends on whether a loss value is tracked.
        if loss_key:
            self.fmt = "{}: epoch {}, iter {}, loss: {:.6f}, top_1: {:.6f}, top_k: {:.6f}, samples/s: {:.3f}"
        else:
            self.fmt = "{}: epoch {}, iter {}, top_1: {:.6f}, top_k: {:.6f}, samples/s: {:.3f}"
        self.timer = StopWatch()
        self.timer.start()
        self._clear()
    def _clear(self):
        # Reset the accumulators for a new reporting window.
        self.top_1_num_matched = 0
        self.top_k_num_matched = 0
        self.num_samples = 0.0
    def metric_cb(self, epoch, step):
        """Return a callback consuming one step's ``outputs`` dict."""
        def callback(outputs):
            if step == 0: self._clear()
            if self.prediction_key:
                # Accumulate top-1 and top-k hit counts for this batch.
                num_matched, num_samples = match_top_k(outputs[self.prediction_key],
                                                        outputs[self.label_key])
                self.top_1_num_matched += num_matched
                num_matched, _ = match_top_k(outputs[self.prediction_key],
                                             outputs[self.label_key], self.top_k)
                self.top_k_num_matched += num_matched
            else:
                num_samples = outputs[self.label_key].shape[0]
            self.num_samples += num_samples
            # Emit a report (and reset) at the end of each window.
            if (step + 1) % self.calculate_batches == 0:
                throughput = self.num_samples / self.timer.split()
                if self.prediction_key:
                    top_1_accuracy = self.top_1_num_matched / self.num_samples
                    top_k_accuracy = self.top_k_num_matched / self.num_samples
                else:
                    # Accuracy is meaningless without predictions.
                    top_1_accuracy = 0.0
                    top_k_accuracy = 0.0
                if self.loss_key:
                    loss = outputs[self.loss_key].mean()
                    print(self.fmt.format(self.desc, epoch, step + 1, loss, top_1_accuracy,
                                          top_k_accuracy, throughput), time.time())
                else:
                    print(self.fmt.format(self.desc, epoch, step + 1, top_1_accuracy,
                                          top_k_accuracy, throughput), time.time())
                self._clear()
        return callback
| [
"oneflow.env.ctrl_port",
"oneflow.env.machine",
"oneflow.train.CheckPoint"
] | [((815, 849), 'oneflow.env.ctrl_port', 'flow.env.ctrl_port', (['args.ctrl_port'], {}), '(args.ctrl_port)\n', (833, 849), True, 'import oneflow as flow\n'), ((1026, 1049), 'oneflow.env.machine', 'flow.env.machine', (['nodes'], {}), '(nodes)\n', (1042, 1049), True, 'import oneflow as flow\n'), ((1206, 1229), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (1227, 1229), True, 'import oneflow as flow\n'), ((2000, 2011), 'time.time', 'time.time', ([], {}), '()\n', (2009, 2011), False, 'import time\n'), ((2090, 2101), 'time.time', 'time.time', ([], {}), '()\n', (2099, 2101), False, 'import time\n'), ((2243, 2254), 'time.time', 'time.time', ([], {}), '()\n', (2252, 2254), False, 'import time\n'), ((1276, 1305), 'os.path.isdir', 'os.path.isdir', (['model_load_dir'], {}), '(model_load_dir)\n', (1289, 1305), False, 'import os\n'), ((1693, 1727), 'os.path.exists', 'os.path.exists', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (1707, 1727), False, 'import os\n'), ((1741, 1772), 'os.makedirs', 'os.makedirs', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (1752, 1772), False, 'import os\n'), ((4860, 4871), 'time.time', 'time.time', ([], {}), '()\n', (4869, 4871), False, 'import time\n'), ((5052, 5063), 'time.time', 'time.time', ([], {}), '()\n', (5061, 5063), False, 'import time\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow._C.swapaxes,
"""swapaxes(input, axis0, axis1) -> Tensor
This function is equivalent to NumPy’s swapaxes function.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])
>>> x.shape
oneflow.Size([2, 2, 2])
>>> flow.swapaxes(x, 0, 1).shape
oneflow.Size([2, 2, 2])
>>> flow.swapaxes(x, 0, 2).shape
oneflow.Size([2, 2, 2])
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1179), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow._C.swapaxes', '"""swapaxes(input, axis0, axis1) -> Tensor\n \n This function is equivalent to NumPy’s swapaxes function.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n \n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x.shape\n oneflow.Size([2, 2, 2])\n >>> flow.swapaxes(x, 0, 1).shape\n oneflow.Size([2, 2, 2])\n >>> flow.swapaxes(x, 0, 2).shape\n oneflow.Size([2, 2, 2])\n\n """'], {}), '(oneflow._C.swapaxes,\n """swapaxes(input, axis0, axis1) -> Tensor\n \n This function is equivalent to NumPy’s swapaxes function.\n\n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n \n >>> x = flow.tensor([[[0,1],[2,3]],[[4,5],[6,7]]])\n >>> x.shape\n oneflow.Size([2, 2, 2])\n >>> flow.swapaxes(x, 0, 1).shape\n oneflow.Size([2, 2, 2])\n >>> flow.swapaxes(x, 0, 2).shape\n oneflow.Size([2, 2, 2])\n\n """\n )\n', (670, 1179), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import math
import warnings
import numbers
from typing import List, Tuple, Optional
import oneflow as flow
from oneflow import nn
from oneflow.framework.tensor import Tensor
from oneflow.nn.utils.rnn import PackedSequence
# NOTE(<NAME>): The implementation of rnn modules are modified from
# https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/rnn.py
def apply_permutation(tensor: Tensor, permutation: Tensor, dim: int = 1) -> Tensor:
    """Reorder ``tensor`` along ``dim`` using the index order in ``permutation``."""
    reordered = tensor.index_select(dim, permutation)
    return reordered
class RNNBase(nn.Module):
    """Shared base for :class:`RNN`, :class:`LSTM` and :class:`GRU`.

    Registers the per-layer / per-direction weight and bias parameters,
    validates constructor arguments, and provides the input / hidden-state
    shape checks used by the subclasses' ``forward`` implementations.
    """

    def __init__(
        self,
        mode: str,
        input_size: int,
        hidden_size: int,
        num_layers: int = 1,
        bias: bool = True,
        batch_first: bool = False,
        dropout: float = 0.0,
        bidirectional: bool = False,
        proj_size: int = 0,
        device=None,
        dtype=None,
    ) -> None:
        # Forwarded to every flow.empty() call that allocates a parameter.
        factory_kwargs = {"device": device, "dtype": dtype}
        super().__init__()
        self.mode = mode
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bias = bias
        self.batch_first = batch_first
        self.dropout = float(dropout)
        self.bidirectional = bidirectional
        self.proj_size = proj_size
        num_directions = 2 if bidirectional else 1
        # bool is a numbers.Number subclass, so it is rejected explicitly.
        if (
            not isinstance(dropout, numbers.Number)
            or not 0 <= dropout <= 1
            or isinstance(dropout, bool)
        ):
            raise ValueError(
                "dropout should be a number in range [0, 1] "
                "representing the probability of an element being "
                "zeroed"
            )
        if dropout > 0 and num_layers == 1:
            warnings.warn(
                "dropout option adds dropout after all but last "
                "recurrent layer, so non-zero dropout expects "
                "num_layers greater than 1, but got dropout={} and "
                "num_layers={}".format(dropout, num_layers)
            )
        if proj_size < 0:
            raise ValueError(
                "proj_size should be a positive integer or zero to disable projections"
            )
        if proj_size >= hidden_size:
            raise ValueError("proj_size has to be smaller than hidden_size")
        # Gates are stacked along dim 0 of each weight matrix:
        # LSTM has 4 gates, GRU has 3, plain RNN has a single transform.
        if mode == "LSTM":
            gate_size = 4 * hidden_size
        elif mode == "GRU":
            gate_size = 3 * hidden_size
        elif mode == "RNN_TANH":
            gate_size = hidden_size
        elif mode == "RNN_RELU":
            gate_size = hidden_size
        else:
            raise ValueError("Unrecognized RNN mode: " + mode)
        self._flat_weights_names = []
        self._all_weights = []
        for layer in range(num_layers):
            for direction in range(num_directions):
                # With projections enabled the recurrent state is proj_size wide.
                real_hidden_size = proj_size if proj_size > 0 else hidden_size
                # Layers past the first consume the (possibly bidirectional)
                # output of the previous layer, not the raw input.
                layer_input_size = (
                    input_size if layer == 0 else real_hidden_size * num_directions
                )
                w_ih = nn.Parameter(
                    flow.empty((gate_size, layer_input_size), **factory_kwargs)
                )
                w_hh = nn.Parameter(
                    flow.empty((gate_size, real_hidden_size), **factory_kwargs)
                )
                b_ih = nn.Parameter(flow.empty(gate_size, **factory_kwargs))
                b_hh = nn.Parameter(flow.empty(gate_size, **factory_kwargs))
                layer_params: Tuple[Tensor, ...] = ()
                if self.proj_size == 0:
                    if bias:
                        layer_params = (w_ih, w_hh, b_ih, b_hh)
                    else:
                        layer_params = (w_ih, w_hh)
                else:
                    # Projection matrix mapping hidden_size -> proj_size.
                    w_hr = nn.Parameter(
                        flow.empty((proj_size, hidden_size), **factory_kwargs)
                    )
                    if bias:
                        layer_params = (w_ih, w_hh, b_ih, b_hh, w_hr)
                    else:
                        layer_params = (w_ih, w_hh, w_hr)
                suffix = "_reverse" if direction == 1 else ""
                param_names = ["weight_ih_l{}{}", "weight_hh_l{}{}"]
                if bias:
                    param_names += ["bias_ih_l{}{}", "bias_hh_l{}{}"]
                if self.proj_size > 0:
                    param_names += ["weight_hr_l{}{}"]
                param_names = [x.format(layer, suffix) for x in param_names]
                # Register each tensor under its conventional attribute name
                # (e.g. "weight_ih_l0", "bias_hh_l1_reverse").
                for name, param in zip(param_names, layer_params):
                    setattr(self, name, param)
                self._flat_weights_names.extend(param_names)
                self._all_weights.append(param_names)
        # Flat view over all registered parameters, in registration order;
        # kept in sync with the attributes by __setattr__ below.
        self._flat_weights = [
            (lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn)
            for wn in self._flat_weights_names
        ]
        self.reset_parameters()

    def __setattr__(self, attr, value):
        # The hasattr guard keeps this a no-op until __init__ has created
        # _flat_weights_names.
        if hasattr(self, "_flat_weights_names") and attr in self._flat_weights_names:
            # keep self._flat_weights up to date if you do self.weight = ...
            idx = self._flat_weights_names.index(attr)
            self._flat_weights[idx] = value
        super().__setattr__(attr, value)

    def to_global(self, placement=None, sbp=None):
        """Convert all parameters/buffers to global tensors with the given placement/sbp."""

        def convert(t):
            return t.to_global(placement=placement, sbp=sbp)

        self = self._apply(convert)
        # _apply replaced the parameter objects, so rebuild the flat view.
        self._flat_weights = [
            (lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn)
            for wn in self._flat_weights_names
        ]
        return self

    def reset_parameters(self) -> None:
        # Uniform init in [-1/sqrt(hidden_size), 1/sqrt(hidden_size)].
        stdv = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
        for weight in self.parameters():
            nn.init.uniform_(weight, -stdv, stdv)

    def check_input(self, input: Tensor, batch_sizes: Optional[Tensor]) -> None:
        # Packed input is 2-D (flattened data); padded input is 3-D.
        expected_input_dim = 2 if batch_sizes is not None else 3
        if input.dim() != expected_input_dim:
            raise RuntimeError(
                "input must have {} dimensions, got {}".format(
                    expected_input_dim, input.dim()
                )
            )
        if self.input_size != input.size(-1):
            raise RuntimeError(
                "input.size(-1) must be equal to input_size. Expected {}, got {}".format(
                    self.input_size, input.size(-1)
                )
            )

    def get_expected_hidden_size(
        self, input: Tensor, batch_sizes: Optional[Tensor]
    ) -> Tuple[int, int, int]:
        """Return the (num_layers * num_directions, batch, state_size) shape hx must have."""
        if batch_sizes is not None:
            # Packed sequences store the largest batch size first.
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)
        num_directions = 2 if self.bidirectional else 1
        if self.proj_size > 0:
            expected_hidden_size = (
                self.num_layers * num_directions,
                mini_batch,
                self.proj_size,
            )
        else:
            expected_hidden_size = (
                self.num_layers * num_directions,
                mini_batch,
                self.hidden_size,
            )
        return expected_hidden_size

    def check_hidden_size(
        self,
        hx: Tensor,
        expected_hidden_size: Tuple[int, int, int],
        msg: str = "Expected hidden size {}, got {}",
    ) -> None:
        if hx.size() != expected_hidden_size:
            raise RuntimeError(msg.format(expected_hidden_size, list(hx.size())))

    def check_forward_args(
        self, input: Tensor, hidden: Tensor, batch_sizes: Optional[Tensor]
    ):
        # Raises RuntimeError on any shape mismatch; returns None on success.
        self.check_input(input, batch_sizes)
        expected_hidden_size = self.get_expected_hidden_size(input, batch_sizes)
        self.check_hidden_size(hidden, expected_hidden_size)

    def permute_hidden(self, hx: Tensor, permutation: Optional[Tensor]):
        # No-op unless the input was a batch-sorted PackedSequence.
        if permutation is None:
            return hx
        return apply_permutation(hx, permutation)

    def extra_repr(self) -> str:
        # Only non-default constructor arguments are shown in repr().
        s = "{input_size}, {hidden_size}"
        if self.proj_size != 0:
            s += ", proj_size={proj_size}"
        if self.num_layers != 1:
            s += ", num_layers={num_layers}"
        if self.bias is not True:
            s += ", bias={bias}"
        if self.batch_first is not False:
            s += ", batch_first={batch_first}"
        if self.dropout != 0:
            s += ", dropout={dropout}"
        if self.bidirectional is not False:
            s += ", bidirectional={bidirectional}"
        return s.format(**self.__dict__)

    @property
    def all_weights(self) -> List[List[nn.Parameter]]:
        """Parameters grouped per layer/direction, in registration order."""
        return [
            [getattr(self, weight) for weight in weights]
            for weights in self._all_weights
        ]
class RNN(RNNBase):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/generated/torch.nn.RNN.html.

    Applies a multi-layer Elman RNN with :math:`\tanh` or :math:`\text{ReLU}` non-linearity
    to an input sequence.
    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        h_t = \tanh(W_{ih} x_t + b_{ih} + W_{hh} h_{(t-1)} + b_{hh})

    where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is
    the input at time `t`, and :math:`h_{(t-1)}` is the hidden state of the
    previous layer at time `t-1` or the initial hidden state at time `0`.
    If :attr:`nonlinearity` is ``'relu'``, then :math:`\text{ReLU}` is used instead of :math:`\tanh`.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
            would mean stacking two RNNs together to form a `stacked RNN`,
            with the second RNN taking in outputs of the first RNN and
            computing the final results. Default: 1
        nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: If ``True``, then the input and output tensors are provided
            as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
            Note that this does not apply to hidden or cell states. See the
            Inputs/Outputs sections below for details. Default: ``False``
        dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
            RNN layer except the last layer, with dropout probability equal to
            :attr:`dropout`. Default: 0
        bidirectional: If ``True``, becomes a bidirectional RNN. Default: ``False``

    Inputs: input, h_0
        * **input**: tensor of shape :math:`(L, N, H_{in})` when ``batch_first=False`` or
          :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
          the input sequence.
        * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
          state for each element in the batch. Defaults to zeros if not provided.

        where:

        .. math::
            \begin{aligned}
                N ={} & \text{batch size} \\
                L ={} & \text{sequence length} \\
                D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
                H_{in} ={} & \text{input\_size} \\
                H_{out} ={} & \text{hidden\_size}
            \end{aligned}

    Outputs: output, h_n
        * **output**: tensor of shape :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
          :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
          `(h_t)` from the last layer of the RNN, for each `t`.
        * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
          for each element in the batch.

    Attributes:
        weight_ih_l[k]: the learnable input-hidden weights of the k-th layer,
            of shape `(hidden_size, input_size)` for `k = 0`. Otherwise, the shape is
            `(hidden_size, num_directions * hidden_size)`
        weight_hh_l[k]: the learnable hidden-hidden weights of the k-th layer,
            of shape `(hidden_size, hidden_size)`
        bias_ih_l[k]: the learnable input-hidden bias of the k-th layer,
            of shape `(hidden_size)`
        bias_hh_l[k]: the learnable hidden-hidden bias of the k-th layer,
            of shape `(hidden_size)`

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    .. note::
        For bidirectional RNNs, forward and backward are directions 0 and 1 respectively.
        Example of splitting the output layers when ``batch_first=False``:
        ``output.view((seq_len, batch, num_directions, hidden_size))``.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np

        >>> rnn = flow.nn.RNN(10, 20, 2)
        >>> input = flow.tensor(np.random.randn(5, 3, 10), dtype=flow.float32)
        >>> h0 = flow.tensor(np.random.randn(2, 3, 20), dtype=flow.float32)
        >>> output, hn = rnn(input, h0)
        >>> output.size()
        oneflow.Size([5, 3, 20])
    """

    def __init__(self, *args, **kwargs):
        if "proj_size" in kwargs:
            raise ValueError(
                "proj_size argument is only supported for LSTM, not RNN or GRU"
            )
        # nonlinearity is consumed here (popped) before RNNBase sees kwargs;
        # it only selects the internal mode string.
        self.nonlinearity = kwargs.pop("nonlinearity", "tanh")
        if self.nonlinearity == "tanh":
            mode = "RNN_TANH"
        elif self.nonlinearity == "relu":
            mode = "RNN_RELU"
        else:
            raise ValueError("Unknown nonlinearity '{}'".format(self.nonlinearity))
        super().__init__(mode, *args, **kwargs)

    def forward(self, input, hx=None):  # noqa: F811
        orig_input = input
        if isinstance(orig_input, PackedSequence):
            # Packed path: run on the flat data tensor and remember the
            # (un)sorting indices so the hidden state can be permuted back.
            input = orig_input.data
            batch_sizes = orig_input.batch_sizes
            sorted_indices = orig_input.sorted_indices
            unsorted_indices = orig_input.unsorted_indices
            max_batch_size = int(batch_sizes[0])
        else:
            batch_sizes = None
            is_batched = input.dim() == 3
            batch_dim = 0 if self.batch_first else 1
            if not is_batched:
                # Promote unbatched (2-D) input/hx to a batch of one;
                # the extra dim is stripped again before returning.
                input = input.unsqueeze(batch_dim)
                if hx is not None:
                    if hx.dim() != 2:
                        raise RuntimeError(
                            f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
                        )
                    hx = hx.unsqueeze(1)
            else:
                if hx is not None and hx.dim() != 3:
                    raise RuntimeError(
                        f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
                    )
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None
        if hx is None:
            # Default initial hidden state: zeros matching the input's dtype
            # and device (or sbp/placement for global tensors).
            num_directions = 2 if self.bidirectional else 1
            if input.is_global:
                hx = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                hx = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
        # Re-collect the (possibly replaced) parameters for the functional call.
        self._flat_weights = [
            (lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn)
            for wn in self._flat_weights_names
        ]
        assert hx is not None
        self.check_forward_args(input, hx, batch_sizes)
        assert self.mode == "RNN_TANH" or self.mode == "RNN_RELU"
        # Dispatch to the fused functional kernels; the packed variant takes
        # batch_sizes in place of the batch_first flag.
        if batch_sizes is None:
            if self.mode == "RNN_TANH":
                result = flow._C.rnn_tanh(
                    input,
                    hx,
                    self._flat_weights,
                    self.bias,
                    self.num_layers,
                    self.dropout,
                    self.training,
                    self.bidirectional,
                    self.batch_first,
                )
            else:
                result = flow._C.rnn_relu(
                    input,
                    hx,
                    self._flat_weights,
                    self.bias,
                    self.num_layers,
                    self.dropout,
                    self.training,
                    self.bidirectional,
                    self.batch_first,
                )
        else:
            if self.mode == "RNN_TANH":
                result = flow._C.rnn_tanh(
                    input,
                    batch_sizes,
                    hx,
                    self._flat_weights,
                    self.bias,
                    self.num_layers,
                    self.dropout,
                    self.training,
                    self.bidirectional,
                )
            else:
                result = flow._C.rnn_relu(
                    input,
                    batch_sizes,
                    hx,
                    self._flat_weights,
                    self.bias,
                    self.num_layers,
                    self.dropout,
                    self.training,
                    self.bidirectional,
                )
        output = result[0]
        hidden = result[1]
        if isinstance(orig_input, PackedSequence):
            # Re-pack the output and undo the batch sorting on the hidden state.
            output_packed = PackedSequence(
                output, batch_sizes, sorted_indices, unsorted_indices
            )
            return output_packed, self.permute_hidden(hidden, unsorted_indices)
        if not is_batched:
            # Strip the batch dimension that was added above.
            output = output.squeeze(batch_dim)
            hidden = hidden.squeeze(1)
        return output, self.permute_hidden(hidden, unsorted_indices)
class LSTM(RNNBase):
    r"""The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/1.10/_modules/torch/nn/modules/rnn.html#LSTM.

    Applies a multi-layer long short-term memory (LSTM) RNN to an input sequence.
    For each element in the input sequence, each layer computes the following
    function:

    .. math::
        \begin{array}{ll} \\
            i_t = \sigma(W_{ii} x_t + b_{ii} + W_{hi} h_{t-1} + b_{hi}) \\
            f_t = \sigma(W_{if} x_t + b_{if} + W_{hf} h_{t-1} + b_{hf}) \\
            g_t = \tanh(W_{ig} x_t + b_{ig} + W_{hg} h_{t-1} + b_{hg}) \\
            o_t = \sigma(W_{io} x_t + b_{io} + W_{ho} h_{t-1} + b_{ho}) \\
            c_t = f_t \odot c_{t-1} + i_t \odot g_t \\
            h_t = o_t \odot \tanh(c_t) \\
        \end{array}

    where :math:`h_t` is the hidden state at time `t`, :math:`c_t` is the cell
    state at time `t`, :math:`x_t` is the input at time `t`, :math:`h_{t-1}`
    is the hidden state of the layer at time `t-1` or the initial hidden
    state at time `0`, and :math:`i_t`, :math:`f_t`, :math:`g_t`,
    :math:`o_t` are the input, forget, cell, and output gates, respectively.
    :math:`\sigma` is the sigmoid function, and :math:`\odot` is the Hadamard product.

    In a multilayer LSTM, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
    (:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
    dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
    variable which is :math:`0` with probability :attr:`dropout`.

    If ``proj_size > 0`` is specified, LSTM with projections will be used. This changes
    the LSTM cell in the following way. First, the dimension of :math:`h_t` will be changed from
    ``hidden_size`` to ``proj_size`` (dimensions of :math:`W_{hi}` will be changed accordingly).
    Second, the output hidden state of each layer will be multiplied by a learnable projection
    matrix: :math:`h_t = W_{hr}h_t`. Note that as a consequence of this, the output
    of LSTM network will be of different shape as well. See Inputs/Outputs sections below for exact
    dimensions of all variables. You can find more details in https://arxiv.org/abs/1402.1128.

    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
            would mean stacking two LSTMs together to form a `stacked LSTM`,
            with the second LSTM taking in outputs of the first LSTM and
            computing the final results. Default: 1
        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
            Default: ``True``
        batch_first: If ``True``, then the input and output tensors are provided
            as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
            Note that this does not apply to hidden or cell states. See the
            Inputs/Outputs sections below for details. Default: ``False``
        dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
            LSTM layer except the last layer, with dropout probability equal to
            :attr:`dropout`. Default: 0
        bidirectional: If ``True``, becomes a bidirectional LSTM. Default: ``False``
        proj_size: If ``> 0``, will use LSTM with projections of corresponding size. Default: 0

    Inputs: input, (h_0, c_0)
        * **input**: tensor of shape :math:`(L, N, H_{in})` when ``batch_first=False`` or
          :math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
          the input sequence.
        * **h_0**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the
          initial hidden state for each element in the batch.
          Defaults to zeros if (h_0, c_0) is not provided.
        * **c_0**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
          initial cell state for each element in the batch.
          Defaults to zeros if (h_0, c_0) is not provided.

        where:

        .. math::
            \begin{aligned}
                N ={} & \text{batch size} \\
                L ={} & \text{sequence length} \\
                D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
                H_{in} ={} & \text{input\_size} \\
                H_{cell} ={} & \text{hidden\_size} \\
                H_{out} ={} & \text{proj\_size if } \text{proj\_size}>0 \text{ otherwise hidden\_size} \\
            \end{aligned}

    Outputs: output, (h_n, c_n)
        * **output**: tensor of shape :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
          :math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
          `(h_t)` from the last layer of the LSTM, for each `t`.
        * **h_n**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the
          final hidden state for each element in the batch.
        * **c_n**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{cell})` containing the
          final cell state for each element in the batch.

    Attributes:
        weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
            `(W_ii|W_if|W_ig|W_io)`, of shape `(4*hidden_size, input_size)` for `k = 0`.
            Otherwise, the shape is `(4*hidden_size, num_directions * hidden_size)`
        weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
            `(W_hi|W_hf|W_hg|W_ho)`, of shape `(4*hidden_size, hidden_size)`. If ``proj_size > 0``
            was specified, the shape will be `(4*hidden_size, proj_size)`.
        bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
            `(b_ii|b_if|b_ig|b_io)`, of shape `(4*hidden_size)`
        bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
            `(b_hi|b_hf|b_hg|b_ho)`, of shape `(4*hidden_size)`
        weight_hr_l[k] : the learnable projection weights of the :math:`\text{k}^{th}` layer
            of shape `(proj_size, hidden_size)`. Only present when ``proj_size > 0`` was
            specified.

    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`

    .. note::
        For bidirectional LSTMs, forward and backward are directions 0 and 1 respectively.
        Example of splitting the output layers when ``batch_first=False``:
        ``output.view(seq_len, batch, num_directions, hidden_size)``.

    For example:

    .. code-block:: python

        >>> import oneflow as flow
        >>> import numpy as np

        >>> rnn = flow.nn.LSTM(10, 20, 2)
        >>> input = flow.tensor(np.random.randn(5, 3, 10), dtype=flow.float32)
        >>> h0 = flow.tensor(np.random.randn(2, 3, 20), dtype=flow.float32)
        >>> c0 = flow.tensor(np.random.randn(2, 3, 20), dtype=flow.float32)
        >>> output, (hn, cn) = rnn(input, (h0, c0))
        >>> output.size()
        oneflow.Size([5, 3, 20])
    """

    def __init__(self, *args, **kwargs):
        super().__init__("LSTM", *args, **kwargs)

    def get_expected_cell_size(
        self, input: Tensor, batch_sizes: Optional[Tensor]
    ) -> Tuple[int, int, int]:
        # Unlike the hidden state, the cell state is always hidden_size wide,
        # even when proj_size > 0.
        if batch_sizes is not None:
            mini_batch = int(batch_sizes[0])
        else:
            mini_batch = input.size(0) if self.batch_first else input.size(1)
        num_directions = 2 if self.bidirectional else 1
        expected_hidden_size = (
            self.num_layers * num_directions,
            mini_batch,
            self.hidden_size,
        )
        return expected_hidden_size

    def check_forward_args(
        self,
        input: Tensor,
        hidden: Tuple[Tensor, Tensor],
        batch_sizes: Optional[Tensor],
    ):
        # hidden is the (h_0, c_0) pair; each element has its own expected shape.
        self.check_input(input, batch_sizes)
        self.check_hidden_size(
            hidden[0],
            self.get_expected_hidden_size(input, batch_sizes),
            "Expected hidden[0] size {}, got {}",
        )
        self.check_hidden_size(
            hidden[1],
            self.get_expected_cell_size(input, batch_sizes),
            "Expected hidden[1] size {}, got {}",
        )

    def permute_hidden(
        self, hx: Tuple[Tensor, Tensor], permutation: Optional[Tensor]
    ) -> Tuple[Tensor, Tensor]:
        # Permute both h and c along the batch dimension.
        if permutation is None:
            return hx
        return (
            apply_permutation(hx[0], permutation),
            apply_permutation(hx[1], permutation),
        )

    def forward(self, input, hx=None):
        orig_input = input
        batch_sizes = None
        if isinstance(orig_input, PackedSequence):
            # Packed path: run on the flat data tensor and remember the
            # (un)sorting indices so the states can be permuted back.
            input = orig_input.data
            batch_sizes = orig_input.batch_sizes
            sorted_indices = orig_input.sorted_indices
            unsorted_indices = orig_input.unsorted_indices
            max_batch_size = int(batch_sizes[0])
        else:
            batch_sizes = None
            is_batched = input.dim() == 3
            batch_dim = 0 if self.batch_first else 1
            if not is_batched:
                # Promote unbatched (2-D) input to a batch of one; the extra
                # dim is stripped again before returning.
                input = input.unsqueeze(batch_dim)
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None
        if hx is None:
            # Default initial (h_0, c_0): zeros matching the input's dtype
            # and device (or sbp/placement for global tensors). h uses
            # proj_size when projections are enabled; c always uses hidden_size.
            num_directions = 2 if self.bidirectional else 1
            real_hidden_size = (
                self.proj_size if self.proj_size > 0 else self.hidden_size
            )
            if input.is_global:
                h_zeros = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    real_hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
                c_zeros = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                h_zeros = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    real_hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
                c_zeros = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
            hx = (h_zeros, c_zeros)
        else:
            if batch_sizes is None:  # If not PackedSequence input.
                if is_batched:
                    if hx[0].dim() != 3 or hx[1].dim() != 3:
                        msg = (
                            "For batched 3-D input, hx and cx should "
                            f"also be 3-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
                        )
                        raise RuntimeError(msg)
                else:
                    if hx[0].dim() != 2 or hx[1].dim() != 2:
                        msg = (
                            "For unbatched 2-D input, hx and cx should "
                            f"also be 2-D but got ({hx[0].dim()}-D, {hx[1].dim()}-D) tensors"
                        )
                        raise RuntimeError(msg)
                    hx = (hx[0].unsqueeze(1), hx[1].unsqueeze(1))
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
        self.check_forward_args(input, hx, batch_sizes)
        # Re-collect the (possibly replaced) parameters for the functional call.
        self._flat_weights = [
            (lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn)
            for wn in self._flat_weights_names
        ]
        # Dispatch to the fused functional kernel; the packed variant takes
        # batch_sizes in place of the batch_first flag.
        if batch_sizes is None:
            result = flow._C.lstm(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
        else:
            result = flow._C.lstm(
                input,
                batch_sizes,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
            )
        output = result[0]
        # result[1:] is the (h_n, c_n) pair.
        hidden = result[1:]
        if isinstance(orig_input, PackedSequence):
            # Re-pack the output and undo the batch sorting on the states.
            output_packed = PackedSequence(
                output, batch_sizes, sorted_indices, unsorted_indices
            )
            return output_packed, self.permute_hidden(hidden, unsorted_indices)
        else:
            if not is_batched:
                # Strip the batch dimension that was added above.
                output = output.squeeze(batch_dim)
                hidden = (hidden[0].squeeze(1), hidden[1].squeeze(1))
            return output, self.permute_hidden(hidden, unsorted_indices)
class GRU(RNNBase):
"""The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/1.10/_modules/torch/nn/modules/rnn.html#GRU.
Applies a multi-layer gated recurrent unit (GRU) RNN to an input sequence.
For each element in the input sequence, each layer computes the following
function:
.. math::
\begin{array}{ll}
r_t = \sigma(W_{ir} x_t + b_{ir} + W_{hr} h_{(t-1)} + b_{hr}) \\
z_t = \sigma(W_{iz} x_t + b_{iz} + W_{hz} h_{(t-1)} + b_{hz}) \\
n_t = \tanh(W_{in} x_t + b_{in} + r_t * (W_{hn} h_{(t-1)}+ b_{hn})) \\
h_t = (1 - z_t) * n_t + z_t * h_{(t-1)}
\end{array}
where :math:`h_t` is the hidden state at time `t`, :math:`x_t` is the input
at time `t`, :math:`h_{(t-1)}` is the hidden state of the layer
at time `t-1` or the initial hidden state at time `0`, and :math:`r_t`,
:math:`z_t`, :math:`n_t` are the reset, update, and new gates, respectively.
:math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
In a multilayer GRU, the input :math:`x^{(l)}_t` of the :math:`l` -th layer
(:math:`l >= 2`) is the hidden state :math:`h^{(l-1)}_t` of the previous layer multiplied by
dropout :math:`\delta^{(l-1)}_t` where each :math:`\delta^{(l-1)}_t` is a Bernoulli random
variable which is :math:`0` with probability :attr:`dropout`.
Args:
input_size: The number of expected features in the input `x`
hidden_size: The number of features in the hidden state `h`
num_layers: Number of recurrent layers. E.g., setting ``num_layers=2``
would mean stacking two GRUs together to form a `stacked GRU`,
with the second GRU taking in outputs of the first GRU and
computing the final results. Default: 1
bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
Default: ``True``
batch_first: If ``True``, then the input and output tensors are provided
as `(batch, seq, feature)` instead of `(seq, batch, feature)`.
Note that this does not apply to hidden or cell states. See the
Inputs/Outputs sections below for details. Default: ``False``
dropout: If non-zero, introduces a `Dropout` layer on the outputs of each
GRU layer except the last layer, with dropout probability equal to
:attr:`dropout`. Default: 0
bidirectional: If ``True``, becomes a bidirectional GRU. Default: ``False``
Inputs: input, h_0
* **input**: tensor of shape :math:`(L, N, H_{in})` when ``batch_first=False`` or
:math:`(N, L, H_{in})` when ``batch_first=True`` containing the features of
the input sequence.
* **h_0**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the initial hidden
state for each element in the batch. Defaults to zeros if not provided.
where:
.. math::
\begin{aligned}
N ={} & \text{batch size} \\
L ={} & \text{sequence length} \\
D ={} & 2 \text{ if bidirectional=True otherwise } 1 \\
H_{in} ={} & \text{input\_size} \\
H_{out} ={} & \text{hidden\_size}
\end{aligned}
Outputs: output, h_n
* **output**: tensor of shape :math:`(L, N, D * H_{out})` when ``batch_first=False`` or
:math:`(N, L, D * H_{out})` when ``batch_first=True`` containing the output features
`(h_t)` from the last layer of the GRU, for each `t`. If a
* **h_n**: tensor of shape :math:`(D * \text{num\_layers}, N, H_{out})` containing the final hidden state
for each element in the batch.
Attributes:
weight_ih_l[k] : the learnable input-hidden weights of the :math:`\text{k}^{th}` layer
(W_ir|W_iz|W_in), of shape `(3*hidden_size, input_size)` for `k = 0`.
Otherwise, the shape is `(3*hidden_size, num_directions * hidden_size)`
weight_hh_l[k] : the learnable hidden-hidden weights of the :math:`\text{k}^{th}` layer
(W_hr|W_hz|W_hn), of shape `(3*hidden_size, hidden_size)`
bias_ih_l[k] : the learnable input-hidden bias of the :math:`\text{k}^{th}` layer
(b_ir|b_iz|b_in), of shape `(3*hidden_size)`
bias_hh_l[k] : the learnable hidden-hidden bias of the :math:`\text{k}^{th}` layer
(b_hr|b_hz|b_hn), of shape `(3*hidden_size)`
.. note::
All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
where :math:`k = \frac{1}{\text{hidden\_size}}`
.. note::
For bidirectional GRUs, forward and backward are directions 0 and 1 respectively.
Example of splitting the output layers when ``batch_first=False``:
``output.view(seq_len, batch, num_directions, hidden_size)``.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> import numpy as np
>>> rnn = flow.nn.GRU(10, 20, 2)
>>> input = flow.tensor(np.random.randn(5, 3, 10), dtype=flow.float32)
>>> h0 = flow.tensor(np.random.randn(2, 3, 20), dtype=flow.float32)
>>> output, hn = rnn(input, h0)
>>> output.size()
oneflow.Size([5, 3, 20])
"""
def __init__(self, *args, **kwargs):
if "proj_size" in kwargs:
raise ValueError(
"proj_size argument is only supported for LSTM, not RNN or GRU"
)
super().__init__("GRU", *args, **kwargs)
    def forward(self, input, hx=None):
        """Run the multi-layer GRU over ``input``.

        ``input`` may be a ``PackedSequence`` or a tensor (batched 3-D or
        unbatched 2-D); ``hx`` is the optional initial hidden state and is
        zero-filled when omitted.  Returns ``(output, h_n)``; ``output`` is
        re-packed when the input was packed.
        """
        orig_input = input
        if isinstance(orig_input, PackedSequence):
            # Unwrap the packed sequence; its batch bookkeeping travels with it.
            input = orig_input.data
            batch_sizes = orig_input.batch_sizes
            sorted_indices = orig_input.sorted_indices
            unsorted_indices = orig_input.unsorted_indices
            max_batch_size = int(batch_sizes[0])
        else:
            batch_sizes = None
            is_batched = input.dim() == 3
            batch_dim = 0 if self.batch_first else 1
            if not is_batched:
                # Promote unbatched (2-D) input to a batch of one.
                input = input.unsqueeze(batch_dim)
                if hx is not None:
                    if hx.dim() != 2:
                        raise RuntimeError(
                            f"For unbatched 2-D input, hx should also be 2-D but got {hx.dim()}-D tensor"
                        )
                    hx = hx.unsqueeze(1)
            else:
                if hx is not None and hx.dim() != 3:
                    raise RuntimeError(
                        f"For batched 3-D input, hx should also be 3-D but got {hx.dim()}-D tensor"
                    )
            max_batch_size = input.size(0) if self.batch_first else input.size(1)
            sorted_indices = None
            unsorted_indices = None
        if hx is None:
            # Default hidden state: zeros shaped (layers * dirs, batch, hidden),
            # created global or local to match the input tensor.
            num_directions = 2 if self.bidirectional else 1
            if input.is_global:
                hx = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                hx = flow.zeros(
                    self.num_layers * num_directions,
                    max_batch_size,
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
        else:
            # Each batch of the hidden state should match the input sequence that
            # the user believes he/she is passing in.
            hx = self.permute_hidden(hx, sorted_indices)
        self.check_forward_args(input, hx, batch_sizes)
        # Re-resolve the flat weight tensors by name; absent names become None.
        self._flat_weights = [
            (lambda wn: getattr(self, wn) if hasattr(self, wn) else None)(wn)
            for wn in self._flat_weights_names
        ]
        if batch_sizes is None:
            result = flow._C.gru(
                input,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
                self.batch_first,
            )
        else:
            # Packed path: batch_sizes is passed instead of the batch_first flag.
            result = flow._C.gru(
                input,
                batch_sizes,
                hx,
                self._flat_weights,
                self.bias,
                self.num_layers,
                self.dropout,
                self.training,
                self.bidirectional,
            )
        output = result[0]
        hidden = result[1]
        if isinstance(orig_input, PackedSequence):
            output_packed = PackedSequence(
                output, batch_sizes, sorted_indices, unsorted_indices
            )
            return output_packed, self.permute_hidden(hidden, unsorted_indices)
        else:
            if not is_batched:
                # Undo the batch-of-one promotion done above.
                output = output.squeeze(batch_dim)
                hidden = hidden.squeeze(1)
            return output, self.permute_hidden(hidden, unsorted_indices)
class RNNCellBase(nn.Module):
    """Shared base for single-step RNN/LSTM/GRU cells.

    Owns the input-hidden and hidden-hidden weight matrices (``num_chunks``
    gate blocks stacked row-wise), their optional biases, and the uniform
    parameter initialization.
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool,
        num_chunks: int,
        device=None,
        dtype=None,
    ):
        tensor_opts = {"device": device, "dtype": dtype}
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias
        gate_rows = num_chunks * hidden_size
        self.weight_ih = nn.Parameter(
            flow.empty(gate_rows, input_size, **tensor_opts)
        )
        self.weight_hh = nn.Parameter(
            flow.empty(gate_rows, hidden_size, **tensor_opts)
        )
        if not bias:
            # Keep the named parameter slots present even when bias is off.
            self.register_parameter("bias_ih", None)
            self.register_parameter("bias_hh", None)
        else:
            self.bias_ih = nn.Parameter(flow.empty(gate_rows, **tensor_opts))
            self.bias_hh = nn.Parameter(flow.empty(gate_rows, **tensor_opts))
        self.reset_parameters()

    def extra_repr(self) -> str:
        # Only mention bias/nonlinearity when they differ from the defaults.
        parts = ["{input_size}, {hidden_size}"]
        if "bias" in self.__dict__ and self.bias is not True:
            parts.append("bias={bias}")
        if "nonlinearity" in self.__dict__ and self.nonlinearity != "tanh":
            parts.append("nonlinearity={nonlinearity}")
        return ", ".join(parts).format(**self.__dict__)

    def reset_parameters(self) -> None:
        # Uniform init in (-1/sqrt(H), 1/sqrt(H)); width 0 when hidden_size == 0.
        bound = 1.0 / math.sqrt(self.hidden_size) if self.hidden_size > 0 else 0
        for param in self.parameters():
            nn.init.uniform_(param, -bound, bound)
class RNNCell(RNNCellBase):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.RNNCell.html.
    An Elman RNN cell with tanh or ReLU non-linearity.
    .. math::
        h' = \tanh(W_{ih} x + b_{ih} + W_{hh} h + b_{hh})
    If :attr:`nonlinearity` is `'relu'`, then ReLU is used in place of tanh.
    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and `b_hh`.
            Default: ``True``
        nonlinearity: The non-linearity to use. Can be either ``'tanh'`` or ``'relu'``. Default: ``'tanh'``
    Inputs: input, hidden
        - **input**: tensor containing input features
        - **hidden**: tensor containing the initial hidden state
          Defaults to zero if not provided.
    Outputs: h'
        - **h'** of shape `(batch, hidden_size)`: tensor containing the next hidden state
          for each element in the batch
    Shape:
        - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
          :math:`H_{in}` = `input_size`.
        - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
          state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
        - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(hidden_size)`
    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> rnn = nn.RNNCell(10, 20)
        >>> input = flow.randn(6, 3, 10)
        >>> hx = flow.randn(3, 20)
        >>> hx = rnn(input[0], hx)
        >>> hx.size()
        oneflow.Size([3, 20])
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        nonlinearity: str = "tanh",
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        # One gate chunk: the cell has a single combined transformation.
        super(RNNCell, self).__init__(
            input_size, hidden_size, bias, num_chunks=1, **factory_kwargs
        )
        self.nonlinearity = nonlinearity

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        assert input.dim() in (
            1,
            2,
        ), f"RNNCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
        is_batched = input.dim() == 2
        if not is_batched:
            # Promote unbatched input to a batch of one.
            input = input.unsqueeze(0)
        if hx is None:
            # BUGFIX: ``is_global`` is a tensor property (used without parens in
            # GRU.forward above); calling it as a method raised TypeError.
            if input.is_global:
                hx = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                hx = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
        else:
            hx = hx.unsqueeze(0) if not is_batched else hx
        if self.nonlinearity == "tanh":
            ret = flow._C.rnn_tanh_cell(
                input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
            )
        elif self.nonlinearity == "relu":
            ret = flow._C.rnn_relu_cell(
                input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
            )
        else:
            raise RuntimeError("Unknown nonlinearity: {}".format(self.nonlinearity))
        if not is_batched:
            # Drop the batch-of-one dimension added above.
            ret = ret.squeeze(0)
        return ret
class LSTMCell(RNNCellBase):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.LSTMCell.html.
    A long short-term memory (LSTM) cell.
    .. math::
        \begin{array}{ll}
        i = \sigma(W_{ii} x + b_{ii} + W_{hi} h + b_{hi}) \\
        f = \sigma(W_{if} x + b_{if} + W_{hf} h + b_{hf}) \\
        g = \tanh(W_{ig} x + b_{ig} + W_{hg} h + b_{hg}) \\
        o = \sigma(W_{io} x + b_{io} + W_{ho} h + b_{ho}) \\
        c' = f * c + i * g \\
        h' = o * \tanh(c') \\
        \end{array}
    where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and
            `b_hh`. Default: ``True``
    Inputs: input, (h_0, c_0)
        - **input** of shape `(batch, input_size)` or `(input_size)`: tensor containing input features
        - **h_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial hidden state
        - **c_0** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the initial cell state
          If `(h_0, c_0)` is not provided, both **h_0** and **c_0** default to zero.
    Outputs: (h_1, c_1)
        - **h_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next hidden state
        - **c_1** of shape `(batch, hidden_size)` or `(hidden_size)`: tensor containing the next cell state
    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(4*hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(4*hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(4*hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(4*hidden_size)`
    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> rnn = nn.LSTMCell(10, 20) # (input_size, hidden_size)
        >>> input = flow.randn(2, 3, 10) # (time_steps, batch, input_size)
        >>> hx = flow.randn(3, 20) # (batch, hidden_size)
        >>> cx = flow.randn(3, 20)
        >>> hx, cx = rnn(input[0], (hx, cx))
        >>> hx.size()
        oneflow.Size([3, 20])
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ) -> None:
        factory_kwargs = {"device": device, "dtype": dtype}
        # Four gate chunks: input, forget, cell (g) and output gates.
        super(LSTMCell, self).__init__(
            input_size, hidden_size, bias, num_chunks=4, **factory_kwargs
        )

    def forward(
        self, input: Tensor, hx: Optional[Tuple[Tensor, Tensor]] = None
    ) -> Tuple[Tensor, Tensor]:
        assert input.dim() in (
            1,
            2,
        ), f"LSTMCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
        is_batched = input.dim() == 2
        if not is_batched:
            # Promote unbatched input to a batch of one.
            input = input.unsqueeze(0)
        if hx is None:
            # BUGFIX: ``is_global`` is a tensor property (used without parens in
            # GRU.forward above); calling it as a method raised TypeError.
            if input.is_global:
                zeros = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                zeros = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
            # Hidden and cell state share the same zero tensor as default.
            hx = (zeros, zeros)
        else:
            hx = (hx[0].unsqueeze(0), hx[1].unsqueeze(0)) if not is_batched else hx
        ret = flow._C.lstm_cell(
            input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
        )
        if not is_batched:
            # Drop the batch-of-one dimension from both outputs.
            ret = (ret[0].squeeze(0), ret[1].squeeze(0))
        return ret
class GRUCell(RNNCellBase):
    """The interface is consistent with PyTorch.
    The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.nn.GRUCell.html.
    A gated recurrent unit (GRU) cell
    .. math::
        \begin{array}{ll}
        r = \sigma(W_{ir} x + b_{ir} + W_{hr} h + b_{hr}) \\
        z = \sigma(W_{iz} x + b_{iz} + W_{hz} h + b_{hz}) \\
        n = \tanh(W_{in} x + b_{in} + r * (W_{hn} h + b_{hn})) \\
        h' = (1 - z) * n + z * h
        \end{array}
    where :math:`\sigma` is the sigmoid function, and :math:`*` is the Hadamard product.
    Args:
        input_size: The number of expected features in the input `x`
        hidden_size: The number of features in the hidden state `h`
        bias: If ``False``, then the layer does not use bias weights `b_ih` and
            `b_hh`. Default: ``True``
    Inputs: input, hidden
        - **input** : tensor containing input features
        - **hidden** : tensor containing the initial hidden
          state for each element in the batch.
          Defaults to zero if not provided.
    Outputs: h'
        - **h'** : tensor containing the next hidden state
          for each element in the batch
    Shape:
        - input: :math:`(N, H_{in})` or :math:`(H_{in})` tensor containing input features where
          :math:`H_{in}` = `input_size`.
        - hidden: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the initial hidden
          state where :math:`H_{out}` = `hidden_size`. Defaults to zero if not provided.
        - output: :math:`(N, H_{out})` or :math:`(H_{out})` tensor containing the next hidden state.
    Attributes:
        weight_ih: the learnable input-hidden weights, of shape
            `(3*hidden_size, input_size)`
        weight_hh: the learnable hidden-hidden weights, of shape
            `(3*hidden_size, hidden_size)`
        bias_ih: the learnable input-hidden bias, of shape `(3*hidden_size)`
        bias_hh: the learnable hidden-hidden bias, of shape `(3*hidden_size)`
    .. note::
        All the weights and biases are initialized from :math:`\mathcal{U}(-\sqrt{k}, \sqrt{k})`
        where :math:`k = \frac{1}{\text{hidden\_size}}`
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import oneflow.nn as nn
        >>> rnn = nn.GRUCell(10, 20)
        >>> input = flow.randn(6, 3, 10)
        >>> hx = flow.randn(3, 20)
        >>> hx = rnn(input[0], hx)
        >>> hx.size()
        oneflow.Size([3, 20])
    """

    def __init__(
        self,
        input_size: int,
        hidden_size: int,
        bias: bool = True,
        device=None,
        dtype=None,
    ):
        factory_kwargs = {"device": device, "dtype": dtype}
        # Three gate chunks: reset, update and new gates.
        super().__init__(input_size, hidden_size, bias, num_chunks=3, **factory_kwargs)

    def forward(self, input: Tensor, hx: Optional[Tensor] = None) -> Tensor:
        assert input.dim() in (
            1,
            2,
        ), f"GRUCell: Expected input to be 1-D or 2-D but received {input.dim()}-D tensor"
        is_batched = input.dim() == 2
        if not is_batched:
            # Promote unbatched input to a batch of one.
            input = input.unsqueeze(0)
        if hx is None:
            # BUGFIX: ``is_global`` is a tensor property (used without parens in
            # GRU.forward above); calling it as a method raised TypeError.
            if input.is_global:
                hx = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    sbp=input.sbp,
                    placement=input.placement,
                )
            else:
                hx = flow.zeros(
                    input.size(0),
                    self.hidden_size,
                    dtype=input.dtype,
                    device=input.device,
                )
        else:
            hx = hx.unsqueeze(0) if not is_batched else hx
        ret = flow._C.gru_cell(
            input, hx, self.weight_ih, self.weight_hh, self.bias_ih, self.bias_hh,
        )
        if not is_batched:
            # Drop the batch-of-one dimension added above.
            ret = ret.squeeze(0)
        return ret
# Run the doctest examples embedded in the docstrings above when this module
# is executed directly; raise_on_error aborts on the first failing example.
if __name__ == "__main__":
    import doctest
    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.lstm",
"oneflow.nn.init.uniform_",
"oneflow._C.rnn_tanh_cell",
"oneflow._C.lstm_cell",
"oneflow._C.rnn_tanh",
"oneflow._C.gru_cell",
"oneflow.zeros",
"oneflow._C.rnn_relu",
"oneflow.nn.utils.rnn.PackedSequence",
"oneflow.empty",
"oneflow._C.rnn_relu_cell",
"oneflow._C.gru"
] | [((56153, 56189), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (56168, 56189), False, 'import doctest\n'), ((51904, 51996), 'oneflow._C.lstm_cell', 'flow._C.lstm_cell', (['input', 'hx', 'self.weight_ih', 'self.weight_hh', 'self.bias_ih', 'self.bias_hh'], {}), '(input, hx, self.weight_ih, self.weight_hh, self.bias_ih,\n self.bias_hh)\n', (51921, 51996), True, 'import oneflow as flow\n'), ((55908, 55999), 'oneflow._C.gru_cell', 'flow._C.gru_cell', (['input', 'hx', 'self.weight_ih', 'self.weight_hh', 'self.bias_ih', 'self.bias_hh'], {}), '(input, hx, self.weight_ih, self.weight_hh, self.bias_ih,\n self.bias_hh)\n', (55924, 55999), True, 'import oneflow as flow\n'), ((6380, 6417), 'oneflow.nn.init.uniform_', 'nn.init.uniform_', (['weight', '(-stdv)', 'stdv'], {}), '(weight, -stdv, stdv)\n', (6396, 6417), False, 'from oneflow import nn\n'), ((18866, 18935), 'oneflow.nn.utils.rnn.PackedSequence', 'PackedSequence', (['output', 'batch_sizes', 'sorted_indices', 'unsorted_indices'], {}), '(output, batch_sizes, sorted_indices, unsorted_indices)\n', (18880, 18935), False, 'from oneflow.nn.utils.rnn import PackedSequence\n'), ((31504, 31646), 'oneflow._C.lstm', 'flow._C.lstm', (['input', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional', 'self.batch_first'], {}), '(input, hx, self._flat_weights, self.bias, self.num_layers,\n self.dropout, self.training, self.bidirectional, self.batch_first)\n', (31516, 31646), True, 'import oneflow as flow\n'), ((31837, 31975), 'oneflow._C.lstm', 'flow._C.lstm', (['input', 'batch_sizes', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional'], {}), '(input, batch_sizes, hx, self._flat_weights, self.bias, self.\n num_layers, self.dropout, self.training, self.bidirectional)\n', (31849, 31975), True, 'import oneflow as flow\n'), ((32264, 32333), 
'oneflow.nn.utils.rnn.PackedSequence', 'PackedSequence', (['output', 'batch_sizes', 'sorted_indices', 'unsorted_indices'], {}), '(output, batch_sizes, sorted_indices, unsorted_indices)\n', (32278, 32333), False, 'from oneflow.nn.utils.rnn import PackedSequence\n'), ((40748, 40890), 'oneflow._C.gru', 'flow._C.gru', (['input', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional', 'self.batch_first'], {}), '(input, hx, self._flat_weights, self.bias, self.num_layers, self\n .dropout, self.training, self.bidirectional, self.batch_first)\n', (40759, 40890), True, 'import oneflow as flow\n'), ((41080, 41217), 'oneflow._C.gru', 'flow._C.gru', (['input', 'batch_sizes', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional'], {}), '(input, batch_sizes, hx, self._flat_weights, self.bias, self.\n num_layers, self.dropout, self.training, self.bidirectional)\n', (41091, 41217), True, 'import oneflow as flow\n'), ((41506, 41575), 'oneflow.nn.utils.rnn.PackedSequence', 'PackedSequence', (['output', 'batch_sizes', 'sorted_indices', 'unsorted_indices'], {}), '(output, batch_sizes, sorted_indices, unsorted_indices)\n', (41520, 41575), False, 'from oneflow.nn.utils.rnn import PackedSequence\n'), ((42346, 42412), 'oneflow.empty', 'flow.empty', (['(num_chunks * hidden_size)', 'input_size'], {}), '(num_chunks * hidden_size, input_size, **factory_kwargs)\n', (42356, 42412), True, 'import oneflow as flow\n'), ((42474, 42541), 'oneflow.empty', 'flow.empty', (['(num_chunks * hidden_size)', 'hidden_size'], {}), '(num_chunks * hidden_size, hidden_size, **factory_kwargs)\n', (42484, 42541), True, 'import oneflow as flow\n'), ((43485, 43522), 'oneflow.nn.init.uniform_', 'nn.init.uniform_', (['weight', '(-stdv)', 'stdv'], {}), '(weight, -stdv, stdv)\n', (43501, 43522), False, 'from oneflow import nn\n'), ((47354, 47451), 'oneflow._C.rnn_tanh_cell', 'flow._C.rnn_tanh_cell', 
(['input', 'hx', 'self.weight_ih', 'self.weight_hh', 'self.bias_ih', 'self.bias_hh'], {}), '(input, hx, self.weight_ih, self.weight_hh, self.\n bias_ih, self.bias_hh)\n', (47375, 47451), True, 'import oneflow as flow\n'), ((6268, 6295), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (6277, 6295), False, 'import math\n'), ((16030, 16174), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'sbp': 'input.sbp', 'placement': 'input.placement'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, sbp=input.sbp, placement=input.placement)\n', (16040, 16174), True, 'import oneflow as flow\n'), ((16348, 16471), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'device': 'input.device'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, device=input.device)\n', (16358, 16471), True, 'import oneflow as flow\n'), ((17208, 17354), 'oneflow._C.rnn_tanh', 'flow._C.rnn_tanh', (['input', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional', 'self.batch_first'], {}), '(input, hx, self._flat_weights, self.bias, self.num_layers,\n self.dropout, self.training, self.bidirectional, self.batch_first)\n', (17224, 17354), True, 'import oneflow as flow\n'), ((17593, 17739), 'oneflow._C.rnn_relu', 'flow._C.rnn_relu', (['input', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional', 'self.batch_first'], {}), '(input, hx, self._flat_weights, self.bias, self.num_layers,\n self.dropout, self.training, self.bidirectional, self.batch_first)\n', (17609, 17739), True, 'import oneflow as flow\n'), ((18014, 18155), 'oneflow._C.rnn_tanh', 'flow._C.rnn_tanh', (['input', 'batch_sizes', 'hx', 
'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional'], {}), '(input, batch_sizes, hx, self._flat_weights, self.bias,\n self.num_layers, self.dropout, self.training, self.bidirectional)\n', (18030, 18155), True, 'import oneflow as flow\n'), ((18394, 18535), 'oneflow._C.rnn_relu', 'flow._C.rnn_relu', (['input', 'batch_sizes', 'hx', 'self._flat_weights', 'self.bias', 'self.num_layers', 'self.dropout', 'self.training', 'self.bidirectional'], {}), '(input, batch_sizes, hx, self._flat_weights, self.bias,\n self.num_layers, self.dropout, self.training, self.bidirectional)\n', (18410, 18535), True, 'import oneflow as flow\n'), ((29001, 29149), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'real_hidden_size'], {'dtype': 'input.dtype', 'sbp': 'input.sbp', 'placement': 'input.placement'}), '(self.num_layers * num_directions, max_batch_size,\n real_hidden_size, dtype=input.dtype, sbp=input.sbp, placement=input.\n placement)\n', (29011, 29149), True, 'import oneflow as flow\n'), ((29306, 29450), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'sbp': 'input.sbp', 'placement': 'input.placement'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, sbp=input.sbp, placement=input.placement)\n', (29316, 29450), True, 'import oneflow as flow\n'), ((29629, 29751), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'real_hidden_size'], {'dtype': 'input.dtype', 'device': 'input.device'}), '(self.num_layers * num_directions, max_batch_size,\n real_hidden_size, dtype=input.dtype, device=input.device)\n', (29639, 29751), True, 'import oneflow as flow\n'), ((29893, 30016), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'device': 
'input.device'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, device=input.device)\n', (29903, 30016), True, 'import oneflow as flow\n'), ((39709, 39853), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'sbp': 'input.sbp', 'placement': 'input.placement'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, sbp=input.sbp, placement=input.placement)\n', (39719, 39853), True, 'import oneflow as flow\n'), ((40027, 40150), 'oneflow.zeros', 'flow.zeros', (['(self.num_layers * num_directions)', 'max_batch_size', 'self.hidden_size'], {'dtype': 'input.dtype', 'device': 'input.device'}), '(self.num_layers * num_directions, max_batch_size, self.\n hidden_size, dtype=input.dtype, device=input.device)\n', (40037, 40150), True, 'import oneflow as flow\n'), ((42626, 42680), 'oneflow.empty', 'flow.empty', (['(num_chunks * hidden_size)'], {}), '(num_chunks * hidden_size, **factory_kwargs)\n', (42636, 42680), True, 'import oneflow as flow\n'), ((42752, 42806), 'oneflow.empty', 'flow.empty', (['(num_chunks * hidden_size)'], {}), '(num_chunks * hidden_size, **factory_kwargs)\n', (42762, 42806), True, 'import oneflow as flow\n'), ((43373, 43400), 'math.sqrt', 'math.sqrt', (['self.hidden_size'], {}), '(self.hidden_size)\n', (43382, 43400), False, 'import math\n'), ((47538, 47635), 'oneflow._C.rnn_relu_cell', 'flow._C.rnn_relu_cell', (['input', 'hx', 'self.weight_ih', 'self.weight_hh', 'self.bias_ih', 'self.bias_hh'], {}), '(input, hx, self.weight_ih, self.weight_hh, self.\n bias_ih, self.bias_hh)\n', (47559, 47635), True, 'import oneflow as flow\n'), ((3696, 3755), 'oneflow.empty', 'flow.empty', (['(gate_size, layer_input_size)'], {}), '((gate_size, layer_input_size), **factory_kwargs)\n', (3706, 3755), True, 'import oneflow as flow\n'), ((3831, 3890), 'oneflow.empty', 'flow.empty', (['(gate_size, 
real_hidden_size)'], {}), '((gate_size, real_hidden_size), **factory_kwargs)\n', (3841, 3890), True, 'import oneflow as flow\n'), ((3945, 3984), 'oneflow.empty', 'flow.empty', (['gate_size'], {}), '(gate_size, **factory_kwargs)\n', (3955, 3984), True, 'import oneflow as flow\n'), ((4022, 4061), 'oneflow.empty', 'flow.empty', (['gate_size'], {}), '(gate_size, **factory_kwargs)\n', (4032, 4061), True, 'import oneflow as flow\n'), ((4415, 4469), 'oneflow.empty', 'flow.empty', (['(proj_size, hidden_size)'], {}), '((proj_size, hidden_size), **factory_kwargs)\n', (4425, 4469), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from oneflow.nn.graph.optimizer import OptDict
import oneflow._oneflow_internal.oneflow.core.job.job_conf as job_conf_cfg
class GraphConfig(object):
    r"""For configuration of nn.Graph.

    Wraps a ``JobConfigProto`` and exposes convenience setters for the
    optimizations a graph may enable (AMP, op fusion, ZeRO, XRT backends,
    gradient accumulation, ...).
    """

    def __init__(self):
        super().__init__()
        # Default outputs buffer size; values > 2 allow pipelined repeated calls.
        self._outputs_buffer_size = 2
        self.proto = job_conf_cfg.JobConfigProto()
        # A freshly created graph starts in inference (predict) mode.
        self._train(False)

    def _train(self, mode: bool = True):
        # Switch the underlying job proto between train and predict conf.
        if mode:
            self.proto.mutable_train_conf()
        else:
            self.proto.mutable_predict_conf()

    @property
    def training(self):
        """True when the job proto holds a train conf, False for predict conf."""
        if self.proto.has_train_conf():
            return True
        if self.proto.has_predict_conf():
            return False
        # Neither conf is set: the proto is in an unexpected state.
        raise NotImplementedError

    def set_outputs_buffer_size(self, value: int = 2):
        r"""Set the outputs buffer size of ``nn.Graph``.
        When graph's outputs buffer size is greater than 2, multiple call on the
        graph can work like a pipeline. This makes multiple call takes less time.
        The default outputs buffer size is 2.
        Args:
            value (int): graph outputs buffer size.
        """
        self._outputs_buffer_size = value

    def enable_amp(self, mode: bool = True):
        """If true, then graph will use mixed precision mode, it means use both float16 and float32 during model training.
        Args:
            mode (bool, optional): [description]. Default is True.
        """
        assert type(mode) is bool
        self.proto.set_enable_auto_mixed_precision(mode)

    def allow_fuse_model_update_ops(self, mode: bool = True):
        """If true, try to fuse cast + scale + l1_l2_regularize_gradient + model_update to one op to improve performance.
        Args:
            mode (bool, optional): [description]. Default is True.
        """
        self.proto.set_enable_fuse_model_update_ops(mode)

    def allow_fuse_add_to_output(self, mode: bool = True):
        """If true, try to fuse a binary element-wise add to one of the predecessors to improve performance.
        Args:
            mode (bool, optional): [description]. Default is True.
        """
        self.proto.set_enable_fuse_add_to_output(mode)

    def allow_fuse_cast_scale(self, mode: bool = True):
        """If true, try to fuse cast and scalar_mul_by_tensor to improve performance.
        Args:
            mode (bool, optional): [description]. Default is True.
        """
        self.proto.set_enable_fuse_cast_scale(mode)

    def set_gradient_accumulation_steps(self, value):
        """Set num of steps to accumulate gradient.
        Args:
            value (int): num of steps.
        """
        self.proto.set_num_gradient_accumulation_steps(value)

    def set_zero_redundancy_optimizer_mode(self, mode: str = "distributed_split"):
        """Set mode to remove redundancy of optimizer states.
        This optimization will reduce optimizer states memory consumption as described
        by ZeRO https://arxiv.org/abs/1910.02054 .
        Args:
            mode (str): "distributed_split" or "non_distributed". "distributed_split" mode
                        will shard each optimizer state across devices. "non_distributed" mode
                        will place each optimizer state to only one device.
        """
        assert mode in ("distributed_split", "non_distributed")
        self.proto.set_optimizer_placement_optimization_mode(mode)

    def enable_xla_jit(self, value=True):
        """Whether use xla_jit in xrt or not. When this option enable, oneflow will check all operators is supported by
        xla_jit or not. Clustering supported operators as subgraph, then running subgraph by xla_jit.
        XLA: https://www.tensorflow.org/xla
        Args:
            value (bool, optional): [description]. Defaults to True.
        """
        self.proto.mutable_xrt_config().set_use_xla_jit(value)

    def enable_tensorrt(self, value=True):
        """Whether use tensorrt in xrt or not. When this option enable, oneflow will check all operators is supported by
        tensorrt or not. Clustering supported operators as subgraph, then running subgraph by tensorrt.
        TensorRT: https://developer.nvidia.com/tensorrt
        Args:
            value (bool, optional): [description]. Defaults to True.
        """
        self.proto.mutable_xrt_config().set_use_tensorrt(value)

    def enable_openvino(self, value=True):
        """Whether use openvino in xrt or not. When this option enable, oneflow will check all operators is supported by
        openvino or not. Clustering supported operators as subgraph, then running subgraph by openvino.
        Please note that, openvino only support inference mode.
        OpenVINO: https://www.intel.com/content/www/us/en/developer/tools/openvino-toolkit/overview.html
        Args:
            value (bool, optional): [description]. Defaults to True.
        """
        self.proto.mutable_xrt_config().set_use_openvino(value)

    def enable_cudnn_conv_heuristic_search_algo(self, mode: bool = True):
        """ Whether enable cudnn conv operation to use heuristic search algorithm.
        Args:
            mode (bool, optional): Whether enable cudnn conv operation to use heuristic
                                   search algorithm. Default is True.
        """
        self.proto.set_cudnn_conv_heuristic_search_algo(mode)

    def _generate_optimizer_and_variable_configs(
        self, opt_dict: OptDict = None, variables_conf: OrderedDict = None,
    ):
        # Guard the None default: dereferencing it unconditionally would raise
        # AttributeError; with no optimizer dict there is nothing to generate.
        if opt_dict is not None:
            opt_dict.generate_optimizer_and_variable_configs(self.proto, variables_conf)

    def __repr__(self):
        # Same string as the original concatenation-based implementation.
        return f"(CONFIG:config:{self.__class__.__name__}(training={self.training}, ))"
| [
"oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto"
] | [((936, 965), 'oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto', 'job_conf_cfg.JobConfigProto', ([], {}), '()\n', (963, 965), True, 'import oneflow._oneflow_internal.oneflow.core.job.job_conf as job_conf_cfg\n')] |
"""
Copyright 2020 Tianshu AI Platform. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import sys
import os
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "./src")))
import bert as bert_util
import oneflow.core.operator.op_conf_pb2 as op_conf_util
def maskedBert(
    input_ids_blob,
    input_mask_blob,
    token_type_ids_blob,
    masked_lm_positions_blob,
    # masked_lm_positions_blob,
    # masked_lm_ids_blob,
    vocab_size,
    seq_length=512,
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    hidden_act="gelu",
    hidden_dropout_prob=0.1,
    attention_probs_dropout_prob=0.1,
    max_position_embeddings=512,
    type_vocab_size=16,
    # max_predictions_per_seq=20,
    initializer_range=0.02,
):
    """Build a BERT network with a masked-language-model head.

    Constructs a ``BertBackbone`` over the token/mask/segment inputs, then
    attaches the masked-LM prediction head (tied to the backbone's embedding
    table) at the positions given by ``masked_lm_positions_blob``.

    Returns the masked-LM predictions blob.
    """
    backbone = bert_util.BertBackbone(
        input_ids_blob=input_ids_blob,
        input_mask_blob=input_mask_blob,
        token_type_ids_blob=token_type_ids_blob,
        vocab_size=vocab_size,
        seq_length=seq_length,
        hidden_size=hidden_size,
        num_hidden_layers=num_hidden_layers,
        num_attention_heads=num_attention_heads,
        intermediate_size=intermediate_size,
        hidden_act=hidden_act,
        hidden_dropout_prob=hidden_dropout_prob,
        attention_probs_dropout_prob=attention_probs_dropout_prob,
        max_position_embeddings=max_position_embeddings,
        type_vocab_size=type_vocab_size,
        initializer_range=initializer_range,
    )
    # Output weights are the embedding table, i.e. weight tying with the input
    # embeddings.
    predictions = _AddMaskedLanguageModel(
        input_blob=backbone.sequence_output(),
        output_weights_blob=backbone.embedding_table(),
        positions_blob=masked_lm_positions_blob,
        seq_length=seq_length,
        hidden_size=hidden_size,
        vocab_size=vocab_size,
        hidden_act=bert_util.GetActivation(hidden_act),
        initializer_range=initializer_range,
    )
    # NOTE(review): pooled_output is never returned, but this call still adds
    # the "bert-pooler" ops/variables to the graph, so removing it would change
    # the built model -- confirm whether the pooler is intentionally kept.
    pooled_output = PooledOutput(
        backbone.sequence_output(), hidden_size, initializer_range
    )
    return predictions
def PooledOutput(sequence_output, hidden_size, initializer_range):
with flow.scope.namespace("bert-pooler"):
first_token_tensor = flow.slice(sequence_output, [None, 0, 0], [None, 1, -1])
first_token_tensor = flow.reshape(first_token_tensor, [-1, hidden_size])
pooled_output = bert_util._FullyConnected(
first_token_tensor,
input_size=hidden_size,
units=hidden_size,
weight_initializer=bert_util.CreateInitializer(initializer_range),
name="dense",
)
pooled_output = flow.math.tanh(pooled_output)
return pooled_output
def _AddMaskedLanguageModelLoss(
input_blob,
output_weights_blob,
positions_blob,
label_id_blob,
label_weight_blob,
seq_length,
hidden_size,
vocab_size,
max_predictions_per_seq,
hidden_act,
initializer_range,
):
with flow.scope.namespace("other"):
sum_label_weight_blob = flow.math.reduce_sum(label_weight_blob, axis=[-1])
ones = sum_label_weight_blob * 0.0 + 1.0
sum_label_weight_blob = flow.math.reduce_sum(sum_label_weight_blob)
batch_size = flow.math.reduce_sum(ones)
sum_label_weight_blob = sum_label_weight_blob / batch_size
with flow.scope.namespace("cls-predictions"):
input_blob = _GatherIndexes(input_blob, positions_blob, seq_length, hidden_size)
with flow.scope.namespace("transform"):
if callable(hidden_act):
act_fn = op_conf_util.kNone
else:
act_fn = hidden_act
input_blob = bert_util._FullyConnected(
input_blob,
input_size=hidden_size,
units=hidden_size,
activation=act_fn,
weight_initializer=bert_util.CreateInitializer(initializer_range),
name="dense",
)
if callable(hidden_act):
input_blob = hidden_act(input_blob)
input_blob = bert_util._LayerNorm(input_blob, hidden_size)
output_bias = flow.get_variable(
name="output_bias",
shape=[vocab_size],
dtype=input_blob.dtype,
initializer=flow.constant_initializer(1.0),
)
logit_blob = flow.matmul(input_blob, output_weights_blob, transpose_b=True)
logit_blob = flow.nn.bias_add(logit_blob, output_bias)
label_id_blob = flow.reshape(label_id_blob, [-1])
pre_example_loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
logits=logit_blob, labels=label_id_blob
)
pre_example_loss = flow.reshape(pre_example_loss, [-1, max_predictions_per_seq])
numerator = pre_example_loss * label_weight_blob
with flow.scope.namespace("loss"):
numerator = flow.math.reduce_sum(numerator, axis=[-1])
denominator = sum_label_weight_blob + 1e-5
loss = numerator / denominator
return loss, pre_example_loss, logit_blob
def _AddMaskedLanguageModel(
input_blob,
output_weights_blob,
positions_blob,
seq_length,
hidden_size,
vocab_size,
hidden_act,
initializer_range,
):
with flow.scope.namespace("cls-predictions"):
# 获取mask词的encode
input_blob = _GatherIndexes(input_blob, positions_blob, seq_length, hidden_size)
# 在输出之前添加一个非线性变换,只在预训练阶段起作用
with flow.scope.namespace("transform"):
if callable(hidden_act):
act_fn = op_conf_util.kNone
else:
act_fn = hidden_act
# print('hhhhh')
input_blob = bert_util._FullyConnected(
input_blob,
input_size=hidden_size,
units=hidden_size,
activation=act_fn,
weight_initializer=bert_util.CreateInitializer(initializer_range),
name="dense",
)
if callable(hidden_act):
input_blob = hidden_act(input_blob)
input_blob = bert_util._LayerNorm(input_blob, hidden_size)
# output_weights是和传入的word embedding一样的
# 这里再添加一个bias
output_bias = flow.get_variable(
name="output_bias",
shape=[vocab_size],
dtype=input_blob.dtype,
initializer=flow.constant_initializer(1.0),
)
logit_blob = flow.matmul(input_blob, output_weights_blob, transpose_b=True)
logit_blob = flow.nn.bias_add(logit_blob, output_bias)
return logit_blob
def _GatherIndexes(sequence_blob, positions_blob, seq_length, hidden_size):
output = flow.gather(
params=sequence_blob, indices=positions_blob, axis=2, batch_dims=2
)
output = flow.reshape(output, [-1, hidden_size])
return output
def _AddNextSentenceOutput(input_blob, label_blob, hidden_size, initializer_range):
with flow.scope.namespace("cls-seq_relationship"):
output_weight_blob = flow.get_variable(
name="output_weights",
shape=[2, hidden_size],
dtype=input_blob.dtype,
initializer=bert_util.CreateInitializer(initializer_range),
)
output_bias_blob = flow.get_variable(
name="output_bias",
shape=[2],
dtype=input_blob.dtype,
initializer=flow.constant_initializer(0.0),
)
logit_blob = flow.matmul(input_blob, output_weight_blob, transpose_b=True)
logit_blob = flow.nn.bias_add(logit_blob, output_bias_blob)
pre_example_loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
logits=logit_blob, labels=label_blob
)
loss = pre_example_loss
return loss, pre_example_loss, logit_blob
| [
"oneflow.scope.namespace",
"oneflow.matmul",
"oneflow.math.reduce_sum",
"oneflow.gather",
"oneflow.nn.sparse_softmax_cross_entropy_with_logits",
"oneflow.constant_initializer",
"oneflow.reshape",
"oneflow.slice",
"oneflow.nn.bias_add",
"oneflow.math.tanh"
] | [((657, 682), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (672, 682), False, 'import os\n'), ((695, 717), 'os.path.split', 'os.path.split', (['curPath'], {}), '(curPath)\n', (708, 717), False, 'import os\n'), ((1411, 2003), 'bert.BertBackbone', 'bert_util.BertBackbone', ([], {'input_ids_blob': 'input_ids_blob', 'input_mask_blob': 'input_mask_blob', 'token_type_ids_blob': 'token_type_ids_blob', 'vocab_size': 'vocab_size', 'seq_length': 'seq_length', 'hidden_size': 'hidden_size', 'num_hidden_layers': 'num_hidden_layers', 'num_attention_heads': 'num_attention_heads', 'intermediate_size': 'intermediate_size', 'hidden_act': 'hidden_act', 'hidden_dropout_prob': 'hidden_dropout_prob', 'attention_probs_dropout_prob': 'attention_probs_dropout_prob', 'max_position_embeddings': 'max_position_embeddings', 'type_vocab_size': 'type_vocab_size', 'initializer_range': 'initializer_range'}), '(input_ids_blob=input_ids_blob, input_mask_blob=\n input_mask_blob, token_type_ids_blob=token_type_ids_blob, vocab_size=\n vocab_size, seq_length=seq_length, hidden_size=hidden_size,\n num_hidden_layers=num_hidden_layers, num_attention_heads=\n num_attention_heads, intermediate_size=intermediate_size, hidden_act=\n hidden_act, hidden_dropout_prob=hidden_dropout_prob,\n attention_probs_dropout_prob=attention_probs_dropout_prob,\n max_position_embeddings=max_position_embeddings, type_vocab_size=\n type_vocab_size, initializer_range=initializer_range)\n', (1433, 2003), True, 'import bert as bert_util\n'), ((7243, 7322), 'oneflow.gather', 'flow.gather', ([], {'params': 'sequence_blob', 'indices': 'positions_blob', 'axis': '(2)', 'batch_dims': '(2)'}), '(params=sequence_blob, indices=positions_blob, axis=2, batch_dims=2)\n', (7254, 7322), True, 'import oneflow as flow\n'), ((7350, 7389), 'oneflow.reshape', 'flow.reshape', (['output', '[-1, hidden_size]'], {}), '(output, [-1, hidden_size])\n', (7362, 7389), True, 'import oneflow as flow\n'), ((2700, 2735), 
'oneflow.scope.namespace', 'flow.scope.namespace', (['"""bert-pooler"""'], {}), "('bert-pooler')\n", (2720, 2735), True, 'import oneflow as flow\n'), ((2766, 2822), 'oneflow.slice', 'flow.slice', (['sequence_output', '[None, 0, 0]', '[None, 1, -1]'], {}), '(sequence_output, [None, 0, 0], [None, 1, -1])\n', (2776, 2822), True, 'import oneflow as flow\n'), ((2852, 2903), 'oneflow.reshape', 'flow.reshape', (['first_token_tensor', '[-1, hidden_size]'], {}), '(first_token_tensor, [-1, hidden_size])\n', (2864, 2903), True, 'import oneflow as flow\n'), ((3193, 3222), 'oneflow.math.tanh', 'flow.math.tanh', (['pooled_output'], {}), '(pooled_output)\n', (3207, 3222), True, 'import oneflow as flow\n'), ((3515, 3544), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""other"""'], {}), "('other')\n", (3535, 3544), True, 'import oneflow as flow\n'), ((3578, 3628), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['label_weight_blob'], {'axis': '[-1]'}), '(label_weight_blob, axis=[-1])\n', (3598, 3628), True, 'import oneflow as flow\n'), ((3710, 3753), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['sum_label_weight_blob'], {}), '(sum_label_weight_blob)\n', (3730, 3753), True, 'import oneflow as flow\n'), ((3775, 3801), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['ones'], {}), '(ones)\n', (3795, 3801), True, 'import oneflow as flow\n'), ((3878, 3917), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""cls-predictions"""'], {}), "('cls-predictions')\n", (3898, 3917), True, 'import oneflow as flow\n'), ((4900, 4962), 'oneflow.matmul', 'flow.matmul', (['input_blob', 'output_weights_blob'], {'transpose_b': '(True)'}), '(input_blob, output_weights_blob, transpose_b=True)\n', (4911, 4962), True, 'import oneflow as flow\n'), ((4984, 5025), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['logit_blob', 'output_bias'], {}), '(logit_blob, output_bias)\n', (5000, 5025), True, 'import oneflow as flow\n'), ((5050, 5083), 'oneflow.reshape', 'flow.reshape', 
(['label_id_blob', '[-1]'], {}), '(label_id_blob, [-1])\n', (5062, 5083), True, 'import oneflow as flow\n'), ((5111, 5205), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logit_blob', 'labels': 'label_id_blob'}), '(logits=logit_blob, labels=\n label_id_blob)\n', (5159, 5205), True, 'import oneflow as flow\n'), ((5250, 5311), 'oneflow.reshape', 'flow.reshape', (['pre_example_loss', '[-1, max_predictions_per_seq]'], {}), '(pre_example_loss, [-1, max_predictions_per_seq])\n', (5262, 5311), True, 'import oneflow as flow\n'), ((5818, 5857), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""cls-predictions"""'], {}), "('cls-predictions')\n", (5838, 5857), True, 'import oneflow as flow\n'), ((6999, 7061), 'oneflow.matmul', 'flow.matmul', (['input_blob', 'output_weights_blob'], {'transpose_b': '(True)'}), '(input_blob, output_weights_blob, transpose_b=True)\n', (7010, 7061), True, 'import oneflow as flow\n'), ((7083, 7124), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['logit_blob', 'output_bias'], {}), '(logit_blob, output_bias)\n', (7099, 7124), True, 'import oneflow as flow\n'), ((7503, 7547), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""cls-seq_relationship"""'], {}), "('cls-seq_relationship')\n", (7523, 7547), True, 'import oneflow as flow\n'), ((8010, 8071), 'oneflow.matmul', 'flow.matmul', (['input_blob', 'output_weight_blob'], {'transpose_b': '(True)'}), '(input_blob, output_weight_blob, transpose_b=True)\n', (8021, 8071), True, 'import oneflow as flow\n'), ((8093, 8139), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['logit_blob', 'output_bias_blob'], {}), '(logit_blob, output_bias_blob)\n', (8109, 8139), True, 'import oneflow as flow\n'), ((8167, 8258), 'oneflow.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', ([], {'logits': 'logit_blob', 'labels': 'label_blob'}), '(logits=logit_blob, labels=\n label_blob)\n', (8215, 
8258), True, 'import oneflow as flow\n'), ((766, 777), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (775, 777), False, 'import os\n'), ((2404, 2439), 'bert.GetActivation', 'bert_util.GetActivation', (['hidden_act'], {}), '(hidden_act)\n', (2427, 2439), True, 'import bert as bert_util\n'), ((4021, 4054), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""transform"""'], {}), "('transform')\n", (4041, 4054), True, 'import oneflow as flow\n'), ((5382, 5410), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""loss"""'], {}), "('loss')\n", (5402, 5410), True, 'import oneflow as flow\n'), ((5436, 5478), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['numerator'], {'axis': '[-1]'}), '(numerator, axis=[-1])\n', (5456, 5478), True, 'import oneflow as flow\n'), ((6022, 6055), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""transform"""'], {}), "('transform')\n", (6042, 6055), True, 'import oneflow as flow\n'), ((3085, 3131), 'bert.CreateInitializer', 'bert_util.CreateInitializer', (['initializer_range'], {}), '(initializer_range)\n', (3112, 3131), True, 'import bert as bert_util\n'), ((4626, 4671), 'bert._LayerNorm', 'bert_util._LayerNorm', (['input_blob', 'hidden_size'], {}), '(input_blob, hidden_size)\n', (4646, 4671), True, 'import bert as bert_util\n'), ((4837, 4867), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (4862, 4867), True, 'import oneflow as flow\n'), ((6656, 6701), 'bert._LayerNorm', 'bert_util._LayerNorm', (['input_blob', 'hidden_size'], {}), '(input_blob, hidden_size)\n', (6676, 6701), True, 'import bert as bert_util\n'), ((6936, 6966), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(1.0)'], {}), '(1.0)\n', (6961, 6966), True, 'import oneflow as flow\n'), ((7728, 7774), 'bert.CreateInitializer', 'bert_util.CreateInitializer', (['initializer_range'], {}), '(initializer_range)\n', (7755, 7774), True, 'import bert as bert_util\n'), ((7947, 7977), 
'oneflow.constant_initializer', 'flow.constant_initializer', (['(0.0)'], {}), '(0.0)\n', (7972, 7977), True, 'import oneflow as flow\n'), ((4416, 4462), 'bert.CreateInitializer', 'bert_util.CreateInitializer', (['initializer_range'], {}), '(initializer_range)\n', (4443, 4462), True, 'import bert as bert_util\n'), ((6446, 6492), 'bert.CreateInitializer', 'bert_util.CreateInitializer', (['initializer_range'], {}), '(initializer_range)\n', (6473, 6492), True, 'import bert as bert_util\n')] |
import oneflow as flow
from oneflow.utils.data import Dataset
import pickle
import json
import numpy as np
from oneflow.utils.data import DataLoader
class CollateFn(object):
def __init__(self, frame_size):
self.frame_size = frame_size
def make_frames(self, tensor):
out = tensor.view(
tensor.size(0),
tensor.size(1) // self.frame_size,
self.frame_size * tensor.size(2),
)
out = out.transpose(1, 2)
return out
def __call__(self, l):
data_tensor = flow.tensor(np.array(l))
segment = self.make_frames(data_tensor)
return segment
def get_data_loader(
dataset, batch_size, frame_size, shuffle=True, num_workers=0, drop_last=False
):
_collate_fn = CollateFn(frame_size=frame_size)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=shuffle,
num_workers=num_workers,
collate_fn=_collate_fn,
)
return dataloader
class SequenceDataset(Dataset):
def __init__(self, data):
self.data = data
self.utt_ids = list(self.data.keys())
def __getitem__(self, ind):
utt_id = self.utt_ids[ind]
ret = self.data[utt_id].transpose()
return ret
def __len__(self):
return len(self.utt_ids)
class PickleDataset(Dataset):
def __init__(self, pickle_path, sample_index_path, segment_size):
with open(pickle_path, "rb") as f:
self.data = pickle.load(f)
with open(sample_index_path, "r") as f:
self.indexes = json.load(f)
self.segment_size = segment_size
def __getitem__(self, ind):
utt_id, t = self.indexes[ind]
segment = self.data[utt_id][t : t + self.segment_size]
return segment
def __len__(self):
return len(self.indexes)
| [
"oneflow.utils.data.DataLoader"
] | [((818, 931), 'oneflow.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'batch_size', 'shuffle': 'shuffle', 'num_workers': 'num_workers', 'collate_fn': '_collate_fn'}), '(dataset, batch_size=batch_size, shuffle=shuffle, num_workers=\n num_workers, collate_fn=_collate_fn)\n', (828, 931), False, 'from oneflow.utils.data import DataLoader\n'), ((558, 569), 'numpy.array', 'np.array', (['l'], {}), '(l)\n', (566, 569), True, 'import numpy as np\n'), ((1488, 1502), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (1499, 1502), False, 'import pickle\n'), ((1578, 1590), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1587, 1590), False, 'import json\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence, Union
import oneflow
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("pad")
def pad(
x: remote_blob_util.BlobDef,
paddings: Sequence[int],
constant_value: Union[int, float] = 0,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
padding_before = []
padding_after = []
if isinstance(paddings, (list, tuple)):
assert len(paddings) == len(x.shape), ValueError(
"paddings must be the same size of input dims"
)
for p in paddings:
assert isinstance(p, (list, tuple)) and len(p) == 2, ValueError(
"the elem of paddings must be a tuple or a list with length of 2"
)
padding_before.append(p[0])
padding_after.append(p[1])
else:
raise ValueError("paddings must be a tuple or a list.")
return (
oneflow.user_op_builder(name if name is not None else id_util.UniqueStr("Pad_"))
.Op("pad")
.Input("x", [x])
.Output("y")
.Attr("padding_before", padding_before)
.Attr("padding_after", padding_after)
.Attr("floating_constant_value", float(constant_value))
.Attr("integral_constant_value", int(constant_value))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("pad_grad")
def pad_grad(
x: remote_blob_util.BlobDef,
paddings: Sequence[int],
constant_value: Union[int, float] = 0,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
padding_before = []
padding_after = []
if isinstance(paddings, (list, tuple)):
assert len(paddings) == len(x.shape), ValueError(
"paddings must be the same size of input dims"
)
for p in paddings:
assert isinstance(p, (list, tuple)) and len(p) == 2, ValueError(
"the elem of paddings must be a tuple or a list with length of 2"
)
padding_before.append(p[0])
padding_after.append(p[1])
else:
raise ValueError("paddings must be a tuple or a list.")
return (
oneflow.user_op_builder(
name if name is not None else id_util.UniqueStr("PadGrad_")
)
.Op("pad_grad")
.Input("dy", [x])
.Output("dx")
.Attr("padding_before", padding_before)
.Attr("padding_after", padding_after)
.Attr("floating_constant_value", float(constant_value))
.Attr("integral_constant_value", int(constant_value))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("same_padding")
def same_padding(
x, padding, data_format, kernel_size, strides, dilation_rate, name=None,
):
assert isinstance(padding, str) and (
padding.upper() == "SAME_LOWER" or padding.upper() == "SAME_UPPER"
), 'padding must be "SAME_LOWER" or "SAME_UPPER".'
channel_pos = "channels_first" if data_format.startswith("NC") else "channels_last"
assert isinstance(kernel_size, (list, tuple))
assert isinstance(strides, (list, tuple))
assert isinstance(dilation_rate, (list, tuple))
num_spatial_dims = len(x.shape) - 2
assert len(kernel_size) == num_spatial_dims
assert len(strides) == num_spatial_dims
assert len(dilation_rate) == num_spatial_dims
return (
oneflow.user_op_builder(
name if name is not None else id_util.UniqueStr("SamePadding_")
)
.Op("same_padding")
.Input("x", [x])
.Output("y")
.Attr("padding", padding.lower())
.Attr("data_format", channel_pos)
.Attr("kernel_size", kernel_size)
.Attr("strides", strides)
.Attr("dilation_rate", dilation_rate)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((866, 887), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""pad"""'], {}), "('pad')\n", (880, 887), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((2104, 2130), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""pad_grad"""'], {}), "('pad_grad')\n", (2118, 2130), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3385, 3415), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""same_padding"""'], {}), "('same_padding')\n", (3399, 3415), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1711, 1736), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Pad_"""'], {}), "('Pad_')\n", (1728, 1736), True, 'import oneflow.python.framework.id_util as id_util\n'), ((2972, 3001), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""PadGrad_"""'], {}), "('PadGrad_')\n", (2989, 3001), True, 'import oneflow.python.framework.id_util as id_util\n'), ((4193, 4226), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SamePadding_"""'], {}), "('SamePadding_')\n", (4210, 4226), True, 'import oneflow.python.framework.id_util as id_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@autotest(n=10, auto_backward=True, check_graph=False)
def _test_scatter_random_data(test_case, placement):
input = random_tensor(ndim=2, dim0=2, dim1=2).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
src = random_tensor(ndim=2, dim0=2, dim1=2).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
index = (
torch.tensor(np.array([[0, 1], [1, 0]]), dtype=torch.int64)
.to_global(flow.env.all_device_placement("cpu"), [flow.sbp.broadcast,])
.to_global(placement, sbp=random_sbp(placement, max_dim=2),)
)
dim = random(0, 2).to(int).value()
return torch.scatter(input, dim, index, src)
@autotest(n=10, auto_backward=True, check_graph=False)
def _test_scatter_scalar_random_data(test_case, placement):
input = random_tensor(ndim=2, dim0=2, dim1=2).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
index = (
torch.tensor(np.array([[0, 1], [1, 0]]), dtype=torch.int64)
.to_global(flow.env.all_device_placement("cpu"), [flow.sbp.broadcast,])
.to_global(placement, sbp=random_sbp(placement, max_dim=2),)
)
dim = random(0, 2).to(int).value()
return torch.scatter(input, dim, index, 3.14)
@autotest(n=10, auto_backward=True, check_graph=False)
def _test_scatter_add_random_data(test_case, placement):
input = random_tensor(ndim=2, dim0=2, dim1=2).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
src = random_tensor(ndim=2, dim0=2, dim1=2).to_global(
placement=placement, sbp=random_sbp(placement, max_dim=2)
)
index = (
torch.tensor(np.array([[0, 1], [1, 0]]), dtype=torch.int64)
.to_global(flow.env.all_device_placement("cpu"), [flow.sbp.broadcast,])
.to_global(placement, sbp=random_sbp(placement, max_dim=2),)
)
dim = random(0, 2).to(int).value()
return torch.scatter_add(input, dim, index, src)
@flow.unittest.skip_unless_1n2d()
class TestScatterOps(flow.unittest.TestCase):
@globaltest
def test_scatter_ops(test_case):
for placement in all_placement():
_test_scatter_random_data(test_case, placement)
_test_scatter_scalar_random_data(test_case, placement)
_test_scatter_add_random_data(test_case, placement)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.env.all_device_placement",
"oneflow.unittest.skip_unless_1n2d"
] | [((2712, 2744), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (2742, 2744), True, 'import oneflow as flow\n'), ((3110, 3125), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3123, 3125), False, 'import unittest\n'), ((1202, 1238), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (1231, 1238), True, 'import oneflow as flow\n'), ((1777, 1813), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (1806, 1813), True, 'import oneflow as flow\n'), ((2481, 2517), 'oneflow.env.all_device_placement', 'flow.env.all_device_placement', (['"""cpu"""'], {}), "('cpu')\n", (2510, 2517), True, 'import oneflow as flow\n'), ((1136, 1162), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1144, 1162), True, 'import numpy as np\n'), ((1711, 1737), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (1719, 1737), True, 'import numpy as np\n'), ((2415, 2441), 'numpy.array', 'np.array', (['[[0, 1], [1, 0]]'], {}), '([[0, 1], [1, 0]])\n', (2423, 2441), True, 'import numpy as np\n')] |
import os
import numpy as np
import argparse
from datetime import datetime
import cv2
import random
import oneflow as flow
import oneflow.typing as tp
import vgg16_model
import style_model
CONSOLE_ARGUMENTS = None
def float_list(x):
return list(map(float, x.split(',')))
def load_image(image_path):
im = cv2.imread(image_path)
im = cv2.resize(im, (CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size))
im = cv2.cvtColor(im, cv2.COLOR_BGR2RGB)
im = np.transpose(im, (2, 0, 1))
im = np.expand_dims(im, axis=0)
return np.ascontiguousarray(im, 'float32')
def recover_image(im):
im = np.squeeze(im)
im = np.transpose(im, (1, 2, 0))
im = cv2.cvtColor(np.float32(im), cv2.COLOR_RGB2BGR)
return im.astype(np.uint8)
def get_train_config():
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float32)
func_config.default_logical_view(flow.scope.consistent_view())
return func_config
def get_predict_config():
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float32)
func_config.default_logical_view(flow.scope.consistent_view())
return func_config
def main(args):
global CONSOLE_ARGUMENTS
CONSOLE_ARGUMENTS = args
@flow.global_function("train", get_train_config())
def TrainNet(
image: tp.Numpy.Placeholder((1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
mean: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
std: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
style_image_relu1_2: tp.Numpy.Placeholder((1, 64, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
style_image_relu2_2: tp.Numpy.Placeholder((1, 128, CONSOLE_ARGUMENTS.train_image_size // 2, CONSOLE_ARGUMENTS.train_image_size // 2), dtype = flow.float32),
style_image_relu3_3: tp.Numpy.Placeholder((1, 256, CONSOLE_ARGUMENTS.train_image_size // 4, CONSOLE_ARGUMENTS.train_image_size // 4), dtype = flow.float32),
style_image_relu4_3: tp.Numpy.Placeholder((1, 512, CONSOLE_ARGUMENTS.train_image_size // 8, CONSOLE_ARGUMENTS.train_image_size // 8), dtype = flow.float32),
):
with flow.scope.placement("gpu", "0:0-0"):
style_out = style_model.styleNet(image, trainable = True)
image_norm = (image - mean) / std
org_content_relu2_2 = vgg16_model.vgg16bn_content_layer(image_norm, trainable = False, training = False)
style_out_norm = (style_out - mean) / std
style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3 = vgg16_model.vgg16bn_style_layer(style_out_norm, trainable = False, training = False)
# compute mean square error loss
content_loss = style_model.mse_loss(org_content_relu2_2 - style_out_relu2_2)
style_loss = style_model.mse_loss(style_model.gram_matrix(style_out_relu1_2) - style_model.gram_matrix(style_image_relu1_2)) \
+ style_model.mse_loss(style_model.gram_matrix(style_out_relu2_2) - style_model.gram_matrix(style_image_relu2_2)) \
+ style_model.mse_loss(style_model.gram_matrix(style_out_relu3_3) - style_model.gram_matrix(style_image_relu3_3)) \
+ style_model.mse_loss(style_model.gram_matrix(style_out_relu4_3) - style_model.gram_matrix(style_image_relu4_3))
loss = content_loss * CONSOLE_ARGUMENTS.content_weight + style_loss * CONSOLE_ARGUMENTS.style_weight
flow.optimizer.Adam(flow.optimizer.PiecewiseConstantScheduler([], [CONSOLE_ARGUMENTS.learning_rate])).minimize(loss)
return style_out, loss
@flow.global_function("predict", get_predict_config())
def getVgg16MiddleLayers(
style_image: tp.Numpy.Placeholder((1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size), dtype = flow.float32),
mean: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32),
std: tp.Numpy.Placeholder((1, 3, 1, 1), dtype = flow.float32)):
with flow.scope.placement("gpu", "0:0-0"):
style_image = (style_image - mean) / std
style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3 = vgg16_model.vgg16bn_style_layer(style_image, trainable = False, training = False)
return style_out_relu1_2, style_out_relu2_2, style_out_relu3_3, style_out_relu4_3
check_point = flow.train.CheckPoint()
check_point.load(CONSOLE_ARGUMENTS.model_load_dir)
mean_nd = np.array(float_list(CONSOLE_ARGUMENTS.rgb_mean)).reshape((1, 3, 1, 1)).astype(np.float32)
std_nd = np.array(float_list(CONSOLE_ARGUMENTS.rgb_std)).reshape((1, 3, 1, 1)).astype(np.float32)
# prepare style image vgg16 middle layer outputs
style_image = load_image(CONSOLE_ARGUMENTS.style_image_path)
style_image_recover = recover_image(style_image)
style_image_relu1_2, style_image_relu2_2, style_image_relu3_3, style_image_relu4_3 = \
getVgg16MiddleLayers(style_image, mean_nd, std_nd).get()
style_image_relu1_2 = style_image_relu1_2.numpy()
style_image_relu2_2 = style_image_relu2_2.numpy()
style_image_relu3_3 = style_image_relu3_3.numpy()
style_image_relu4_3 = style_image_relu4_3.numpy()
train_images = os.listdir(CONSOLE_ARGUMENTS.dataset_path)
random.shuffle(train_images)
images_num = len(train_images)
print("dataset size: %d" % images_num)
for e in range(CONSOLE_ARGUMENTS.train_epoch):
for i in range(images_num):
image = load_image("%s/%s" % (CONSOLE_ARGUMENTS.dataset_path, train_images[i]))
style_out, loss = TrainNet(image, mean_nd, std_nd, style_image_relu1_2, style_image_relu2_2, style_image_relu3_3, style_image_relu4_3).get()
if i % 100 == 0:
image_recover = recover_image(image)
style_out_recover = recover_image(style_out.numpy())
result = np.concatenate((style_image_recover, image_recover), axis=1)
result = np.concatenate((result, style_out_recover), axis=1)
cv2.imwrite(CONSOLE_ARGUMENTS.save_tmp_image_path, result)
cur_loss = loss.numpy().mean()
check_point.save("%s/lr_%f_cw_%f_sw_%f_epoch_%d_iter_%d_loss_%f" % \
(CONSOLE_ARGUMENTS.model_save_dir, CONSOLE_ARGUMENTS.learning_rate, CONSOLE_ARGUMENTS.content_weight, CONSOLE_ARGUMENTS.style_weight, e, i, cur_loss))
print("epoch: %d, iter: %d, loss : %f" % (e, i, cur_loss))
def get_parser(parser=None):
    """Build the command-line argument parser for neural-style training.

    Args:
        parser: optional existing ``argparse.ArgumentParser`` to extend.
            If ``None`` (the default), a fresh parser is created.
            (Bug fix: the original accepted this argument but always
            discarded it by creating a new parser unconditionally.)

    Returns:
        The configured ``argparse.ArgumentParser``.
    """
    if parser is None:
        parser = argparse.ArgumentParser("flags for neural style")
    # dataset / image paths
    parser.add_argument("--dataset_path", type=str, default="./Coco/test2015", help="dataset path")
    parser.add_argument("--style_image_path", type=str, default="test_img/tiger.jpg", help="image path")
    parser.add_argument("--save_tmp_image_path", type=str, default="images/train_temp_image.jpg", help="image path")
    # data preprocessing: per-channel RGB normalization constants (comma-separated strings)
    parser.add_argument("--rgb_mean", type=str, default="123.68, 116.779, 103.939",
                        help="a tuple of size 3 for the mean rgb")
    parser.add_argument("--rgb_std", type=str, default="58.393, 57.12, 57.375",
                        help="a tuple of size 3 for the std rgb")
    # snapshot directories; load dir default is timestamped at parser-construction time
    parser.add_argument("--model_load_dir", type=str,
                        default="./output/snapshots/model_save-{}".format(
                            str(datetime.now().strftime("%Y%m%d%H%M%S"))),
                        help="model save directory",
                        )
    parser.add_argument("--model_save_dir", type=str, default="./checkpoints", help="model save directory")
    # training hyper-parameters
    parser.add_argument("--train_epoch", type=int, default=2)
    parser.add_argument("--learning_rate", type=float, default=0.001)
    parser.add_argument("--content_weight", type=float, default=1)
    parser.add_argument("--style_weight", type=float, default=50)
    parser.add_argument("--train_image_size", type=int, default=224)
    return parser
if __name__ == "__main__":
    # Entry point: parse CLI flags and hand them to the training driver.
    main(get_parser().parse_args())
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.FunctionConfig",
"oneflow.scope.placement",
"oneflow.train.CheckPoint"
] | [((316, 338), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (326, 338), False, 'import cv2\n'), ((348, 441), 'cv2.resize', 'cv2.resize', (['im', '(CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size)'], {}), '(im, (CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.\n train_image_size))\n', (358, 441), False, 'import cv2\n'), ((446, 481), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (458, 481), False, 'import cv2\n'), ((491, 518), 'numpy.transpose', 'np.transpose', (['im', '(2, 0, 1)'], {}), '(im, (2, 0, 1))\n', (503, 518), True, 'import numpy as np\n'), ((528, 554), 'numpy.expand_dims', 'np.expand_dims', (['im'], {'axis': '(0)'}), '(im, axis=0)\n', (542, 554), True, 'import numpy as np\n'), ((566, 601), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['im', '"""float32"""'], {}), "(im, 'float32')\n", (586, 601), True, 'import numpy as np\n'), ((635, 649), 'numpy.squeeze', 'np.squeeze', (['im'], {}), '(im)\n', (645, 649), True, 'import numpy as np\n'), ((659, 686), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (671, 686), True, 'import numpy as np\n'), ((818, 839), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (837, 839), True, 'import oneflow as flow\n'), ((1023, 1044), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1042, 1044), True, 'import oneflow as flow\n'), ((4528, 4551), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (4549, 4551), True, 'import oneflow as flow\n'), ((5380, 5422), 'os.listdir', 'os.listdir', (['CONSOLE_ARGUMENTS.dataset_path'], {}), '(CONSOLE_ARGUMENTS.dataset_path)\n', (5390, 5422), False, 'import os\n'), ((5427, 5455), 'random.shuffle', 'random.shuffle', (['train_images'], {}), '(train_images)\n', (5441, 5455), False, 'import random\n'), ((6700, 6749), 'argparse.ArgumentParser', 'argparse.ArgumentParser', (['"""flags for neural 
style"""'], {}), "('flags for neural style')\n", (6723, 6749), False, 'import argparse\n'), ((709, 723), 'numpy.float32', 'np.float32', (['im'], {}), '(im)\n', (719, 723), True, 'import numpy as np\n'), ((925, 953), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (951, 953), True, 'import oneflow as flow\n'), ((1130, 1158), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1156, 1158), True, 'import oneflow as flow\n'), ((1347, 1471), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size)'], {'dtype': 'flow.float32'}), '((1, 3, CONSOLE_ARGUMENTS.train_image_size,\n CONSOLE_ARGUMENTS.train_image_size), dtype=flow.float32)\n', (1367, 1471), True, 'import oneflow.typing as tp\n'), ((1485, 1539), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, 1, 1)'], {'dtype': 'flow.float32'}), '((1, 3, 1, 1), dtype=flow.float32)\n', (1505, 1539), True, 'import oneflow.typing as tp\n'), ((1556, 1610), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, 1, 1)'], {'dtype': 'flow.float32'}), '((1, 3, 1, 1), dtype=flow.float32)\n', (1576, 1610), True, 'import oneflow.typing as tp\n'), ((1643, 1768), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 64, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size)'], {'dtype': 'flow.float32'}), '((1, 64, CONSOLE_ARGUMENTS.train_image_size,\n CONSOLE_ARGUMENTS.train_image_size), dtype=flow.float32)\n', (1663, 1768), True, 'import oneflow.typing as tp\n'), ((1797, 1934), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 128, CONSOLE_ARGUMENTS.train_image_size // 2, CONSOLE_ARGUMENTS.\n train_image_size // 2)'], {'dtype': 'flow.float32'}), '((1, 128, CONSOLE_ARGUMENTS.train_image_size // 2, \n CONSOLE_ARGUMENTS.train_image_size // 2), dtype=flow.float32)\n', (1817, 1934), True, 'import oneflow.typing as 
tp\n'), ((1962, 2099), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 256, CONSOLE_ARGUMENTS.train_image_size // 4, CONSOLE_ARGUMENTS.\n train_image_size // 4)'], {'dtype': 'flow.float32'}), '((1, 256, CONSOLE_ARGUMENTS.train_image_size // 4, \n CONSOLE_ARGUMENTS.train_image_size // 4), dtype=flow.float32)\n', (1982, 2099), True, 'import oneflow.typing as tp\n'), ((2127, 2264), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 512, CONSOLE_ARGUMENTS.train_image_size // 8, CONSOLE_ARGUMENTS.\n train_image_size // 8)'], {'dtype': 'flow.float32'}), '((1, 512, CONSOLE_ARGUMENTS.train_image_size // 8, \n CONSOLE_ARGUMENTS.train_image_size // 8), dtype=flow.float32)\n', (2147, 2264), True, 'import oneflow.typing as tp\n'), ((2284, 2320), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-0"""'], {}), "('gpu', '0:0-0')\n", (2304, 2320), True, 'import oneflow as flow\n'), ((2346, 2389), 'style_model.styleNet', 'style_model.styleNet', (['image'], {'trainable': '(True)'}), '(image, trainable=True)\n', (2366, 2389), False, 'import style_model\n'), ((2473, 2551), 'vgg16_model.vgg16bn_content_layer', 'vgg16_model.vgg16bn_content_layer', (['image_norm'], {'trainable': '(False)', 'training': '(False)'}), '(image_norm, trainable=False, training=False)\n', (2506, 2551), False, 'import vgg16_model\n'), ((2712, 2797), 'vgg16_model.vgg16bn_style_layer', 'vgg16_model.vgg16bn_style_layer', (['style_out_norm'], {'trainable': '(False)', 'training': '(False)'}), '(style_out_norm, trainable=False, training=False\n )\n', (2743, 2797), False, 'import vgg16_model\n'), ((2870, 2931), 'style_model.mse_loss', 'style_model.mse_loss', (['(org_content_relu2_2 - style_out_relu2_2)'], {}), '(org_content_relu2_2 - style_out_relu2_2)\n', (2890, 2931), False, 'import style_model\n'), ((3876, 4000), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, CONSOLE_ARGUMENTS.train_image_size, CONSOLE_ARGUMENTS.train_image_size)'], 
{'dtype': 'flow.float32'}), '((1, 3, CONSOLE_ARGUMENTS.train_image_size,\n CONSOLE_ARGUMENTS.train_image_size), dtype=flow.float32)\n', (3896, 4000), True, 'import oneflow.typing as tp\n'), ((4014, 4068), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, 1, 1)'], {'dtype': 'flow.float32'}), '((1, 3, 1, 1), dtype=flow.float32)\n', (4034, 4068), True, 'import oneflow.typing as tp\n'), ((4085, 4139), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(1, 3, 1, 1)'], {'dtype': 'flow.float32'}), '((1, 3, 1, 1), dtype=flow.float32)\n', (4105, 4139), True, 'import oneflow.typing as tp\n'), ((4157, 4193), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:0-0"""'], {}), "('gpu', '0:0-0')\n", (4177, 4193), True, 'import oneflow as flow\n'), ((4337, 4414), 'vgg16_model.vgg16bn_style_layer', 'vgg16_model.vgg16bn_style_layer', (['style_image'], {'trainable': '(False)', 'training': '(False)'}), '(style_image, trainable=False, training=False)\n', (4368, 4414), False, 'import vgg16_model\n'), ((6044, 6104), 'numpy.concatenate', 'np.concatenate', (['(style_image_recover, image_recover)'], {'axis': '(1)'}), '((style_image_recover, image_recover), axis=1)\n', (6058, 6104), True, 'import numpy as np\n'), ((6130, 6181), 'numpy.concatenate', 'np.concatenate', (['(result, style_out_recover)'], {'axis': '(1)'}), '((result, style_out_recover), axis=1)\n', (6144, 6181), True, 'import numpy as np\n'), ((6198, 6256), 'cv2.imwrite', 'cv2.imwrite', (['CONSOLE_ARGUMENTS.save_tmp_image_path', 'result'], {}), '(CONSOLE_ARGUMENTS.save_tmp_image_path, result)\n', (6209, 6256), False, 'import cv2\n'), ((3398, 3440), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_out_relu4_3'], {}), '(style_out_relu4_3)\n', (3421, 3440), False, 'import style_model\n'), ((3443, 3487), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_image_relu4_3'], {}), '(style_image_relu4_3)\n', (3466, 3487), False, 'import style_model\n'), ((3636, 
3721), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[CONSOLE_ARGUMENTS.learning_rate]'], {}), '([], [CONSOLE_ARGUMENTS.learning_rate]\n )\n', (3677, 3721), True, 'import oneflow as flow\n'), ((3258, 3300), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_out_relu3_3'], {}), '(style_out_relu3_3)\n', (3281, 3300), False, 'import style_model\n'), ((3303, 3347), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_image_relu3_3'], {}), '(style_image_relu3_3)\n', (3326, 3347), False, 'import style_model\n'), ((7605, 7619), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (7617, 7619), False, 'from datetime import datetime\n'), ((2978, 3020), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_out_relu1_2'], {}), '(style_out_relu1_2)\n', (3001, 3020), False, 'import style_model\n'), ((3023, 3067), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_image_relu1_2'], {}), '(style_image_relu1_2)\n', (3046, 3067), False, 'import style_model\n'), ((3118, 3160), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_out_relu2_2'], {}), '(style_out_relu2_2)\n', (3141, 3160), False, 'import style_model\n'), ((3163, 3207), 'style_model.gram_matrix', 'style_model.gram_matrix', (['style_image_relu2_2'], {}), '(style_image_relu2_2)\n', (3186, 3207), False, 'import style_model\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from automated_test_util import *
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
# Fixture: conv2d kernel weights, shape (3, 1, 3, 3) — 3 output channels,
# 1 input channel, 3x3 kernel. Golden values shared with the expected
# outputs/gradients below; presumably generated with a reference framework.
test_conv2d_weight = np.array(
    [
        [
            [
                [0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
                [0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
                [1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
            ]
        ],
        [
            [
                [0.29670074582099915, 1.3111951351165771, 0.5035904049873352],
                [-1.1894450187683105, -0.5502137541770935, -1.591875672340393],
                [-1.1081947088241577, 0.07872020453214645, -0.9185634255409241],
            ]
        ],
        [
            [
                [-0.7457143664360046, -1.2080862522125244, 1.8140212297439575],
                [-1.5227429866790771, -2.515244960784912, -1.3549325466156006],
                [-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],
            ]
        ],
    ]
)
test_conv2d_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
],
[
1.5580710172653198,
-0.5459445714950562,
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
],
[
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
0.37723132967948914,
],
[
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
],
[
1.830764889717102,
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
],
]
]
]
)
test_conv2d_data_grad = np.array(
[
[
[
[
0.4095913469791412,
0.2847584038972855,
2.803684800863266,
2.3940934538841248,
2.5189263969659805,
],
[
-1.9525419473648071,
-4.606781497597694,
-3.51521897315979,
-1.562677025794983,
1.0915625244379044,
],
[
-2.1141327619552612,
-6.987950943410397,
-5.84306687861681,
-3.7289341166615486,
1.1448840647935867,
],
[
-2.5237241089344025,
-7.272709347307682,
-8.646751679480076,
-6.123027570545673,
-1.3740423321723938,
],
[
-0.1615908145904541,
-2.381169445812702,
-2.32784790545702,
-2.1662570908665657,
0.0533215403556824,
],
]
]
]
)
test_conv2d_weight_grad = np.array(
[
[
[
[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],
[-3.095237225294113, -4.835702538490295, -1.8706469237804413],
[-1.0139376372098923, -6.076017692685127, -5.780256435275078],
]
],
[
[
[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],
[-3.095237225294113, -4.835702538490295, -1.8706469237804413],
[-1.0139376372098923, -6.076017692685127, -5.780256435275078],
]
],
[
[
[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],
[-3.095237225294113, -4.835702538490295, -1.8706469237804413],
[-1.0139376372098923, -6.076017692685127, -5.780256435275078],
]
],
]
)
# Fixture: expected conv2d forward output, shape (1, 3, 3, 3) — batch 1,
# 3 output channels, 3x3 spatial. Golden values; presumably precomputed
# with a reference framework — TODO confirm provenance.
test_conv2d_output = np.array(
    [
        [
            [
                [0.9699610471725464, -0.20758534967899323, 2.3857712745666504],
                [0.3666309118270874, 4.690882682800293, -8.203354835510254],
                [2.6072847843170166, -1.9033538103103638, 2.331153154373169],
            ],
            [
                [2.519343852996826, 2.3757898807525635, -1.6613528728485107],
                [0.5777544379234314, -3.5739502906799316, 5.349126815795898],
                [0.729295015335083, 1.5791023969650269, 3.7627718448638916],
            ],
            [
                [-0.27685487270355225, 6.446267127990723, -2.762883424758911],
                [-8.25644588470459, 9.616064071655273, 8.005367279052734],
                [-0.6944921016693115, 3.866114854812622, 4.788446426391602],
            ],
        ]
    ]
)
test_conv2d_with_bias_weight = np.array(
[
[
[
[1.8271433115005493, -1.0446699857711792, 1.0062190294265747],
[0.5174201130867004, -0.806931734085083, 1.3769007921218872],
[0.205885112285614, 0.9943519234657288, -0.23580588400363922],
]
],
[
[
[0.29881811141967773, -1.9982075691223145, 0.3511354625225067],
[-0.7644741535186768, 1.2594351768493652, -0.9629734754562378],
[0.5080506205558777, 0.7561734318733215, 1.6839302778244019],
]
],
[
[
[1.2573646306991577, 0.13123232126235962, 1.6403018236160278],
[-1.2138012647628784, 2.399970531463623, -0.38509097695350647],
[-0.9878040552139282, 0.9585888385772705, -1.4976465702056885],
]
],
]
)
# Fixture: bias vector for the with-bias conv2d test — one value per
# output channel (3 channels, matching test_conv2d_with_bias_weight).
test_conv2d_with_bias_bias = np.array(
    [0.6605162620544434, -0.18903568387031555, -0.27302607893943787]
)
test_conv2d_with_bias_data = np.array(
[
[
[
[
-0.47827261686325073,
-1.1739492416381836,
-0.7921845316886902,
0.9321041703224182,
-3.1557741165161133,
],
[
2.1935296058654785,
-0.5385921001434326,
-0.8611332774162292,
-1.881519079208374,
-0.7205708026885986,
],
[
-0.35601571202278137,
-0.15963983535766602,
1.797447681427002,
0.19594945013523102,
-1.7376397848129272,
],
[
0.047347065061330795,
0.14580930769443512,
0.32604914903640747,
0.4578782916069031,
-0.8942581415176392,
],
[
0.49383941292762756,
-0.9043426513671875,
-1.2140793800354004,
2.1564064025878906,
1.0938222408294678,
],
]
]
]
)
test_conv2d_with_bias_output = np.array(
[
[
[
[-0.05607491731643677, -0.185230553150177, -3.8808679580688477],
[6.861937046051025, -2.3341472148895264, -0.5597308874130249],
[1.8299254179000854, -2.770848274230957, 2.1958212852478027],
],
[
[2.9348952770233154, 4.117504119873047, -6.278541088104248],
[0.2638452351093292, 3.998856782913208, 2.612290620803833],
[-1.9891828298568726, -1.6476304531097412, 3.39066219329834],
],
[
[-8.44466781616211, 0.5747121572494507, -8.501373291015625],
[-0.036642804741859436, -0.23458999395370483, -2.370849370956421],
[2.8372013568878174, -2.987276077270508, 1.8382092714309692],
],
]
]
)
test_conv2d_group_weight = np.array(
[
[
[
[-0.7248556613922119, 1.1119636297225952, -0.47827261686325073],
[-1.1739492416381836, -0.7921845316886902, 0.9321041703224182],
[-3.1557741165161133, 2.1935296058654785, -0.5385921001434326],
]
],
[
[
[-0.8611332774162292, -1.881519079208374, -0.7205708026885986],
[-0.35601571202278137, -0.15963983535766602, 1.797447681427002],
[0.19594945013523102, -1.7376397848129272, 0.047347065061330795],
]
],
]
)
test_conv2d_group_data_grad = np.array(
[
[
[
[
-0.7248556613922119,
0.3871079683303833,
-0.0911646485328674,
0.6336910128593445,
-0.4782726168632507,
],
[
-1.8988049030303955,
-1.5790258049964905,
-1.125194251537323,
0.7736106514930725,
0.4538315534591675,
],
[
-5.054579019546509,
-2.5412703156471252,
-2.6260308623313904,
2.4285481572151184,
-0.0847605466842651,
],
[
-4.329723358154297,
-2.9283782839775085,
-2.534866213798523,
1.794857144355774,
0.3935120701789856,
],
[
-3.1557741165161133,
-0.9622445106506348,
-1.5008366107940674,
1.654937505722046,
-0.5385921001434326,
],
],
[
[
-0.8611332774162292,
-2.7426523566246033,
-3.463223159313202,
-2.6020898818969727,
-0.7205708026885986,
],
[
-1.2171489894390106,
-3.2583079040050507,
-2.1814310252666473,
-0.9642820358276367,
1.0768768787384033,
],
[
-1.0211995393037796,
-4.799998238682747,
-3.6757742948830128,
-2.654574755579233,
1.1242239437997341,
],
[
-0.1600662618875504,
-2.0573458820581436,
-0.2125511355698109,
-0.0524848736822605,
1.8447947464883327,
],
[
0.195949450135231,
-1.5416903346776962,
-1.4943432696163654,
-1.6902927197515965,
0.0473470650613308,
],
],
]
]
)
test_conv2d_group_weight_grad = np.array(
[
[
[
[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],
[-3.095237225294113, -4.835702538490295, -1.8706469237804413],
[-1.0139376372098923, -6.076017692685127, -5.780256435275078],
]
],
[
[
[3.30740749835968, -0.7220746576786041, -3.660933956503868],
[0.5273916646838188, -2.631059892475605, -7.6207195818424225],
[-3.5466641262173653, -8.214546449482441, -11.031560003757477],
]
],
]
)
test_conv2d_group_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
],
[
1.5580710172653198,
-0.5459445714950562,
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
],
[
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
0.37723132967948914,
],
[
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
],
[
1.830764889717102,
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
],
],
[
[
0.8586049675941467,
-0.2279418259859085,
0.2013147622346878,
0.35005471110343933,
0.5360521078109741,
],
[
1.5194443464279175,
1.9040879011154175,
-1.5734431743621826,
-0.14007866382598877,
0.29670074582099915,
],
[
1.3111951351165771,
0.5035904049873352,
-1.1894450187683105,
-0.5502137541770935,
-1.591875672340393,
],
[
-1.1081947088241577,
0.07872020453214645,
-0.9185634255409241,
-0.7457143664360046,
-1.2080862522125244,
],
[
1.8140212297439575,
-1.5227429866790771,
-2.515244960784912,
-1.3549325466156006,
-0.9574840068817139,
],
],
]
]
)
# Fixture: expected forward output for the grouped-conv2d test,
# shape (1, 2, 3, 3) — batch 1, 2 output channels, 3x3 spatial.
# Golden values; presumably precomputed with a reference framework.
test_conv2d_group_output = np.array(
    [
        [
            [
                [-8.836943626403809, 3.2316627502441406, 6.994439601898193],
                [-0.8386597037315369, -9.857108116149902, 13.68197250366211],
                [-13.020713806152344, 7.310227870941162, -3.3760271072387695],
            ],
            [
                [-4.803101539611816, 1.026240587234497, 0.5452112555503845],
                [-6.839838027954102, 2.0195930004119873, 0.11328654736280441],
                [0.393694669008255, 4.987061023712158, 3.297354221343994],
            ],
        ]
    ]
)
test_conv2d_padding_weight = np.array(
[
[
[
[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
[0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
[1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
]
]
]
)
test_conv2d_padding_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
],
[
1.5580710172653198,
-0.5459445714950562,
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
],
[
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
0.37723132967948914,
],
[
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
],
[
1.830764889717102,
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
],
]
]
]
)
test_conv2d_padding_data_grad = np.array(
[
[
[
[
3.237529069185257,
3.237529069185257,
3.237529069185257,
3.237529069185257,
3.237529069185257,
],
[
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
],
[
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
],
[
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
3.428095132112503,
],
[
2.596117228269577,
2.596117228269577,
2.596117228269577,
2.596117228269577,
2.596117228269577,
],
]
]
]
)
# Fixture: expected weight gradient for the padded conv2d test,
# shape (1, 1, 3, 3). Note each row is constant — consistent with the
# uniform data gradient fixture above. Golden values.
test_conv2d_padding_weight_grad = np.array(
    [
        [
            [
                [1.7594299167394638, 1.7594299167394638, 1.7594299167394638],
                [-0.6019042432308197, -0.6019042432308197, -0.6019042432308197],
                [-1.532561555504799, -1.532561555504799, -1.532561555504799],
            ]
        ]
    ]
)
test_conv2d_padding_output = np.array(
[
[
[
[
1.5489805936813354,
-1.0164761543273926,
5.277345657348633,
3.153532028198242,
-7.301508903503418,
-3.7565059661865234,
4.690962314605713,
],
[
2.425799608230591,
-2.0592665672302246,
0.9699610471725464,
-0.20758534967899323,
2.3857712745666504,
1.1719579696655273,
0.6523551940917969,
],
[
2.1625545024871826,
-1.3517316579818726,
0.3666309118270874,
4.690882682800293,
-8.203354835510254,
3.0248217582702637,
1.2624683380126953,
],
[
0.6193475723266602,
-2.0285415649414062,
2.6072847843170166,
-1.9033538103103638,
2.331153154373169,
-3.998155355453491,
-1.0176407098770142,
],
[
2.8643176555633545,
-0.7396122217178345,
-0.2253415733575821,
-2.846742630004883,
-4.961236476898193,
-0.1308247298002243,
-0.7344070672988892,
],
]
]
]
)
test_conv2d_stride_weight = np.array(
[
[
[
[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],
[0.35005471110343933, 0.5360521078109741, 1.5194443464279175],
[1.9040879011154175, -1.5734431743621826, -0.14007866382598877],
]
]
]
)
test_conv2d_stride_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
],
[
1.5580710172653198,
-0.5459445714950562,
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
],
[
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
0.37723132967948914,
],
[
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
],
[
1.830764889717102,
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
],
]
]
]
)
test_conv2d_stride_data_grad = np.array(
[
[
[
[
0.5360521078109741,
1.5194443464279175,
0.3500547111034393,
0.5360521078109741,
1.5194443464279175,
],
[
-1.8013850003480911,
0.061236098408699,
2.762692868709564,
-1.8013850003480911,
0.061236098408699,
],
[
0.5360521078109741,
1.5194443464279175,
0.3500547111034393,
0.5360521078109741,
1.5194443464279175,
],
[
-1.8013850003480911,
0.061236098408699,
2.762692868709564,
-1.8013850003480911,
0.061236098408699,
],
[
0.5360521078109741,
1.5194443464279175,
0.3500547111034393,
0.5360521078109741,
1.5194443464279175,
],
]
]
]
)
# Fixture: expected weight gradient for the strided conv2d test,
# shape (1, 1, 3, 3). Golden values; presumably precomputed with a
# reference framework — TODO confirm provenance.
test_conv2d_stride_weight_grad = np.array(
    [
        [
            [
                [-5.1135923862457275, 3.5859558284282684, 2.089697480201721],
                [-0.3276629596948624, 1.7587070614099503, -2.5950092673301697],
                [-5.1135923862457275, 3.5859558284282684, 2.089697480201721],
            ]
        ]
    ]
)
# Fixture: expected forward output for the strided conv2d test,
# shape (1, 1, 3, 2) — spatial size reduced by the stride. Golden values.
test_conv2d_stride_output = np.array(
    [
        [
            [
                [-1.0164761543273926, -7.301508903503418],
                [-1.3517316579818726, -8.203354835510254],
                [-0.7396122217178345, -4.961236476898193],
            ]
        ]
    ]
)
test_conv2d_kernel_weight = np.array(
[
[
[
[
-0.9574840068817139,
-0.7248556613922119,
1.1119636297225952,
-0.47827261686325073,
-1.1739492416381836,
],
[
-0.7921845316886902,
0.9321041703224182,
-3.1557741165161133,
2.1935296058654785,
-0.5385921001434326,
],
[
-0.8611332774162292,
-1.881519079208374,
-0.7205708026885986,
-0.35601571202278137,
-0.15963983535766602,
],
]
]
]
)
test_conv2d_kernel_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
1.5580710172653198,
-0.5459445714950562,
],
[
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
],
[
0.37723132967948914,
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
1.830764889717102,
],
[
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
0.8586049675941467,
-0.2279418259859085,
0.2013147622346878,
],
[
0.35005471110343933,
0.5360521078109741,
1.5194443464279175,
1.9040879011154175,
-1.5734431743621826,
-0.14007866382598877,
0.29670074582099915,
],
[
1.3111951351165771,
0.5035904049873352,
-1.1894450187683105,
-0.5502137541770935,
-1.591875672340393,
-1.1081947088241577,
0.07872020453214645,
],
[
-0.9185634255409241,
-0.7457143664360046,
-1.2080862522125244,
1.8140212297439575,
-1.5227429866790771,
-2.515244960784912,
-1.3549325466156006,
],
]
]
]
)
test_conv2d_kernel_data_grad = np.array(
[
[
[
[
-0.9574840068817139,
-1.6823396682739258,
-0.5703760385513306,
-0.0911646485328674,
-0.5402582287788391,
-1.6522218585014343,
-1.1739492416381836,
],
[
-1.749668538570404,
-1.5424200296401978,
-3.586230516433716,
-0.121304988861084,
-2.0410948395729065,
0.0027156472206116,
-1.7125413417816162,
],
[
-2.6108018159866333,
-4.285072386264801,
-7.049453675746918,
-3.079410582780838,
-3.2773211896419525,
-0.5129399001598358,
-1.8721811771392822,
],
[
-2.6108018159866333,
-4.285072386264801,
-7.049453675746918,
-3.079410582780838,
-3.2773211896419525,
-0.5129399001598358,
-1.8721811771392822,
],
[
-2.6108018159866333,
-4.285072386264801,
-7.049453675746918,
-3.079410582780838,
-3.2773211896419525,
-0.5129399001598358,
-1.8721811771392822,
],
[
-1.6533178091049194,
-2.6027327179908752,
-6.479077637195587,
-2.9882459342479706,
-2.7370629608631134,
1.1392819583415985,
-0.6982319355010986,
],
[
-0.8611332774162292,
-2.7426523566246033,
-3.463223159313202,
-2.958105593919754,
-1.236226350069046,
-0.5156555473804474,
-0.159639835357666,
],
]
]
]
)
test_conv2d_kernel_weight_grad = np.array(
[
[
[
[
2.974529668688774,
4.548736393451691,
1.1672898679971695,
-1.499158263206482,
0.1862268149852753,
],
[
1.6534235626459122,
2.3762744814157486,
-1.448018729686737,
-5.2917241007089615,
-2.278435029089451,
],
[
-2.083257421851158,
-2.23808591067791,
-5.749193429946899,
-7.540486767888069,
-6.306201495230198,
],
]
]
]
)
# Fixture: expected forward output for the non-square-kernel conv2d test,
# shape (1, 1, 5, 3) — matches the (3, 5) kernel applied to 7x7 input.
# Golden values; presumably precomputed with a reference framework.
test_conv2d_kernel_output = np.array(
    [
        [
            [
                [-3.5647754669189453, -4.234736919403076, 1.4046944379806519],
                [-0.6964312791824341, 16.42838478088379, -9.649789810180664],
                [4.312150478363037, -6.283960819244385, -4.8443922996521],
                [-2.772286891937256, -4.483709812164307, 12.315184593200684],
                [7.39893913269043, 1.305102825164795, -2.049992561340332],
            ]
        ]
    ]
)
test_conv2d_dilation_weight = np.array(
[
[
[
[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],
[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902],
[0.9321041703224182, -3.1557741165161133, 2.1935296058654785],
]
]
]
)
test_conv2d_dilation_data = np.array(
[
[
[
[
1.1630785465240479,
0.4838046133518219,
0.299563467502594,
0.15302546322345734,
-1.168814778327942,
1.5580710172653198,
-0.5459445714950562,
],
[
-2.3556296825408936,
0.5414402484893799,
2.678506374359131,
1.2546343803405762,
-0.5487740635871887,
-0.6810643672943115,
-0.13531559705734253,
],
[
0.37723132967948914,
0.41016456484794617,
0.5712682008743286,
-2.757962703704834,
1.0762799978256226,
-0.6141325235366821,
1.830764889717102,
],
[
-1.1468064785003662,
0.053837940096855164,
-2.5074806213378906,
-0.5916498899459839,
0.8586049675941467,
-0.2279418259859085,
0.2013147622346878,
],
[
0.35005471110343933,
0.5360521078109741,
1.5194443464279175,
1.9040879011154175,
-1.5734431743621826,
-0.14007866382598877,
0.29670074582099915,
],
[
1.3111951351165771,
0.5035904049873352,
-1.1894450187683105,
-0.5502137541770935,
-1.591875672340393,
-1.1081947088241577,
0.07872020453214645,
],
[
-0.9185634255409241,
-0.7457143664360046,
-1.2080862522125244,
1.8140212297439575,
-1.5227429866790771,
-2.515244960784912,
-1.3549325466156006,
],
]
]
]
)
test_conv2d_dilation_data_grad = np.array(
[
[
[
[
-0.9574840068817139,
0.0,
0.0,
-0.7248556613922119,
0.0,
0.0,
1.1119636297225952,
],
[
-0.9574840068817139,
0.0,
0.0,
-0.7248556613922119,
0.0,
0.0,
1.1119636297225952,
],
[
-1.4357566237449646,
0.0,
0.0,
-1.8988049030303955,
0.0,
0.0,
0.319779098033905,
],
[
-0.4782726168632507,
0.0,
0.0,
-1.1739492416381836,
0.0,
0.0,
-0.7921845316886902,
],
[
0.4538315534591675,
0.0,
0.0,
-4.329723358154297,
0.0,
0.0,
1.4013450741767883,
],
[
0.9321041703224182,
0.0,
0.0,
-3.1557741165161133,
0.0,
0.0,
2.1935296058654785,
],
[
0.9321041703224182,
0.0,
0.0,
-3.1557741165161133,
0.0,
0.0,
2.1935296058654785,
],
]
]
]
)
# Fixture: expected weight gradient for the dilated conv2d test,
# shape (1, 1, 3, 3). Golden values; presumably precomputed with a
# reference framework — TODO confirm provenance.
test_conv2d_dilation_weight_grad = np.array(
    [
        [
            [
                [-0.8153198063373566, -1.3503028601408005, 1.1495047211647034],
                [-0.4195204377174377, -1.4455246925354004, 2.328780397772789],
                [0.7426864206790924, 3.1678953766822815, -0.979511596262455],
            ]
        ]
    ]
)
# Fixture: expected forward output for the dilated conv2d test,
# shape (1, 1, 3, 1) — dilation shrinks the effective output width to 1.
test_conv2d_dilation_output = np.array(
    [[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]
)
def _test_conv2d(test_case, conv, data, weight, output, bias=None, device="cuda"):
    """Load `weight` (and optional `bias`) into `conv`, run a forward pass
    on `data`, and assert the result matches the golden `output` fixture."""
    dev = flow.device(device)
    conv.weight = flow.nn.Parameter(flow.Tensor(weight))
    if bias is not None:
        conv.bias = flow.nn.Parameter(flow.Tensor(bias))
    conv.to(dev)
    result = conv(flow.Tensor(data, device=dev))
    # Loose rtol because the golden values were captured as float32 literals.
    test_case.assertTrue(np.allclose(result.numpy(), output, rtol=0.001, atol=1e-07))
def _test_conv2d_backward(
    test_case, conv, data, weight, data_grad, weight_grad, bias=None, device="cuda"
):
    """Run forward + backward through `conv` with `weight` (and optional
    `bias`) installed, then assert the input and weight gradients match
    the golden `data_grad` / `weight_grad` fixtures."""
    dev = flow.device(device)
    x = flow.Tensor(data, device=dev, requires_grad=True)
    conv.weight = flow.nn.Parameter(flow.Tensor(weight), requires_grad=True)
    if bias is not None:
        conv.bias = flow.nn.Parameter(flow.Tensor(bias))
    conv.to(dev)
    # Sum the output so backward() has a scalar loss.
    conv(x).sum().backward()
    test_case.assertTrue(
        np.allclose(x.grad.numpy(), data_grad, rtol=0.0001, atol=1e-08)
    )
    test_case.assertTrue(
        np.allclose(conv.weight.grad.numpy(), weight_grad, rtol=0.0001, atol=1e-08)
    )
def _test_conv2d_large_in_channel(test_case, device):
    """Grouped conv with in_channels > out_channels: Conv2d(4, 2, 3, groups=2).

    Builds a fixed (1, 4, 4, 4) input and a fixed (2, 2, 3, 3) weight, runs
    forward and sum().backward() on ``device``, and checks both the forward
    output and the input gradient against hard-coded numpy references
    (tolerance 1e-6).
    """
    # Fixed input, shape (1, 4, 4, 4).
    np_arr = np.array(
        [
            [
                [
                    [
                        0.6206631238581714,
                        -1.1225329393404626,
                        0.8407155480700242,
                        -0.6845162855236345,
                    ],
                    [
                        -0.5186484633906412,
                        0.10420735184519186,
                        -0.1711568947473012,
                        0.5168640476046483,
                    ],
                    [
                        -0.12429464919764661,
                        0.050277779246134253,
                        -1.0144501797426606,
                        -2.184600444658526,
                    ],
                    [
                        0.28918126931309923,
                        -0.822872663244595,
                        0.44019150436683663,
                        -1.0247720130825562,
                    ],
                ],
                [
                    [
                        0.7786504412818226,
                        -0.7501839068078657,
                        -0.8187283189941765,
                        -1.1116653569170698,
                    ],
                    [
                        0.18085524152316743,
                        -1.3461349607476678,
                        1.142505437476448,
                        -0.000649619704040145,
                    ],
                    [
                        0.03160672782674317,
                        -0.006318157449953413,
                        1.2218487782604377,
                        0.15903027907930234,
                    ],
                    [
                        1.5857011815642381,
                        0.6656477116332891,
                        -0.04036621813223574,
                        -0.3427168687988546,
                    ],
                ],
                [
                    [
                        -1.1774346070102524,
                        1.6195241269303395,
                        -0.36185552303441965,
                        -1.1382193113192487,
                    ],
                    [
                        0.08061907334568702,
                        1.5025447613238763,
                        -1.1591348706634745,
                        1.6449050139676873,
                    ],
                    [
                        1.1539915649822392,
                        -2.414624939646017,
                        0.3056063774849572,
                        1.1920089257083162,
                    ],
                    [
                        0.7623012858982319,
                        -0.01685314742940813,
                        -1.096666898224702,
                        -0.4406476137098582,
                    ],
                ],
                [
                    [
                        0.9383797282214235,
                        -1.1075876842796508,
                        -0.4420913825139058,
                        -1.0736097610655628,
                    ],
                    [
                        -0.3101376466546291,
                        1.6578227745160954,
                        -0.6225454278031398,
                        0.6831188620748697,
                    ],
                    [
                        0.00743800968372913,
                        -0.8089158949698473,
                        2.08084287836801,
                        0.721204366332351,
                    ],
                    [
                        0.5694701823297723,
                        0.031519314469744895,
                        -0.5041680957766629,
                        -0.4738588233094669,
                    ],
                ],
            ]
        ]
    )
    # NOTE: `input` shadows the builtin; kept as-is to preserve the block.
    input = flow.Tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    # Fixed weight, shape (2, 2, 3, 3): 2 output channels, 2 input channels
    # per group (groups=2 splits the 4 input channels in half).
    weight = np.array(
        [
            [
                [
                    [0.06456436216831207, -0.10852358490228653, -0.21638715267181396],
                    [-0.2279110550880432, 0.1476770043373108, 0.19457484781742096],
                    [0.05026858672499657, 0.10818571597337723, 0.02056501805782318],
                ],
                [
                    [0.205095112323761, 0.1488947868347168, -0.2344113141298294],
                    [0.1684819906949997, -0.21986986696720123, 0.1082606166601181],
                    [-0.1528974026441574, 0.17120417952537537, 0.01954500749707222],
                ],
            ],
            [
                [
                    [-0.09441672265529633, -0.03644559532403946, -0.22235223650932312],
                    [-0.1771145612001419, 0.08043312281370163, 0.06938580423593521],
                    [0.054393064230680466, -0.05483492836356163, 0.23438701033592224],
                ],
                [
                    [0.22666795551776886, 0.0874653309583664, 0.07092718034982681],
                    [0.08883464336395264, -0.052362944930791855, -0.1720171570777893],
                    [0.10441060364246368, 0.011952142231166363, -0.0894528403878212],
                ],
            ],
        ]
    )
    m = flow.nn.Conv2d(4, 2, 3, groups=2, bias=False)
    m.weight = flow.nn.Parameter(flow.Tensor(weight), requires_grad=True)
    m = m.to(device)
    output = m(input)
    # Expected forward output, shape (1, 2, 2, 2).
    np_out = [
        [
            [
                [0.7666134238243103, -0.3961866497993469],
                [-0.656266987323761, -1.1613956689834595],
            ],
            [
                [0.3077264130115509, -0.42817503213882446],
                [-0.5761325359344482, 0.1300736665725708],
            ],
        ]
    ]
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-06, 1e-06))
    output = output.sum()
    output.backward()
    # Expected gradient of sum(output) w.r.t. the input, shape (1, 4, 4, 4).
    np_grad = [
        [
            [
                [
                    0.06456436216831207,
                    -0.04395922273397446,
                    -0.3249107301235199,
                    -0.21638715267181396,
                ],
                [
                    -0.16334669291973114,
                    -0.12419328093528748,
                    0.017341122031211853,
                    -0.021812304854393005,
                ],
                [
                    -0.17764246463775635,
                    0.07822024822235107,
                    0.47100257873535156,
                    0.21513986587524414,
                ],
                [
                    0.05026858672499657,
                    0.1584542989730835,
                    0.128750741481781,
                    0.02056501805782318,
                ],
            ],
            [
                [
                    0.205095112323761,
                    0.3539898991584778,
                    -0.08551652729511261,
                    -0.2344113141298294,
                ],
                [
                    0.3735771179199219,
                    0.30260205268859863,
                    -0.19712577760219574,
                    -0.1261506974697113,
                ],
                [
                    0.015584588050842285,
                    -0.03308109939098358,
                    0.07913993299007416,
                    0.12780562043190002,
                ],
                [
                    -0.1528974026441574,
                    0.018306776881217957,
                    0.1907491832971573,
                    0.01954500749707222,
                ],
            ],
            [
                [
                    -0.09441672265529633,
                    -0.13086232542991638,
                    -0.258797824382782,
                    -0.22235223650932312,
                ],
                [
                    -0.27153128385543823,
                    -0.22754377126693726,
                    -0.10897888988256454,
                    -0.1529664397239685,
                ],
                [
                    -0.12272149324417114,
                    -0.09712330251932144,
                    0.32937100529670715,
                    0.30377280712127686,
                ],
                [
                    0.054393064230680466,
                    -0.00044186413288116455,
                    0.1795520782470703,
                    0.23438701033592224,
                ],
            ],
            [
                [
                    0.22666795551776886,
                    0.31413328647613525,
                    0.1583925187587738,
                    0.07092718034982681,
                ],
                [
                    0.3155025839805603,
                    0.35060498118400574,
                    -0.06598758697509766,
                    -0.1010899767279625,
                ],
                [
                    0.19324524700641632,
                    0.1528344452381134,
                    -0.301880806684494,
                    -0.2614699900150299,
                ],
                [
                    0.10441060364246368,
                    0.11636274307966232,
                    -0.07750070095062256,
                    -0.0894528403878212,
                ],
            ],
        ]
    ]
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
def _test_conv2d_large_out_channel(test_case, device):
    """Grouped conv with out_channels > in_channels: Conv2d(2, 4, 3, groups=2).

    Builds a fixed (1, 2, 5, 5) input and a fixed (4, 1, 3, 3) weight, runs
    forward and sum().backward() on ``device``, and checks both the forward
    output and the input gradient against hard-coded numpy references
    (forward tolerance 1e-5/1e-6, gradient tolerance 1e-6).
    """
    # Fixed input, shape (1, 2, 5, 5).
    np_arr = np.array(
        [
            [
                [
                    [0.56573248, -0.1968932, -0.67875558, 0.34328273, 0.31964567],
                    [-1.33715475, 0.33422229, -1.27643383, 0.37904647, 0.35891593],
                    [0.84579802, 2.12729621, -0.51423287, 0.6129756, -1.31156564],
                    [-0.71047139, 1.02679253, -0.76686019, -0.72969633, 0.7342515],
                    [-0.13592879, -1.03207183, -0.22554775, 0.74148071, 0.9660151],
                ],
                [
                    [0.51595992, 0.49624804, 0.91145641, 0.49247262, 0.41002217],
                    [-1.08001196, 1.55497086, -0.8196314, -0.45511565, -0.60269165],
                    [0.05563145, -0.94318372, -1.17058158, -0.73568577, 0.57810956],
                    [-0.40260276, -0.10309298, 1.123788, -0.23510537, -0.73893374],
                    [-0.52712536, -0.00717016, -1.85051966, -1.5079056, 1.38335907],
                ],
            ]
        ]
    )
    # NOTE: `input` shadows the builtin; kept as-is to preserve the block.
    input = flow.Tensor(
        np_arr, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    # Fixed weight, shape (4, 1, 3, 3): 4 output channels, 1 input channel
    # per group (groups=2 gives two output channels per input channel).
    weight = np.array(
        [
            [
                [
                    [-0.19489679, -0.32377058, 0.21736273],
                    [0.04095296, -0.21552679, -0.14626531],
                    [-0.19359522, -0.00742865, -0.19832158],
                ]
            ],
            [
                [
                    [0.29926914, 0.00931164, 0.2619766],
                    [0.27611443, -0.15439281, -0.19027126],
                    [-0.2890912, 0.30367029, -0.05168664],
                ]
            ],
            [
                [
                    [-0.03155736, 0.17610769, 0.22111714],
                    [0.2279067, -0.32897446, -0.03260243],
                    [-0.10274851, -0.06903386, -0.19438276],
                ]
            ],
            [
                [
                    [-0.24573688, -0.06723209, -0.21363299],
                    [-0.02136187, -0.24994437, -0.18691199],
                    [0.12189507, 0.29469389, 0.03398871],
                ]
            ],
        ]
    )
    m = flow.nn.Conv2d(2, 4, 3, groups=2, bias=False)
    m.weight = flow.nn.Parameter(flow.Tensor(weight), requires_grad=True)
    m = m.to(device)
    output = m(input)
    # Expected forward output, shape (1, 4, 3, 3).
    np_out = np.array(
        [
            [
                [
                    [-0.21170563, 0.03652292, 0.25926736],
                    [-0.19168918, 0.49044561, 0.25099146],
                    [-1.0248934, 0.25361472, -0.51828313],
                ],
                [
                    [0.23977707, -0.56090075, -0.19285655],
                    [-0.17167747, 0.24558367, -0.3093586],
                    [-0.33303234, 1.52472734, -0.49013454],
                ],
                [
                    [-0.17137986, 1.21333742, 0.18988736],
                    [0.31785482, -0.1212157, -0.18676008],
                    [-0.10680684, -0.30298883, 0.41809759],
                ],
                [
                    [-0.87821335, -0.51665992, -0.44061098],
                    [0.7480458, 0.5310725, 0.50418228],
                    [-0.00512899, -0.3645584, -0.23643512],
                ],
            ]
        ]
    )
    test_case.assertTrue(np.allclose(output.numpy(), np_out, 1e-05, 1e-06))
    output = output.sum()
    output.backward()
    # Expected gradient of sum(output) w.r.t. the input, shape (1, 2, 5, 5).
    np_grad = np.array(
        [
            [
                [
                    [0.10437235, -0.21008658, 0.26925275, 0.16488039, 0.47933933],
                    [0.42143974, -0.2629388, -0.12013602, -0.54157579, 0.14280275],
                    [-0.06124666, -0.44938356, -0.55658901, -0.49534237, -0.10720548],
                    [-0.16561902, -0.23929697, -0.82584178, -0.66022277, -0.58654481],
                    [-0.4826864, -0.18644476, -0.43645298, 0.04623342, -0.25000823],
                ],
                [
                    [-0.27729425, -0.16841865, -0.16093449, 0.11635975, 0.00748415],
                    [-0.07074942, -0.54079264, -0.75282294, -0.68207347, -0.21203026],
                    [-0.05160286, -0.29598606, -0.66841042, -0.61680746, -0.3724243],
                    [0.22569139, -0.12756741, -0.50747585, -0.73316729, -0.37990844],
                    [0.01914656, 0.24480659, 0.08441254, 0.06526598, -0.16039404],
                ],
            ]
        ]
    )
    test_case.assertTrue(np.allclose(input.grad.numpy(), np_grad, 1e-06, 1e-06))
@flow.unittest.skip_unless_1n1d()
class TestConv2d(flow.unittest.TestCase):
    """Compare nn.Conv2d forward/backward against precomputed numpy references.

    Each test configures one Conv2d variant (bias, groups, padding, stride,
    kernel shape, dilation) and delegates to the module-level helpers
    `_test_conv2d` / `_test_conv2d_backward` with the matching fixture
    arrays, on both cuda and cpu.
    """

    def test_conv2d_default_init(test_case):
        # Default-initialized weight and bias must not be all zeros.
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 3), bias=True).to(flow.device(device))
            test_case.assertTrue(
                not np.allclose(
                    conv.weight.numpy(), np.zeros((1, 1, 3, 3)), rtol=1e-09, atol=1e-10
                )
            )
            test_case.assertTrue(
                not np.allclose(
                    conv.bias.numpy(), np.zeros((1,)), rtol=1e-09, atol=1e-10
                )
            )

    def test_conv2d(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 3, (3, 3), bias=False).to(flow.device(device))
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_data,
                test_conv2d_weight,
                test_conv2d_output,
                device=device,
            )

    def test_conv2d_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 3, (3, 3), bias=False).to(flow.device(device))
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_data,
                test_conv2d_weight,
                test_conv2d_data_grad,
                test_conv2d_weight_grad,
                device=device,
            )

    def test_conv2d_with_bias(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 3, (3, 3), bias=True).to(flow.device(device))
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_with_bias_data,
                test_conv2d_with_bias_weight,
                test_conv2d_with_bias_output,
                bias=test_conv2d_with_bias_bias,
                device=device,
            )

    def test_conv2d_group(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(2, 2, (3, 3), groups=2, bias=False).to(
                flow.device(device)
            )
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_group_data,
                test_conv2d_group_weight,
                test_conv2d_group_output,
                device=device,
            )

    def test_conv2d_group_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(2, 2, (3, 3), groups=2, bias=False).to(
                flow.device(device)
            )
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_group_data,
                test_conv2d_group_weight,
                test_conv2d_group_data_grad,
                test_conv2d_group_weight_grad,
                device=device,
            )

    def test_conv2d_padding(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 3), padding=(1, 2), bias=False).to(
                flow.device(device)
            )
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_padding_data,
                test_conv2d_padding_weight,
                test_conv2d_padding_output,
                device=device,
            )

    def test_conv2d_padding_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 3), padding=(1, 2), bias=False).to(
                flow.device(device)
            )
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_padding_data,
                test_conv2d_padding_weight,
                test_conv2d_padding_data_grad,
                test_conv2d_padding_weight_grad,
                device=device,
            )

    def test_conv2d_stride(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(
                1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False
            ).to(flow.device(device))
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_stride_data,
                test_conv2d_stride_weight,
                test_conv2d_stride_output,
                device=device,
            )

    def test_conv2d_stride_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(
                1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False
            ).to(flow.device(device))
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_stride_data,
                test_conv2d_stride_weight,
                test_conv2d_stride_data_grad,
                test_conv2d_stride_weight_grad,
                device=device,
            )

    def test_conv2d_kernel(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 5), bias=False).to(flow.device(device))
            # Bug fix: the original forced conv.to("cuda") here and omitted
            # device=, so _test_conv2d's default ("cuda") was used on both
            # loop iterations and the cpu pass never actually ran on cpu.
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_kernel_data,
                test_conv2d_kernel_weight,
                test_conv2d_kernel_output,
                device=device,
            )

    def test_conv2d_kernel_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 5), bias=False).to(flow.device(device))
            # Bug fix: same as test_conv2d_kernel — honor the loop's device
            # instead of unconditionally moving the module to cuda.
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_kernel_data,
                test_conv2d_kernel_weight,
                test_conv2d_kernel_data_grad,
                test_conv2d_kernel_weight_grad,
                device=device,
            )

    def test_conv2d_dilation(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 3), dilation=(2, 3), bias=False).to(
                flow.device(device)
            )
            _test_conv2d(
                test_case,
                conv,
                test_conv2d_dilation_data,
                test_conv2d_dilation_weight,
                test_conv2d_dilation_output,
                device=device,
            )

    def test_conv2d_dilation_backward(test_case):
        for device in ["cuda", "cpu"]:
            conv = flow.nn.Conv2d(1, 1, (3, 3), dilation=(2, 3), bias=False).to(
                flow.device(device)
            )
            _test_conv2d_backward(
                test_case,
                conv,
                test_conv2d_dilation_data,
                test_conv2d_dilation_weight,
                test_conv2d_dilation_data_grad,
                test_conv2d_dilation_weight_grad,
                device=device,
            )

    def test_large_in_channel_group_conv(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_conv2d_large_in_channel]
        arg_dict["device"] = ["cuda", "cpu"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    def test_large_out_channel_group_conv(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [_test_conv2d_large_out_channel]
        arg_dict["device"] = ["cuda", "cpu"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @unittest.skip("need a more relaxed tolerance")
    def test_with_random_data(test_case):
        # Randomized module-vs-PyTorch comparison; currently skipped pending
        # a tolerance that absorbs float accumulation differences.
        for device in ["cpu", "cuda"]:
            channels = random(1, 6)
            test_module_against_pytorch(
                test_case,
                "nn.Conv2d",
                extra_generators={
                    "input": random_tensor(ndim=4, dim1=channels),
                    "in_channels": channels,
                    "out_channels": random(1, 129),
                    "kernel_size": random(1, 4),
                    "stride": random(1, 4),
                    "padding": random(1, 5),
                    "dilation": random(1, 5),
                    "groups": random(1, 5),
                    "padding_mode": constant("zeros"),
                },
                device=device,
            )

    @unittest.skip("need a more relaxed tolerance")
    @autotest()
    def test_against_pytorch(test_case):
        # Autotest harness: random Conv2d config, compared against torch.
        channels = random(1, 6)
        m = torch.nn.Conv2d(
            channels,
            random(1, 6),
            random(1, 6),
            stride=random(1, 3) | nothing(),
            padding=random(1, 3) | nothing(),
            dilation=random(1, 3) | nothing(),
            groups=random(1, 3) | nothing(),
            bias=random() | nothing(),
            padding_mode=constant("zeros") | nothing(),
        )
        m.train(random())
        device = random_device()
        m.to(device)
        x = random_pytorch_tensor(
            ndim=4, dim1=channels, dim2=random(1, 8), dim3=random(1, 8)
        ).to(device)
        y = m(x)
        return y
# Allow running this test module directly (e.g. `python test_conv2d.py`).
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.Tensor",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.Conv2d",
"oneflow.device"
] | [((800, 1436), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]], [[[\n 0.29670074582099915, 1.3111951351165771, 0.5035904049873352], [-\n 1.1894450187683105, -0.5502137541770935, -1.591875672340393], [-\n 1.1081947088241577, 0.07872020453214645, -0.9185634255409241]]], [[[-\n 0.7457143664360046, -1.2080862522125244, 1.8140212297439575], [-\n 1.5227429866790771, -2.515244960784912, -1.3549325466156006], [-\n 0.9574840068817139, -0.7248556613922119, 1.1119636297225952]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]], [[[\n 0.29670074582099915, 1.3111951351165771, 0.5035904049873352], [-\n 1.1894450187683105, -0.5502137541770935, -1.591875672340393], [-\n 1.1081947088241577, 0.07872020453214645, -0.9185634255409241]]], [[[-\n 0.7457143664360046, -1.2080862522125244, 1.8140212297439575], [-\n 1.5227429866790771, -2.515244960784912, -1.3549325466156006], [-\n 0.9574840068817139, -0.7248556613922119, 1.1119636297225952]]]])\n', (808, 1436), True, 'import numpy as np\n'), ((1709, 2285), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, 
\n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (1717, 2285), True, 'import numpy as np\n'), ((3002, 3569), 'numpy.array', 'np.array', (['[[[[0.4095913469791412, 0.2847584038972855, 2.803684800863266, \n 2.3940934538841248, 2.5189263969659805], [-1.9525419473648071, -\n 4.606781497597694, -3.51521897315979, -1.562677025794983, \n 1.0915625244379044], [-2.1141327619552612, -6.987950943410397, -\n 5.84306687861681, -3.7289341166615486, 1.1448840647935867], [-\n 2.5237241089344025, -7.272709347307682, -8.646751679480076, -\n 6.123027570545673, -1.3740423321723938], [-0.1615908145904541, -\n 2.381169445812702, -2.32784790545702, -2.1662570908665657, \n 0.0533215403556824]]]]'], {}), '([[[[0.4095913469791412, 0.2847584038972855, 2.803684800863266, \n 2.3940934538841248, 2.5189263969659805], [-1.9525419473648071, -\n 4.606781497597694, -3.51521897315979, -1.562677025794983, \n 1.0915625244379044], [-2.1141327619552612, -6.987950943410397, -\n 5.84306687861681, -3.7289341166615486, 1.1448840647935867], [-\n 2.5237241089344025, -7.272709347307682, -8.646751679480076, -\n 6.123027570545673, -1.3740423321723938], [-0.1615908145904541, -\n 2.381169445812702, -2.32784790545702, -2.1662570908665657, \n 0.0533215403556824]]]])\n', (3010, 3569), True, 'import numpy as np\n'), ((4288, 4919), 'numpy.array', 'np.array', (['[[[[0.6277393400669098, -2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 0.6277393400669098, 
-2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 0.6277393400669098, -2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]]]'], {}), '([[[[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],\n [-3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 0.6277393400669098, -2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 0.6277393400669098, -2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]]])\n', (4296, 4919), True, 'import numpy as np\n'), ((5194, 5806), 'numpy.array', 'np.array', (['[[[[0.9699610471725464, -0.20758534967899323, 2.3857712745666504], [\n 0.3666309118270874, 4.690882682800293, -8.203354835510254], [\n 2.6072847843170166, -1.9033538103103638, 2.331153154373169]], [[\n 2.519343852996826, 2.3757898807525635, -1.6613528728485107], [\n 0.5777544379234314, -3.5739502906799316, 5.349126815795898], [\n 0.729295015335083, 1.5791023969650269, 3.7627718448638916]], [[-\n 0.27685487270355225, 6.446267127990723, -2.762883424758911], [-\n 8.25644588470459, 9.616064071655273, 8.005367279052734], [-\n 0.6944921016693115, 3.866114854812622, 4.788446426391602]]]]'], {}), '([[[[0.9699610471725464, -0.20758534967899323, 2.3857712745666504],\n [0.3666309118270874, 4.690882682800293, -8.203354835510254], [\n 2.6072847843170166, -1.9033538103103638, 2.331153154373169]], [[\n 2.519343852996826, 2.3757898807525635, -1.6613528728485107], [\n 0.5777544379234314, -3.5739502906799316, 
5.349126815795898], [\n 0.729295015335083, 1.5791023969650269, 3.7627718448638916]], [[-\n 0.27685487270355225, 6.446267127990723, -2.762883424758911], [-\n 8.25644588470459, 9.616064071655273, 8.005367279052734], [-\n 0.6944921016693115, 3.866114854812622, 4.788446426391602]]]])\n', (5202, 5806), True, 'import numpy as np\n'), ((6055, 6685), 'numpy.array', 'np.array', (['[[[[1.8271433115005493, -1.0446699857711792, 1.0062190294265747], [\n 0.5174201130867004, -0.806931734085083, 1.3769007921218872], [\n 0.205885112285614, 0.9943519234657288, -0.23580588400363922]]], [[[\n 0.29881811141967773, -1.9982075691223145, 0.3511354625225067], [-\n 0.7644741535186768, 1.2594351768493652, -0.9629734754562378], [\n 0.5080506205558777, 0.7561734318733215, 1.6839302778244019]]], [[[\n 1.2573646306991577, 0.13123232126235962, 1.6403018236160278], [-\n 1.2138012647628784, 2.399970531463623, -0.38509097695350647], [-\n 0.9878040552139282, 0.9585888385772705, -1.4976465702056885]]]]'], {}), '([[[[1.8271433115005493, -1.0446699857711792, 1.0062190294265747],\n [0.5174201130867004, -0.806931734085083, 1.3769007921218872], [\n 0.205885112285614, 0.9943519234657288, -0.23580588400363922]]], [[[\n 0.29881811141967773, -1.9982075691223145, 0.3511354625225067], [-\n 0.7644741535186768, 1.2594351768493652, -0.9629734754562378], [\n 0.5080506205558777, 0.7561734318733215, 1.6839302778244019]]], [[[\n 1.2573646306991577, 0.13123232126235962, 1.6403018236160278], [-\n 1.2138012647628784, 2.399970531463623, -0.38509097695350647], [-\n 0.9878040552139282, 0.9585888385772705, -1.4976465702056885]]]])\n', (6063, 6685), True, 'import numpy as np\n'), ((6968, 7042), 'numpy.array', 'np.array', (['[0.6605162620544434, -0.18903568387031555, -0.27302607893943787]'], {}), '([0.6605162620544434, -0.18903568387031555, -0.27302607893943787])\n', (6976, 7042), True, 'import numpy as np\n'), ((7078, 7662), 'numpy.array', 'np.array', (['[[[[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902, \n 
0.9321041703224182, -3.1557741165161133], [2.1935296058654785, -\n 0.5385921001434326, -0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986], [-0.35601571202278137, -0.15963983535766602, \n 1.797447681427002, 0.19594945013523102, -1.7376397848129272], [\n 0.047347065061330795, 0.14580930769443512, 0.32604914903640747, \n 0.4578782916069031, -0.8942581415176392], [0.49383941292762756, -\n 0.9043426513671875, -1.2140793800354004, 2.1564064025878906, \n 1.0938222408294678]]]]'], {}), '([[[[-0.47827261686325073, -1.1739492416381836, -0.7921845316886902,\n 0.9321041703224182, -3.1557741165161133], [2.1935296058654785, -\n 0.5385921001434326, -0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986], [-0.35601571202278137, -0.15963983535766602, \n 1.797447681427002, 0.19594945013523102, -1.7376397848129272], [\n 0.047347065061330795, 0.14580930769443512, 0.32604914903640747, \n 0.4578782916069031, -0.8942581415176392], [0.49383941292762756, -\n 0.9043426513671875, -1.2140793800354004, 2.1564064025878906, \n 1.0938222408294678]]]])\n', (7086, 7662), True, 'import numpy as np\n'), ((8387, 9007), 'numpy.array', 'np.array', (['[[[[-0.05607491731643677, -0.185230553150177, -3.8808679580688477], [\n 6.861937046051025, -2.3341472148895264, -0.5597308874130249], [\n 1.8299254179000854, -2.770848274230957, 2.1958212852478027]], [[\n 2.9348952770233154, 4.117504119873047, -6.278541088104248], [\n 0.2638452351093292, 3.998856782913208, 2.612290620803833], [-\n 1.9891828298568726, -1.6476304531097412, 3.39066219329834]], [[-\n 8.44466781616211, 0.5747121572494507, -8.501373291015625], [-\n 0.036642804741859436, -0.23458999395370483, -2.370849370956421], [\n 2.8372013568878174, -2.987276077270508, 1.8382092714309692]]]]'], {}), '([[[[-0.05607491731643677, -0.185230553150177, -3.8808679580688477],\n [6.861937046051025, -2.3341472148895264, -0.5597308874130249], [\n 1.8299254179000854, -2.770848274230957, 2.1958212852478027]], [[\n 2.9348952770233154, 
4.117504119873047, -6.278541088104248], [\n 0.2638452351093292, 3.998856782913208, 2.612290620803833], [-\n 1.9891828298568726, -1.6476304531097412, 3.39066219329834]], [[-\n 8.44466781616211, 0.5747121572494507, -8.501373291015625], [-\n 0.036642804741859436, -0.23458999395370483, -2.370849370956421], [\n 2.8372013568878174, -2.987276077270508, 1.8382092714309692]]]])\n', (8395, 9007), True, 'import numpy as np\n'), ((9252, 9682), 'numpy.array', 'np.array', (['[[[[-0.7248556613922119, 1.1119636297225952, -0.47827261686325073], [-\n 1.1739492416381836, -0.7921845316886902, 0.9321041703224182], [-\n 3.1557741165161133, 2.1935296058654785, -0.5385921001434326]]], [[[-\n 0.8611332774162292, -1.881519079208374, -0.7205708026885986], [-\n 0.35601571202278137, -0.15963983535766602, 1.797447681427002], [\n 0.19594945013523102, -1.7376397848129272, 0.047347065061330795]]]]'], {}), '([[[[-0.7248556613922119, 1.1119636297225952, -0.47827261686325073],\n [-1.1739492416381836, -0.7921845316886902, 0.9321041703224182], [-\n 3.1557741165161133, 2.1935296058654785, -0.5385921001434326]]], [[[-\n 0.8611332774162292, -1.881519079208374, -0.7205708026885986], [-\n 0.35601571202278137, -0.15963983535766602, 1.797447681427002], [\n 0.19594945013523102, -1.7376397848129272, 0.047347065061330795]]]])\n', (9260, 9682), True, 'import numpy as np\n'), ((9888, 11031), 'numpy.array', 'np.array', (['[[[[-0.7248556613922119, 0.3871079683303833, -0.0911646485328674, \n 0.6336910128593445, -0.4782726168632507], [-1.8988049030303955, -\n 1.5790258049964905, -1.125194251537323, 0.7736106514930725, \n 0.4538315534591675], [-5.054579019546509, -2.5412703156471252, -\n 2.6260308623313904, 2.4285481572151184, -0.0847605466842651], [-\n 4.329723358154297, -2.9283782839775085, -2.534866213798523, \n 1.794857144355774, 0.3935120701789856], [-3.1557741165161133, -\n 0.9622445106506348, -1.5008366107940674, 1.654937505722046, -\n 0.5385921001434326]], [[-0.8611332774162292, -2.7426523566246033, -\n 
3.463223159313202, -2.6020898818969727, -0.7205708026885986], [-\n 1.2171489894390106, -3.2583079040050507, -2.1814310252666473, -\n 0.9642820358276367, 1.0768768787384033], [-1.0211995393037796, -\n 4.799998238682747, -3.6757742948830128, -2.654574755579233, \n 1.1242239437997341], [-0.1600662618875504, -2.0573458820581436, -\n 0.2125511355698109, -0.0524848736822605, 1.8447947464883327], [\n 0.195949450135231, -1.5416903346776962, -1.4943432696163654, -\n 1.6902927197515965, 0.0473470650613308]]]]'], {}), '([[[[-0.7248556613922119, 0.3871079683303833, -0.0911646485328674, \n 0.6336910128593445, -0.4782726168632507], [-1.8988049030303955, -\n 1.5790258049964905, -1.125194251537323, 0.7736106514930725, \n 0.4538315534591675], [-5.054579019546509, -2.5412703156471252, -\n 2.6260308623313904, 2.4285481572151184, -0.0847605466842651], [-\n 4.329723358154297, -2.9283782839775085, -2.534866213798523, \n 1.794857144355774, 0.3935120701789856], [-3.1557741165161133, -\n 0.9622445106506348, -1.5008366107940674, 1.654937505722046, -\n 0.5385921001434326]], [[-0.8611332774162292, -2.7426523566246033, -\n 3.463223159313202, -2.6020898818969727, -0.7205708026885986], [-\n 1.2171489894390106, -3.2583079040050507, -2.1814310252666473, -\n 0.9642820358276367, 1.0768768787384033], [-1.0211995393037796, -\n 4.799998238682747, -3.6757742948830128, -2.654574755579233, \n 1.1242239437997341], [-0.1600662618875504, -2.0573458820581436, -\n 0.2125511355698109, -0.0524848736822605, 1.8447947464883327], [\n 0.195949450135231, -1.5416903346776962, -1.4943432696163654, -\n 1.6902927197515965, 0.0473470650613308]]]])\n', (9896, 11031), True, 'import numpy as np\n'), ((12419, 12839), 'numpy.array', 'np.array', (['[[[[0.6277393400669098, -2.7888944894075394, -0.2910575419664383], [-\n 3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 3.30740749835968, -0.7220746576786041, -3.660933956503868], [\n 
0.5273916646838188, -2.631059892475605, -7.6207195818424225], [-\n 3.5466641262173653, -8.214546449482441, -11.031560003757477]]]]'], {}), '([[[[0.6277393400669098, -2.7888944894075394, -0.2910575419664383],\n [-3.095237225294113, -4.835702538490295, -1.8706469237804413], [-\n 1.0139376372098923, -6.076017692685127, -5.780256435275078]]], [[[\n 3.30740749835968, -0.7220746576786041, -3.660933956503868], [\n 0.5273916646838188, -2.631059892475605, -7.6207195818424225], [-\n 3.5466641262173653, -8.214546449482441, -11.031560003757477]]]])\n', (12427, 12839), True, 'import numpy as np\n'), ((13040, 14184), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]], [[0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878, 0.35005471110343933, 0.5360521078109741], [\n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393], [-1.1081947088241577, 0.07872020453214645, -\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244], [\n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006, -0.9574840068817139]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, 
-0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]], [[0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878, 0.35005471110343933, 0.5360521078109741], [\n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393], [-1.1081947088241577, 0.07872020453214645, -\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244], [\n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006, -0.9574840068817139]]]])\n', (13048, 14184), True, 'import numpy as np\n'), ((15567, 15977), 'numpy.array', 'np.array', (['[[[[-8.836943626403809, 3.2316627502441406, 6.994439601898193], [-\n 0.8386597037315369, -9.857108116149902, 13.68197250366211], [-\n 13.020713806152344, 7.310227870941162, -3.3760271072387695]], [[-\n 4.803101539611816, 1.026240587234497, 0.5452112555503845], [-\n 6.839838027954102, 2.0195930004119873, 0.11328654736280441], [\n 0.393694669008255, 4.987061023712158, 3.297354221343994]]]]'], {}), '([[[[-8.836943626403809, 3.2316627502441406, 6.994439601898193], [-\n 0.8386597037315369, -9.857108116149902, 13.68197250366211], [-\n 13.020713806152344, 7.310227870941162, -3.3760271072387695]], [[-\n 4.803101539611816, 1.026240587234497, 0.5452112555503845], [-\n 6.839838027954102, 2.0195930004119873, 0.11328654736280441], [\n 0.393694669008255, 4.987061023712158, 3.297354221343994]]]])\n', (15575, 15977), True, 'import numpy as np\n'), ((16163, 16377), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 
1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]])\n', (16171, 16377), True, 'import numpy as np\n'), ((16501, 17077), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (16509, 17077), True, 'import numpy as np\n'), ((17802, 18341), 'numpy.array', 'np.array', (['[[[[3.237529069185257, 3.237529069185257, 3.237529069185257, \n 3.237529069185257, 3.237529069185257], [3.428095132112503, \n 3.428095132112503, 3.428095132112503, 3.428095132112503, \n 3.428095132112503], [3.428095132112503, 3.428095132112503, \n 3.428095132112503, 3.428095132112503, 3.428095132112503], [\n 3.428095132112503, 3.428095132112503, 3.428095132112503, \n 3.428095132112503, 3.428095132112503], 
[2.596117228269577, \n 2.596117228269577, 2.596117228269577, 2.596117228269577, \n 2.596117228269577]]]]'], {}), '([[[[3.237529069185257, 3.237529069185257, 3.237529069185257, \n 3.237529069185257, 3.237529069185257], [3.428095132112503, \n 3.428095132112503, 3.428095132112503, 3.428095132112503, \n 3.428095132112503], [3.428095132112503, 3.428095132112503, \n 3.428095132112503, 3.428095132112503, 3.428095132112503], [\n 3.428095132112503, 3.428095132112503, 3.428095132112503, \n 3.428095132112503, 3.428095132112503], [2.596117228269577, \n 2.596117228269577, 2.596117228269577, 2.596117228269577, \n 2.596117228269577]]]])\n', (17810, 18341), True, 'import numpy as np\n'), ((19068, 19281), 'numpy.array', 'np.array', (['[[[[1.7594299167394638, 1.7594299167394638, 1.7594299167394638], [-\n 0.6019042432308197, -0.6019042432308197, -0.6019042432308197], [-\n 1.532561555504799, -1.532561555504799, -1.532561555504799]]]]'], {}), '([[[[1.7594299167394638, 1.7594299167394638, 1.7594299167394638], [\n -0.6019042432308197, -0.6019042432308197, -0.6019042432308197], [-\n 1.532561555504799, -1.532561555504799, -1.532561555504799]]]])\n', (19076, 19281), True, 'import numpy as np\n'), ((19406, 20192), 'numpy.array', 'np.array', (['[[[[1.5489805936813354, -1.0164761543273926, 5.277345657348633, \n 3.153532028198242, -7.301508903503418, -3.7565059661865234, \n 4.690962314605713], [2.425799608230591, -2.0592665672302246, \n 0.9699610471725464, -0.20758534967899323, 2.3857712745666504, \n 1.1719579696655273, 0.6523551940917969], [2.1625545024871826, -\n 1.3517316579818726, 0.3666309118270874, 4.690882682800293, -\n 8.203354835510254, 3.0248217582702637, 1.2624683380126953], [\n 0.6193475723266602, -2.0285415649414062, 2.6072847843170166, -\n 1.9033538103103638, 2.331153154373169, -3.998155355453491, -\n 1.0176407098770142], [2.8643176555633545, -0.7396122217178345, -\n 0.2253415733575821, -2.846742630004883, -4.961236476898193, -\n 0.1308247298002243, -0.7344070672988892]]]]'], {}), 
'([[[[1.5489805936813354, -1.0164761543273926, 5.277345657348633, \n 3.153532028198242, -7.301508903503418, -3.7565059661865234, \n 4.690962314605713], [2.425799608230591, -2.0592665672302246, \n 0.9699610471725464, -0.20758534967899323, 2.3857712745666504, \n 1.1719579696655273, 0.6523551940917969], [2.1625545024871826, -\n 1.3517316579818726, 0.3666309118270874, 4.690882682800293, -\n 8.203354835510254, 3.0248217582702637, 1.2624683380126953], [\n 0.6193475723266602, -2.0285415649414062, 2.6072847843170166, -\n 1.9033538103103638, 2.331153154373169, -3.998155355453491, -\n 1.0176407098770142], [2.8643176555633545, -0.7396122217178345, -\n 0.2253415733575821, -2.846742630004883, -4.961236476898193, -\n 0.1308247298002243, -0.7344070672988892]]]])\n', (19414, 20192), True, 'import numpy as np\n'), ((21098, 21312), 'numpy.array', 'np.array', (['[[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878], [\n 0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]]'], {}), '([[[[0.8586049675941467, -0.2279418259859085, 0.2013147622346878],\n [0.35005471110343933, 0.5360521078109741, 1.5194443464279175], [\n 1.9040879011154175, -1.5734431743621826, -0.14007866382598877]]]])\n', (21106, 21312), True, 'import numpy as np\n'), ((21435, 22011), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 
0.15302546322345734, -1.168814778327942], [1.5580710172653198, -\n 0.5459445714950562, -2.3556296825408936, 0.5414402484893799, \n 2.678506374359131], [1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253, 0.37723132967948914], [\n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821], [1.830764889717102, -\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839]]]])\n', (21443, 22011), True, 'import numpy as np\n'), ((22735, 23297), 'numpy.array', 'np.array', (['[[[[0.5360521078109741, 1.5194443464279175, 0.3500547111034393, \n 0.5360521078109741, 1.5194443464279175], [-1.8013850003480911, \n 0.061236098408699, 2.762692868709564, -1.8013850003480911, \n 0.061236098408699], [0.5360521078109741, 1.5194443464279175, \n 0.3500547111034393, 0.5360521078109741, 1.5194443464279175], [-\n 1.8013850003480911, 0.061236098408699, 2.762692868709564, -\n 1.8013850003480911, 0.061236098408699], [0.5360521078109741, \n 1.5194443464279175, 0.3500547111034393, 0.5360521078109741, \n 1.5194443464279175]]]]'], {}), '([[[[0.5360521078109741, 1.5194443464279175, 0.3500547111034393, \n 0.5360521078109741, 1.5194443464279175], [-1.8013850003480911, \n 0.061236098408699, 2.762692868709564, -1.8013850003480911, \n 0.061236098408699], [0.5360521078109741, 1.5194443464279175, \n 0.3500547111034393, 0.5360521078109741, 1.5194443464279175], [-\n 1.8013850003480911, 0.061236098408699, 2.762692868709564, -\n 1.8013850003480911, 0.061236098408699], [0.5360521078109741, \n 1.5194443464279175, 0.3500547111034393, 0.5360521078109741, \n 1.5194443464279175]]]])\n', (22743, 23297), True, 'import numpy as np\n'), ((24023, 24235), 'numpy.array', 'np.array', (['[[[[-5.1135923862457275, 3.5859558284282684, 2.089697480201721], [-\n 0.3276629596948624, 1.7587070614099503, -2.5950092673301697], [-\n 5.1135923862457275, 3.5859558284282684, 2.089697480201721]]]]'], {}), 
'([[[[-5.1135923862457275, 3.5859558284282684, 2.089697480201721], [\n -0.3276629596948624, 1.7587070614099503, -2.5950092673301697], [-\n 5.1135923862457275, 3.5859558284282684, 2.089697480201721]]]])\n', (24031, 24235), True, 'import numpy as np\n'), ((24359, 24506), 'numpy.array', 'np.array', (['[[[[-1.0164761543273926, -7.301508903503418], [-1.3517316579818726, -\n 8.203354835510254], [-0.7396122217178345, -4.961236476898193]]]]'], {}), '([[[[-1.0164761543273926, -7.301508903503418], [-1.3517316579818726,\n -8.203354835510254], [-0.7396122217178345, -4.961236476898193]]]])\n', (24367, 24506), True, 'import numpy as np\n'), ((24636, 24990), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952, -\n 0.47827261686325073, -1.1739492416381836], [-0.7921845316886902, \n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785, -\n 0.5385921001434326], [-0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986, -0.35601571202278137, -0.15963983535766602]]]]'], {}), '([[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952, \n -0.47827261686325073, -1.1739492416381836], [-0.7921845316886902, \n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785, -\n 0.5385921001434326], [-0.8611332774162292, -1.881519079208374, -\n 0.7205708026885986, -0.35601571202278137, -0.15963983535766602]]]])\n', (24644, 24990), True, 'import numpy as np\n'), ((25459, 26574), 'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 
0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]])\n', (25467, 26574), True, 'import numpy as np\n'), ((27808, 28925), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, -1.6823396682739258, -0.5703760385513306, -\n 0.0911646485328674, -0.5402582287788391, -1.6522218585014343, -\n 1.1739492416381836], [-1.749668538570404, -1.5424200296401978, -\n 3.586230516433716, -0.121304988861084, -2.0410948395729065, \n 
0.0027156472206116, -1.7125413417816162], [-2.6108018159866333, -\n 4.285072386264801, -7.049453675746918, -3.079410582780838, -\n 3.2773211896419525, -0.5129399001598358, -1.8721811771392822], [-\n 2.6108018159866333, -4.285072386264801, -7.049453675746918, -\n 3.079410582780838, -3.2773211896419525, -0.5129399001598358, -\n 1.8721811771392822], [-2.6108018159866333, -4.285072386264801, -\n 7.049453675746918, -3.079410582780838, -3.2773211896419525, -\n 0.5129399001598358, -1.8721811771392822], [-1.6533178091049194, -\n 2.6027327179908752, -6.479077637195587, -2.9882459342479706, -\n 2.7370629608631134, 1.1392819583415985, -0.6982319355010986], [-\n 0.8611332774162292, -2.7426523566246033, -3.463223159313202, -\n 2.958105593919754, -1.236226350069046, -0.5156555473804474, -\n 0.159639835357666]]]]'], {}), '([[[[-0.9574840068817139, -1.6823396682739258, -0.5703760385513306,\n -0.0911646485328674, -0.5402582287788391, -1.6522218585014343, -\n 1.1739492416381836], [-1.749668538570404, -1.5424200296401978, -\n 3.586230516433716, -0.121304988861084, -2.0410948395729065, \n 0.0027156472206116, -1.7125413417816162], [-2.6108018159866333, -\n 4.285072386264801, -7.049453675746918, -3.079410582780838, -\n 3.2773211896419525, -0.5129399001598358, -1.8721811771392822], [-\n 2.6108018159866333, -4.285072386264801, -7.049453675746918, -\n 3.079410582780838, -3.2773211896419525, -0.5129399001598358, -\n 1.8721811771392822], [-2.6108018159866333, -4.285072386264801, -\n 7.049453675746918, -3.079410582780838, -3.2773211896419525, -\n 0.5129399001598358, -1.8721811771392822], [-1.6533178091049194, -\n 2.6027327179908752, -6.479077637195587, -2.9882459342479706, -\n 2.7370629608631134, 1.1392819583415985, -0.6982319355010986], [-\n 0.8611332774162292, -2.7426523566246033, -3.463223159313202, -\n 2.958105593919754, -1.236226350069046, -0.5156555473804474, -\n 0.159639835357666]]]])\n', (27816, 28925), True, 'import numpy as np\n'), ((30162, 30500), 'numpy.array', 'np.array', 
(['[[[[2.974529668688774, 4.548736393451691, 1.1672898679971695, -\n 1.499158263206482, 0.1862268149852753], [1.6534235626459122, \n 2.3762744814157486, -1.448018729686737, -5.2917241007089615, -\n 2.278435029089451], [-2.083257421851158, -2.23808591067791, -\n 5.749193429946899, -7.540486767888069, -6.306201495230198]]]]'], {}), '([[[[2.974529668688774, 4.548736393451691, 1.1672898679971695, -\n 1.499158263206482, 0.1862268149852753], [1.6534235626459122, \n 2.3762744814157486, -1.448018729686737, -5.2917241007089615, -\n 2.278435029089451], [-2.083257421851158, -2.23808591067791, -\n 5.749193429946899, -7.540486767888069, -6.306201495230198]]]])\n', (30170, 30500), True, 'import numpy as np\n'), ((30971, 31309), 'numpy.array', 'np.array', (['[[[[-3.5647754669189453, -4.234736919403076, 1.4046944379806519], [-\n 0.6964312791824341, 16.42838478088379, -9.649789810180664], [\n 4.312150478363037, -6.283960819244385, -4.8443922996521], [-\n 2.772286891937256, -4.483709812164307, 12.315184593200684], [\n 7.39893913269043, 1.305102825164795, -2.049992561340332]]]]'], {}), '([[[[-3.5647754669189453, -4.234736919403076, 1.4046944379806519],\n [-0.6964312791824341, 16.42838478088379, -9.649789810180664], [\n 4.312150478363037, -6.283960819244385, -4.8443922996521], [-\n 2.772286891937256, -4.483709812164307, 12.315184593200684], [\n 7.39893913269043, 1.305102825164795, -2.049992561340332]]]])\n', (30979, 31309), True, 'import numpy as np\n'), ((31458, 31674), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952], [-\n 0.47827261686325073, -1.1739492416381836, -0.7921845316886902], [\n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785]]]]'], {}), '([[[[-0.9574840068817139, -0.7248556613922119, 1.1119636297225952],\n [-0.47827261686325073, -1.1739492416381836, -0.7921845316886902], [\n 0.9321041703224182, -3.1557741165161133, 2.1935296058654785]]]])\n', (31466, 31674), True, 'import numpy as np\n'), ((31799, 32914), 
'numpy.array', 'np.array', (['[[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, -1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]]'], {}), '([[[[1.1630785465240479, 0.4838046133518219, 0.299563467502594, \n 0.15302546322345734, -1.168814778327942, 1.5580710172653198, -\n 0.5459445714950562], [-2.3556296825408936, 0.5414402484893799, \n 2.678506374359131, 1.2546343803405762, -0.5487740635871887, -\n 0.6810643672943115, -0.13531559705734253], [0.37723132967948914, \n 0.41016456484794617, 0.5712682008743286, -2.757962703704834, \n 1.0762799978256226, -0.6141325235366821, 1.830764889717102], [-\n 1.1468064785003662, 0.053837940096855164, -2.5074806213378906, -\n 0.5916498899459839, 0.8586049675941467, -0.2279418259859085, \n 0.2013147622346878], [0.35005471110343933, 0.5360521078109741, \n 1.5194443464279175, 1.9040879011154175, -1.5734431743621826, -\n 0.14007866382598877, 0.29670074582099915], [1.3111951351165771, \n 0.5035904049873352, -1.1894450187683105, -0.5502137541770935, -\n 1.591875672340393, 
-1.1081947088241577, 0.07872020453214645], [-\n 0.9185634255409241, -0.7457143664360046, -1.2080862522125244, \n 1.8140212297439575, -1.5227429866790771, -2.515244960784912, -\n 1.3549325466156006]]]])\n', (31807, 32914), True, 'import numpy as np\n'), ((34150, 34785), 'numpy.array', 'np.array', (['[[[[-0.9574840068817139, 0.0, 0.0, -0.7248556613922119, 0.0, 0.0, \n 1.1119636297225952], [-0.9574840068817139, 0.0, 0.0, -\n 0.7248556613922119, 0.0, 0.0, 1.1119636297225952], [-1.4357566237449646,\n 0.0, 0.0, -1.8988049030303955, 0.0, 0.0, 0.319779098033905], [-\n 0.4782726168632507, 0.0, 0.0, -1.1739492416381836, 0.0, 0.0, -\n 0.7921845316886902], [0.4538315534591675, 0.0, 0.0, -4.329723358154297,\n 0.0, 0.0, 1.4013450741767883], [0.9321041703224182, 0.0, 0.0, -\n 3.1557741165161133, 0.0, 0.0, 2.1935296058654785], [0.9321041703224182,\n 0.0, 0.0, -3.1557741165161133, 0.0, 0.0, 2.1935296058654785]]]]'], {}), '([[[[-0.9574840068817139, 0.0, 0.0, -0.7248556613922119, 0.0, 0.0, \n 1.1119636297225952], [-0.9574840068817139, 0.0, 0.0, -\n 0.7248556613922119, 0.0, 0.0, 1.1119636297225952], [-1.4357566237449646,\n 0.0, 0.0, -1.8988049030303955, 0.0, 0.0, 0.319779098033905], [-\n 0.4782726168632507, 0.0, 0.0, -1.1739492416381836, 0.0, 0.0, -\n 0.7921845316886902], [0.4538315534591675, 0.0, 0.0, -4.329723358154297,\n 0.0, 0.0, 1.4013450741767883], [0.9321041703224182, 0.0, 0.0, -\n 3.1557741165161133, 0.0, 0.0, 2.1935296058654785], [0.9321041703224182,\n 0.0, 0.0, -3.1557741165161133, 0.0, 0.0, 2.1935296058654785]]]])\n', (34158, 34785), True, 'import numpy as np\n'), ((36066, 36278), 'numpy.array', 'np.array', (['[[[[-0.8153198063373566, -1.3503028601408005, 1.1495047211647034], [-\n 0.4195204377174377, -1.4455246925354004, 2.328780397772789], [\n 0.7426864206790924, 3.1678953766822815, -0.979511596262455]]]]'], {}), '([[[[-0.8153198063373566, -1.3503028601408005, 1.1495047211647034],\n [-0.4195204377174377, -1.4455246925354004, 2.328780397772789], [\n 0.7426864206790924, 
3.1678953766822815, -0.979511596262455]]]])\n', (36074, 36278), True, 'import numpy as np\n'), ((36405, 36490), 'numpy.array', 'np.array', (['[[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]'], {}), '([[[[-5.2563982009887695], [5.410353183746338], [-8.517012596130371]]]]\n )\n', (36413, 36490), True, 'import numpy as np\n'), ((51454, 51486), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (51484, 51486), True, 'import oneflow as flow\n'), ((36593, 36612), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (36604, 36612), True, 'import oneflow as flow\n'), ((36621, 36656), 'oneflow.Tensor', 'flow.Tensor', (['data'], {'device': 'to_device'}), '(data, device=to_device)\n', (36632, 36656), True, 'import oneflow as flow\n'), ((37058, 37077), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (37069, 37077), True, 'import oneflow as flow\n'), ((37086, 37141), 'oneflow.Tensor', 'flow.Tensor', (['data'], {'device': 'to_device', 'requires_grad': '(True)'}), '(data, device=to_device, requires_grad=True)\n', (37097, 37141), True, 'import oneflow as flow\n'), ((37662, 39144), 'numpy.array', 'np.array', (['[[[[0.6206631238581714, -1.1225329393404626, 0.8407155480700242, -\n 0.6845162855236345], [-0.5186484633906412, 0.10420735184519186, -\n 0.1711568947473012, 0.5168640476046483], [-0.12429464919764661, \n 0.050277779246134253, -1.0144501797426606, -2.184600444658526], [\n 0.28918126931309923, -0.822872663244595, 0.44019150436683663, -\n 1.0247720130825562]], [[0.7786504412818226, -0.7501839068078657, -\n 0.8187283189941765, -1.1116653569170698], [0.18085524152316743, -\n 1.3461349607476678, 1.142505437476448, -0.000649619704040145], [\n 0.03160672782674317, -0.006318157449953413, 1.2218487782604377, \n 0.15903027907930234], [1.5857011815642381, 0.6656477116332891, -\n 0.04036621813223574, -0.3427168687988546]], [[-1.1774346070102524, \n 1.6195241269303395, -0.36185552303441965, 
-1.1382193113192487], [\n 0.08061907334568702, 1.5025447613238763, -1.1591348706634745, \n 1.6449050139676873], [1.1539915649822392, -2.414624939646017, \n 0.3056063774849572, 1.1920089257083162], [0.7623012858982319, -\n 0.01685314742940813, -1.096666898224702, -0.4406476137098582]], [[\n 0.9383797282214235, -1.1075876842796508, -0.4420913825139058, -\n 1.0736097610655628], [-0.3101376466546291, 1.6578227745160954, -\n 0.6225454278031398, 0.6831188620748697], [0.00743800968372913, -\n 0.8089158949698473, 2.08084287836801, 0.721204366332351], [\n 0.5694701823297723, 0.031519314469744895, -0.5041680957766629, -\n 0.4738588233094669]]]]'], {}), '([[[[0.6206631238581714, -1.1225329393404626, 0.8407155480700242, -\n 0.6845162855236345], [-0.5186484633906412, 0.10420735184519186, -\n 0.1711568947473012, 0.5168640476046483], [-0.12429464919764661, \n 0.050277779246134253, -1.0144501797426606, -2.184600444658526], [\n 0.28918126931309923, -0.822872663244595, 0.44019150436683663, -\n 1.0247720130825562]], [[0.7786504412818226, -0.7501839068078657, -\n 0.8187283189941765, -1.1116653569170698], [0.18085524152316743, -\n 1.3461349607476678, 1.142505437476448, -0.000649619704040145], [\n 0.03160672782674317, -0.006318157449953413, 1.2218487782604377, \n 0.15903027907930234], [1.5857011815642381, 0.6656477116332891, -\n 0.04036621813223574, -0.3427168687988546]], [[-1.1774346070102524, \n 1.6195241269303395, -0.36185552303441965, -1.1382193113192487], [\n 0.08061907334568702, 1.5025447613238763, -1.1591348706634745, \n 1.6449050139676873], [1.1539915649822392, -2.414624939646017, \n 0.3056063774849572, 1.1920089257083162], [0.7623012858982319, -\n 0.01685314742940813, -1.096666898224702, -0.4406476137098582]], [[\n 0.9383797282214235, -1.1075876842796508, -0.4420913825139058, -\n 1.0736097610655628], [-0.3101376466546291, 1.6578227745160954, -\n 0.6225454278031398, 0.6831188620748697], [0.00743800968372913, -\n 0.8089158949698473, 2.08084287836801, 0.721204366332351], [\n 
0.5694701823297723, 0.031519314469744895, -0.5041680957766629, -\n 0.4738588233094669]]]])\n', (37670, 39144), True, 'import numpy as np\n'), ((41582, 42448), 'numpy.array', 'np.array', (['[[[[0.06456436216831207, -0.10852358490228653, -0.21638715267181396], [-\n 0.2279110550880432, 0.1476770043373108, 0.19457484781742096], [\n 0.05026858672499657, 0.10818571597337723, 0.02056501805782318]], [[\n 0.205095112323761, 0.1488947868347168, -0.2344113141298294], [\n 0.1684819906949997, -0.21986986696720123, 0.1082606166601181], [-\n 0.1528974026441574, 0.17120417952537537, 0.01954500749707222]]], [[[-\n 0.09441672265529633, -0.03644559532403946, -0.22235223650932312], [-\n 0.1771145612001419, 0.08043312281370163, 0.06938580423593521], [\n 0.054393064230680466, -0.05483492836356163, 0.23438701033592224]], [[\n 0.22666795551776886, 0.0874653309583664, 0.07092718034982681], [\n 0.08883464336395264, -0.052362944930791855, -0.1720171570777893], [\n 0.10441060364246368, 0.011952142231166363, -0.0894528403878212]]]]'], {}), '([[[[0.06456436216831207, -0.10852358490228653, -\n 0.21638715267181396], [-0.2279110550880432, 0.1476770043373108, \n 0.19457484781742096], [0.05026858672499657, 0.10818571597337723, \n 0.02056501805782318]], [[0.205095112323761, 0.1488947868347168, -\n 0.2344113141298294], [0.1684819906949997, -0.21986986696720123, \n 0.1082606166601181], [-0.1528974026441574, 0.17120417952537537, \n 0.01954500749707222]]], [[[-0.09441672265529633, -0.03644559532403946, \n -0.22235223650932312], [-0.1771145612001419, 0.08043312281370163, \n 0.06938580423593521], [0.054393064230680466, -0.05483492836356163, \n 0.23438701033592224]], [[0.22666795551776886, 0.0874653309583664, \n 0.07092718034982681], [0.08883464336395264, -0.052362944930791855, -\n 0.1720171570777893], [0.10441060364246368, 0.011952142231166363, -\n 0.0894528403878212]]]])\n', (41590, 42448), True, 'import numpy as np\n'), ((42856, 42901), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(4)', '(2)', '(3)'], 
{'groups': '(2)', 'bias': '(False)'}), '(4, 2, 3, groups=2, bias=False)\n', (42870, 42901), True, 'import oneflow as flow\n'), ((47018, 47717), 'numpy.array', 'np.array', (['[[[[0.56573248, -0.1968932, -0.67875558, 0.34328273, 0.31964567], [-\n 1.33715475, 0.33422229, -1.27643383, 0.37904647, 0.35891593], [\n 0.84579802, 2.12729621, -0.51423287, 0.6129756, -1.31156564], [-\n 0.71047139, 1.02679253, -0.76686019, -0.72969633, 0.7342515], [-\n 0.13592879, -1.03207183, -0.22554775, 0.74148071, 0.9660151]], [[\n 0.51595992, 0.49624804, 0.91145641, 0.49247262, 0.41002217], [-\n 1.08001196, 1.55497086, -0.8196314, -0.45511565, -0.60269165], [\n 0.05563145, -0.94318372, -1.17058158, -0.73568577, 0.57810956], [-\n 0.40260276, -0.10309298, 1.123788, -0.23510537, -0.73893374], [-\n 0.52712536, -0.00717016, -1.85051966, -1.5079056, 1.38335907]]]]'], {}), '([[[[0.56573248, -0.1968932, -0.67875558, 0.34328273, 0.31964567],\n [-1.33715475, 0.33422229, -1.27643383, 0.37904647, 0.35891593], [\n 0.84579802, 2.12729621, -0.51423287, 0.6129756, -1.31156564], [-\n 0.71047139, 1.02679253, -0.76686019, -0.72969633, 0.7342515], [-\n 0.13592879, -1.03207183, -0.22554775, 0.74148071, 0.9660151]], [[\n 0.51595992, 0.49624804, 0.91145641, 0.49247262, 0.41002217], [-\n 1.08001196, 1.55497086, -0.8196314, -0.45511565, -0.60269165], [\n 0.05563145, -0.94318372, -1.17058158, -0.73568577, 0.57810956], [-\n 0.40260276, -0.10309298, 1.123788, -0.23510537, -0.73893374], [-\n 0.52712536, -0.00717016, -1.85051966, -1.5079056, 1.38335907]]]])\n', (47026, 47717), True, 'import numpy as np\n'), ((48122, 48659), 'numpy.array', 'np.array', (['[[[[-0.19489679, -0.32377058, 0.21736273], [0.04095296, -0.21552679, -\n 0.14626531], [-0.19359522, -0.00742865, -0.19832158]]], [[[0.29926914, \n 0.00931164, 0.2619766], [0.27611443, -0.15439281, -0.19027126], [-\n 0.2890912, 0.30367029, -0.05168664]]], [[[-0.03155736, 0.17610769, \n 0.22111714], [0.2279067, -0.32897446, -0.03260243], [-0.10274851, -\n 0.06903386, 
-0.19438276]]], [[[-0.24573688, -0.06723209, -0.21363299],\n [-0.02136187, -0.24994437, -0.18691199], [0.12189507, 0.29469389, \n 0.03398871]]]]'], {}), '([[[[-0.19489679, -0.32377058, 0.21736273], [0.04095296, -\n 0.21552679, -0.14626531], [-0.19359522, -0.00742865, -0.19832158]]], [[\n [0.29926914, 0.00931164, 0.2619766], [0.27611443, -0.15439281, -\n 0.19027126], [-0.2890912, 0.30367029, -0.05168664]]], [[[-0.03155736, \n 0.17610769, 0.22111714], [0.2279067, -0.32897446, -0.03260243], [-\n 0.10274851, -0.06903386, -0.19438276]]], [[[-0.24573688, -0.06723209, -\n 0.21363299], [-0.02136187, -0.24994437, -0.18691199], [0.12189507, \n 0.29469389, 0.03398871]]]])\n', (48130, 48659), True, 'import numpy as np\n'), ((49142, 49187), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(2)', '(4)', '(3)'], {'groups': '(2)', 'bias': '(False)'}), '(2, 4, 3, groups=2, bias=False)\n', (49156, 49187), True, 'import oneflow as flow\n'), ((49318, 49843), 'numpy.array', 'np.array', (['[[[[-0.21170563, 0.03652292, 0.25926736], [-0.19168918, 0.49044561, \n 0.25099146], [-1.0248934, 0.25361472, -0.51828313]], [[0.23977707, -\n 0.56090075, -0.19285655], [-0.17167747, 0.24558367, -0.3093586], [-\n 0.33303234, 1.52472734, -0.49013454]], [[-0.17137986, 1.21333742, \n 0.18988736], [0.31785482, -0.1212157, -0.18676008], [-0.10680684, -\n 0.30298883, 0.41809759]], [[-0.87821335, -0.51665992, -0.44061098], [\n 0.7480458, 0.5310725, 0.50418228], [-0.00512899, -0.3645584, -0.23643512]]]\n ]'], {}), '([[[[-0.21170563, 0.03652292, 0.25926736], [-0.19168918, 0.49044561,\n 0.25099146], [-1.0248934, 0.25361472, -0.51828313]], [[0.23977707, -\n 0.56090075, -0.19285655], [-0.17167747, 0.24558367, -0.3093586], [-\n 0.33303234, 1.52472734, -0.49013454]], [[-0.17137986, 1.21333742, \n 0.18988736], [0.31785482, -0.1212157, -0.18676008], [-0.10680684, -\n 0.30298883, 0.41809759]], [[-0.87821335, -0.51665992, -0.44061098], [\n 0.7480458, 0.5310725, 0.50418228], [-0.00512899, -0.3645584, -\n 0.23643512]]]])\n', 
(49326, 49843), True, 'import numpy as np\n'), ((50379, 51092), 'numpy.array', 'np.array', (['[[[[0.10437235, -0.21008658, 0.26925275, 0.16488039, 0.47933933], [\n 0.42143974, -0.2629388, -0.12013602, -0.54157579, 0.14280275], [-\n 0.06124666, -0.44938356, -0.55658901, -0.49534237, -0.10720548], [-\n 0.16561902, -0.23929697, -0.82584178, -0.66022277, -0.58654481], [-\n 0.4826864, -0.18644476, -0.43645298, 0.04623342, -0.25000823]], [[-\n 0.27729425, -0.16841865, -0.16093449, 0.11635975, 0.00748415], [-\n 0.07074942, -0.54079264, -0.75282294, -0.68207347, -0.21203026], [-\n 0.05160286, -0.29598606, -0.66841042, -0.61680746, -0.3724243], [\n 0.22569139, -0.12756741, -0.50747585, -0.73316729, -0.37990844], [\n 0.01914656, 0.24480659, 0.08441254, 0.06526598, -0.16039404]]]]'], {}), '([[[[0.10437235, -0.21008658, 0.26925275, 0.16488039, 0.47933933],\n [0.42143974, -0.2629388, -0.12013602, -0.54157579, 0.14280275], [-\n 0.06124666, -0.44938356, -0.55658901, -0.49534237, -0.10720548], [-\n 0.16561902, -0.23929697, -0.82584178, -0.66022277, -0.58654481], [-\n 0.4826864, -0.18644476, -0.43645298, 0.04623342, -0.25000823]], [[-\n 0.27729425, -0.16841865, -0.16093449, 0.11635975, 0.00748415], [-\n 0.07074942, -0.54079264, -0.75282294, -0.68207347, -0.21203026], [-\n 0.05160286, -0.29598606, -0.66841042, -0.61680746, -0.3724243], [\n 0.22569139, -0.12756741, -0.50747585, -0.73316729, -0.37990844], [\n 0.01914656, 0.24480659, 0.08441254, 0.06526598, -0.16039404]]]])\n', (50387, 51092), True, 'import numpy as np\n'), ((58787, 58833), 'unittest.skip', 'unittest.skip', (['"""need a more relaxed tolerance"""'], {}), "('need a more relaxed tolerance')\n", (58800, 58833), False, 'import unittest\n'), ((59600, 59646), 'unittest.skip', 'unittest.skip', (['"""need a more relaxed tolerance"""'], {}), "('need a more relaxed tolerance')\n", (59613, 59646), False, 'import unittest\n'), ((60402, 60417), 'unittest.main', 'unittest.main', ([], {}), '()\n', (60415, 60417), False, 'import 
unittest\n'), ((36693, 36712), 'oneflow.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (36704, 36712), True, 'import oneflow as flow\n'), ((37178, 37197), 'oneflow.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (37189, 37197), True, 'import oneflow as flow\n'), ((42935, 42954), 'oneflow.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (42946, 42954), True, 'import oneflow as flow\n'), ((49221, 49240), 'oneflow.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (49232, 49240), True, 'import oneflow as flow\n'), ((58300, 58313), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (58311, 58313), False, 'from collections import OrderedDict\n'), ((58441, 58461), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (58451, 58461), False, 'from test_util import GenArgList\n'), ((58577, 58590), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (58588, 58590), False, 'from collections import OrderedDict\n'), ((58719, 58739), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (58729, 58739), False, 'from test_util import GenArgList\n'), ((36777, 36794), 'oneflow.Tensor', 'flow.Tensor', (['bias'], {}), '(bias)\n', (36788, 36794), True, 'import oneflow as flow\n'), ((37282, 37299), 'oneflow.Tensor', 'flow.Tensor', (['bias'], {}), '(bias)\n', (37293, 37299), True, 'import oneflow as flow\n'), ((41523, 41542), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (41534, 41542), True, 'import oneflow as flow\n'), ((48063, 48082), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (48074, 48082), True, 'import oneflow as flow\n'), ((51675, 51694), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (51686, 51694), True, 'import oneflow as flow\n'), ((52195, 52214), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (52206, 52214), True, 'import oneflow as flow\n'), ((52586, 52605), 'oneflow.device', 'flow.device', (['device'], {}), 
'(device)\n', (52597, 52605), True, 'import oneflow as flow\n'), ((53030, 53049), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (53041, 53049), True, 'import oneflow as flow\n'), ((53524, 53543), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (53535, 53543), True, 'import oneflow as flow\n'), ((53979, 53998), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (53990, 53998), True, 'import oneflow as flow\n'), ((54492, 54511), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (54503, 54511), True, 'import oneflow as flow\n'), ((54961, 54980), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (54972, 54980), True, 'import oneflow as flow\n'), ((55509, 55528), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (55520, 55528), True, 'import oneflow as flow\n'), ((55989, 56008), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (56000, 56008), True, 'import oneflow as flow\n'), ((56459, 56478), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (56470, 56478), True, 'import oneflow as flow\n'), ((56500, 56519), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (56511, 56519), True, 'import oneflow as flow\n'), ((56888, 56907), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (56899, 56907), True, 'import oneflow as flow\n'), ((56929, 56948), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (56940, 56948), True, 'import oneflow as flow\n'), ((57404, 57423), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (57415, 57423), True, 'import oneflow as flow\n'), ((57878, 57897), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (57889, 57897), True, 'import oneflow as flow\n'), ((51632, 51671), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'bias': '(True)'}), '(1, 1, (3, 3), bias=True)\n', (51646, 51671), True, 'import oneflow as flow\n'), ((52151, 
52191), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(3)', '(3, 3)'], {'bias': '(False)'}), '(1, 3, (3, 3), bias=False)\n', (52165, 52191), True, 'import oneflow as flow\n'), ((52542, 52582), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(3)', '(3, 3)'], {'bias': '(False)'}), '(1, 3, (3, 3), bias=False)\n', (52556, 52582), True, 'import oneflow as flow\n'), ((52987, 53026), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(3)', '(3, 3)'], {'bias': '(True)'}), '(1, 3, (3, 3), bias=True)\n', (53001, 53026), True, 'import oneflow as flow\n'), ((53453, 53503), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(2)', '(2)', '(3, 3)'], {'groups': '(2)', 'bias': '(False)'}), '(2, 2, (3, 3), groups=2, bias=False)\n', (53467, 53503), True, 'import oneflow as flow\n'), ((53908, 53958), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(2)', '(2)', '(3, 3)'], {'groups': '(2)', 'bias': '(False)'}), '(2, 2, (3, 3), groups=2, bias=False)\n', (53922, 53958), True, 'import oneflow as flow\n'), ((54415, 54471), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'padding': '(1, 2)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 2), bias=False)\n', (54429, 54471), True, 'import oneflow as flow\n'), ((54884, 54940), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'padding': '(1, 2)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 2), bias=False)\n', (54898, 54940), True, 'import oneflow as flow\n'), ((55404, 55475), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'padding': '(1, 1)', 'stride': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False)\n', (55418, 55475), True, 'import oneflow as flow\n'), ((55884, 55955), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'padding': '(1, 1)', 'stride': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), padding=(1, 1), stride=(2, 3), bias=False)\n', (55898, 55955), True, 'import oneflow as flow\n'), ((56415, 56455), 'oneflow.nn.Conv2d', 
'flow.nn.Conv2d', (['(1)', '(1)', '(3, 5)'], {'bias': '(False)'}), '(1, 1, (3, 5), bias=False)\n', (56429, 56455), True, 'import oneflow as flow\n'), ((56844, 56884), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 5)'], {'bias': '(False)'}), '(1, 1, (3, 5), bias=False)\n', (56858, 56884), True, 'import oneflow as flow\n'), ((57326, 57383), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'dilation': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), dilation=(2, 3), bias=False)\n', (57340, 57383), True, 'import oneflow as flow\n'), ((57800, 57857), 'oneflow.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(3, 3)'], {'dilation': '(2, 3)', 'bias': '(False)'}), '(1, 1, (3, 3), dilation=(2, 3), bias=False)\n', (57814, 57857), True, 'import oneflow as flow\n'), ((51804, 51826), 'numpy.zeros', 'np.zeros', (['(1, 1, 3, 3)'], {}), '((1, 1, 3, 3))\n', (51812, 51826), True, 'import numpy as np\n'), ((51989, 52003), 'numpy.zeros', 'np.zeros', (['(1,)'], {}), '((1,))\n', (51997, 52003), True, 'import numpy as np\n')] |
# coding: utf-8
import os
import numpy as np
import pickle as pkl
from tqdm import tqdm
import time
from datetime import timedelta
import csv
import sys
import codecs
import logging_setup
import struct
from parse_args import parse_args
import oneflow.core.record.record_pb2 as of_record
MAX_VOCAB_SIZE = 10000  # cap on vocabulary size
UNK, PAD = '<UNK>', '<PAD>'  # unknown-token and padding symbols
# Pick a plain logger when run as a script, a multiprocessing-safe one when imported.
if __name__ == '__main__':
    logger = logging_setup.setup_logger(__name__)
else:
    logger = logging_setup.setup_multiprocessing_logger()
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
def SST2_Processor(path):
    """Read an SST-2 TSV split (header row; ``sentence<TAB>label`` lines).

    Returns a list of ``[text_a, text_b, label]`` where ``text_b`` is
    always ``None`` for this single-sentence task. Malformed lines are
    reported and skipped.
    """
    examples = []
    with open(path, 'r', encoding='UTF-8') as reader:
        for idx, raw in enumerate(tqdm(reader)):
            if idx == 0:
                continue  # skip the header row
            try:
                stripped = raw.strip()
                if not stripped:
                    continue
                sentence, label = stripped.split('\t')
                examples.append([sentence, None, label])
            except Exception as err:
                print(err)
    return examples
def CoLA_Processor(path):
    """Read a CoLA TSV split (no header row).

    Column 1 holds the label and column 3 the sentence. Returns a list of
    ``[text_a, text_b, label]`` with ``text_b`` always ``None``; malformed
    lines are reported and skipped.
    """
    examples = []
    with open(path, 'r', encoding='UTF-8') as reader:
        for raw in tqdm(reader):
            try:
                cols = raw.strip().split('\t')
                if not cols:
                    continue
                examples.append([cols[3], None, cols[1]])
            except Exception as err:
                print(err)
    return examples
def QQP_Processor(path):
    """Read a QQP TSV split (header row).

    The two questions are taken from columns 3 and 4 and the label from
    column 5. Returns a list of ``[text_a, text_b, label]``; malformed
    lines are reported and skipped.
    """
    examples = []
    with open(path, 'r', encoding='UTF-8') as reader:
        for idx, raw in enumerate(tqdm(reader)):
            if idx == 0:
                continue  # skip the header row
            try:
                cols = raw.strip().split('\t')
                if not cols:
                    continue
                examples.append([cols[3], cols[4], cols[5]])
            except Exception as err:
                print(err)
    return examples
def RTE_Processor(path):
    """Read an RTE TSV split (header row).

    The sentence pair is taken from columns 1 and 2 and the label from the
    last column. Returns a list of ``[text_a, text_b, label]``; malformed
    lines are reported and skipped.
    """
    examples = []
    with open(path, 'r', encoding='UTF-8') as reader:
        for idx, raw in enumerate(tqdm(reader)):
            if idx == 0:
                continue  # skip the header row
            try:
                cols = raw.strip().split('\t')
                if not cols:
                    continue
                examples.append([cols[1], cols[2], cols[-1]])
            except Exception as err:
                print(err)
    return examples
def MRPC_Processor(path):
    """Read an MRPC TSV split (header row).

    The sentence pair is taken from columns 3 and 4 and the label from
    column 0. Returns a list of ``[text_a, text_b, label]``; malformed
    lines are reported and skipped.
    """
    examples = []
    with open(path, 'r', encoding='UTF-8') as reader:
        for idx, raw in enumerate(tqdm(reader)):
            if idx == 0:
                continue  # skip the header row
            try:
                cols = raw.strip().split('\t')
                if not cols:
                    continue
                examples.append([cols[3], cols[4], cols[0]])
            except Exception as err:
                print(err)
    return examples
def convert_single_example(examples, tokenizer, pad_size, vocab):
    """Tokenize examples and map tokens to vocabulary ids.

    Args:
        examples: list of ``[text_a, text_b, label]`` (``text_b`` may be None).
        tokenizer: callable turning a string into a token list.
        pad_size: fixed output length; 0/None disables padding/truncation.
        vocab: token -> id mapping; unknown tokens fall back to the UNK id.

    Returns:
        List of ``(token_ids, label, seq_len)`` tuples, where ``seq_len`` is
        the unpadded length (capped at ``pad_size`` after truncation).
    """
    contents = []
    for text_a, text_b, label in examples:
        tokens = tokenizer(text_a)
        if text_b:
            second = tokenizer(text_b)
            # Reserve one slot for the PAD separator between the two texts.
            _truncate_seq_pair(tokens, second, pad_size - 1)
            tokens = tokens + [PAD] + second
        seq_len = len(tokens)
        if pad_size:
            if seq_len < pad_size:
                tokens = tokens + [PAD] * (pad_size - seq_len)
            else:
                tokens = tokens[:pad_size]
                seq_len = pad_size
        unk_id = vocab.get(UNK)
        ids = [vocab.get(tok, unk_id) for tok in tokens]
        contents.append((ids, label, seq_len))
    return contents
def build_vocab(dataset, file_path, tokenizer, max_size, min_freq):
    """Scan one GLUE split and build a token -> id vocabulary.

    Tokens are counted over the concatenation of both texts of every
    example, filtered by ``min_freq``, ranked by frequency, and capped at
    ``max_size`` entries; UNK and PAD ids are appended at the end.
    """
    processors = {
        'SST-2': SST2_Processor,
        'CoLA': CoLA_Processor,
        'MRPC': MRPC_Processor,
        'QQP': QQP_Processor,
        'RTE': RTE_Processor,
    }
    if dataset in processors:
        examples = processors[dataset](file_path)
    else:
        print('Error: the dataset does not support')
    print('Building vocab ...')
    counts = {}
    for example in tqdm(examples):
        # Count over both texts when the example is a sentence pair.
        text = example[0] + example[1] if example[1] else example[0]
        for word in tokenizer(text):
            counts[word] = counts.get(word, 0) + 1
    ranked = sorted(
        (item for item in counts.items() if item[1] >= min_freq),
        key=lambda kv: kv[1],
        reverse=True,
    )[:max_size]
    vocab = {word: idx for idx, (word, _) in enumerate(ranked)}
    vocab[UNK] = len(vocab)
    vocab[PAD] = len(vocab)
    return vocab
def build_dataset(dataset, config, ues_word):
    """Load (or build) the vocabulary and convert the train/dev splits of
    ``dataset`` to padded id sequences.

    Args:
        dataset: GLUE task name ('SST-2', 'CoLA', 'MRPC', 'QQP' or 'RTE').
        config: object exposing ``vocab_path``, ``train_path``, ``dev_path``
            and ``pad_size`` attributes.
        ues_word: truthy for word-level tokenization (split on spaces),
            falsy for character-level.

    Returns:
        ``(vocab, train, dev)`` where ``vocab`` maps token -> id and each
        split is a list of ``(token_ids, label, seq_len)`` tuples.
    """
    if ues_word:
        tokenizer = lambda x: x.split(' ')  # word-level: tokens separated by spaces
    else:
        tokenizer = lambda x: [y for y in x]  # char-level
    # Fix: the original passed bare open() calls to pickle and never closed
    # the file handles; context managers guarantee they are closed.
    if os.path.exists(config.vocab_path):
        with open(config.vocab_path, 'rb') as fh:
            vocab = pkl.load(fh)
    else:
        vocab = build_vocab(
            dataset, config.train_path, tokenizer=tokenizer,
            max_size=MAX_VOCAB_SIZE, min_freq=1,
        )
        with open(config.vocab_path, 'wb') as fh:
            pkl.dump(vocab, fh)
    print(f"Vocab size: {len(vocab)}")

    # Task name -> raw-example reader; mirrors build_vocab's dispatch.
    processors = {
        'SST-2': SST2_Processor,
        'CoLA': CoLA_Processor,
        'MRPC': MRPC_Processor,
        'QQP': QQP_Processor,
        'RTE': RTE_Processor,
    }

    def load_dataset(dataset, tokenizer, path, pad_size=32):
        # Read raw examples, then tokenize, pad and convert to ids.
        if dataset in processors:
            examples = processors[dataset](path)
        else:
            print('error dataset not support')
        contents = convert_single_example(examples, tokenizer, pad_size, vocab)
        return contents

    train = load_dataset(dataset, tokenizer, config.train_path, config.pad_size)
    dev = load_dataset(dataset, tokenizer, config.dev_path, config.pad_size)
    # test = load_dataset(config.test_path, config.pad_size)
    return vocab, train, dev
def get_time_dif(start_time):
    """Return the wall-clock time elapsed since ``start_time`` (an epoch
    timestamp from ``time.time()``) as a timedelta rounded to whole seconds."""
    elapsed = time.time() - start_time
    return timedelta(seconds=int(round(elapsed)))
def file_based_convert_examples_to_features(
        examples, label_list, max_seq_length, output_file):
    """Serialize ``(token_ids, label, seq_len)`` examples into an OFRecord file.

    Each record is an 8-byte size header (``struct`` format ``"q"``) followed
    by the serialized ``OFRecord`` message. The file is opened in append-binary
    mode, so repeated calls accumulate records.

    Args:
        examples: list of ``(token_ids, label, seq_len)`` tuples.
        label_list: all label strings; list position defines the integer id.
        max_seq_length: fixed length every example was padded/truncated to.
        output_file: path of the OFRecord file to append to.
    """
    def create_int32_feature(values):
        # Fix: the original had a trailing comma here, so it returned a
        # 1-tuple instead of a Feature message — not a valid value for the
        # OFRecord feature map.
        return of_record.Feature(int32_list=of_record.Int32List(value=values))

    # The label mapping is loop-invariant; build it once, not per example.
    label_map = {label: i for i, label in enumerate(label_list)}
    writer = open(output_file, 'ab')
    total_written = 0
    try:
        for (ex_index, example) in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d" % (ex_index, len(examples)))
            # 1s over the real tokens, 0s over the padding.
            input_mask = [1] * example[2] + [0] * (max_seq_length - example[2])
            segment_ids = [1] * max_seq_length
            assert len(input_mask) == max_seq_length
            label_id = label_map[example[1]]
            sample = of_record.OFRecord(
                feature={
                    "input_ids": create_int32_feature(example[0]),
                    "input_mask": create_int32_feature(input_mask),
                    "segment_ids": create_int32_feature(segment_ids),
                    "label_ids": create_int32_feature([label_id]),
                    "is_real_example": create_int32_feature([int(True)])
                }
            )
            writer.write(struct.pack("q", sample.ByteSize()))
            writer.write(sample.SerializeToString())
            if ex_index % 10000 == (len(examples) - 1) % 10000:
                logger.info('Wrote intances %d/%d to "%s"', ex_index, len(examples), output_file)
            total_written += 1
    finally:
        # Fix: close the file even if serialization of one example fails.
        writer.close()
    logger.info('Wrote total %d instances to output files "%s"', total_written, output_file)
class Config(object):
    """Lightweight configuration holder for the dataset-conversion paths."""

    # Path of the pickled token->id vocabulary.
    vocab_path = ''
    # TSV paths of the train / dev splits.
    train_path = ''
    dev_path = ''
    # Fixed sequence length every example is padded/truncated to.
    pad_size = 32
if __name__ == "__main__":
    '''提取预训练词向量'''
    # (Bare string above: "extract pretrained word vectors".)
    # This script converts GLUE TSV splits into OFRecord files.
    # Adjust the directories and file names below as needed.
    # NOTE(review): `config` is bound to the Config *class* itself, not an
    # instance, so the assignments below mutate class attributes.
    config =Config
    dataset = "SST-2"
    train_dir = "/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_data/{}/train.tsv".format(dataset)
    dev_dir = "/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_data/{}/dev.tsv".format(dataset)
    vocab_dir = "/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord/{}_lstm_32".format(dataset)
    pretrain_dir = ""
    emb_dim = 300
    # Reuse a cached vocabulary if present; otherwise build one from the
    # training split and persist it.
    if os.path.exists(os.path.join(vocab_dir,'vocab.pkl')):
        word_to_id = pkl.load(open(os.path.join(vocab_dir,'vocab.pkl'), 'rb'))
    else:
        tokenizer = lambda x: x.split(' ')  # word-level vocab (tokens separated by spaces in the dataset)
        # tokenizer = lambda x: [y for y in x]  # char-level vocab
        word_to_id = build_vocab(dataset, train_dir, tokenizer=tokenizer, max_size=MAX_VOCAB_SIZE, min_freq=1)
        os.makedirs(vocab_dir, exist_ok=True)
        pkl.dump(word_to_id, open(os.path.join(vocab_dir,'vocab.pkl'), 'wb'))
    # print(word_to_id)
    # print(len(word_to_id))
    output_dir = '/remote-home/rpluo/Oneflow-Model-Compression/model_compress/data/glue_ofrecord/{}_lstm_32'.format(dataset)
    total_examples = {}
    max_seq_length= 32
    config.vocab_path = os.path.join(vocab_dir,'vocab.pkl')
    config.train_path = train_dir
    config.dev_path = dev_dir
    config.pad_size = max_seq_length
    # Label sets per GLUE task; list position defines the integer label id.
    if dataset == 'RTE':
        label_list = ["entailment", "not_entailment"]
    elif dataset in ['SST-2', 'MRPC', 'QQP', 'CoLA']:
        label_list = ["0", "1"]
    elif dataset == 'MNLI':
        label_list = ["contradiction", "entailment", "neutral"]
    else:
        print('Error: the dataset not supports')
    # print(config.vocab_path)
    _,train_dataset,dev_dataset = build_dataset(dataset=dataset, config=config,ues_word='True')
    # print(dev_dataset[0])
    # Write the dev split, then the train split, as OFRecord files.
    os.makedirs(os.path.join(output_dir, 'eval'), exist_ok=True)
    dev_file = os.path.join(output_dir, 'eval', "eval.of_record-0")
    file_based_convert_examples_to_features(dev_dataset,label_list,config.pad_size,dev_file)
    os.makedirs(os.path.join(output_dir, 'train'), exist_ok=True)
    train_file = os.path.join(output_dir, 'train', "train.of_record-0")
    file_based_convert_examples_to_features(train_dataset,label_list,config.pad_size,train_file)
| [
"oneflow.core.record.record_pb2.Int32List"
] | [((407, 443), 'logging_setup.setup_logger', 'logging_setup.setup_logger', (['__name__'], {}), '(__name__)\n', (433, 443), False, 'import logging_setup\n'), ((463, 507), 'logging_setup.setup_multiprocessing_logger', 'logging_setup.setup_multiprocessing_logger', ([], {}), '()\n', (505, 507), False, 'import logging_setup\n'), ((5340, 5354), 'tqdm.tqdm', 'tqdm', (['examples'], {}), '(examples)\n', (5344, 5354), False, 'from tqdm import tqdm\n'), ((6109, 6142), 'os.path.exists', 'os.path.exists', (['config.vocab_path'], {}), '(config.vocab_path)\n', (6123, 6142), False, 'import os\n'), ((7321, 7332), 'time.time', 'time.time', ([], {}), '()\n', (7330, 7332), False, 'import time\n'), ((10504, 10540), 'os.path.join', 'os.path.join', (['vocab_dir', '"""vocab.pkl"""'], {}), "(vocab_dir, 'vocab.pkl')\n", (10516, 10540), False, 'import os\n'), ((11194, 11246), 'os.path.join', 'os.path.join', (['output_dir', '"""eval"""', '"""eval.of_record-0"""'], {}), "(output_dir, 'eval', 'eval.of_record-0')\n", (11206, 11246), False, 'import os\n'), ((11424, 11478), 'os.path.join', 'os.path.join', (['output_dir', '"""train"""', '"""train.of_record-0"""'], {}), "(output_dir, 'train', 'train.of_record-0')\n", (11436, 11478), False, 'import os\n'), ((1316, 1323), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (1320, 1323), False, 'from tqdm import tqdm\n'), ((1845, 1852), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (1849, 1852), False, 'from tqdm import tqdm\n'), ((2331, 2338), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (2335, 2338), False, 'from tqdm import tqdm\n'), ((2886, 2893), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (2890, 2893), False, 'from tqdm import tqdm\n'), ((3443, 3450), 'tqdm.tqdm', 'tqdm', (['f'], {}), '(f)\n', (3447, 3450), False, 'from tqdm import tqdm\n'), ((9761, 9797), 'os.path.join', 'os.path.join', (['vocab_dir', '"""vocab.pkl"""'], {}), "(vocab_dir, 'vocab.pkl')\n", (9773, 9797), False, 'import os\n'), ((10137, 10174), 'os.makedirs', 'os.makedirs', (['vocab_dir'], 
{'exist_ok': '(True)'}), '(vocab_dir, exist_ok=True)\n', (10148, 10174), False, 'import os\n'), ((11130, 11162), 'os.path.join', 'os.path.join', (['output_dir', '"""eval"""'], {}), "(output_dir, 'eval')\n", (11142, 11162), False, 'import os\n'), ((11357, 11390), 'os.path.join', 'os.path.join', (['output_dir', '"""train"""'], {}), "(output_dir, 'train')\n", (11369, 11390), False, 'import os\n'), ((9834, 9870), 'os.path.join', 'os.path.join', (['vocab_dir', '"""vocab.pkl"""'], {}), "(vocab_dir, 'vocab.pkl')\n", (9846, 9870), False, 'import os\n'), ((10209, 10245), 'os.path.join', 'os.path.join', (['vocab_dir', '"""vocab.pkl"""'], {}), "(vocab_dir, 'vocab.pkl')\n", (10221, 10245), False, 'import os\n'), ((8278, 8311), 'oneflow.core.record.record_pb2.Int32List', 'of_record.Int32List', ([], {'value': 'values'}), '(value=values)\n', (8297, 8311), True, 'import oneflow.core.record.record_pb2 as of_record\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.experimental as flow
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestExp(flow.unittest.TestCase):
    """Check flow.exp and Tensor.exp against numpy's exp on random input."""

    def test_exp(test_case):
        # Functional form: flow.exp(tensor).
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        expected = np.exp(x.numpy())
        actual = flow.exp(x)
        test_case.assertTrue(np.allclose(actual.numpy(), expected))

    def test_tensor_exp(test_case):
        # Method form: tensor.exp().
        x = flow.Tensor(np.random.randn(2, 6, 5, 3), dtype=flow.float32)
        expected = np.exp(x.numpy())
        actual = x.exp()
        test_case.assertTrue(np.allclose(actual.numpy(), expected))
if __name__ == "__main__":
    # Run the test suite when this file is executed directly.
    unittest.main()
| [
"oneflow.experimental.exp",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((1383, 1398), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1396, 1398), False, 'import unittest\n'), ((969, 984), 'oneflow.experimental.exp', 'flow.exp', (['input'], {}), '(input)\n', (977, 984), True, 'import oneflow.experimental as flow\n'), ((712, 755), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (753, 755), True, 'import oneflow.experimental as flow\n'), ((902, 929), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (917, 929), True, 'import numpy as np\n'), ((1160, 1187), 'numpy.random.randn', 'np.random.randn', (['(2)', '(6)', '(5)', '(3)'], {}), '(2, 6, 5, 3)\n', (1175, 1187), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import collections.abc
import oneflow as flow
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.hob as hob
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.lib.core.enable_if as enable_if
from oneflow.python.oneflow_export import oneflow_export
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.core.operator.op_conf_pb2 as op_conf_pb
import oneflow.core.job.job_conf_pb2 as job_conf_pb
from typing import Tuple, Optional, Union, Sequence, Text
class ClipGradientConf:
    """Interface for gradient-clipping strategies.

    Subclasses build the corresponding proto message via ``clip_conf``.
    """

    @property
    def clip_conf(self) -> op_conf_pb.ClipConf:
        # Implemented by concrete strategies such as ``by_global_norm``.
        raise NotImplementedError()
@oneflow_export("optimizer.grad_clipping.by_global_norm")
class by_global_norm(ClipGradientConf):
    r"""Clip gradients by their global norm.

    When ``norm(Input) <= clip_norm`` the input passes through unchanged;
    otherwise it is rescaled as

    .. math::

        Output = \frac{clip\_norm*Input}{norm(Input)}

    Args:
        clip_norm (float): The maximum allowed norm.

    Example:

    .. code-block:: python

        gradient_clip = flow.optimizer.grad_clipping.by_global_norm(1.0)
        flow.optimizer.AdamW(
            lr_scheduler,
            do_bias_correction=False,
            weight_decay=0.00005,
            grad_clipping=gradient_clip,
        ).minimize(loss)
    """

    def __init__(self, clip_norm):
        self.clip_norm = clip_norm

    @property
    def clip_conf(self):
        # Build the proto message describing clip-by-global-norm.
        conf = op_conf_pb.ClipConf()
        conf.clip_by_global_norm.clip_norm = self.clip_norm
        return conf
class WarmupConf:
    """Interface for warmup strategies.

    Subclasses build the corresponding proto message via ``warmup_conf``.
    """

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        # Implemented by concrete strategies such as ``constant`` or ``linear``.
        raise NotImplementedError()
@oneflow_export("optimizer.warmup.constant")
class constant(WarmupConf):
    r"""Constant warmup strategy.

    For the first ``steps`` batches the rate is scaled:

    .. math::

        learning\_rate = base\_learning\_rate*multiplier

    afterwards the base learning rate is used unchanged.

    Args:
        steps (int): Number of warmup batches.
        multiplier (float): Warmup scale factor, greater than 0. and less than 1.

    Example:

    .. code-block:: python

        # Before 10 steps the learning rate is 0.001; afterwards 0.01.
        warmup_scheduler = flow.optimizer.warmup.constant(10, 0.1)
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler(
            [], [0.01], warmup=warmup_scheduler
        )
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(self, steps, multiplier):
        self.steps = steps
        self.multiplier = multiplier

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        conf = op_conf_pb.WarmupConf()
        conf.constant_conf.warmup_batches = self.steps
        conf.constant_conf.multiplier = self.multiplier
        return conf
@oneflow_export("optimizer.warmup.linear")
class linear(WarmupConf):
    r"""Linear warmup strategy.

    While the current step is below ``steps`` the rate grows linearly:

    .. math::

        & current\_multiplier = start\_multiplier + (1-start\_multiplier)*\frac{train\_step}{warmup\_step}

        & current\_learning\_rate = learning\_rate*current\_multiplier

    Args:
        steps (int): Number of warmup batches.
        start_multiplier (float): Initial scale factor, greater than 0. and
            less than 1.

    Example:

    .. code-block:: python

        # Over the first 10 steps the rate grows linearly from 0.001 to 0.01.
        warmup_scheduler = flow.optimizer.warmup.linear(10, 0.1)
        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler(
            [], [0.01], warmup=warmup_scheduler
        )
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(self, steps, start_multiplier):
        self.steps = steps
        self.start_multiplier = start_multiplier

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        conf = op_conf_pb.WarmupConf()
        conf.linear_conf.warmup_batches = self.steps
        conf.linear_conf.start_multiplier = self.start_multiplier
        return conf
class LrScheduler:
    """Base class for learning-rate schedulers.

    A scheduler either specifies ``base_lr`` plus a decay configuration, or
    ``lr_lbn`` — the name of a blob that produces the rate directly.
    """

    def __init__(
        self,
        base_lr: Optional[float] = None,
        lr_lbn: Optional[Text] = None,
        warmup: Optional[WarmupConf] = None,
    ):
        self.base_lr = base_lr
        self.lr_lbn = lr_lbn
        self.warmup = warmup

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        # Concrete schedulers return their decay proto (or None for lbn-based).
        raise NotImplementedError()

    def SetLrFieldsInTrainConf(self, train_conf) -> None:
        """Copy warmup/decay/rate settings into a job's train conf."""
        # Warmup applies regardless of how the base rate is specified.
        if self.warmup_conf is not None:
            train_conf.model_update_conf.warmup_conf.CopyFrom(self.warmup_conf)

        if self.lr_lbn is None:
            # Decay path: rate comes from base_lr plus a decay schedule.
            assert self.learning_rate_decay_conf is not None
            train_conf.model_update_conf.learning_rate_decay.CopyFrom(
                self.learning_rate_decay_conf
            )
            train_conf.primary_lr = self.base_lr
        else:
            # LBN path: rate is produced by another blob in the graph.
            assert self.learning_rate_decay_conf is None
            assert self.base_lr is None
            train_conf.primary_lr_lbn = self.lr_lbn
            # primary_lr is a required proto field; its value is unused here.
            train_conf.primary_lr = 0

    @property
    def warmup_conf(self) -> op_conf_pb.WarmupConf:
        if self.warmup is None:
            return None
        return self.warmup.warmup_conf
@oneflow_export("optimizer.CosineScheduler")
class CosineScheduler(LrScheduler):
    r"""Cosine-decay learning-rate scheduler.

    For the first ``steps`` batches the rate follows:

    .. math::

        & cos\_decay = 0.5*(1+cos(\pi*\frac{current\_batch}{decayed\_batch}))

        & decay\_factor = (1-\alpha)*cos\_decay+\alpha

        & learning\_rate = base\_learning\_rate*decay\_factor

    afterwards it stays at:

    .. math::

        learning\_rate = {base\_learning\_rate}*{\alpha}

    Args:
        base_lr (float): The base learning rate.
        steps (int): Number of decay batches.
        alpha (float, optional): Final learning-rate scale factor. Defaults to 0.0.
        warmup (Optional[WarmupConf], optional): Warmup strategy. Defaults to None.

    Example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.CosineScheduler(
            base_lr=0.01, steps=10, alpha=0.1
        )
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        alpha: float = 0.0,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.alpha = alpha

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.cosine_conf.decay_batches = self.steps
        conf.cosine_conf.alpha = self.alpha
        return conf
@oneflow_export("optimizer.CustomScheduler")
class CustomScheduler(LrScheduler):
    """Scheduler whose learning rate is read from an existing blob (``lbn``)
    instead of being computed by a decay formula."""

    def __init__(self, lbn: Text):
        super().__init__(lr_lbn=lbn)

    @property
    def learning_rate_decay_conf(self) -> op_conf_pb.LearningRateDecayConf:
        # No decay configuration: the rate comes directly from the blob.
        return None
@oneflow_export("optimizer.PiecewiseConstantScheduler")
class PiecewiseConstantScheduler(LrScheduler):
    r"""Piecewise-constant learning-rate scheduler.

    ``values[i]`` applies until the train step reaches ``boundaries[i]``;
    ``values[-1]`` applies afterwards:

    .. code-block:: python

        boundaries = [1000, 2000]
        values = [0.1, 0.01, 0.001]

        if current_step < 1000:
            learning_rate = 0.1
        elif 1000 < current_step < 2000:
            learning_rate = 0.01
        else:
            learning_rate = 0.001

    Args:
        boundaries (Sequence[int]): Train-step boundaries.
        values (Sequence[float]): Learning rates per interval; exactly one
            more entry than ``boundaries``.
        warmup (Optional[WarmupConf], optional): Warmup strategy. Defaults to None.

    Example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler(
            boundaries=[10, 20], values=[0.1, 0.01, 0.001]
        )
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        boundaries: Sequence[int],
        values: Sequence[float],
        warmup: Optional[WarmupConf] = None,
    ):
        # One rate per interval, hence one more value than boundary.
        assert len(boundaries) + 1 == len(values)
        super().__init__(base_lr=values[0], warmup=warmup)
        self.boundaries = boundaries
        self.values = values

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.piecewise_constant_conf.boundaries.extend(self.boundaries)
        conf.piecewise_constant_conf.values.extend(self.values)
        return conf
@oneflow_export("optimizer.PiecewiseScalingScheduler")
class PiecewiseScalingScheduler(LrScheduler):
    """Piecewise-scaled learning-rate scheduler.

    ``base_lr`` is used unscaled before the first boundary and multiplied by
    ``scale[i]`` after ``boundaries[i]``:

    .. code-block:: python

        boundaries = [1000, 2000]
        scale = [0.1, 0.01]
        base_lr = 0.1

        if current_step < 1000:
            learning_rate = base_lr
        elif 1000 < current_step < 2000:
            learning_rate = 0.1*base_lr
        else:
            learning_rate = 0.01*base_lr

    Args:
        base_lr (float): The base learning rate.
        boundaries (Sequence[int]): Train-step boundaries.
        scale (Union[float, Sequence[float]]): Scale factor(s); a scalar is
            repeated once per boundary.
        warmup (Optional[WarmupConf], optional): Warmup strategy. Defaults to None.

    Example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseScalingScheduler(
            base_lr=0.1, boundaries=[5, 10], scale=[0.5, 0.1]
        )
        flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        boundaries: Sequence[int],
        scale: Union[float, Sequence[float]],
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.boundaries = boundaries
        if not isinstance(scale, collections.abc.Sequence):
            scale = [scale] * len(boundaries)
        assert len(scale) == len(boundaries)
        # The implicit scale for steps before the first boundary is 1.
        self.scale = [1] + list(scale)

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        conf.piecewise_scaling_conf.boundaries.extend(self.boundaries)
        conf.piecewise_scaling_conf.scales.extend(self.scale)
        return conf
@oneflow_export("optimizer.PolynomialSchduler")
class PolynomialSchduler(LrScheduler):
    r"""Polynomial-decay learning-rate scheduler.

    With ``cycle=True``:

    .. math::

        & decay\_batch = decay\_batch*ceil(\frac{current\_batch}{decay\_batch})

        & learning\_rate = (base\_lr-end\_lr)*(1-\frac{current\_batch}{decay\_batch})^{pow}+end\_lr

    With ``cycle=False``:

    .. math::

        & decay\_batch = min(decay\_batch, current\_batch)

        & learning\_rate = (base\_lr-end\_lr)*(1-\frac{current\_batch}{decay\_batch})^{pow}+end\_lr

    Args:
        base_lr (float): The base learning rate.
        steps (int): Number of decay batches.
        end_learning_rate (float, optional): Final learning rate. Defaults to 0.0001.
        power (float, optional): Polynomial power. Defaults to 1.0.
        cycle (bool, optional): Restart the decay window every ``steps``
            batches. Defaults to False.
        warmup (Optional[WarmupConf], optional): Warmup strategy. Defaults to None.

    Example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PolynomialSchduler(
            base_lr=0.001, steps=5, end_learning_rate=0.00001, power=2
        )
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        end_learning_rate: float = 0.0001,
        power: float = 1.0,
        cycle: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.end_learning_rate = end_learning_rate
        self.power = power
        self.cycle = cycle

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        conf = op_conf_pb.LearningRateDecayConf()
        poly = conf.polynomial_conf
        poly.decay_batches = self.steps
        poly.end_learning_rate = self.end_learning_rate
        poly.power = self.power
        poly.cycle = self.cycle
        return conf
@oneflow_export("optimizer.LinearCosineScheduler")
class LinearCosineScheduler(LrScheduler):
    r"""Linear-cosine decay learning rate scheduler.

    The learning rate is updated as:

    .. math::

        & current\_batch = min(current\_batch, decay\_batch)

        & linear\_decay = \frac{(decay\_batch - current\_batch)}{decay\_batch}

        & cosine\_decay = 0.5*(1.0+cos(2*\pi*num\_periods*\frac{current\_batch}{decay\_batch}))

        & decay\_factor = (\alpha+linear\_decay)*cosine\_decay + \beta

        & learning\_rate = base\_learning\_rate*decay\_factor

    Args:
        base_lr (float): The base learning rate.
        steps (int): The number of decay steps.
        num_periods (float, optional): The number of decay periods. Defaults to 0.5.
        alpha (float, optional): The :math:`\alpha` in the equation. Defaults to 0.0.
        beta (float, optional): The :math:`\beta` in the equation. Defaults to 0.001.
        warmup (Optional[WarmupConf], optional): The warmup strategy. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.LinearCosineScheduler(base_lr=0.1,
                                                            steps=10)
        flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        num_periods: float = 0.5,
        alpha: float = 0.0,
        beta: float = 0.001,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.num_periods = num_periods
        self.alpha = alpha
        self.beta = beta

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        """Serialize the linear-cosine schedule into its proto message."""
        conf = op_conf_pb.LearningRateDecayConf()
        linear_cosine = conf.linear_cosine_conf
        linear_cosine.decay_batches = self.steps
        linear_cosine.num_periods = self.num_periods
        linear_cosine.alpha = self.alpha
        linear_cosine.beta = self.beta
        return conf
@oneflow_export("optimizer.ExponentialScheduler")
class ExponentialScheduler(LrScheduler):
    r"""This operator creates an exponential decayed learning rate scheduler.

    The learning rate will be updated as follows:

    If staircase is set to False, the equation is:

    .. math::

        & pow = \frac{current\_batch}{decay\_batch}

        & learning\_rate = base\_learning\_rate*decay\_rate^{pow}

    If staircase is set to True, the equation is:

    .. math::

        & pow = floor(\frac{current\_batch}{decay\_batch})

        & learning\_rate = base\_learning\_rate*decay\_rate^{pow}

    Args:
        base_lr (float): The base learning rate.
        steps (int): The decay steps.
        decay_rate (float): The decay rate.
        staircase (bool, optional): If staircase is True, the scheduler decays the learning rate at discrete intervals. Defaults to False.
        warmup (Optional[WarmupConf], optional): The warmup strategy. Defaults to None.

    For example:

    .. code-block:: python

        # FIX: the example previously showed flow.optimizer.CosineScheduler
        # with an `alpha` argument, which does not match this class.
        lr_scheduler = flow.optimizer.ExponentialScheduler(base_lr=0.01,
                                                           steps=10,
                                                           decay_rate=0.1)
        flow.optimizer.Adam(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase: bool = False,  # annotated for consistency with sibling schedulers
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        """Serialize the exponential schedule into its proto message."""
        learning_rate_decay_conf = op_conf_pb.LearningRateDecayConf()
        learning_rate_decay_conf.exponential_conf.decay_batches = self.steps
        learning_rate_decay_conf.exponential_conf.decay_rate = self.decay_rate
        learning_rate_decay_conf.exponential_conf.staircase = self.staircase
        return learning_rate_decay_conf
@oneflow_export("optimizer.InverseTimeScheduler")
class InverseTimeScheduler(LrScheduler):
    r"""Inverse-time decay learning rate scheduler.

    With ``staircase=False``:

    .. math::

        & step\_ratio = \frac{current\_batch}{decay\_batch}

        & learning\_rate = \frac{base\_learning\_rate}{1+decay\_rate*step\_ratio}

    With ``staircase=True``:

    .. math::

        & step\_ratio = \frac{current\_batch}{decay\_batch}

        & learning\_rate = \frac{base\_learning\_rate}{1+floor(decay\_rate*step\_ratio)}

    Args:
        base_lr (float): The base learning rate.
        steps (int): The number of decay steps.
        decay_rate (float): The decay rate.
        staircase (bool, optional): If True, decay the learning rate at
            discrete intervals. Defaults to False.
        warmup (Optional[WarmupConf], optional): The warmup strategy. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.InverseTimeScheduler(base_lr=0.1,
                                                           steps=5,
                                                           decay_rate=0.9)
        flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        """Serialize the inverse-time schedule into its proto message."""
        conf = op_conf_pb.LearningRateDecayConf()
        inverse_time = conf.inverse_time_conf
        inverse_time.decay_batches = self.steps
        inverse_time.decay_rate = self.decay_rate
        inverse_time.staircase = self.staircase
        return conf
@oneflow_export("optimizer.NaturalExpScheduler")
class NaturalExpScheduler(LrScheduler):
    r"""Natural-exponential decay learning rate scheduler.

    With ``staircase=False``:

    .. math::

        & step\_ratio = \frac{current\_batch}{decay\_batch}

        & learning\_rate = {base\_learning\_rate}*e^{-decay\_rate*step\_ratio}

    With ``staircase=True``:

    .. math::

        & step\_ratio = \frac{current\_batch}{decay\_batch}

        & learning\_rate = {base\_learning\_rate}*e^{-decay\_rate*floor(step\_ratio)}

    Args:
        base_lr (float): The base learning rate.
        steps (int): The number of decay steps.
        decay_rate (float): The decay rate.
        staircase (bool, optional): If True, decay the learning rate at
            discrete intervals. Defaults to False.
        warmup (Optional[WarmupConf], optional): The warmup strategy. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.NaturalExpScheduler(base_lr=0.1,
                                                          steps=10,
                                                          decay_rate=0.5)
        flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(loss)
    """

    def __init__(
        self,
        base_lr: float,
        steps: int,
        decay_rate: float,
        staircase: bool = False,
        warmup: Optional[WarmupConf] = None,
    ):
        super().__init__(base_lr=base_lr, warmup=warmup)
        self.steps = steps
        self.decay_rate = decay_rate
        self.staircase = staircase

    @property
    def learning_rate_decay_conf(self) -> Optional[op_conf_pb.LearningRateDecayConf]:
        """Serialize the natural-exponential schedule into its proto message."""
        conf = op_conf_pb.LearningRateDecayConf()
        natural_exp = conf.natural_exp_conf
        natural_exp.decay_batches = self.steps
        natural_exp.decay_rate = self.decay_rate
        natural_exp.staircase = self.staircase
        return conf
class Optimizer:
    """Base class for OneFlow optimizers.

    Holds the learning-rate scheduler plus optional loss scaling,
    gradient clipping, and an explicit train-step LBN, and knows how to
    serialize itself into the job's ``TrainConf`` proto.
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        loss_scale_factor: Optional[int] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        self.lr_scheduler = lr_scheduler
        self.loss_scale_factor = loss_scale_factor
        self.grad_clipping = grad_clipping
        self.train_step_lbn = train_step_lbn

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        """Hook: subclasses fill in their optimizer-specific proto fields."""
        raise NotImplementedError()

    @property
    def train_conf(self) -> job_conf_pb.TrainConf:
        """Assemble the complete ``TrainConf`` proto for this optimizer."""
        conf = job_conf_pb.TrainConf()
        self.lr_scheduler.SetLrFieldsInTrainConf(conf)
        update_conf = conf.model_update_conf
        if self.grad_clipping is not None:
            update_conf.clip_conf.CopyFrom(self.grad_clipping.clip_conf)
        if self.train_step_lbn is not None:
            conf.train_step_lbn = self.train_step_lbn
        if self.loss_scale_factor is not None:
            update_conf.loss_scale_factor = self.loss_scale_factor
        self._SetSpecificFieldsInTrainConf(conf)
        return conf

    def minimize(
        self, loss: Union[Sequence[remote_blob_util.BlobDef], remote_blob_util.BlobDef]
    ) -> None:
        """Register `loss` (a single blob or a sequence of blobs) as training targets."""
        losses = loss if isinstance(loss, collections.abc.Sequence) else [loss]
        # Push the assembled train conf into the current job context first.
        c_api_util.CurJobBuildAndInferCtx_SetTrainConf(self.train_conf)
        for blob in losses:
            flow.losses.add_loss(blob)
@oneflow_export("optimizer.SGD")
class SGD(Optimizer):
    r"""Stochastic gradient descent optimizer.

    With ``momentum == 0`` the parameter update is:

    .. math::

        param_{new} = param_{old} - learning\_rate*grad

    With momentum:

    .. math::

        & V_{t} = \beta*V_{t-1} + learning\_rate*g_t

        & param_{new} = param_{old} - V_{t}

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        momentum (float, optional): The momentum factor (:math:`\beta`). Defaults to 0.9.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
        flow.optimizer.SGD(lr_scheduler, momentum=0.9).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        loss_scale_factor: Optional[float] = None,
        momentum: float = 0.9,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.momentum = momentum

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        update_conf = train_conf.model_update_conf
        if self.momentum == 0:
            # Plain SGD: no momentum buffer is allocated.
            update_conf.naive_conf.SetInParent()
        else:
            update_conf.momentum_conf.beta = self.momentum
@oneflow_export("optimizer.Adam")
class Adam(Optimizer):
    r"""Adam optimizer.

    Adapts each parameter's learning rate from the 1st- and 2nd-moment
    estimates of its gradient.

    With bias correction:

    .. math::

        & V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad

        & S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}

        & \hat{V_t} = \frac{V_t}{1-\beta_1^t}

        & \hat{S_t} = \frac{S_t}{1-\beta_2^t}

        & \hat{g} = learning\_rate*\frac{\hat{V_t}}{\sqrt{\hat{S_t}}+\epsilon}

        & param_{new} = param_{old} - \hat{g}

    Without bias correction:

    .. math::

        & V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad

        & S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}

        & \hat{g} = learning\_rate*\frac{{V_t}}{\sqrt{{S_t}}+\epsilon}

        & param_{new} = param_{old} - \hat{g}

    More details in `Adam <https://arxiv.org/abs/1412.6980>`_.

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        beta1 (float, optional): Exponential decay rate for the 1st-moment estimates (:math:`\beta_1`). Defaults to 0.9.
        beta2 (float, optional): Exponential decay rate for the 2nd-moment estimates (:math:`\beta_2`). Defaults to 0.999.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-8.
        do_bias_correction (bool, optional): Whether to apply bias correction. Defaults to False.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
        flow.optimizer.Adam(lr_scheduler, do_bias_correction=False).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
        do_bias_correction=False,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.do_bias_correction = do_bias_correction

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        adam_conf = train_conf.model_update_conf.adam_conf
        adam_conf.beta1 = self.beta1
        adam_conf.beta2 = self.beta2
        adam_conf.epsilon = self.epsilon
        adam_conf.do_bias_correction = self.do_bias_correction
@oneflow_export("optimizer.AdamW")
class AdamW(Optimizer):
    r"""Adam-weight-decay optimizer.

    Plain L2 regularization is ineffective under Adam's adaptive learning
    rate (see `Adam-weight-decay
    <https://www.fast.ai/2018/07/02/adam-weight-decay/>`_), so the decay
    term is applied directly to the update instead.

    With bias correction:

    .. math::

        & V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad

        & S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}

        & \hat{V_t} = \frac{V_t}{1-\beta_1^t}

        & \hat{S_t} = \frac{S_t}{1-\beta_2^t}

        & \hat{g} = learning\_rate*(\frac{\hat{V_t}}{\sqrt{\hat{S_t}}+\epsilon}+\lambda*param_{old})

        & param_{new} = param_{old} - \hat{g}

    Without bias correction:

    .. math::

        & V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad

        & S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}

        & \hat{g} = learning\_rate*(\frac{{V_t}}{\sqrt{{S_t}}+\epsilon}+\lambda*param_{old})

        & param_{new} = param_{old} - \hat{g}

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        beta1 (float, optional): Exponential decay rate for the 1st-moment estimates (:math:`\beta_1`). Defaults to 0.9.
        beta2 (float, optional): Exponential decay rate for the 2nd-moment estimates (:math:`\beta_2`). Defaults to 0.999.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-8.
        do_bias_correction (bool, optional): Whether to apply bias correction. Defaults to False.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        weight_decay (Optional[float], optional): The weight decay factor (:math:`\lambda`). Defaults to None.
        weight_decay_includes (Optional[Union[Sequence[Text], Text]], optional): Names of parameters that use weight decay. Defaults to None.
        weight_decay_excludes (Optional[Union[Sequence[Text], Text]], optional): Names of parameters that do not use weight decay. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    Note:
        At most one of `weight_decay_includes` and `weight_decay_excludes`
        may be set. If both are None, every parameter uses weight decay.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
        flow.optimizer.AdamW(lr_scheduler,
                    do_bias_correction=False, weight_decay=0.00005).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1=0.9,
        beta2=0.999,
        epsilon=1e-8,
        do_bias_correction=False,
        loss_scale_factor: Optional[float] = None,
        weight_decay: Optional[float] = None,
        weight_decay_includes: Optional[Union[Sequence[Text], Text]] = None,
        weight_decay_excludes: Optional[Union[Sequence[Text], Text]] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.do_bias_correction = do_bias_correction
        self.weight_decay = weight_decay
        # Normalize a single pattern string into a one-element list.
        if isinstance(weight_decay_includes, str):
            weight_decay_includes = [weight_decay_includes]
        if isinstance(weight_decay_excludes, str):
            weight_decay_excludes = [weight_decay_excludes]
        self.weight_decay_includes = weight_decay_includes
        self.weight_decay_excludes = weight_decay_excludes

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        adam_conf = train_conf.model_update_conf.adam_conf
        adam_conf.beta1 = self.beta1
        adam_conf.beta2 = self.beta2
        adam_conf.epsilon = self.epsilon
        adam_conf.do_bias_correction = self.do_bias_correction
        if self.weight_decay is not None:
            decay_conf = train_conf.model_update_conf.weight_decay_conf
            decay_conf.weight_decay_rate = self.weight_decay
            # Includes and excludes are mutually exclusive.
            assert not (
                self.weight_decay_excludes is not None
                and self.weight_decay_includes is not None
            )
            if self.weight_decay_includes is not None:
                decay_conf.includes.pattern.extend(self.weight_decay_includes)
            elif self.weight_decay_excludes is not None:
                decay_conf.excludes.pattern.extend(self.weight_decay_excludes)
@oneflow_export("optimizer.RMSProp")
class RMSProp(Optimizer):
    r"""RMSProp optimizer.

    Uses the mean squared gradient to adapt the learning rate.

    If centered:

    .. math::

        & mg_t = mg * \beta_1 + (1 - \beta_1) * grad

        & denom_t = S_t - mg_t * mg_t

    else:

    .. math::

        denom_t = S_t

    .. math::

        param_{new} = param_{old} - \frac{learning\_rate}{\sqrt{denom_t+\epsilon}} \odot grad

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        decay_rate (float, optional): The decay factor (:math:`\beta_1`). Defaults to 0.99.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-8.
        centered (bool, optional): If True, normalize gradients by the estimated
            variance of the gradient; if False, by the uncentered second moment.
            May help training, but costs extra computation and memory.
            Defaults to False.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
        flow.optimizer.RMSProp(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        decay_rate: float = 0.99,
        epsilon: float = 1e-8,
        centered: bool = False,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.decay_rate = decay_rate
        self.epsilon = epsilon
        self.centered = centered

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        rmsprop_conf = train_conf.model_update_conf.rmsprop_conf
        rmsprop_conf.decay_rate = self.decay_rate
        rmsprop_conf.epsilon = self.epsilon
        rmsprop_conf.centered = self.centered
@oneflow_export("optimizer.LARS")
class LARS(Optimizer):
    r"""LARS (layer-wise adaptive rate scaling) optimizer.

    The parameter update is:

    .. math::

        & local\_learning\_rate = learning\_rate*lars\_coeff*\frac{\lVert{parm_{old}\rVert}}{\epsilon+\lVert{grad\rVert}}

        & momentum_t = \beta*momentum_{t-1} + local\_learning\_rate*(grad)

        & param_{new} = param_{old} - momentum_t

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        momentum_beta (float, optional): The momentum factor (:math:`\beta`). Defaults to 0.9.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-9.
        lars_coefficient (float, optional): Trust coefficient controlling how much each
            layer may change its weights (:math:`lars\_coeff`). Defaults to 0.0001.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.1])
        flow.optimizer.LARS(lr_scheduler, momentum_beta=0.9).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        momentum_beta: float = 0.9,
        epsilon: float = 1e-9,
        lars_coefficient: float = 0.0001,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.momentum_beta = momentum_beta
        self.epsilon = epsilon
        self.lars_coefficient = lars_coefficient

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        lars_conf = train_conf.model_update_conf.lars_conf
        lars_conf.momentum_beta = self.momentum_beta
        lars_conf.epsilon = self.epsilon
        lars_conf.lars_coefficient = self.lars_coefficient
@oneflow_export("optimizer.LazyAdam")
class LazyAdam(Optimizer):
    r"""LazyAdam optimizer.

    Like Adam, it adapts each parameter's learning rate from the 1st- and
    2nd-moment estimates of its gradient; unlike Adam, it only updates the
    elements that actually received a gradient in the current batch, which
    makes it faster on sparse updates.

    .. math::

        & V_t = \beta_1*V_{t-1} + (1-\beta_1)*grad

        & S_t = \beta_2*S_{t-1} + (1-\beta_2)*{grad} \odot {grad}

        & \hat{g} = learning\_rate*\frac{{V_t}}{\sqrt{{S_t}}+\epsilon}

        & param_{new} = param_{old} - \hat{g}

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        beta1 (float, optional): Exponential decay rate for the 1st-moment estimates (:math:`\beta_1`). Defaults to 0.9.
        beta2 (float, optional): Exponential decay rate for the 2nd-moment estimates (:math:`\beta_2`). Defaults to 0.999.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-8.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.

    For example:

    .. code-block:: python

        lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.001])
        flow.optimizer.LazyAdam(lr_scheduler).minimize(loss)
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-8,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        lazy_adam_conf = train_conf.model_update_conf.lazy_adam_conf
        lazy_adam_conf.beta1 = self.beta1
        lazy_adam_conf.beta2 = self.beta2
        lazy_adam_conf.epsilon = self.epsilon
@oneflow_export("optimizer.LAMB")
class LAMB(Optimizer):
    r"""LAMB optimizer.

    Args:
        lr_scheduler (LrScheduler): The scheduler of the learning rate.
        beta1 (float, optional): Exponential decay rate for the 1st-moment estimates (:math:`\beta_1`). Defaults to 0.9.
        beta2 (float, optional): Exponential decay rate for the 2nd-moment estimates (:math:`\beta_2`). Defaults to 0.999.
        epsilon (float, optional): Small constant for numerical stability (:math:`\epsilon`). Defaults to 1e-6.
        loss_scale_factor (Optional[float], optional): The scale factor of the loss. Defaults to None.
        grad_clipping (Optional[ClipGradientConf], optional): The gradient clipping strategy. Defaults to None.
        train_step_lbn (Optional[Text], optional): [description]. Defaults to None.
    """

    def __init__(
        self,
        lr_scheduler: LrScheduler,
        beta1: float = 0.9,
        beta2: float = 0.999,
        epsilon: float = 1e-6,
        loss_scale_factor: Optional[float] = None,
        grad_clipping: Optional[ClipGradientConf] = None,
        train_step_lbn: Optional[Text] = None,
    ):
        super().__init__(
            lr_scheduler, loss_scale_factor, grad_clipping, train_step_lbn,
        )
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon

    def _SetSpecificFieldsInTrainConf(self, train_conf):
        lamb_conf = train_conf.model_update_conf.lamb_conf
        lamb_conf.beta1 = self.beta1
        lamb_conf.beta2 = self.beta2
        lamb_conf.epsilon = self.epsilon
| [
"oneflow.losses.add_loss",
"oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetTrainConf",
"oneflow.core.operator.op_conf_pb2.WarmupConf",
"oneflow.core.operator.op_conf_pb2.LearningRateDecayConf",
"oneflow.core.operator.op_conf_pb2.ClipConf",
"oneflow.core.job.job_conf_pb2.TrainConf",
"onefl... | [((1303, 1359), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.grad_clipping.by_global_norm"""'], {}), "('optimizer.grad_clipping.by_global_norm')\n", (1317, 1359), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3306, 3349), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.warmup.constant"""'], {}), "('optimizer.warmup.constant')\n", (3320, 3349), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5268, 5309), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.warmup.linear"""'], {}), "('optimizer.warmup.linear')\n", (5282, 5309), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8546, 8589), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.CosineScheduler"""'], {}), "('optimizer.CosineScheduler')\n", (8560, 8589), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11030, 11073), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.CustomScheduler"""'], {}), "('optimizer.CustomScheduler')\n", (11044, 11073), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11296, 11350), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PiecewiseConstantScheduler"""'], {}), "('optimizer.PiecewiseConstantScheduler')\n", (11310, 11350), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((13733, 13786), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PiecewiseScalingScheduler"""'], {}), "('optimizer.PiecewiseScalingScheduler')\n", (13747, 13786), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((16489, 16535), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.PolynomialSchduler"""'], {}), "('optimizer.PolynomialSchduler')\n", (16503, 16535), False, 'from 
oneflow.python.oneflow_export import oneflow_export\n'), ((19702, 19751), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LinearCosineScheduler"""'], {}), "('optimizer.LinearCosineScheduler')\n", (19716, 19751), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((22609, 22657), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.ExponentialScheduler"""'], {}), "('optimizer.ExponentialScheduler')\n", (22623, 22657), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((25356, 25404), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.InverseTimeScheduler"""'], {}), "('optimizer.InverseTimeScheduler')\n", (25370, 25404), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((28195, 28242), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.NaturalExpScheduler"""'], {}), "('optimizer.NaturalExpScheduler')\n", (28209, 28242), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((32525, 32556), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.SGD"""'], {}), "('optimizer.SGD')\n", (32539, 32556), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((35101, 35133), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.Adam"""'], {}), "('optimizer.Adam')\n", (35115, 35133), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((38922, 38955), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.AdamW"""'], {}), "('optimizer.AdamW')\n", (38936, 38955), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((44865, 44900), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.RMSProp"""'], {}), "('optimizer.RMSProp')\n", (44879, 44900), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((48160, 
48192), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LARS"""'], {}), "('optimizer.LARS')\n", (48174, 48192), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((51127, 51163), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LazyAdam"""'], {}), "('optimizer.LazyAdam')\n", (51141, 51163), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((54300, 54332), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""optimizer.LAMB"""'], {}), "('optimizer.LAMB')\n", (54314, 54332), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3069, 3090), 'oneflow.core.operator.op_conf_pb2.ClipConf', 'op_conf_pb.ClipConf', ([], {}), '()\n', (3088, 3090), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((5089, 5112), 'oneflow.core.operator.op_conf_pb2.WarmupConf', 'op_conf_pb.WarmupConf', ([], {}), '()\n', (5110, 5112), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((7083, 7106), 'oneflow.core.operator.op_conf_pb2.WarmupConf', 'op_conf_pb.WarmupConf', ([], {}), '()\n', (7104, 7106), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((10816, 10850), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (10848, 10850), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((13457, 13491), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (13489, 13491), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((16216, 16250), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (16248, 16250), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((19296, 19330), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', 
(19328, 19330), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((22229, 22263), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (22261, 22263), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((25045, 25079), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (25077, 25079), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((27881, 27915), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (27913, 27915), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((30717, 30751), 'oneflow.core.operator.op_conf_pb2.LearningRateDecayConf', 'op_conf_pb.LearningRateDecayConf', ([], {}), '()\n', (30749, 30751), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_pb\n'), ((31633, 31656), 'oneflow.core.job.job_conf_pb2.TrainConf', 'job_conf_pb.TrainConf', ([], {}), '()\n', (31654, 31656), True, 'import oneflow.core.job.job_conf_pb2 as job_conf_pb\n'), ((32399, 32462), 'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetTrainConf', 'c_api_util.CurJobBuildAndInferCtx_SetTrainConf', (['self.train_conf'], {}), '(self.train_conf)\n', (32445, 32462), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((32498, 32521), 'oneflow.losses.add_loss', 'flow.losses.add_loss', (['x'], {}), '(x)\n', (32518, 32521), True, 'import oneflow as flow\n')] |
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from typing import Tuple, Optional
from .padding import pad_same, get_padding_value
def conv2d_same(
x,
weight: flow.Tensor,
bias: Optional[flow.Tensor] = None,
stride: Tuple[int, int] = (1, 1),
padding: Tuple[int, int] = (0, 0),
dilation: Tuple[int, int] = (1, 1),
groups: int = 1,
):
x = pad_same(x, weight.shape[-2:], stride, dilation)
return F.conv2d(x, weight, bias, stride, (0, 0), dilation, groups)
class Conv2dSame(nn.Conv2d):
""" Tensorflow like 'SAME' convolution wrapper for 2D convolutions
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=True,
):
super(Conv2dSame, self).__init__(
in_channels, out_channels, kernel_size, stride, 0, dilation, groups, bias
)
def forward(self, x):
return conv2d_same(
x,
self.weight,
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
def create_conv2d_pad(in_chs, out_chs, kernel_size, **kwargs):
padding = kwargs.pop("padding", "")
kwargs.setdefault("bias", False)
padding, is_dynamic = get_padding_value(padding, kernel_size, **kwargs)
if is_dynamic:
return Conv2dSame(in_chs, out_chs, kernel_size, **kwargs)
else:
return nn.Conv2d(in_chs, out_chs, kernel_size, padding=padding, **kwargs)
| [
"oneflow.nn.functional.conv2d",
"oneflow.nn.Conv2d"
] | [((467, 526), 'oneflow.nn.functional.conv2d', 'F.conv2d', (['x', 'weight', 'bias', 'stride', '(0, 0)', 'dilation', 'groups'], {}), '(x, weight, bias, stride, (0, 0), dilation, groups)\n', (475, 526), True, 'import oneflow.nn.functional as F\n'), ((1532, 1598), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['in_chs', 'out_chs', 'kernel_size'], {'padding': 'padding'}), '(in_chs, out_chs, kernel_size, padding=padding, **kwargs)\n', (1541, 1598), True, 'import oneflow.nn as nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.einsum,
"""
einsum(equation, *operands) -> oneflow.Tensor
Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation
based on the Einstein summation convention.
Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them
in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of
this format are described below, but the general idea is to label every dimension of the input :attr:`operands`
with some subscript and define which subscripts are part of the output. The output is then computed by summing
the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the
output. For example, matrix multiplication can be computed using einsum as `flow.einsum("ij,jk->ik", A, B)`.
Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why).
Equation:
The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of
the input :attr:`operands` in the same order as the dimensions, separating subcripts for each operand by a
comma (','), e.g. `'ij,jk'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript
must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is
repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand
must match in size and the operand will be replaced by its diagonal along these dimensions. The subscripts that
appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order.
The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based
on the subscripts, and then summing out the dimensions whose subscripts are not part of the output.
Optionally, the output subscripts can be explicitly defined by adding an arrow ('->') at the end of the equation
followed by the subscripts for the output. For instance, the following equation computes the transpose of a
matrix multiplication: 'ij,jk->ki'. The output subscripts must appear at least once for some input operand and
at most once for the output.
Ellipsis ('...') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis.
Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts,
e.g. for an input operand with 5 dimensions, the ellipsis in the equation `'ab...c'` cover the third and fourth
dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the
'shape' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not
explicitly defined with the arrow ('->') notation, the ellipsis will come first in the output (left-most dimensions),
before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements
batch matrix multiplication `'...ij,...jk'`.
A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis,
arrow and comma) but something like `'. . .'` is not valid. An empty string `''` is valid for scalar operands.
.. note::
``flow.einsum`` handles ellipsis ('...') differently from NumPy in that it allows dimensions
covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output.
.. note::
This function does not optimize the given expression, so a different formula for the same computation may
run faster or consume less memory. Projects like opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/)
can optimize the formula for you.
Args:
equation (String): The subscripts for the Einstein summation.
*operands (oneflow.Tensor): The tensors to compute the Einstein summation of.
For example:
.. code-block:: python
>>> import oneflow as flow
# trace
>>> flow.einsum('ii', flow.arange(4*4).reshape(4,4).to(flow.float32))
tensor(30., dtype=oneflow.float32)
# diagonal
>>> flow.einsum('ii->i', flow.arange(4*4).reshape(4,4).to(flow.float32))
tensor([ 0., 5., 10., 15.], dtype=oneflow.float32)
# outer product
>>> x = flow.arange(5).to(flow.float32)
>>> y = flow.arange(4).to(flow.float32)
>>> flow.einsum('i,j->ij', x, y)
tensor([[ 0., 0., 0., 0.],
[ 0., 1., 2., 3.],
[ 0., 2., 4., 6.],
[ 0., 3., 6., 9.],
[ 0., 4., 8., 12.]], dtype=oneflow.float32)
# batch matrix multiplication
>>> As = flow.arange(3*2*5).reshape(3,2,5).to(flow.float32)
>>> Bs = flow.arange(3*5*4).reshape(3,5,4).to(flow.float32)
>>> flow.einsum('bij,bjk->bik', As, Bs).shape
oneflow.Size([3, 2, 4])
# batch permute
>>> A = flow.randn(2, 3, 4, 5)
>>> flow.einsum('...ij->...ji', A).shape
oneflow.Size([2, 3, 5, 4])
# bilinear
>>> A = flow.randn(3,5,4)
>>> l = flow.randn(2,5)
>>> r = flow.randn(2,4)
>>> flow.einsum('bn,anm,bm->ba', l, A, r).shape
oneflow.Size([2, 3])
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 6388), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.einsum', '"""\n einsum(equation, *operands) -> oneflow.Tensor\n\n Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation\n based on the Einstein summation convention.\n\n Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them\n in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of\n this format are described below, but the general idea is to label every dimension of the input :attr:`operands`\n with some subscript and define which subscripts are part of the output. The output is then computed by summing\n the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the\n output. For example, matrix multiplication can be computed using einsum as `flow.einsum("ij,jk->ik", A, B)`.\n Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why).\n\n Equation:\n\n The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of\n the input :attr:`operands` in the same order as the dimensions, separating subcripts for each operand by a\n comma (\',\'), e.g. `\'ij,jk\'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript\n must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is\n repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand\n must match in size and the operand will be replaced by its diagonal along these dimensions. 
The subscripts that\n appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order.\n The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based\n on the subscripts, and then summing out the dimensions whose subscripts are not part of the output.\n\n Optionally, the output subscripts can be explicitly defined by adding an arrow (\'->\') at the end of the equation\n followed by the subscripts for the output. For instance, the following equation computes the transpose of a\n matrix multiplication: \'ij,jk->ki\'. The output subscripts must appear at least once for some input operand and\n at most once for the output.\n\n Ellipsis (\'...\') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis.\n Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts,\n e.g. for an input operand with 5 dimensions, the ellipsis in the equation `\'ab...c\'` cover the third and fourth\n dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the\n \'shape\' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not\n explicitly defined with the arrow (\'->\') notation, the ellipsis will come first in the output (left-most dimensions),\n before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements\n batch matrix multiplication `\'...ij,...jk\'`.\n\n A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis,\n arrow and comma) but something like `\'. . .\'` is not valid. An empty string `\'\'` is valid for scalar operands.\n\n .. 
note::\n\n ``flow.einsum`` handles ellipsis (\'...\') differently from NumPy in that it allows dimensions\n covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output.\n\n .. note::\n\n This function does not optimize the given expression, so a different formula for the same computation may\n run faster or consume less memory. Projects like opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/)\n can optimize the formula for you.\n\n Args:\n equation (String): The subscripts for the Einstein summation.\n *operands (oneflow.Tensor): The tensors to compute the Einstein summation of.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n # trace\n >>> flow.einsum(\'ii\', flow.arange(4*4).reshape(4,4).to(flow.float32))\n tensor(30., dtype=oneflow.float32)\n\n # diagonal\n >>> flow.einsum(\'ii->i\', flow.arange(4*4).reshape(4,4).to(flow.float32))\n tensor([ 0., 5., 10., 15.], dtype=oneflow.float32)\n\n # outer product\n >>> x = flow.arange(5).to(flow.float32)\n >>> y = flow.arange(4).to(flow.float32)\n >>> flow.einsum(\'i,j->ij\', x, y)\n tensor([[ 0., 0., 0., 0.],\n [ 0., 1., 2., 3.],\n [ 0., 2., 4., 6.],\n [ 0., 3., 6., 9.],\n [ 0., 4., 8., 12.]], dtype=oneflow.float32)\n \n # batch matrix multiplication\n >>> As = flow.arange(3*2*5).reshape(3,2,5).to(flow.float32)\n >>> Bs = flow.arange(3*5*4).reshape(3,5,4).to(flow.float32)\n >>> flow.einsum(\'bij,bjk->bik\', As, Bs).shape\n oneflow.Size([3, 2, 4])\n\n # batch permute\n >>> A = flow.randn(2, 3, 4, 5)\n >>> flow.einsum(\'...ij->...ji\', A).shape\n oneflow.Size([2, 3, 5, 4])\n\n # bilinear\n >>> A = flow.randn(3,5,4)\n >>> l = flow.randn(2,5)\n >>> r = flow.randn(2,4)\n >>> flow.einsum(\'bn,anm,bm->ba\', l, A, r).shape\n oneflow.Size([2, 3])\n\n """'], {}), '(oneflow.einsum,\n """\n einsum(equation, *operands) -> oneflow.Tensor\n\n Sums the product of the elements of the input :attr:`operands` along dimensions specified using a notation\n 
based on the Einstein summation convention.\n\n Einsum allows computing many common multi-dimensional linear algebraic array operations by representing them\n in a short-hand format based on the Einstein summation convention, given by :attr:`equation`. The details of\n this format are described below, but the general idea is to label every dimension of the input :attr:`operands`\n with some subscript and define which subscripts are part of the output. The output is then computed by summing\n the product of the elements of the :attr:`operands` along the dimensions whose subscripts are not part of the\n output. For example, matrix multiplication can be computed using einsum as `flow.einsum("ij,jk->ik", A, B)`.\n Here, j is the summation subscript and i and k the output subscripts (see section below for more details on why).\n\n Equation:\n\n The :attr:`equation` string specifies the subscripts (letters in `[a-zA-Z]`) for each dimension of\n the input :attr:`operands` in the same order as the dimensions, separating subcripts for each operand by a\n comma (\',\'), e.g. `\'ij,jk\'` specify subscripts for two 2D operands. The dimensions labeled with the same subscript\n must be broadcastable, that is, their size must either match or be `1`. The exception is if a subscript is\n repeated for the same input operand, in which case the dimensions labeled with this subscript for this operand\n must match in size and the operand will be replaced by its diagonal along these dimensions. 
The subscripts that\n appear exactly once in the :attr:`equation` will be part of the output, sorted in increasing alphabetical order.\n The output is computed by multiplying the input :attr:`operands` element-wise, with their dimensions aligned based\n on the subscripts, and then summing out the dimensions whose subscripts are not part of the output.\n\n Optionally, the output subscripts can be explicitly defined by adding an arrow (\'->\') at the end of the equation\n followed by the subscripts for the output. For instance, the following equation computes the transpose of a\n matrix multiplication: \'ij,jk->ki\'. The output subscripts must appear at least once for some input operand and\n at most once for the output.\n\n Ellipsis (\'...\') can be used in place of subscripts to broadcast the dimensions covered by the ellipsis.\n Each input operand may contain at most one ellipsis which will cover the dimensions not covered by subscripts,\n e.g. for an input operand with 5 dimensions, the ellipsis in the equation `\'ab...c\'` cover the third and fourth\n dimensions. The ellipsis does not need to cover the same number of dimensions across the :attr:`operands` but the\n \'shape\' of the ellipsis (the size of the dimensions covered by them) must broadcast together. If the output is not\n explicitly defined with the arrow (\'->\') notation, the ellipsis will come first in the output (left-most dimensions),\n before the subscript labels that appear exactly once for the input operands. e.g. the following equation implements\n batch matrix multiplication `\'...ij,...jk\'`.\n\n A few final notes: the equation may contain whitespaces between the different elements (subscripts, ellipsis,\n arrow and comma) but something like `\'. . .\'` is not valid. An empty string `\'\'` is valid for scalar operands.\n\n .. 
note::\n\n ``flow.einsum`` handles ellipsis (\'...\') differently from NumPy in that it allows dimensions\n covered by the ellipsis to be summed over, that is, ellipsis are not required to be part of the output.\n\n .. note::\n\n This function does not optimize the given expression, so a different formula for the same computation may\n run faster or consume less memory. Projects like opt_einsum (https://optimized-einsum.readthedocs.io/en/stable/)\n can optimize the formula for you.\n\n Args:\n equation (String): The subscripts for the Einstein summation.\n *operands (oneflow.Tensor): The tensors to compute the Einstein summation of.\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n\n # trace\n >>> flow.einsum(\'ii\', flow.arange(4*4).reshape(4,4).to(flow.float32))\n tensor(30., dtype=oneflow.float32)\n\n # diagonal\n >>> flow.einsum(\'ii->i\', flow.arange(4*4).reshape(4,4).to(flow.float32))\n tensor([ 0., 5., 10., 15.], dtype=oneflow.float32)\n\n # outer product\n >>> x = flow.arange(5).to(flow.float32)\n >>> y = flow.arange(4).to(flow.float32)\n >>> flow.einsum(\'i,j->ij\', x, y)\n tensor([[ 0., 0., 0., 0.],\n [ 0., 1., 2., 3.],\n [ 0., 2., 4., 6.],\n [ 0., 3., 6., 9.],\n [ 0., 4., 8., 12.]], dtype=oneflow.float32)\n \n # batch matrix multiplication\n >>> As = flow.arange(3*2*5).reshape(3,2,5).to(flow.float32)\n >>> Bs = flow.arange(3*5*4).reshape(3,5,4).to(flow.float32)\n >>> flow.einsum(\'bij,bjk->bik\', As, Bs).shape\n oneflow.Size([3, 2, 4])\n\n # batch permute\n >>> A = flow.randn(2, 3, 4, 5)\n >>> flow.einsum(\'...ij->...ji\', A).shape\n oneflow.Size([2, 3, 5, 4])\n\n # bilinear\n >>> A = flow.randn(3,5,4)\n >>> l = flow.randn(2,5)\n >>> r = flow.randn(2,4)\n >>> flow.einsum(\'bn,anm,bm->ba\', l, A, r).shape\n oneflow.Size([2, 3])\n\n """\n )\n', (670, 6388), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import os
import oneflow
import oneflow.experimental as flow
import oneflow.python.framework.session_context as session_ctx
import oneflow._oneflow_internal
from oneflow.python.framework.multi_client_session import MultiClientSession
import oneflow.python.framework.c_api_util as c_api_util
@flow.unittest.skip_unless_1n1d()
class TestFetchOutputTensor(unittest.TestCase):
def test_fetch_output_tensor(test_case):
test_case.assertTrue(oneflow.distributed.is_multi_client())
test_case.assertTrue(
oneflow.python.framework.env_util.HasAllMultiClientEnvVars()
)
x = flow.Tensor(1, 1, 10, 10)
flow.nn.init.uniform_(x, a=-1.0, b=1.0)
session = session_ctx.GetDefaultSession()
test_case.assertTrue(isinstance(session, MultiClientSession))
session.TryInit()
with oneflow._oneflow_internal.lazy_mode.gard(True):
oneflow._oneflow_internal.JobBuildAndInferCtx_Open(
"cc_test_output_op_expr_job"
)
job_conf = (
oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto()
)
job_conf.set_job_name("cc_test_output_op_expr_job")
job_conf.mutable_predict_conf()
c_api_util.CurJobBuildAndInferCtx_SetJobConf(job_conf)
attrs = oneflow._oneflow_internal.MutableCfgAttrMap()
input_conf = (
oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf()
)
input_conf.set_in_0("EagerTensorInput")
input_conf.set_out_0("out_0")
input_op = oneflow._oneflow_internal.one.FeedInputOpExpr(
"cc_Input_0", input_conf, ["in_0"], ["out_0"]
)
output_conf = (
oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf()
)
output_conf.set_in_0(
"LazyTensorInput"
) # don't care lbn of feed/fetch op conf
output_conf.set_out_0("out_0")
output_op = oneflow._oneflow_internal.one.FetchOutputOpExpr(
"cc_Output_0", output_conf, ["in_0"], ["out_0"]
)
if not x.is_determined:
x.determine()
x_tensor_in_c = x._local_or_consistent_tensor
lazy_tensor = input_op.apply([x_tensor_in_c], attrs)[0]
test_case.assertEqual(lazy_tensor.shape, (1, 1, 10, 10))
test_case.assertTrue(lazy_tensor.is_lazy)
test_case.assertTrue(lazy_tensor.is_consistent)
eager_tensor = output_op.apply([lazy_tensor], attrs)[0]
test_case.assertEqual(eager_tensor.shape, (1, 1, 10, 10))
test_case.assertTrue(not eager_tensor.is_lazy)
test_case.assertTrue(eager_tensor.is_consistent)
if __name__ == "__main__":
unittest.main()
| [
"oneflow._oneflow_internal.MutableCfgAttrMap",
"oneflow._oneflow_internal.JobBuildAndInferCtx_Open",
"oneflow.experimental.Tensor",
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf",
"oneflow._oneflow_internal.oneflow.core.operator.... | [((921, 953), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (951, 953), True, 'import oneflow.experimental as flow\n'), ((3506, 3521), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3519, 3521), False, 'import unittest\n'), ((1241, 1266), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)', '(1)', '(10)', '(10)'], {}), '(1, 1, 10, 10)\n', (1252, 1266), True, 'import oneflow.experimental as flow\n'), ((1275, 1314), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x'], {'a': '(-1.0)', 'b': '(1.0)'}), '(x, a=-1.0, b=1.0)\n', (1296, 1314), True, 'import oneflow.experimental as flow\n'), ((1334, 1365), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (1363, 1365), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((1076, 1113), 'oneflow.distributed.is_multi_client', 'oneflow.distributed.is_multi_client', ([], {}), '()\n', (1111, 1113), False, 'import oneflow\n'), ((1157, 1217), 'oneflow.python.framework.env_util.HasAllMultiClientEnvVars', 'oneflow.python.framework.env_util.HasAllMultiClientEnvVars', ([], {}), '()\n', (1215, 1217), False, 'import oneflow\n'), ((1476, 1522), 'oneflow._oneflow_internal.lazy_mode.gard', 'oneflow._oneflow_internal.lazy_mode.gard', (['(True)'], {}), '(True)\n', (1516, 1522), False, 'import oneflow\n'), ((1537, 1622), 'oneflow._oneflow_internal.JobBuildAndInferCtx_Open', 'oneflow._oneflow_internal.JobBuildAndInferCtx_Open', (['"""cc_test_output_op_expr_job"""'], {}), "('cc_test_output_op_expr_job'\n )\n", (1587, 1622), False, 'import oneflow\n'), ((1689, 1757), 'oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto', 'oneflow._oneflow_internal.oneflow.core.job.job_conf.JobConfigProto', ([], {}), '()\n', (1755, 1757), False, 'import oneflow\n'), ((1892, 1946), 
'oneflow.python.framework.c_api_util.CurJobBuildAndInferCtx_SetJobConf', 'c_api_util.CurJobBuildAndInferCtx_SetJobConf', (['job_conf'], {}), '(job_conf)\n', (1936, 1946), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((1968, 2013), 'oneflow._oneflow_internal.MutableCfgAttrMap', 'oneflow._oneflow_internal.MutableCfgAttrMap', ([], {}), '()\n', (2011, 2013), False, 'import oneflow\n'), ((2058, 2131), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FeedInputOpConf', ([], {}), '()\n', (2129, 2131), False, 'import oneflow\n'), ((2263, 2360), 'oneflow._oneflow_internal.one.FeedInputOpExpr', 'oneflow._oneflow_internal.one.FeedInputOpExpr', (['"""cc_Input_0"""', 'input_conf', "['in_0']", "['out_0']"], {}), "('cc_Input_0', input_conf, [\n 'in_0'], ['out_0'])\n", (2308, 2360), False, 'import oneflow\n'), ((2431, 2506), 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf', 'oneflow._oneflow_internal.oneflow.core.operator.op_conf.FetchOutputOpConf', ([], {}), '()\n', (2504, 2506), False, 'import oneflow\n'), ((2710, 2810), 'oneflow._oneflow_internal.one.FetchOutputOpExpr', 'oneflow._oneflow_internal.one.FetchOutputOpExpr', (['"""cc_Output_0"""', 'output_conf', "['in_0']", "['out_0']"], {}), "('cc_Output_0', output_conf,\n ['in_0'], ['out_0'])\n", (2757, 2810), False, 'import oneflow\n')] |
import oneflow
import oneflow.F as F
import oneflow.experimental as flow
from oneflow.experimental import nn
import random
# 开启oneflow的eager动态图模式
flow.enable_eager_execution()
import numpy as np
class Affine(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(flow.ones((1, 1, dim)), requires_grad=True)
self.bias = nn.Parameter(flow.zeros((1, 1, dim)), requires_grad=True)
def forward(self, x):
return x * self.gamma + self.bias
class PreAffinePostLayerScale(nn.Module):
def __init__(self, dim, depth, fn):
super().__init__()
if depth < 18:
init_eps = 0.1
elif depth > 18 and depth <= 24:
init_eps = 1e-5
else:
init_eps = 1e-6
scale = flow.tensor(np.zeros((1, 1, dim)), dtype=flow.float32).fill_(init_eps)
self.scale = nn.Parameter(scale)
self.affine = Affine(dim=dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(self.affine(x), **kwargs) * self.scale + x
class ResMLP(nn.Module):
def __init__(self, *, dim, depth, num_classes, expansion_factor=4, patch_size=16, image_size=224):
super().__init__()
assert (image_size % patch_size) == 0, 'image must be divisible by patch size'
num_patches = (image_size // patch_size) ** 2
wrapper = lambda i, fn: PreAffinePostLayerScale(dim, i + 1, fn)
self.patch_embedding = nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size)
self.res_mlp_layers = nn.Sequential(*[nn.Sequential(wrapper(i, nn.Conv1d(num_patches, num_patches, 1)),
wrapper(i, nn.Sequential(
nn.Linear(dim, dim * expansion_factor),
nn.GELU(),
nn.Linear(dim * expansion_factor, dim)
))
) for i in range(depth)])
self.affine = Affine(dim)
self.to_logits = nn.Linear(dim, num_classes)
def forward(self, x):
x = self.patch_embedding(x)
x = x.flatten(2).transpose(1,2)
x = self.res_mlp_layers(x)
x = self.affine(x)
x = x.transpose(1,2).mean(dim=-1)
x = self.to_logits(x)
return x
if __name__ == "__main__":
x = flow.tensor(np.random.randn(1, 3, 224, 224), dtype=flow.float32)
net = ResMLP(dim=384, depth=3, num_classes=100)
print(x.shape)
print(net(x).shape)
| [
"oneflow.experimental.nn.Conv2d",
"oneflow.experimental.nn.GELU",
"oneflow.experimental.nn.Parameter",
"oneflow.experimental.ones",
"oneflow.experimental.nn.Conv1d",
"oneflow.experimental.nn.Linear",
"oneflow.experimental.enable_eager_execution",
"oneflow.experimental.zeros"
] | [((146, 175), 'oneflow.experimental.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (173, 175), True, 'import oneflow.experimental as flow\n'), ((886, 905), 'oneflow.experimental.nn.Parameter', 'nn.Parameter', (['scale'], {}), '(scale)\n', (898, 905), False, 'from oneflow.experimental import nn\n'), ((1469, 1529), 'oneflow.experimental.nn.Conv2d', 'nn.Conv2d', (['(3)', 'dim'], {'kernel_size': 'patch_size', 'stride': 'patch_size'}), '(3, dim, kernel_size=patch_size, stride=patch_size)\n', (1478, 1529), False, 'from oneflow.experimental import nn\n'), ((2219, 2246), 'oneflow.experimental.nn.Linear', 'nn.Linear', (['dim', 'num_classes'], {}), '(dim, num_classes)\n', (2228, 2246), False, 'from oneflow.experimental import nn\n'), ((2549, 2580), 'numpy.random.randn', 'np.random.randn', (['(1)', '(3)', '(224)', '(224)'], {}), '(1, 3, 224, 224)\n', (2564, 2580), True, 'import numpy as np\n'), ((311, 333), 'oneflow.experimental.ones', 'flow.ones', (['(1, 1, dim)'], {}), '((1, 1, dim))\n', (320, 333), True, 'import oneflow.experimental as flow\n'), ((388, 411), 'oneflow.experimental.zeros', 'flow.zeros', (['(1, 1, dim)'], {}), '((1, 1, dim))\n', (398, 411), True, 'import oneflow.experimental as flow\n'), ((806, 827), 'numpy.zeros', 'np.zeros', (['(1, 1, dim)'], {}), '((1, 1, dim))\n', (814, 827), True, 'import numpy as np\n'), ((1601, 1639), 'oneflow.experimental.nn.Conv1d', 'nn.Conv1d', (['num_patches', 'num_patches', '(1)'], {}), '(num_patches, num_patches, 1)\n', (1610, 1639), False, 'from oneflow.experimental import nn\n'), ((1792, 1830), 'oneflow.experimental.nn.Linear', 'nn.Linear', (['dim', '(dim * expansion_factor)'], {}), '(dim, dim * expansion_factor)\n', (1801, 1830), False, 'from oneflow.experimental import nn\n'), ((1896, 1905), 'oneflow.experimental.nn.GELU', 'nn.GELU', ([], {}), '()\n', (1903, 1905), False, 'from oneflow.experimental import nn\n'), ((1971, 2009), 'oneflow.experimental.nn.Linear', 'nn.Linear', (['(dim * 
expansion_factor)', 'dim'], {}), '(dim * expansion_factor, dim)\n', (1980, 2009), False, 'from oneflow.experimental import nn\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.tensor,
r"""
Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,
otherwise return a local tensor.
Arguments:
data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.
Keyword Arguments:
dtype (oneflow.dtype, optional) – the desired data type of returned tensor.
Default: if None, infers data type from data.
device (oneflow.device, optional): the desired device of returned tensor. If placement
and sbp is None, uses the current cpu for the default tensor type.
placement (oneflow.placement, optional): the desired placement of returned tensor.
sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.
requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False
Note:
The Keyword Argument device is mutually exclusive with placement and sbp.
Consistent tensor only can be constructed from tensor.
For example:
.. code-block:: python
>>> import oneflow as flow
>>> x = flow.tensor([1,2,3])
>>> x
tensor([1, 2, 3], dtype=oneflow.int64)
""",
)
add_docstr(
oneflow.Tensor.atan2,
r"""
See :func:`oneflow.atan2`
""",
)
add_docstr(
oneflow.Tensor.expand_as,
"""
expand_as(other) -> Tensor
Expand this tensor to the same size as :attr:`other`.
``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.
Please see :meth:`~Tensor.expand` for more information about ``expand``.
Args:
other (:class:`oneflow.Tensor`): The result tensor has the same size
as :attr:`other`.
""",
)
add_docstr(
oneflow.Tensor.numel,
"""
See :func:`oneflow.numel`
""",
)
add_docstr(
oneflow.Tensor.transpose,
"""
See :func:`oneflow.transpose`
""",
)
add_docstr(
oneflow.Tensor.logical_not,
"""
logical_not() -> Tensor
See :func:`oneflow.logical_not`
""",
)
add_docstr(
oneflow.Tensor.std,
"""
See :func:`oneflow.std`
""",
)
add_docstr(
oneflow.Tensor.var,
"""
See :func:`oneflow.var`
""",
)
add_docstr(
oneflow.Tensor.squeeze,
"""
See :func:`oneflow.squeeze`
""",
)
add_docstr(
oneflow.Tensor.unfold,
"""
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.unfold.html#torch.Tensor.unfold.
Returns a view of the original tensor which contains all slices of `size` size from `self`
tensor in the dimension `dimension`.
Step between two slices is given by `step`.
If sizedim is the size of dimension `dimension` for `self`, the size of dimension dimension in the
returned tensor will be (sizedim - size) / step + 1.
An additional dimension of size `size` is appended in the returned tensor.
Args:
dimension (int): dimension in which unfolding happens
size (int): the size of each slice that is unfolded
step (int): the step between each slice
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> x = flow.arange(1., 8)
>>> x
tensor([ 1., 2., 3., 4., 5., 6., 7.])
>>> x.unfold(0, 2, 1)
tensor([[ 1., 2.],
[ 2., 3.],
[ 3., 4.],
[ 4., 5.],
[ 5., 6.],
[ 6., 7.]])
>>> x.unfold(0, 2, 2)
tensor([[ 1., 2.],
[ 3., 4.],
[ 5., 6.]])
""",
)
add_docstr(
oneflow.Tensor.matmul,
"""
See :func:`oneflow.matmul`
""",
)
add_docstr(
oneflow.Tensor.narrow,
"""
See :func:`oneflow.narrow`
""",
)
add_docstr(
oneflow.Tensor.unsqueeze,
"""
See :func:`oneflow.unsqueeze`
""",
)
add_docstr(
oneflow.Tensor.permute,
"""
See :func:`oneflow.permute`
""",
)
add_docstr(
oneflow.Tensor.abs,
"""
See :func:`oneflow.abs`
""",
)
add_docstr(
oneflow.Tensor.acos,
"""
See :func:`oneflow.acos`
""",
)
add_docstr(
oneflow.Tensor.acosh,
"""
See :func:`oneflow.acosh`
""",
)
add_docstr(
oneflow.Tensor.arccosh,
"""
See :func:`oneflow.arccosh`
""",
)
add_docstr(
oneflow.Tensor.arctanh,
"""
See :func:`oneflow.arctanh`
""",
)
add_docstr(
oneflow.Tensor.argmax,
"""
See :func:`oneflow.argmax`
""",
)
add_docstr(
oneflow.Tensor.argmin,
"""
See :func:`oneflow.argmin`
""",
)
add_docstr(
oneflow.Tensor.atanh,
"""
See :func:`oneflow.atanh`
""",
)
add_docstr(
oneflow.Tensor.backward,
"""
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.backward.html#torch.Tensor.backward.
Computes the gradient of current tensor w.r.t. graph leaves.
The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location, that contains the gradient of the differentiated function w.r.t. self.
This function accumulates gradients in the leaves - you might need to zero .grad attributes or set them to None before calling it. See Default gradient layouts for details on the memory layout of accumulated gradients.
Note:
If you run any forward ops, create gradient, and/or call backward in a user-specified CUDA stream context, see Stream semantics of backward passes.
Note:
When inputs are provided and a given input is not a leaf, the current implementation will call its grad_fn (though it is not strictly needed to get this gradients). It is an implementation detail on which the user should not rely. See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.
Args:
gradient (Tensor or None): Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.
retain_graph (bool, optional): If False, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.
create_graph (bool, optional): If True, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to False.
""",
)
add_docstr(
oneflow.Tensor.cast,
"""
See :func:`oneflow.cast`
""",
)
add_docstr(
oneflow.Tensor.diag,
"""
See :func:`oneflow.diag`
""",
)
add_docstr(
oneflow.Tensor.dim,
"""
Tensor.dim() → int
Returns the number of dimensions of self tensor.
""",
)
add_docstr(
oneflow.Tensor.element_size,
"""
Tensor.element_size() → int
Returns the size in bytes of an individual element.
""",
)
add_docstr(
oneflow.Tensor.exp,
"""
See :func:`oneflow.exp`
""",
)
add_docstr(
oneflow.Tensor.fill_,
"""
Tensor.fill_(value) → Tensor
Fills self tensor with the specified value.
""",
)
add_docstr(
oneflow.Tensor.ge,
"""
See :func:`oneflow.ge`
""",
)
add_docstr(
oneflow.Tensor.gelu,
"""
See :func:`oneflow.gelu`
""",
)
add_docstr(
oneflow.Tensor.get_device,
"""
Tensor.get_device() -> Device ordinal (Integer)
For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. For CPU tensors, an error is thrown.
""",
)
add_docstr(
oneflow.Tensor.gt,
"""
See :func:`oneflow.gt`
""",
)
add_docstr(
oneflow.Tensor.log1p,
"""
See :func:`oneflow.log1p`
""",
)
add_docstr(
oneflow.Tensor.mish,
"""
See :func:`oneflow.mish`
""",
)
add_docstr(
oneflow.Tensor.mul,
"""
See :func:`oneflow.mul`
""",
)
add_docstr(
oneflow.Tensor.negative,
"""
See :func:`oneflow.negative`
""",
)
add_docstr(
oneflow.Tensor.nelement,
"""
Tensor.nelement() → int
Alias for numel()
""",
)
add_docstr(
oneflow.Tensor.normal_,
"""
normal_(mean=0, std=1, *, generator=None) -> Tensor
Fills :attr:`self` tensor with elements samples from the normal distribution parameterized by :attr:`mean` and :attr:`std`.
""",
)
add_docstr(
oneflow.Tensor.numpy,
"""
Tensor.numpy() → numpy.ndarray
Returns self tensor as a NumPy ndarray. This tensor and the returned ndarray share the same underlying storage. Changes to self tensor will be reflected in the ndarray and vice versa.
""",
)
add_docstr(
oneflow.Tensor.pow,
"""
See :func:`oneflow.pow`
""",
)
add_docstr(
oneflow.Tensor.relu,
"""
See :func:`oneflow.relu`
""",
)
add_docstr(
oneflow.Tensor.roll,
"""
See :func:`oneflow.roll`
""",
)
add_docstr(
oneflow.Tensor.round,
"""
See :func:`oneflow.round`
""",
)
add_docstr(
oneflow.Tensor.selu,
"""
See :func:`oneflow.selu`
""",
)
add_docstr(
oneflow.Tensor.sigmoid,
"""
See :func:`oneflow.sigmoid`
""",
)
add_docstr(
oneflow.Tensor.sign,
"""
See :func:`oneflow.sign`
""",
)
add_docstr(
oneflow.Tensor.silu,
"""
See :func:`oneflow.silu`
""",
)
add_docstr(
oneflow.Tensor.sinh,
"""
See :func:`oneflow.sinh`
""",
)
add_docstr(
oneflow.Tensor.size,
"""
The interface is consistent with PyTorch.
Returns the size of the self tensor. If dim is not specified, the returned value is a torch.Size, a subclass of tuple. If dim is specified, returns an int holding the size of that dimension.
Args:
idx (int, optional): The dimension for which to retrieve the size.
""",
)
add_docstr(
oneflow.Tensor.softmax,
"""
See :func:`oneflow.softmax`
""",
)
add_docstr(
oneflow.Tensor.softplus,
"""
See :func:`oneflow.softplus`
""",
)
add_docstr(
oneflow.Tensor.softsign,
"""
See :func:`oneflow.softsign`
""",
)
add_docstr(
oneflow.Tensor.tan,
"""
See :func:`oneflow.tan`
""",
)
add_docstr(
oneflow.Tensor.tanh,
"""
See :func:`oneflow.tanh`
""",
)
add_docstr(
oneflow.Tensor.tril,
"""
See :func:`oneflow.tril`
""",
)
add_docstr(
oneflow.Tensor.triu,
"""
See :func:`oneflow.triu`
""",
)
add_docstr(
oneflow.Tensor.uniform_,
"""
Tensor.uniform_(from=0, to=1) → Tensor
Fills self tensor with numbers sampled from the continuous uniform distribution:
.. math::
P(x)=1/(to-from)
""",
)
add_docstr(
oneflow.Tensor.copy_,
"""
The interface is consistent with PyTorch.
Tensor.copy_(src, non_blocking=False) → Tensor
Copies the elements from src into self tensor and returns self.
The src tensor must be broadcastable with the self tensor. It may be of a different data type or reside on a different device.
Args:
src (Tensor): the source tensor to copy from
non_blocking (bool): if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect.
""",
)
add_docstr(
oneflow.Tensor.to,
"""Performs Tensor dtype and/or device conversion.
A flow.dtype and flow.device are inferred from the arguments of `input.to(*args, **kwargs)`.
.. note::
If the ``input`` Tensor already
has the correct :class:`flow.dtype` and :class:`flow.device`, then ``input`` is returned.
Otherwise, the returned tensor is a copy of ``input`` with the desired.
Args:
input (oneflow.Tensor): An input tensor.
*args (oneflow.Tensor or oneflow.device or oneflow.dtype): Positional arguments
**kwargs (oneflow.device or oneflow.dtype) : Key-value arguments
Returns:
oneflow.Tensor: A Tensor.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow as flow
>>> arr = np.random.randint(1, 9, size=(1, 2, 3, 4))
>>> input = flow.Tensor(arr)
>>> output = input.to(dtype=flow.float32)
>>> np.array_equal(arr.astype(np.float32), output.numpy())
True
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 1958), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.tensor', '"""\n Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,\n otherwise return a local tensor. \n \n Arguments:\n data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.\n Keyword Arguments:\n dtype (oneflow.dtype, optional) – the desired data type of returned tensor.\n Default: if None, infers data type from data.\n device (oneflow.device, optional): the desired device of returned tensor. If placement\n and sbp is None, uses the current cpu for the default tensor type.\n placement (oneflow.placement, optional): the desired placement of returned tensor.\n sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False\n\n Note:\n The Keyword Argument device is mutually exclusive with placement and sbp.\n Consistent tensor only can be constructed from tensor.\n\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([1,2,3])\n >>> x\n tensor([1, 2, 3], dtype=oneflow.int64)\n\n """'], {}), '(oneflow.tensor,\n """\n Constructs a tensor with data, return a consistent tensor if placement and sbp are in kwargs,\n otherwise return a local tensor. \n \n Arguments:\n data: Initial data for the tensor. Can be a list, tuple, NumPy ndarray, scalar or tensor.\n Keyword Arguments:\n dtype (oneflow.dtype, optional) – the desired data type of returned tensor.\n Default: if None, infers data type from data.\n device (oneflow.device, optional): the desired device of returned tensor. 
If placement\n and sbp is None, uses the current cpu for the default tensor type.\n placement (oneflow.placement, optional): the desired placement of returned tensor.\n sbp (oneflow.sbp or tuple of oneflow.sbp, optional): the desired sbp of returned tensor.\n requires_grad (bool, optional): If autograd should record operations on the returned tensor. Default: False\n\n Note:\n The Keyword Argument device is mutually exclusive with placement and sbp.\n Consistent tensor only can be constructed from tensor.\n\n\n For example:\n\n .. code-block:: python\n\n >>> import oneflow as flow\n \n >>> x = flow.tensor([1,2,3])\n >>> x\n tensor([1, 2, 3], dtype=oneflow.int64)\n\n """\n )\n', (670, 1958), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((1963, 2038), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.atan2', '"""\n See :func:`oneflow.atan2`\n """'], {}), '(oneflow.Tensor.atan2, """\n See :func:`oneflow.atan2`\n """)\n', (1973, 2038), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2052, 2474), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.expand_as', '"""\n expand_as(other) -> Tensor\n\n Expand this tensor to the same size as :attr:`other`.\n ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.\n\n Please see :meth:`~Tensor.expand` for more information about ``expand``.\n\n Args:\n other (:class:`oneflow.Tensor`): The result tensor has the same size\n as :attr:`other`.\n """'], {}), '(oneflow.Tensor.expand_as,\n """\n expand_as(other) -> Tensor\n\n Expand this tensor to the same size as :attr:`other`.\n ``self.expand_as(other)`` is equivalent to ``self.expand(other.size())``.\n\n Please see :meth:`~Tensor.expand` for more information about ``expand``.\n\n Args:\n other (:class:`oneflow.Tensor`): The result tensor has the same size\n as :attr:`other`.\n """\n )\n', (2062, 2474), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), 
((2478, 2553), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.numel', '"""\n See :func:`oneflow.numel`\n """'], {}), '(oneflow.Tensor.numel, """\n See :func:`oneflow.numel`\n """)\n', (2488, 2553), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2566, 2653), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.transpose', '"""\n See :func:`oneflow.transpose`\n """'], {}), '(oneflow.Tensor.transpose,\n """\n See :func:`oneflow.transpose`\n """)\n', (2576, 2653), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2662, 2786), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.logical_not', '"""\n logical_not() -> Tensor\n See :func:`oneflow.logical_not`\n """'], {}), '(oneflow.Tensor.logical_not,\n """\n logical_not() -> Tensor\n See :func:`oneflow.logical_not`\n """\n )\n', (2672, 2786), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2790, 2861), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.std', '"""\n See :func:`oneflow.std`\n """'], {}), '(oneflow.Tensor.std, """\n See :func:`oneflow.std`\n """)\n', (2800, 2861), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2874, 2945), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.var', '"""\n See :func:`oneflow.var`\n """'], {}), '(oneflow.Tensor.var, """\n See :func:`oneflow.var`\n """)\n', (2884, 2945), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((2958, 3037), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.squeeze', '"""\n See :func:`oneflow.squeeze`\n """'], {}), '(oneflow.Tensor.squeeze, """\n See :func:`oneflow.squeeze`\n """)\n', (2968, 3037), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((3050, 4418), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.unfold', '"""\n The interface is consistent 
with PyTorch.\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.unfold.html#torch.Tensor.unfold.\n\n Returns a view of the original tensor which contains all slices of `size` size from `self`\n tensor in the dimension `dimension`.\n\n Step between two slices is given by `step`.\n\n If sizedim is the size of dimension `dimension` for `self`, the size of dimension dimension in the\n returned tensor will be (sizedim - size) / step + 1.\n\n An additional dimension of size `size` is appended in the returned tensor.\n\n Args:\n dimension (int): dimension in which unfolding happens\n size (int): the size of each slice that is unfolded\n step (int): the step between each slice\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.arange(1., 8)\n >>> x\n tensor([ 1., 2., 3., 4., 5., 6., 7.])\n >>> x.unfold(0, 2, 1)\n tensor([[ 1., 2.],\n [ 2., 3.],\n [ 3., 4.],\n [ 4., 5.],\n [ 5., 6.],\n [ 6., 7.]])\n >>> x.unfold(0, 2, 2)\n tensor([[ 1., 2.],\n [ 3., 4.],\n [ 5., 6.]])\n """'], {}), '(oneflow.Tensor.unfold,\n """\n The interface is consistent with PyTorch.\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.unfold.html#torch.Tensor.unfold.\n\n Returns a view of the original tensor which contains all slices of `size` size from `self`\n tensor in the dimension `dimension`.\n\n Step between two slices is given by `step`.\n\n If sizedim is the size of dimension `dimension` for `self`, the size of dimension dimension in the\n returned tensor will be (sizedim - size) / step + 1.\n\n An additional dimension of size `size` is appended in the returned tensor.\n\n Args:\n dimension (int): dimension in which unfolding happens\n size (int): the size of each slice that is unfolded\n step (int): the step between each slice\n\n For example:\n\n .. 
code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> x = flow.arange(1., 8)\n >>> x\n tensor([ 1., 2., 3., 4., 5., 6., 7.])\n >>> x.unfold(0, 2, 1)\n tensor([[ 1., 2.],\n [ 2., 3.],\n [ 3., 4.],\n [ 4., 5.],\n [ 5., 6.],\n [ 6., 7.]])\n >>> x.unfold(0, 2, 2)\n tensor([[ 1., 2.],\n [ 3., 4.],\n [ 5., 6.]])\n """\n )\n', (3060, 4418), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4422, 4499), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.matmul', '"""\n See :func:`oneflow.matmul`\n """'], {}), '(oneflow.Tensor.matmul, """\n See :func:`oneflow.matmul`\n """)\n', (4432, 4499), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4512, 4589), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.narrow', '"""\n See :func:`oneflow.narrow`\n """'], {}), '(oneflow.Tensor.narrow, """\n See :func:`oneflow.narrow`\n """)\n', (4522, 4589), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4602, 4689), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.unsqueeze', '"""\n See :func:`oneflow.unsqueeze`\n """'], {}), '(oneflow.Tensor.unsqueeze,\n """\n See :func:`oneflow.unsqueeze`\n """)\n', (4612, 4689), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4698, 4777), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.permute', '"""\n See :func:`oneflow.permute`\n """'], {}), '(oneflow.Tensor.permute, """\n See :func:`oneflow.permute`\n """)\n', (4708, 4777), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4790, 4861), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.abs', '"""\n See :func:`oneflow.abs`\n """'], {}), '(oneflow.Tensor.abs, """\n See :func:`oneflow.abs`\n """)\n', (4800, 4861), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4874, 4947), 'oneflow.framework.docstr.utils.add_docstr', 
'add_docstr', (['oneflow.Tensor.acos', '"""\n See :func:`oneflow.acos`\n """'], {}), '(oneflow.Tensor.acos, """\n See :func:`oneflow.acos`\n """)\n', (4884, 4947), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((4960, 5035), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.acosh', '"""\n See :func:`oneflow.acosh`\n """'], {}), '(oneflow.Tensor.acosh, """\n See :func:`oneflow.acosh`\n """)\n', (4970, 5035), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5048, 5127), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.arccosh', '"""\n See :func:`oneflow.arccosh`\n """'], {}), '(oneflow.Tensor.arccosh, """\n See :func:`oneflow.arccosh`\n """)\n', (5058, 5127), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5140, 5219), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.arctanh', '"""\n See :func:`oneflow.arctanh`\n """'], {}), '(oneflow.Tensor.arctanh, """\n See :func:`oneflow.arctanh`\n """)\n', (5150, 5219), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5232, 5309), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.argmax', '"""\n See :func:`oneflow.argmax`\n """'], {}), '(oneflow.Tensor.argmax, """\n See :func:`oneflow.argmax`\n """)\n', (5242, 5309), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5322, 5399), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.argmin', '"""\n See :func:`oneflow.argmin`\n """'], {}), '(oneflow.Tensor.argmin, """\n See :func:`oneflow.argmin`\n """)\n', (5332, 5399), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((5412, 5487), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.atanh', '"""\n See :func:`oneflow.atanh`\n """'], {}), '(oneflow.Tensor.atanh, """\n See :func:`oneflow.atanh`\n """)\n', (5422, 5487), False, 'from 
oneflow.framework.docstr.utils import add_docstr\n'), ((5500, 7647), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.backward', '"""\n The interface is consistent with PyTorch.\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.backward.html#torch.Tensor.backward.\n\n Computes the gradient of current tensor w.r.t. graph leaves.\n\n The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location, that contains the gradient of the differentiated function w.r.t. self.\n\n This function accumulates gradients in the leaves - you might need to zero .grad attributes or set them to None before calling it. See Default gradient layouts for details on the memory layout of accumulated gradients.\n\n Note:\n If you run any forward ops, create gradient, and/or call backward in a user-specified CUDA stream context, see Stream semantics of backward passes.\n Note:\n When inputs are provided and a given input is not a leaf, the current implementation will call its grad_fn (though it is not strictly needed to get this gradients). It is an implementation detail on which the user should not rely. See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.\n\n Args:\n gradient (Tensor or None): Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. If a None value would be acceptable then this argument is optional.\n\n retain_graph (bool, optional): If False, the graph used to compute the grads will be freed. 
Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.\n\n create_graph (bool, optional): If True, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to False.\n """'], {}), '(oneflow.Tensor.backward,\n """\n The interface is consistent with PyTorch.\n The documentation is referenced from: https://pytorch.org/docs/stable/generated/torch.Tensor.backward.html#torch.Tensor.backward.\n\n Computes the gradient of current tensor w.r.t. graph leaves.\n\n The graph is differentiated using the chain rule. If the tensor is non-scalar (i.e. its data has more than one element) and requires gradient, the function additionally requires specifying gradient. It should be a tensor of matching type and location, that contains the gradient of the differentiated function w.r.t. self.\n\n This function accumulates gradients in the leaves - you might need to zero .grad attributes or set them to None before calling it. See Default gradient layouts for details on the memory layout of accumulated gradients.\n\n Note:\n If you run any forward ops, create gradient, and/or call backward in a user-specified CUDA stream context, see Stream semantics of backward passes.\n Note:\n When inputs are provided and a given input is not a leaf, the current implementation will call its grad_fn (though it is not strictly needed to get this gradients). It is an implementation detail on which the user should not rely. See https://github.com/pytorch/pytorch/pull/60521#issuecomment-867061780 for more details.\n\n Args:\n gradient (Tensor or None): Gradient w.r.t. the tensor. If it is a tensor, it will be automatically converted to a Tensor that does not require grad unless create_graph is True. None values can be specified for scalar Tensors or ones that don’t require grad. 
If a None value would be acceptable then this argument is optional.\n\n retain_graph (bool, optional): If False, the graph used to compute the grads will be freed. Note that in nearly all cases setting this option to True is not needed and often can be worked around in a much more efficient way. Defaults to the value of create_graph.\n\n create_graph (bool, optional): If True, graph of the derivative will be constructed, allowing to compute higher order derivative products. Defaults to False.\n """\n )\n', (5510, 7647), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7652, 7725), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.cast', '"""\n See :func:`oneflow.cast`\n """'], {}), '(oneflow.Tensor.cast, """\n See :func:`oneflow.cast`\n """)\n', (7662, 7725), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7739, 7812), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.diag', '"""\n See :func:`oneflow.diag`\n """'], {}), '(oneflow.Tensor.diag, """\n See :func:`oneflow.diag`\n """)\n', (7749, 7812), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7825, 7954), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.dim', '"""\n Tensor.dim() → int\n\n Returns the number of dimensions of self tensor.\n """'], {}), '(oneflow.Tensor.dim,\n """\n Tensor.dim() → int\n\n Returns the number of dimensions of self tensor.\n """\n )\n', (7835, 7954), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((7958, 8109), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.element_size', '"""\n Tensor.element_size() → int\n\n Returns the size in bytes of an individual element.\n\n """'], {}), '(oneflow.Tensor.element_size,\n """\n Tensor.element_size() → int\n\n Returns the size in bytes of an individual element.\n\n """\n )\n', (7968, 8109), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), 
((8113, 8184), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.exp', '"""\n See :func:`oneflow.exp`\n """'], {}), '(oneflow.Tensor.exp, """\n See :func:`oneflow.exp`\n """)\n', (8123, 8184), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8197, 8333), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.fill_', '"""\n Tensor.fill_(value) → Tensor\n\n Fills self tensor with the specified value.\n """'], {}), '(oneflow.Tensor.fill_,\n """\n Tensor.fill_(value) → Tensor\n\n Fills self tensor with the specified value.\n """\n )\n', (8207, 8333), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8337, 8406), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.ge', '"""\n See :func:`oneflow.ge`\n """'], {}), '(oneflow.Tensor.ge, """\n See :func:`oneflow.ge`\n """)\n', (8347, 8406), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8419, 8492), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.gelu', '"""\n See :func:`oneflow.gelu`\n """'], {}), '(oneflow.Tensor.gelu, """\n See :func:`oneflow.gelu`\n """)\n', (8429, 8492), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8505, 8761), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.get_device', '"""\n Tensor.get_device() -> Device ordinal (Integer)\n\n For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. For CPU tensors, an error is thrown.\n\n \n """'], {}), '(oneflow.Tensor.get_device,\n """\n Tensor.get_device() -> Device ordinal (Integer)\n\n For CUDA tensors, this function returns the device ordinal of the GPU on which the tensor resides. 
For CPU tensors, an error is thrown.\n\n \n """\n )\n', (8515, 8761), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8765, 8834), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.gt', '"""\n See :func:`oneflow.gt`\n """'], {}), '(oneflow.Tensor.gt, """\n See :func:`oneflow.gt`\n """)\n', (8775, 8834), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8847, 8922), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.log1p', '"""\n See :func:`oneflow.log1p`\n """'], {}), '(oneflow.Tensor.log1p, """\n See :func:`oneflow.log1p`\n """)\n', (8857, 8922), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((8935, 9008), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.mish', '"""\n See :func:`oneflow.mish`\n """'], {}), '(oneflow.Tensor.mish, """\n See :func:`oneflow.mish`\n """)\n', (8945, 9008), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9021, 9092), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.mul', '"""\n See :func:`oneflow.mul`\n """'], {}), '(oneflow.Tensor.mul, """\n See :func:`oneflow.mul`\n """)\n', (9031, 9092), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9105, 9190), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.negative', '"""\n See :func:`oneflow.negative`\n """'], {}), '(oneflow.Tensor.negative,\n """\n See :func:`oneflow.negative`\n """)\n', (9115, 9190), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9199, 9302), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.nelement', '"""\n Tensor.nelement() → int\n\n Alias for numel()\n """'], {}), '(oneflow.Tensor.nelement,\n """\n Tensor.nelement() → int\n\n Alias for numel()\n """)\n', (9209, 9302), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9311, 9552), 
'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.normal_', '"""\n normal_(mean=0, std=1, *, generator=None) -> Tensor\n\n Fills :attr:`self` tensor with elements samples from the normal distribution parameterized by :attr:`mean` and :attr:`std`.\n """'], {}), '(oneflow.Tensor.normal_,\n """\n normal_(mean=0, std=1, *, generator=None) -> Tensor\n\n Fills :attr:`self` tensor with elements samples from the normal distribution parameterized by :attr:`mean` and :attr:`std`.\n """\n )\n', (9321, 9552), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9556, 9834), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.numpy', '"""\n Tensor.numpy() → numpy.ndarray\n\n Returns self tensor as a NumPy ndarray. This tensor and the returned ndarray share the same underlying storage. Changes to self tensor will be reflected in the ndarray and vice versa.\n """'], {}), '(oneflow.Tensor.numpy,\n """\n Tensor.numpy() → numpy.ndarray\n\n Returns self tensor as a NumPy ndarray. This tensor and the returned ndarray share the same underlying storage. 
Changes to self tensor will be reflected in the ndarray and vice versa.\n """\n )\n', (9566, 9834), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9838, 9909), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.pow', '"""\n See :func:`oneflow.pow`\n """'], {}), '(oneflow.Tensor.pow, """\n See :func:`oneflow.pow`\n """)\n', (9848, 9909), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((9922, 9995), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.relu', '"""\n See :func:`oneflow.relu`\n """'], {}), '(oneflow.Tensor.relu, """\n See :func:`oneflow.relu`\n """)\n', (9932, 9995), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10008, 10081), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.roll', '"""\n See :func:`oneflow.roll`\n """'], {}), '(oneflow.Tensor.roll, """\n See :func:`oneflow.roll`\n """)\n', (10018, 10081), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10094, 10169), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.round', '"""\n See :func:`oneflow.round`\n """'], {}), '(oneflow.Tensor.round, """\n See :func:`oneflow.round`\n """)\n', (10104, 10169), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10182, 10255), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.selu', '"""\n See :func:`oneflow.selu`\n """'], {}), '(oneflow.Tensor.selu, """\n See :func:`oneflow.selu`\n """)\n', (10192, 10255), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10268, 10347), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.sigmoid', '"""\n See :func:`oneflow.sigmoid`\n """'], {}), '(oneflow.Tensor.sigmoid, """\n See :func:`oneflow.sigmoid`\n """)\n', (10278, 10347), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10360, 10433), 
'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.sign', '"""\n See :func:`oneflow.sign`\n """'], {}), '(oneflow.Tensor.sign, """\n See :func:`oneflow.sign`\n """)\n', (10370, 10433), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10446, 10519), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.silu', '"""\n See :func:`oneflow.silu`\n """'], {}), '(oneflow.Tensor.silu, """\n See :func:`oneflow.silu`\n """)\n', (10456, 10519), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10532, 10605), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.sinh', '"""\n See :func:`oneflow.sinh`\n """'], {}), '(oneflow.Tensor.sinh, """\n See :func:`oneflow.sinh`\n """)\n', (10542, 10605), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((10618, 11007), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.size', '"""\n The interface is consistent with PyTorch.\n \n Returns the size of the self tensor. If dim is not specified, the returned value is a torch.Size, a subclass of tuple. If dim is specified, returns an int holding the size of that dimension.\n\n Args:\n idx (int, optional): The dimension for which to retrieve the size.\n\n \n """'], {}), '(oneflow.Tensor.size,\n """\n The interface is consistent with PyTorch.\n \n Returns the size of the self tensor. If dim is not specified, the returned value is a torch.Size, a subclass of tuple. 
If dim is specified, returns an int holding the size of that dimension.\n\n Args:\n idx (int, optional): The dimension for which to retrieve the size.\n\n \n """\n )\n', (10628, 11007), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11011, 11090), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.softmax', '"""\n See :func:`oneflow.softmax`\n """'], {}), '(oneflow.Tensor.softmax, """\n See :func:`oneflow.softmax`\n """)\n', (11021, 11090), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11103, 11188), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.softplus', '"""\n See :func:`oneflow.softplus`\n """'], {}), '(oneflow.Tensor.softplus,\n """\n See :func:`oneflow.softplus`\n """)\n', (11113, 11188), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11197, 11282), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.softsign', '"""\n See :func:`oneflow.softsign`\n """'], {}), '(oneflow.Tensor.softsign,\n """\n See :func:`oneflow.softsign`\n """)\n', (11207, 11282), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11291, 11362), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.tan', '"""\n See :func:`oneflow.tan`\n """'], {}), '(oneflow.Tensor.tan, """\n See :func:`oneflow.tan`\n """)\n', (11301, 11362), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11375, 11448), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.tanh', '"""\n See :func:`oneflow.tanh`\n """'], {}), '(oneflow.Tensor.tanh, """\n See :func:`oneflow.tanh`\n """)\n', (11385, 11448), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11461, 11534), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.tril', '"""\n See :func:`oneflow.tril`\n """'], {}), '(oneflow.Tensor.tril, """\n See :func:`oneflow.tril`\n """)\n', (11471, 11534), 
False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11547, 11620), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.triu', '"""\n See :func:`oneflow.triu`\n """'], {}), '(oneflow.Tensor.triu, """\n See :func:`oneflow.triu`\n """)\n', (11557, 11620), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11633, 11864), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.uniform_', '"""\n Tensor.uniform_(from=0, to=1) → Tensor\n\n Fills self tensor with numbers sampled from the continuous uniform distribution:\n\n .. math::\n P(x)=1/(to-from)\n \n """'], {}), '(oneflow.Tensor.uniform_,\n """\n Tensor.uniform_(from=0, to=1) → Tensor\n\n Fills self tensor with numbers sampled from the continuous uniform distribution:\n\n .. math::\n P(x)=1/(to-from)\n \n """\n )\n', (11643, 11864), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((11868, 12468), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.copy_', '"""\n The interface is consistent with PyTorch.\n\n Tensor.copy_(src, non_blocking=False) → Tensor\n\n Copies the elements from src into self tensor and returns self.\n\n The src tensor must be broadcastable with the self tensor. It may be of a different data type or reside on a different device.\n\n Args:\n\n src (Tensor): the source tensor to copy from\n\n non_blocking (bool): if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect.\n """'], {}), '(oneflow.Tensor.copy_,\n """\n The interface is consistent with PyTorch.\n\n Tensor.copy_(src, non_blocking=False) → Tensor\n\n Copies the elements from src into self tensor and returns self.\n\n The src tensor must be broadcastable with the self tensor. 
It may be of a different data type or reside on a different device.\n\n Args:\n\n src (Tensor): the source tensor to copy from\n\n non_blocking (bool): if True and this copy is between CPU and GPU, the copy may occur asynchronously with respect to the host. For other cases, this argument has no effect.\n """\n )\n', (11878, 12468), False, 'from oneflow.framework.docstr.utils import add_docstr\n'), ((12472, 13516), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.Tensor.to', '"""Performs Tensor dtype and/or device conversion.\n A flow.dtype and flow.device are inferred from the arguments of `input.to(*args, **kwargs)`.\n\n .. note::\n If the ``input`` Tensor already\n has the correct :class:`flow.dtype` and :class:`flow.device`, then ``input`` is returned.\n Otherwise, the returned tensor is a copy of ``input`` with the desired.\n\n Args:\n input (oneflow.Tensor): An input tensor.\n *args (oneflow.Tensor or oneflow.device or oneflow.dtype): Positional arguments\n **kwargs (oneflow.device or oneflow.dtype) : Key-value arguments\n\n Returns:\n oneflow.Tensor: A Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> arr = np.random.randint(1, 9, size=(1, 2, 3, 4))\n >>> input = flow.Tensor(arr)\n >>> output = input.to(dtype=flow.float32)\n >>> np.array_equal(arr.astype(np.float32), output.numpy())\n True\n\n """'], {}), '(oneflow.Tensor.to,\n """Performs Tensor dtype and/or device conversion.\n A flow.dtype and flow.device are inferred from the arguments of `input.to(*args, **kwargs)`.\n\n .. 
note::\n If the ``input`` Tensor already\n has the correct :class:`flow.dtype` and :class:`flow.device`, then ``input`` is returned.\n Otherwise, the returned tensor is a copy of ``input`` with the desired.\n\n Args:\n input (oneflow.Tensor): An input tensor.\n *args (oneflow.Tensor or oneflow.device or oneflow.dtype): Positional arguments\n **kwargs (oneflow.device or oneflow.dtype) : Key-value arguments\n\n Returns:\n oneflow.Tensor: A Tensor.\n\n For example:\n\n .. code-block:: python\n\n >>> import numpy as np\n >>> import oneflow as flow\n\n >>> arr = np.random.randint(1, 9, size=(1, 2, 3, 4))\n >>> input = flow.Tensor(arr)\n >>> output = input.to(dtype=flow.float32)\n >>> np.array_equal(arr.astype(np.float32), output.numpy())\n True\n\n """\n )\n', (12482, 13516), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from typing import Optional, Sequence, Sized, Union
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
import oneflow_api
def _gen_unique_name_if_need(name, default_name):
if name is None:
return id_util.UniqueStr(default_name)
assert isinstance(name, str), name
return name
def _check_axis(axis, shape):
if axis is None:
axis = list(range(len(shape)))
if isinstance(axis, int):
axis = [axis]
assert isinstance(axis, (list, tuple)), "Invalid axis {}".format(axis)
for x in axis:
if x < 0:
x += len(shape)
assert x >= 0 and x < len(shape), "Invalid axis {}, len(shape): {}".format(
axis, len(shape)
)
return axis
def _do_reduce(x, name, op_type_name, keepdims, axis):
    """Build, run, and return the single output of a reduce user-op.

    *op_type_name* selects the concrete reduction (e.g. "reduce_min").
    """
    builder = (
        flow.user_op_builder(name)
        .Op(op_type_name)
        .Input("input_tensor", [x])
        .Output("output_tensor")
    )
    builder = builder.Attr("axis", axis).Attr("keepdims", keepdims)
    return builder.Build().InferAndTryRun().SoleOutputBlob()
@oneflow_export("math.reduce_sum")
def reduce_sum(
    input_tensor: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """This operator computes the sum of elements across dimensions of a tensor

    Args:
        input_tensor (oneflow_api.BlobDesc): A Blob
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension along which the sum value is computed. Defaults to None (all dimensions).
        keepdims (bool, optional): Whether to keep the reduced dimension in the output Blob. Defaults to False.
        name (Optional[str], optional): The name for the operation. Defaults to None.

    Returns:
        oneflow_api.BlobDesc: The result of sum on the specified axis of input Blob

    For example:

    .. code-block:: python

        import oneflow as flow
        import numpy as np
        import oneflow.typing as tp

        @flow.global_function()
        def reduce_sum_Job(x: tp.Numpy.Placeholder((3, 3))
        ) -> tp.Numpy:
            return flow.math.reduce_sum(x, axis=1, keepdims=True)

        x = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]).astype(np.float32)
        out = reduce_sum_Job(x)
        # out [[ 6.]
        #      [15.]
        #      [24.]]
    """
    name = _gen_unique_name_if_need(name, "ReduceSum_")
    axis = _check_axis(axis, input_tensor.shape)
    if len(axis) == 0:
        # Reducing over no axes is the identity.
        return input_tensor
    # Consistency fix: every sibling reduce op delegates to the shared
    # _do_reduce helper; the original duplicated its op-builder body inline.
    return _do_reduce(input_tensor, name, "reduce_sum", keepdims, axis)
@oneflow_export("math.reduce_any")
def reduce_any(
    x: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Reduces `x` with `logical or` along the specified axis.

    Args:
        x (oneflow_api.BlobDesc): A Blob (dtype int8).
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The logical-or reduction of `x`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceAny_")
    reduce_axes = _check_axis(axis, x.shape)
    if not reduce_axes:
        # With no axes to reduce, degenerate to an elementwise "x != 0" test.
        zero = flow.constant_scalar(value=0.0, dtype=x.dtype)
        return flow.math.not_equal(x, zero)
    return _do_reduce(x, op_name, "reduce_any", keepdims, reduce_axes)
@oneflow_export("math.reduce_min")
def reduce_min(
    x: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Computes the minimum of `x` along the specified axis.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis minimum of `x`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceMin_")
    reduce_axes = _check_axis(axis, x.shape)
    if not reduce_axes:
        # Nothing to reduce: the minimum over no axes is the input itself.
        return x
    return _do_reduce(x, op_name, "reduce_min", keepdims, reduce_axes)
@oneflow_export("math.reduce_max")
def reduce_max(
    x: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Computes the maximum of `x` along the specified axis.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis maximum of `x`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceMax_")
    reduce_axes = _check_axis(axis, x.shape)
    if not reduce_axes:
        # Nothing to reduce: the maximum over no axes is the input itself.
        return x
    return _do_reduce(x, op_name, "reduce_max", keepdims, reduce_axes)
@oneflow_export("math.reduce_prod")
def reduce_prod(
    x: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Computes the product of `x` along the specified axis.

    Args:
        x (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis product of `x`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceProd_")
    reduce_axes = _check_axis(axis, x.shape)
    if not reduce_axes:
        # Nothing to reduce: the product over no axes is the input itself.
        return x
    return _do_reduce(x, op_name, "reduce_prod", keepdims, reduce_axes)
@oneflow_export("math.reduce_all")
def reduce_all(
    x: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    """Reduces `x` with `logical and` along the specified axis.

    Args:
        x (oneflow_api.BlobDesc): A Blob (dtype int8).
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The logical-and reduction of `x`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceAll_")
    reduce_axes = _check_axis(axis, x.shape)
    if not reduce_axes:
        # With no axes to reduce, degenerate to an elementwise "x != 0" test.
        zero = flow.constant_scalar(value=0.0, dtype=x.dtype)
        return flow.math.not_equal(x, zero)
    return _do_reduce(x, op_name, "reduce_all", keepdims, reduce_axes)
@oneflow_export("math.reduce_euclidean_norm")
def reduce_euclidean_norm(
    input_tensor: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the Euclidean norm of `input_tensor` along the specified axis.

    The equation is:

    .. math::

        out=\sqrt{\sum_{t=0}^{n} x_{t}^2}

    Args:
        input_tensor (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis Euclidean norm of `input_tensor`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceEuclideanNorm_")
    # sqrt(sum(x^2)), with each sub-op named after this op.
    squared = flow.math.square(input_tensor, op_name + "_square")
    summed = flow.math.reduce_sum(squared, axis, keepdims, op_name + "_reduce_sum")
    return flow.math.sqrt(summed, op_name + "_sqrt")
@oneflow_export("math.reduce_logsumexp")
def reduce_logsumexp(
    input_tensor: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the log of the exponential sum of `input_tensor` along the
    specified axis.

    The equation is:

    .. math::

        out = log(\sum_{t=0}^{t=n} e^{x_{t}})

    Args:
        input_tensor (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis log-sum-exp of `input_tensor`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceLogSumExp_")
    axis = _check_axis(axis, input_tensor.shape)
    # NOTE(review): exp is applied without first subtracting the per-axis max,
    # so large inputs can overflow to inf — confirm whether the numerically
    # stable log-sum-exp formulation is wanted here.
    exponentiated = flow.math.exp(input_tensor, op_name + "_exp")
    summed = flow.math.reduce_sum(
        exponentiated, axis, keepdims, op_name + "_reduce_sum"
    )
    return flow.math.log(summed, op_name + "_log")
@oneflow_export("math.reduce_std")
def reduce_std(
    input_tensor: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the standard deviation of `input_tensor` along the specified
    axis.

    The equation is:

    .. math::

        out=\sqrt{\frac{1}{n}*\sum_{i=1}^{n}(x_i-mean)^2}

    Args:
        input_tensor (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis standard deviation of `input_tensor`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceStd_")
    reduce_axes = _check_axis(axis, input_tensor.shape)
    if isinstance(reduce_axes, list) and not reduce_axes:
        # Reducing over no axes: each element's deviation from itself is zero.
        return flow.zeros_like(
            input_tensor, dtype=input_tensor.dtype, name=op_name + "_zeros_like"
        )
    variance = flow.math.reduce_variance(
        input_tensor, reduce_axes, keepdims, op_name + "_reduce_variance"
    )
    return flow.math.sqrt(variance, op_name + "_sqrt")
@oneflow_export("math.reduce_variance")
def reduce_variance(
    input_tensor: oneflow_api.BlobDesc,
    axis: Optional[Union[int, Sequence[int]]] = None,
    keepdims: bool = False,
    name: Optional[str] = None,
) -> oneflow_api.BlobDesc:
    r"""Computes the variance of `input_tensor` along the specified axis.

    The equation is:

    .. math::

        out=\frac{1}{n}*\sum_{i=1}^{n}(x_i-mean)^2

    Args:
        input_tensor (oneflow_api.BlobDesc): A Blob.
        axis (Optional[Union[int, Sequence[int]]], optional): The dimension(s)
            to reduce over; all dimensions when None.
        keepdims (bool, optional): Whether to keep the reduced dimension(s)
            with length 1 in the output. Defaults to False.
        name (Optional[str], optional): The name for the operation.

    Returns:
        oneflow_api.BlobDesc: The per-axis variance of `input_tensor`.
    """
    op_name = _gen_unique_name_if_need(name, "ReduceVariance_")
    reduce_axes = _check_axis(axis, input_tensor.shape)
    if isinstance(reduce_axes, list) and not reduce_axes:
        # Reducing over no axes: the variance of a single observation is zero.
        return flow.zeros_like(
            input_tensor, dtype=input_tensor.dtype, name=op_name + "_zeros_like"
        )
    # Var(x) = E[x^2] - (E[x])^2, built from two reduce_mean passes.
    mean_of_square = flow.math.reduce_mean(
        flow.math.square(input_tensor, op_name + "_square_minuend"),
        reduce_axes,
        keepdims,
        op_name + "_reduce_mean_minuend",
    )
    square_of_mean = flow.math.square(
        flow.math.reduce_mean(
            input_tensor, reduce_axes, keepdims, op_name + "_reduce_mean_subtrahend"
        ),
        op_name + "_square_subtrahend",
    )
    return flow.math.subtract(
        mean_of_square, square_of_mean, op_name + "_subtract"
    )
| [
"oneflow.math.reduce_variance",
"oneflow.math.reduce_mean",
"oneflow.zeros_like",
"oneflow.user_op_builder",
"oneflow.constant_scalar",
"oneflow.python.framework.id_util.UniqueStr",
"oneflow.math.exp",
"oneflow.math.square",
"oneflow.python.oneflow_export.oneflow_export"
] | [((2000, 2033), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_sum"""'], {}), "('math.reduce_sum')\n", (2014, 2033), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((3759, 3792), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_any"""'], {}), "('math.reduce_any')\n", (3773, 3792), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((5394, 5427), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_min"""'], {}), "('math.reduce_min')\n", (5408, 5427), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((6903, 6936), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_max"""'], {}), "('math.reduce_max')\n", (6917, 6936), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((8420, 8454), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_prod"""'], {}), "('math.reduce_prod')\n", (8434, 8454), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((9933, 9966), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_all"""'], {}), "('math.reduce_all')\n", (9947, 9966), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((11584, 11628), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_euclidean_norm"""'], {}), "('math.reduce_euclidean_norm')\n", (11598, 11628), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((13357, 13396), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_logsumexp"""'], {}), "('math.reduce_logsumexp')\n", (13371, 13396), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((15187, 15220), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_std"""'], {}), "('math.reduce_std')\n", (15201, 15220), False, 
'from oneflow.python.oneflow_export import oneflow_export\n'), ((17110, 17148), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""math.reduce_variance"""'], {}), "('math.reduce_variance')\n", (17124, 17148), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1150, 1181), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['default_name'], {}), '(default_name)\n', (1167, 1181), True, 'import oneflow.python.framework.id_util as id_util\n'), ((16831, 16917), 'oneflow.zeros_like', 'flow.zeros_like', (['input_tensor'], {'dtype': 'input_tensor.dtype', 'name': "(name + '_zeros_like')"}), "(input_tensor, dtype=input_tensor.dtype, name=name +\n '_zeros_like')\n", (16846, 16917), True, 'import oneflow as flow\n'), ((16971, 17057), 'oneflow.math.reduce_variance', 'flow.math.reduce_variance', (['input_tensor', 'axis', 'keepdims', "(name + '_reduce_variance')"], {}), "(input_tensor, axis, keepdims, name +\n '_reduce_variance')\n", (16996, 17057), True, 'import oneflow as flow\n'), ((18743, 18829), 'oneflow.zeros_like', 'flow.zeros_like', (['input_tensor'], {'dtype': 'input_tensor.dtype', 'name': "(name + '_zeros_like')"}), "(input_tensor, dtype=input_tensor.dtype, name=name +\n '_zeros_like')\n", (18758, 18829), True, 'import oneflow as flow\n'), ((5282, 5328), 'oneflow.constant_scalar', 'flow.constant_scalar', ([], {'value': '(0.0)', 'dtype': 'x.dtype'}), '(value=0.0, dtype=x.dtype)\n', (5302, 5328), True, 'import oneflow as flow\n'), ((11472, 11518), 'oneflow.constant_scalar', 'flow.constant_scalar', ([], {'value': '(0.0)', 'dtype': 'x.dtype'}), '(value=0.0, dtype=x.dtype)\n', (11492, 11518), True, 'import oneflow as flow\n'), ((13189, 13237), 'oneflow.math.square', 'flow.math.square', (['input_tensor', "(name + '_square')"], {}), "(input_tensor, name + '_square')\n", (13205, 13237), True, 'import oneflow as flow\n'), ((15026, 15068), 'oneflow.math.exp', 'flow.math.exp', (['input_tensor', "(name + '_exp')"], {}), 
"(input_tensor, name + '_exp')\n", (15039, 15068), True, 'import oneflow as flow\n'), ((18922, 18978), 'oneflow.math.square', 'flow.math.square', (['input_tensor', "(name + '_square_minuend')"], {}), "(input_tensor, name + '_square_minuend')\n", (18938, 18978), True, 'import oneflow as flow\n'), ((19112, 19201), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['input_tensor', 'axis', 'keepdims', "(name + '_reduce_mean_subtrahend')"], {}), "(input_tensor, axis, keepdims, name +\n '_reduce_mean_subtrahend')\n", (19133, 19201), True, 'import oneflow as flow\n'), ((1740, 1766), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1760, 1766), True, 'import oneflow as flow\n'), ((3488, 3514), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (3508, 3514), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
# Per-stage configuration for ResNet-50 (stages res2..res5).
BLOCK_COUNTS = [3, 4, 6, 3]  # number of residual blocks in each stage
BLOCK_FILTERS = [256, 512, 1024, 2048]  # output channels of each stage
BLOCK_FILTERS_INNER = [64, 128, 256, 512]  # bottleneck (3x3 conv) channels
class ResnetBuilder(object):
    """Assembles the pieces of a ResNet-50 graph with oneflow ops.

    Args:
        weight_regularizer: regularizer applied to conv weights and BN params.
        trainable (bool): whether created variables are trainable.
        training (bool): whether batch norm runs in training mode.
        channel_last (bool): use NHWC layout instead of NCHW.
        fuse_bn_relu (bool): use the fused BN+ReLU kernel.
        fuse_bn_add_relu (bool): use the fused BN+add+ReLU kernel.
    """

    def __init__(self, weight_regularizer, trainable=True, training=True,
                 channel_last=False, fuse_bn_relu=True, fuse_bn_add_relu=True):
        self.data_format = "NHWC" if channel_last else "NCHW"
        self.weight_initializer = flow.variance_scaling_initializer(
            2, 'fan_in', 'random_normal', data_format=self.data_format)
        self.weight_regularizer = weight_regularizer
        self.trainable = trainable
        self.training = training
        self.fuse_bn_relu = fuse_bn_relu
        self.fuse_bn_add_relu = fuse_bn_add_relu

    def _bn_settings(self, last):
        """Return ``(initializer, channel_axis)`` shared by all BN variants.

        The last BN of a residual branch is zero-initialized (the
        "zero-gamma" trick); every other BN starts from ones.  The channel
        axis follows the data format (3 for NHWC, 1 for NCHW).
        This consolidates logic previously duplicated across _batch_norm,
        _batch_norm_relu and _batch_norm_add_relu.
        """
        initializer = flow.zeros_initializer() if last else flow.ones_initializer()
        axis = 3 if self.data_format == "NHWC" else 1
        return initializer, axis

    def _conv2d(
        self,
        name,
        input,
        filters,
        kernel_size,
        strides=1,
        padding="SAME",
        dilations=1,
    ):
        """2-D convolution with a layout-aware weight variable (no bias)."""
        # There are different shapes of weight metric between 'NCHW' and 'NHWC' mode
        if self.data_format == "NHWC":
            shape = (filters, kernel_size, kernel_size, input.shape[3])
        else:
            shape = (filters, input.shape[1], kernel_size, kernel_size)
        weight = flow.get_variable(
            name + "-weight",
            shape=shape,
            dtype=input.dtype,
            initializer=self.weight_initializer,
            regularizer=self.weight_regularizer,
            model_name="weight",
            trainable=self.trainable,
        )
        return flow.nn.conv2d(input, weight, strides, padding, self.data_format, dilations, name=name)

    def _batch_norm(self, inputs, name=None, last=False):
        """Plain batch normalization, no activation."""
        initializer, axis = self._bn_settings(last)
        return flow.layers.batch_normalization(
            inputs=inputs,
            axis=axis,
            momentum=0.9,  # 97,
            epsilon=1e-5,
            center=True,
            scale=True,
            trainable=self.trainable,
            training=self.training,
            gamma_initializer=initializer,
            moving_variance_initializer=initializer,
            gamma_regularizer=self.weight_regularizer,
            beta_regularizer=self.weight_regularizer,
            name=name,
        )

    def _batch_norm_relu(self, inputs, name=None, last=False):
        """BN followed by ReLU, using the fused kernel when enabled."""
        if self.fuse_bn_relu:
            initializer, axis = self._bn_settings(last)
            return flow.layers.batch_normalization_relu(
                inputs=inputs,
                axis=axis,
                momentum=0.9,
                epsilon=1e-5,
                center=True,
                scale=True,
                trainable=self.trainable,
                training=self.training,
                gamma_initializer=initializer,
                moving_variance_initializer=initializer,
                gamma_regularizer=self.weight_regularizer,
                beta_regularizer=self.weight_regularizer,
                name=name + "_bn_relu",
            )
        else:
            return flow.nn.relu(self._batch_norm(inputs, name + "_bn", last=last))

    def _batch_norm_add_relu(self, inputs, addend, name=None, last=False):
        """BN, residual add, then ReLU, using the fused kernel when enabled."""
        if self.fuse_bn_add_relu:
            initializer, axis = self._bn_settings(last)
            return flow.layers.batch_normalization_add_relu(
                inputs=inputs,
                addend=addend,
                axis=axis,
                momentum=0.9,
                epsilon=1e-5,
                center=True,
                scale=True,
                trainable=self.trainable,
                training=self.training,
                gamma_initializer=initializer,
                moving_variance_initializer=initializer,
                gamma_regularizer=self.weight_regularizer,
                beta_regularizer=self.weight_regularizer,
                name=name + "_bn_add_relu",
            )
        else:
            return flow.nn.relu(self._batch_norm(inputs, name + "_bn", last=last) + addend)

    def conv2d_affine(self, input, name, filters, kernel_size, strides):
        """Convolution only, no BN/activation.

        NOTE(original): input data_format must be NCHW, cannot check now.
        """
        padding = "SAME" if strides > 1 or kernel_size > 1 else "VALID"
        output = self._conv2d(name, input, filters, kernel_size, strides, padding)
        return output

    def bottleneck_transformation(self, input, block_name, filters, filters_inner, strides):
        """1x1 reduce -> 3x3 -> 1x1 expand bottleneck.

        The final BN (+add+ReLU) of branch2c is applied by the caller.
        """
        a = self.conv2d_affine(
            input, block_name + "_branch2a", filters_inner, 1, 1)
        a = self._batch_norm_relu(a, block_name + "_branch2a")
        b = self.conv2d_affine(
            a, block_name + "_branch2b", filters_inner, 3, strides)
        b = self._batch_norm_relu(b, block_name + "_branch2b")
        c = self.conv2d_affine(b, block_name + "_branch2c", filters, 1, 1)
        return c

    def residual_block(self, input, block_name, filters, filters_inner, strides_init):
        """Bottleneck block with identity or projection shortcut."""
        # A projection shortcut is needed whenever the spatial size or channel
        # count changes: stride > 1, or the very first block of stage res2.
        if strides_init != 1 or block_name == "res2_0":
            shortcut = self.conv2d_affine(
                input, block_name + "_branch1", filters, 1, strides_init
            )
            shortcut = self._batch_norm(shortcut, block_name + "_branch1_bn")
        else:
            shortcut = input
        bottleneck = self.bottleneck_transformation(
            input, block_name, filters, filters_inner, strides_init,
        )
        return self._batch_norm_add_relu(bottleneck, shortcut, block_name + "_branch2c", last=True)

    def residual_stage(self, input, stage_name, counts, filters, filters_inner, stride_init=2):
        """A stage of ``counts`` residual blocks; only the first block strides."""
        output = input
        for i in range(counts):
            block_name = "%s_%d" % (stage_name, i)
            output = self.residual_block(
                output, block_name, filters, filters_inner, stride_init if i == 0 else 1
            )
        return output

    def resnet_conv_x_body(self, input):
        """Stages res2..res5, configured by the module-level BLOCK_* constants."""
        output = input
        for i, (counts, filters, filters_inner) in enumerate(
            zip(BLOCK_COUNTS, BLOCK_FILTERS, BLOCK_FILTERS_INNER)
        ):
            stage_name = "res%d" % (i + 2)
            output = self.residual_stage(
                output, stage_name, counts, filters, filters_inner, 1 if i == 0 else 2
            )
        return output

    def resnet_stem(self, input):
        """ResNet stem: 7x7/2 conv, BN+ReLU, 3x3/2 max pool."""
        conv1 = self._conv2d("conv1", input, 64, 7, 2)
        conv1_bn = self._batch_norm_relu(conv1, "conv1")
        pool1 = flow.nn.max_pool2d(
            conv1_bn, ksize=3, strides=2, padding="SAME", data_format=self.data_format, name="pool1",
        )
        return pool1
def resnet50(images, args, trainable=True, training=True):
    """Build the full ResNet-50 classification graph and return the fc logits."""
    # L2 weight decay is only applied for a sane (0, 1) decay factor.
    regularizer = flow.regularizers.l2(args.wd) if 0.0 < args.wd < 1.0 else None
    builder = ResnetBuilder(
        regularizer,
        trainable,
        training,
        args.channel_last,
        args.fuse_bn_relu,
        args.fuse_bn_add_relu,
    )
    if args.pad_output:
        # Pad the channel dimension from 3 to 4 (position depends on layout).
        if args.channel_last:
            pad_spec = ((0, 0), (0, 0), (0, 0), (0, 1))
        else:
            pad_spec = ((0, 0), (0, 1), (0, 0), (0, 0))
        images = flow.pad(images, paddings=pad_spec)
    with flow.scope.namespace("Resnet"):
        features = builder.resnet_conv_x_body(builder.resnet_stem(images))
        pool5 = flow.nn.avg_pool2d(
            features, ksize=7, strides=1, padding="VALID", data_format=builder.data_format, name="pool5",
        )
        logits = flow.layers.dense(
            flow.reshape(pool5, (pool5.shape[0], -1)),
            units=1000,
            use_bias=True,
            kernel_initializer=flow.variance_scaling_initializer(2, 'fan_in', 'random_normal'),
            bias_initializer=flow.zeros_initializer(),
            kernel_regularizer=regularizer,
            bias_regularizer=regularizer,
            trainable=trainable,
            name="fc1001",
        )
    return logits
| [
"oneflow.variance_scaling_initializer",
"oneflow.regularizers.l2",
"oneflow.nn.conv2d",
"oneflow.layers.batch_normalization",
"oneflow.scope.namespace",
"oneflow.layers.batch_normalization_relu",
"oneflow.ones_initializer",
"oneflow.get_variable",
"oneflow.zeros_initializer",
"oneflow.reshape",
... | [((988, 1086), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', '"""random_normal"""'], {'data_format': 'self.data_format'}), "(2, 'fan_in', 'random_normal', data_format\n =self.data_format)\n", (1021, 1086), True, 'import oneflow as flow\n'), ((1862, 2059), 'oneflow.get_variable', 'flow.get_variable', (["(name + '-weight')"], {'shape': 'shape', 'dtype': 'input.dtype', 'initializer': 'self.weight_initializer', 'regularizer': 'self.weight_regularizer', 'model_name': '"""weight"""', 'trainable': 'self.trainable'}), "(name + '-weight', shape=shape, dtype=input.dtype,\n initializer=self.weight_initializer, regularizer=self.\n weight_regularizer, model_name='weight', trainable=self.trainable)\n", (1879, 2059), True, 'import oneflow as flow\n'), ((2162, 2253), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'self.data_format', 'dilations'], {'name': 'name'}), '(input, weight, strides, padding, self.data_format, dilations,\n name=name)\n', (2176, 2253), True, 'import oneflow as flow\n'), ((2484, 2830), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'self.trainable', 'training': 'self.training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'self.weight_regularizer', 'beta_regularizer': 'self.weight_regularizer', 'name': 'name'}), '(inputs=inputs, axis=axis, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=self.trainable,\n training=self.training, gamma_initializer=initializer,\n moving_variance_initializer=initializer, gamma_regularizer=self.\n weight_regularizer, beta_regularizer=self.weight_regularizer, name=name)\n', (2515, 2830), True, 'import oneflow as flow\n'), ((7360, 7472), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv1_bn'], 
{'ksize': '(3)', 'strides': '(2)', 'padding': '"""SAME"""', 'data_format': 'self.data_format', 'name': '"""pool1"""'}), "(conv1_bn, ksize=3, strides=2, padding='SAME',\n data_format=self.data_format, name='pool1')\n", (7378, 7472), True, 'import oneflow as flow\n'), ((7599, 7628), 'oneflow.regularizers.l2', 'flow.regularizers.l2', (['args.wd'], {}), '(args.wd)\n', (7619, 7628), True, 'import oneflow as flow\n'), ((8003, 8038), 'oneflow.pad', 'flow.pad', (['images'], {'paddings': 'paddings'}), '(images, paddings=paddings)\n', (8011, 8038), True, 'import oneflow as flow\n'), ((8048, 8078), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""Resnet"""'], {}), "('Resnet')\n", (8068, 8078), True, 'import oneflow as flow\n'), ((8187, 8300), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['body'], {'ksize': '(7)', 'strides': '(1)', 'padding': '"""VALID"""', 'data_format': 'builder.data_format', 'name': '"""pool5"""'}), "(body, ksize=7, strides=1, padding='VALID', data_format=\n builder.data_format, name='pool5')\n", (8205, 8300), True, 'import oneflow as flow\n'), ((2331, 2355), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2353, 2355), True, 'import oneflow as flow\n'), ((2369, 2392), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (2390, 2392), True, 'import oneflow as flow\n'), ((3276, 3644), 'oneflow.layers.batch_normalization_relu', 'flow.layers.batch_normalization_relu', ([], {'inputs': 'inputs', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'self.trainable', 'training': 'self.training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'self.weight_regularizer', 'beta_regularizer': 'self.weight_regularizer', 'name': "(name + '_bn_relu')"}), "(inputs=inputs, axis=axis, momentum=0.9,\n epsilon=1e-05, center=True, scale=True, trainable=self.trainable,\n training=self.training, 
gamma_initializer=initializer,\n moving_variance_initializer=initializer, gamma_regularizer=self.\n weight_regularizer, beta_regularizer=self.weight_regularizer, name=name +\n '_bn_relu')\n", (3312, 3644), True, 'import oneflow as flow\n'), ((4248, 4641), 'oneflow.layers.batch_normalization_add_relu', 'flow.layers.batch_normalization_add_relu', ([], {'inputs': 'inputs', 'addend': 'addend', 'axis': 'axis', 'momentum': '(0.9)', 'epsilon': '(1e-05)', 'center': '(True)', 'scale': '(True)', 'trainable': 'self.trainable', 'training': 'self.training', 'gamma_initializer': 'initializer', 'moving_variance_initializer': 'initializer', 'gamma_regularizer': 'self.weight_regularizer', 'beta_regularizer': 'self.weight_regularizer', 'name': "(name + '_bn_add_relu')"}), "(inputs=inputs, addend=addend, axis\n =axis, momentum=0.9, epsilon=1e-05, center=True, scale=True, trainable=\n self.trainable, training=self.training, gamma_initializer=initializer,\n moving_variance_initializer=initializer, gamma_regularizer=self.\n weight_regularizer, beta_regularizer=self.weight_regularizer, name=name +\n '_bn_add_relu')\n", (4288, 4641), True, 'import oneflow as flow\n'), ((8367, 8408), 'oneflow.reshape', 'flow.reshape', (['pool5', '(pool5.shape[0], -1)'], {}), '(pool5, (pool5.shape[0], -1))\n', (8379, 8408), True, 'import oneflow as flow\n'), ((3107, 3131), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3129, 3131), True, 'import oneflow as flow\n'), ((3145, 3168), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (3166, 3168), True, 'import oneflow as flow\n'), ((4079, 4103), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (4101, 4103), True, 'import oneflow as flow\n'), ((4117, 4140), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (4138, 4140), True, 'import oneflow as flow\n'), ((8492, 8555), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', (['(2)', '"""fan_in"""', 
'"""random_normal"""'], {}), "(2, 'fan_in', 'random_normal')\n", (8525, 8555), True, 'import oneflow as flow\n'), ((8586, 8610), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (8608, 8610), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import argparse
import os
import time
from datetime import datetime
import alexnet_model
import benchmark_util
import data_loader
import resnet_model
import vgg_model
from oneflow.compatible import single_client as flow
# ---- Command-line flags -----------------------------------------------------
parser = argparse.ArgumentParser(description="flags for cnn benchmark")
# Cluster topology.
parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("--node_num", type=int, default=1)
parser.add_argument(
    "--node_list",
    type=str,
    default=None,
    required=False,
    help="nodes' IP address, split by comma",
)
# Model / optimization hyper-parameters.
parser.add_argument(
    "--model", type=str, default="vgg16", required=False, help="vgg16 or resnet50"
)
parser.add_argument("--batch_size_per_device", type=int, default=8, required=False)
parser.add_argument("--learning_rate", type=float, default=0.0001, required=False)
parser.add_argument(
    "--optimizer", type=str, default="sgd", required=False, help="sgd, adam, momentum"
)
parser.add_argument(
    "--weight_l2",
    type=float,
    default=None,
    required=False,
    help="weight decay parameter",
)
parser.add_argument(
    "--iter_num", type=int, default=10, required=False, help="total iterations to run"
)
parser.add_argument(
    "--skip_iter_num",
    type=int,
    default=0,
    required=False,
    help="number of skipping iterations for benchmark purpose.",
)
# Dataset options (synthetic data is used when --data_dir is not given).
parser.add_argument(
    "--data_dir", type=str, default=None, required=False, help="dataset directory"
)
parser.add_argument(
    "--data_part_num",
    type=int,
    default=32,
    required=False,
    help="data part number in dataset",
)
parser.add_argument(
    "--gpu_image_decoder",
    type=bool,
    default=False,
    required=False,
    help="Whether to use use ImageDecoderRandomCropResize.",
)
parser.add_argument(
    "--image_size", type=int, default=228, required=False, help="image size"
)
# Logging / checkpointing.
parser.add_argument(
    "--loss_print_every_n_iter",
    type=int,
    default=1,
    required=False,
    help="print loss every n iteration",
)
parser.add_argument(
    "--model_save_every_n_iter",
    type=int,
    default=200,
    required=False,
    help="save model every n iteration",
)
parser.add_argument(
    "--model_save_dir",
    type=str,
    default="./output/model_save-{}".format(
        str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))
    ),
    required=False,
    help="model save directory",
)
parser.add_argument(
    "--save_last_snapshot",
    type=bool,
    default=False,
    required=False,
    help="save model snapshot for last iteration",
)
parser.add_argument(
    "--model_load_dir",
    type=str,
    default=None,
    required=False,
    help="model load directory",
)
parser.add_argument(
    "--log_dir",
    type=str,
    default="./output",
    required=False,
    help="log info save directory",
)
parser.add_argument(
    "--enable_auto_mixed_precision",
    type=bool,
    default=False,
    required=False,
    help="automatically change the float net into mixed precision net",
)
args = parser.parse_args()
# Map of model name (as given via --model) to its network-builder function.
model_dict = {
    "resnet50": resnet_model.resnet50,
    "vgg16": vgg_model.vgg16,
    "alexnet": alexnet_model.alexnet,
}
# ---- Global oneflow function / device configuration -------------------------
func_config = flow.FunctionConfig()
func_config.default_distribute_strategy(flow.scope.consistent_view())
func_config.default_data_type(flow.float)
func_config.enable_auto_mixed_precision(args.enable_auto_mixed_precision)
if args.weight_l2:
    func_config.train.weight_l2(args.weight_l2)
flow.config.gpu_device_num(args.gpu_num_per_node)
def set_up_optimizer(loss, args):
    """Attach the optimizer selected by ``args.optimizer`` to ``loss``.

    Supported values are "sgd", "momentum" and "adam"; when automatic mixed
    precision is enabled a dynamic loss-scale policy is used.

    Raises:
        ValueError: if ``args.optimizer`` is not one of the supported names
            (previously an unknown name was silently ignored, leaving the
            graph without any optimizer).
    """
    loss_scale_policy = None
    if args.enable_auto_mixed_precision:
        loss_scale_policy = flow.optimizer.loss_scale.dynamic_loss_scale(
            increment_period=2000
        )
    if args.optimizer == "sgd":
        print("Optimizer: SGD")
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
            loss_scale_policy=loss_scale_policy,
        ).minimize(loss)
    elif args.optimizer == "momentum":
        print("Optimizer: Momentum")
        flow.optimizer.SGD(
            flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
            momentum=0.9,
            loss_scale_policy=loss_scale_policy,
        ).minimize(loss)
    elif args.optimizer == "adam":
        print("Optimizer: Adam")
        flow.optimizer.Adam(
            flow.optimizer.PiecewiseConstantScheduler([], [args.learning_rate]),
            beta1=0.9,
            loss_scale_policy=loss_scale_policy,
        ).minimize(loss)
    else:
        raise ValueError("unknown optimizer: {}".format(args.optimizer))
@flow.global_function(func_config)
def TrainNet():
    """One training step: load a batch, run the model, attach loss and optimizer."""
    device_count = args.node_num * args.gpu_num_per_node
    batch_size = device_count * args.batch_size_per_device
    if args.data_dir:
        # Real ImageNet data from OFRecord files.
        assert os.path.exists(args.data_dir)
        print("Loading data from {}".format(args.data_dir))
        labels, images = data_loader.load_imagenet(
            args.data_dir,
            args.image_size,
            batch_size,
            args.data_part_num,
            args.gpu_image_decoder,
        )
    else:
        # Random data, useful for pure throughput benchmarking.
        print("Loading synthetic data.")
        labels, images = data_loader.load_synthetic(args.image_size, batch_size)
    logits = model_dict[args.model](images)
    loss = flow.nn.sparse_softmax_cross_entropy_with_logits(
        labels, logits, name="softmax_loss"
    )
    set_up_optimizer(loss, args)
    return loss
def main():
    """Entry point: print the run config, set up the oneflow environment and
    checkpointing, then run the benchmark training loop."""
    banner = "=" * 66
    print(banner)
    print(
        "Running {}: num_gpu_per_node = {}, num_nodes = {}.".format(
            args.model, args.gpu_num_per_node, args.node_num
        )
    )
    print(banner)
    for flag in vars(args):
        print("{} = {}".format(flag, getattr(args, flag)))
    print("-" * 66)
    print("Time stamp: {}".format(str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))))
    flow.env.log_dir(args.log_dir)
    # Multi-node runs need the machine list before any function is launched.
    if args.node_num > 1:
        nodes = [{"addr": ip} for ip in args.node_list.strip().split(",")]
        flow.env.machine(nodes)
    check_point = flow.train.CheckPoint()
    if args.model_load_dir:
        assert os.path.isdir(args.model_load_dir)
        print("Restoring model from {}.".format(args.model_load_dir))
        check_point.load(args.model_load_dir)
    else:
        print("Init model on demand.")
        check_point.init()
    total_batch_size = (
        args.node_num * args.gpu_num_per_node * args.batch_size_per_device
    )
    speedometer = benchmark_util.CNNSpeedometer()
    start_time = time.time()
    for step in range(args.skip_iter_num + args.iter_num):
        cb = speedometer.speedometer_cb(
            step,
            start_time,
            total_batch_size,
            args.skip_iter_num,
            args.iter_num,
            args.loss_print_every_n_iter,
        )
        # Async fetch: the callback reports throughput when the loss arrives.
        TrainNet().async_get(cb)
        if (step + 1) % args.model_save_every_n_iter == 0:
            if not os.path.exists(args.model_save_dir):
                os.makedirs(args.model_save_dir)
            snapshot_save_path = os.path.join(
                args.model_save_dir, "snapshot_%d" % (step + 1)
            )
            print("Saving model to {}.".format(snapshot_save_path))
            check_point.save(snapshot_save_path)
    if args.save_last_snapshot:
        snapshot_save_path = os.path.join(args.model_save_dir, "last_snapshot")
        if not os.path.exists(snapshot_save_path):
            os.makedirs(snapshot_save_path)
        print("Saving model to {}.".format(snapshot_save_path))
        check_point.save(snapshot_save_path)
# Script entry point: run the benchmark only when executed directly.
if __name__ == "__main__":
    main()
| [
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.env.machine",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.nn.sparse_softmax_cross_entropy_with_logits",
"oneflow.compatible.single_client.train.CheckPoint",
"oneflow.compatible.si... | [((822, 884), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""flags for cnn benchmark"""'}), "(description='flags for cnn benchmark')\n", (845, 884), False, 'import argparse\n'), ((3741, 3762), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3760, 3762), True, 'from oneflow.compatible import single_client as flow\n'), ((4016, 4065), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['args.gpu_num_per_node'], {}), '(args.gpu_num_per_node)\n', (4042, 4065), True, 'from oneflow.compatible import single_client as flow\n'), ((5103, 5136), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['func_config'], {}), '(func_config)\n', (5123, 5136), True, 'from oneflow.compatible import single_client as flow\n'), ((3803, 3831), 'oneflow.compatible.single_client.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3829, 3831), True, 'from oneflow.compatible import single_client as flow\n'), ((5805, 5895), 'oneflow.compatible.single_client.nn.sparse_softmax_cross_entropy_with_logits', 'flow.nn.sparse_softmax_cross_entropy_with_logits', (['labels', 'logits'], {'name': '"""softmax_loss"""'}), "(labels, logits, name=\n 'softmax_loss')\n", (5853, 5895), True, 'from oneflow.compatible import single_client as flow\n'), ((6389, 6419), 'oneflow.compatible.single_client.env.log_dir', 'flow.env.log_dir', (['args.log_dir'], {}), '(args.log_dir)\n', (6405, 6419), True, 'from oneflow.compatible import single_client as flow\n'), ((6664, 6687), 'oneflow.compatible.single_client.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (6685, 6687), True, 'from oneflow.compatible import single_client as flow\n'), ((7082, 7113), 'benchmark_util.CNNSpeedometer', 'benchmark_util.CNNSpeedometer', ([], {}), '()\n', (7111, 7113), False, 'import benchmark_util\n'), ((7131, 7142), 'time.time', 'time.time', ([], 
{}), '()\n', (7140, 7142), False, 'import time\n'), ((4200, 4267), 'oneflow.compatible.single_client.optimizer.loss_scale.dynamic_loss_scale', 'flow.optimizer.loss_scale.dynamic_loss_scale', ([], {'increment_period': '(2000)'}), '(increment_period=2000)\n', (4244, 4267), True, 'from oneflow.compatible import single_client as flow\n'), ((5314, 5343), 'os.path.exists', 'os.path.exists', (['args.data_dir'], {}), '(args.data_dir)\n', (5328, 5343), False, 'import os\n'), ((5431, 5549), 'data_loader.load_imagenet', 'data_loader.load_imagenet', (['args.data_dir', 'args.image_size', 'batch_size', 'args.data_part_num', 'args.gpu_image_decoder'], {}), '(args.data_dir, args.image_size, batch_size, args.\n data_part_num, args.gpu_image_decoder)\n', (5456, 5549), False, 'import data_loader\n'), ((5694, 5749), 'data_loader.load_synthetic', 'data_loader.load_synthetic', (['args.image_size', 'batch_size'], {}), '(args.image_size, batch_size)\n', (5720, 5749), False, 'import data_loader\n'), ((6622, 6645), 'oneflow.compatible.single_client.env.machine', 'flow.env.machine', (['nodes'], {}), '(nodes)\n', (6638, 6645), True, 'from oneflow.compatible import single_client as flow\n'), ((6731, 6765), 'os.path.isdir', 'os.path.isdir', (['args.model_load_dir'], {}), '(args.model_load_dir)\n', (6744, 6765), False, 'import os\n'), ((7926, 7976), 'os.path.join', 'os.path.join', (['args.model_save_dir', '"""last_snapshot"""'], {}), "(args.model_save_dir, 'last_snapshot')\n", (7938, 7976), False, 'import os\n'), ((7656, 7717), 'os.path.join', 'os.path.join', (['args.model_save_dir', "('snapshot_%d' % (step + 1))"], {}), "(args.model_save_dir, 'snapshot_%d' % (step + 1))\n", (7668, 7717), False, 'import os\n'), ((7992, 8026), 'os.path.exists', 'os.path.exists', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (8006, 8026), False, 'import os\n'), ((8040, 8071), 'os.makedirs', 'os.makedirs', (['snapshot_save_path'], {}), '(snapshot_save_path)\n', (8051, 8071), False, 'import os\n'), ((7537, 
7572), 'os.path.exists', 'os.path.exists', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (7551, 7572), False, 'import os\n'), ((7590, 7622), 'os.makedirs', 'os.makedirs', (['args.model_save_dir'], {}), '(args.model_save_dir)\n', (7601, 7622), False, 'import os\n'), ((4395, 4462), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[args.learning_rate]'], {}), '([], [args.learning_rate])\n', (4436, 4462), True, 'from oneflow.compatible import single_client as flow\n'), ((2860, 2874), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (2872, 2874), False, 'from datetime import datetime\n'), ((4655, 4722), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[args.learning_rate]'], {}), '([], [args.learning_rate])\n', (4696, 4722), True, 'from oneflow.compatible import single_client as flow\n'), ((6337, 6351), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (6349, 6351), False, 'from datetime import datetime\n'), ((4934, 5001), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[args.learning_rate]'], {}), '([], [args.learning_rate])\n', (4975, 5001), True, 'from oneflow.compatible import single_client as flow\n')] |
import os
import oneflow as flow
from config import config
def train_dataset_reader(
    data_dir, batch_size, data_part_num, part_name_suffix_length=1
):
    """Build the training OFRecord pipeline (decoded labels + images).

    Images are decoded as 112x112x3, BGR->RGB converted, randomly mirrored
    and per-channel normalized.

    Args:
        data_dir: directory containing the OFRecord part files.
        batch_size: samples per batch.
        data_part_num: number of part files in the dataset.
        part_name_suffix_length: width of the numeric suffix in part file
            names (e.g. 1 for "part-0").

    Returns:
        (label_blob, image_blob) tuple.

    Raises:
        Exception: if ``data_dir`` does not exist.
    """
    if os.path.exists(data_dir):
        print("Loading train data from {}".format(data_dir))
    else:
        raise Exception("Invalid train dataset dir", data_dir)
    image_blob_conf = flow.data.BlobConf(
        "encoded",
        shape=(112, 112, 3),
        dtype=flow.float,
        codec=flow.data.ImageCodec(
            image_preprocessors=[
                flow.data.ImagePreprocessor("bgr2rgb"),
                flow.data.ImagePreprocessor("mirror"),
            ]
        ),
        preprocessors=[
            flow.data.NormByChannelPreprocessor(
                mean_values=(127.5, 127.5, 127.5), std_values=(128, 128, 128)
            ),
        ],
    )
    label_blob_conf = flow.data.BlobConf(
        "label", shape=(), dtype=flow.int32, codec=flow.data.RawCodec()
    )
    return flow.data.decode_ofrecord(
        data_dir,
        (label_blob_conf, image_blob_conf),
        batch_size=batch_size,
        data_part_num=data_part_num,
        part_name_prefix=config.part_name_prefix,
        # BUGFIX: honor the function argument instead of always reading the
        # global config (the in-file caller passes the config value anyway,
        # so existing behavior is unchanged).
        part_name_suffix_length=part_name_suffix_length,
        shuffle=config.shuffle,
        buffer_size=16384,
    )
def validation_dataset_reader(val_dataset_dir, val_batch_size=1, val_data_part_num=1):
    """Build a validation pipeline; returns (issame, images).

    Images come out NHWC float, resized to 112x112 and per-channel
    normalized.  Known set shapes:
      lfw: (12000L, 3L, 112L, 112L)
      cfp_fp: (14000L, 3L, 112L, 112L)
      agedb_30: (12000L, 3L, 112L, 112L)

    Raises:
        Exception: if ``val_dataset_dir`` does not exist.
    """
    if not os.path.exists(val_dataset_dir):
        raise Exception("Invalid validation dataset dir", val_dataset_dir)
    print("Loading validation data from {}".format(val_dataset_dir))
    color_space = "RGB"
    records = flow.data.ofrecord_reader(
        val_dataset_dir,
        batch_size=val_batch_size,
        data_part_num=val_data_part_num,
        part_name_suffix_length=1,
        shuffle_after_epoch=False,
    )
    decoded = flow.data.OFRecordImageDecoder(records, "encoded", color_space=color_space)
    issame = flow.data.OFRecordRawDecoder(
        records, "issame", shape=(), dtype=flow.int32
    )
    resized, _scale, _new_size = flow.image.Resize(decoded, target_size=(112, 112), channels=3)
    images = flow.image.CropMirrorNormalize(
        resized,
        color_space=color_space,
        crop_h=0,
        crop_w=0,
        crop_pos_y=0.5,
        crop_pos_x=0.5,
        mean=[127.5, 127.5, 127.5],
        std=[128.0, 128.0, 128.0],
        output_dtype=flow.float,
    )
    images = flow.transpose(images, name="transpose_val", perm=[0, 2, 3, 1])
    return issame, images
def load_synthetic(config):
    """Generate a random (label, image) batch for benchmarking without data."""
    n = config.train_batch_size
    side = 112
    labels = flow.data.decode_random(
        shape=(),
        dtype=flow.int32,
        batch_size=n,
        initializer=flow.zeros_initializer(flow.int32),
    )
    images = flow.data.decode_random(
        shape=(side, side, 3), dtype=flow.float, batch_size=n,
    )
    return labels, images
def load_train_dataset(args):
    """Load the training set configured in the global ``config`` module."""
    batch_size = args.train_batch_size
    print("train batch size in load train dataset: ", batch_size)
    return train_dataset_reader(
        config.dataset_dir,
        batch_size,
        config.train_data_part_num,
        config.part_name_suffix_length,
    )
def _load_validation_dataset(data_dir, args):
    # Shared implementation for the three verification-set loaders below;
    # they previously triplicated this exact call.
    (issame, images) = validation_dataset_reader(
        val_dataset_dir=data_dir,
        val_batch_size=args.val_batch_size_per_device,
        val_data_part_num=args.val_data_part_num,
    )
    return issame, images
def load_lfw_dataset(args):
    """Load the LFW verification set; returns (issame, images)."""
    return _load_validation_dataset(args.lfw_dataset_dir, args)
def load_cfp_fp_dataset(args):
    """Load the CFP-FP verification set; returns (issame, images)."""
    return _load_validation_dataset(args.cfp_fp_dataset_dir, args)
def load_agedb_30_dataset(args):
    """Load the AgeDB-30 verification set; returns (issame, images)."""
    return _load_validation_dataset(args.agedb_30_dataset_dir, args)
| [
"oneflow.image.Resize",
"oneflow.transpose",
"oneflow.data.decode_random",
"oneflow.data.ImagePreprocessor",
"oneflow.data.RawCodec",
"oneflow.data.ofrecord_reader",
"oneflow.data.decode_ofrecord",
"oneflow.data.NormByChannelPreprocessor",
"oneflow.image.CropMirrorNormalize",
"oneflow.zeros_initia... | [((164, 188), 'os.path.exists', 'os.path.exists', (['data_dir'], {}), '(data_dir)\n', (178, 188), False, 'import os\n'), ((962, 1240), 'oneflow.data.decode_ofrecord', 'flow.data.decode_ofrecord', (['data_dir', '(label_blob_conf, image_blob_conf)'], {'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'part_name_prefix': 'config.part_name_prefix', 'part_name_suffix_length': 'config.part_name_suffix_length', 'shuffle': 'config.shuffle', 'buffer_size': '(16384)'}), '(data_dir, (label_blob_conf, image_blob_conf),\n batch_size=batch_size, data_part_num=data_part_num, part_name_prefix=\n config.part_name_prefix, part_name_suffix_length=config.\n part_name_suffix_length, shuffle=config.shuffle, buffer_size=16384)\n', (987, 1240), True, 'import oneflow as flow\n'), ((1510, 1541), 'os.path.exists', 'os.path.exists', (['val_dataset_dir'], {}), '(val_dataset_dir)\n', (1524, 1541), False, 'import os\n'), ((1740, 1904), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', (['val_dataset_dir'], {'batch_size': 'val_batch_size', 'data_part_num': 'val_data_part_num', 'part_name_suffix_length': '(1)', 'shuffle_after_epoch': '(False)'}), '(val_dataset_dir, batch_size=val_batch_size,\n data_part_num=val_data_part_num, part_name_suffix_length=1,\n shuffle_after_epoch=False)\n', (1765, 1904), True, 'import oneflow as flow\n'), ((1956, 2032), 'oneflow.data.OFRecordImageDecoder', 'flow.data.OFRecordImageDecoder', (['ofrecord', '"""encoded"""'], {'color_space': 'color_space'}), "(ofrecord, 'encoded', color_space=color_space)\n", (1986, 2032), True, 'import oneflow as flow\n'), ((2046, 2122), 'oneflow.data.OFRecordRawDecoder', 'flow.data.OFRecordRawDecoder', (['ofrecord', '"""issame"""'], {'shape': '()', 'dtype': 'flow.int32'}), "(ofrecord, 'issame', shape=(), dtype=flow.int32)\n", (2074, 2122), True, 'import oneflow as flow\n'), ((2165, 2225), 'oneflow.image.Resize', 'flow.image.Resize', (['image'], {'target_size': '(112, 112)', 'channels': 
'(3)'}), '(image, target_size=(112, 112), channels=3)\n', (2182, 2225), True, 'import oneflow as flow\n'), ((2238, 2438), 'oneflow.image.CropMirrorNormalize', 'flow.image.CropMirrorNormalize', (['rsz'], {'color_space': 'color_space', 'crop_h': '(0)', 'crop_w': '(0)', 'crop_pos_y': '(0.5)', 'crop_pos_x': '(0.5)', 'mean': '[127.5, 127.5, 127.5]', 'std': '[128.0, 128.0, 128.0]', 'output_dtype': 'flow.float'}), '(rsz, color_space=color_space, crop_h=0,\n crop_w=0, crop_pos_y=0.5, crop_pos_x=0.5, mean=[127.5, 127.5, 127.5],\n std=[128.0, 128.0, 128.0], output_dtype=flow.float)\n', (2268, 2438), True, 'import oneflow as flow\n'), ((2524, 2587), 'oneflow.transpose', 'flow.transpose', (['normal'], {'name': '"""transpose_val"""', 'perm': '[0, 2, 3, 1]'}), "(normal, name='transpose_val', perm=[0, 2, 3, 1])\n", (2538, 2587), True, 'import oneflow as flow\n'), ((2894, 2997), 'oneflow.data.decode_random', 'flow.data.decode_random', ([], {'shape': '(image_size, image_size, 3)', 'dtype': 'flow.float', 'batch_size': 'batch_size'}), '(shape=(image_size, image_size, 3), dtype=flow.float,\n batch_size=batch_size)\n', (2917, 2997), True, 'import oneflow as flow\n'), ((923, 943), 'oneflow.data.RawCodec', 'flow.data.RawCodec', ([], {}), '()\n', (941, 943), True, 'import oneflow as flow\n'), ((2839, 2873), 'oneflow.zeros_initializer', 'flow.zeros_initializer', (['flow.int32'], {}), '(flow.int32)\n', (2861, 2873), True, 'import oneflow as flow\n'), ((682, 784), 'oneflow.data.NormByChannelPreprocessor', 'flow.data.NormByChannelPreprocessor', ([], {'mean_values': '(127.5, 127.5, 127.5)', 'std_values': '(128, 128, 128)'}), '(mean_values=(127.5, 127.5, 127.5),\n std_values=(128, 128, 128))\n', (717, 784), True, 'import oneflow as flow\n'), ((526, 564), 'oneflow.data.ImagePreprocessor', 'flow.data.ImagePreprocessor', (['"""bgr2rgb"""'], {}), "('bgr2rgb')\n", (553, 564), True, 'import oneflow as flow\n'), ((582, 619), 'oneflow.data.ImagePreprocessor', 'flow.data.ImagePreprocessor', 
(['"""mirror"""'], {}), "('mirror')\n", (609, 619), True, 'import oneflow as flow\n')] |
from collections import OrderedDict
from oneflow import nn, Tensor
from oneflow.nn import functional as F
from typing import Dict
from .. import mobilenet_v3
from .seg_utils import _SimpleSegmentationModel, IntermediateLayerGetter
from flowvision.models.utils import load_state_dict_from_url
from flowvision.models.registry import ModelCreator
model_urls = {
"lraspp_mobilenet_v3_large_coco": "https://oneflow-public.oss-cn-beijing.aliyuncs.com/model_zoo/flowvision/segmentation/lraspp/lraspp_mobilenet_v3_large_coco.zip",
}
class LRASPP(nn.Module):
"""
Implements a Lite R-ASPP Network for semantic segmentation from
`"Searching for MobileNetV3"
<https://arxiv.org/abs/1905.02244>`_.
Args:
backbone (nn.Module): the network used to compute the features for the model.
The backbone should return an OrderedDict[Tensor], with the key being
"high" for the high level feature map and "low" for the low level feature map.
low_channels (int): the number of channels of the low level features.
high_channels (int): the number of channels of the high level features.
num_classes (int): number of output classes of the model (including the background).
inter_channels (int, optional): the number of channels for intermediate computations.
"""
def __init__(
self, backbone, low_channels, high_channels, num_classes, inter_channels=128
):
super().__init__()
self.backbone = backbone
self.classifier = LRASPPHead(
low_channels, high_channels, num_classes, inter_channels
)
def forward(self, input):
features = self.backbone(input)
out = self.classifier(features)
out = F.interpolate(
out, size=input.shape[-2:], mode="bilinear", align_corners=False
)
result = OrderedDict()
result["out"] = out
return result
class LRASPPHead(nn.Module):
def __init__(self, low_channels, high_channels, num_classes, inter_channels):
super().__init__()
self.cbr = nn.Sequential(
nn.Conv2d(high_channels, inter_channels, 1, bias=False),
nn.BatchNorm2d(inter_channels),
nn.ReLU(inplace=True),
)
self.scale = nn.Sequential(
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(high_channels, inter_channels, 1, bias=False),
nn.Sigmoid(),
)
self.low_classifier = nn.Conv2d(low_channels, num_classes, 1)
self.high_classifier = nn.Conv2d(inter_channels, num_classes, 1)
def forward(self, input: Dict[str, Tensor]) -> Tensor:
low = input["low"]
high = input["high"]
x = self.cbr(high)
s = self.scale(high)
x = x * s
x = F.interpolate(x, size=low.shape[-2:], mode="bilinear", align_corners=False)
return self.low_classifier(low) + self.high_classifier(x)
def _load_weights(model, arch_type, backbone, progress):
arch = arch_type + "_" + backbone + "_coco"
model_url = model_urls.get(arch, None)
if model_url is None:
raise NotImplementedError(
"pretrained {} is not supported as of now".format(arch)
)
else:
state_dict = load_state_dict_from_url(model_url, progress=progress)
model.load_state_dict(state_dict)
def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True):
backbone = mobilenet_v3.__dict__[backbone_name](
pretrained=pretrained_backbone, dilated=True
).features
# Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks.
# The first and last blocks are always included because they are the C0 (conv1) and Cn.
stage_indices = (
[0]
+ [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)]
+ [len(backbone) - 1]
)
low_pos = stage_indices[-4] # use C2 here which has output_stride = 8
high_pos = stage_indices[-1] # use C5 which has output_stride = 16
low_channels = backbone[low_pos].out_channels
high_channels = backbone[high_pos].out_channels
backbone = IntermediateLayerGetter(
backbone, return_layers={str(low_pos): "low", str(high_pos): "high"}
)
model = LRASPP(backbone, low_channels, high_channels, num_classes)
return model
@ModelCreator.register_model
def lraspp_mobilenet_v3_large_coco(
pretrained=False, progress=True, num_classes=21, **kwargs
):
"""Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone.
Args:
pretrained (bool): If True, returns a model pre-trained on COCO train2017 which
contains the same classes as Pascal VOC
progress (bool): If True, displays a progress bar of the download to stderr
num_classes (int): number of output classes of the model (including the background)
"""
if kwargs.pop("aux_loss", False):
raise NotImplementedError("This model does not use auxiliary loss")
backbone_name = "mobilenet_v3_large"
model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs)
if pretrained:
_load_weights(model, "lraspp", backbone_name, progress)
return model
| [
"oneflow.nn.ReLU",
"oneflow.nn.AdaptiveAvgPool2d",
"oneflow.nn.BatchNorm2d",
"oneflow.nn.functional.interpolate",
"oneflow.nn.Conv2d",
"oneflow.nn.Sigmoid"
] | [((1746, 1825), 'oneflow.nn.functional.interpolate', 'F.interpolate', (['out'], {'size': 'input.shape[-2:]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(out, size=input.shape[-2:], mode='bilinear', align_corners=False)\n", (1759, 1825), True, 'from oneflow.nn import functional as F\n'), ((1866, 1879), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1877, 1879), False, 'from collections import OrderedDict\n'), ((2471, 2510), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['low_channels', 'num_classes', '(1)'], {}), '(low_channels, num_classes, 1)\n', (2480, 2510), False, 'from oneflow import nn, Tensor\n'), ((2542, 2583), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['inter_channels', 'num_classes', '(1)'], {}), '(inter_channels, num_classes, 1)\n', (2551, 2583), False, 'from oneflow import nn, Tensor\n'), ((2787, 2862), 'oneflow.nn.functional.interpolate', 'F.interpolate', (['x'], {'size': 'low.shape[-2:]', 'mode': '"""bilinear"""', 'align_corners': '(False)'}), "(x, size=low.shape[-2:], mode='bilinear', align_corners=False)\n", (2800, 2862), True, 'from oneflow.nn import functional as F\n'), ((3250, 3304), 'flowvision.models.utils.load_state_dict_from_url', 'load_state_dict_from_url', (['model_url'], {'progress': 'progress'}), '(model_url, progress=progress)\n', (3274, 3304), False, 'from flowvision.models.utils import load_state_dict_from_url\n'), ((2117, 2172), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['high_channels', 'inter_channels', '(1)'], {'bias': '(False)'}), '(high_channels, inter_channels, 1, bias=False)\n', (2126, 2172), False, 'from oneflow import nn, Tensor\n'), ((2186, 2216), 'oneflow.nn.BatchNorm2d', 'nn.BatchNorm2d', (['inter_channels'], {}), '(inter_channels)\n', (2200, 2216), False, 'from oneflow import nn, Tensor\n'), ((2230, 2251), 'oneflow.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (2237, 2251), False, 'from oneflow import nn, Tensor\n'), ((2311, 2334), 'oneflow.nn.AdaptiveAvgPool2d', 'nn.AdaptiveAvgPool2d', 
(['(1)'], {}), '(1)\n', (2331, 2334), False, 'from oneflow import nn, Tensor\n'), ((2348, 2403), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['high_channels', 'inter_channels', '(1)'], {'bias': '(False)'}), '(high_channels, inter_channels, 1, bias=False)\n', (2357, 2403), False, 'from oneflow import nn, Tensor\n'), ((2417, 2429), 'oneflow.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (2427, 2429), False, 'from oneflow import nn, Tensor\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence, Union
import oneflow as flow
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export
@oneflow_export("random.bernoulli")
def Bernoulli(
x: remote_blob_util.BlobDef,
seed: Optional[int] = None,
dtype: Optional[dtype_util.dtype] = None,
name: Optional[str] = None,
) -> remote_blob_util.BlobDef:
if name is None:
name = id_util.UniqueStr("Bernoulli_")
if dtype is None:
dtype = x.dtype
return (
flow.user_op_builder(name)
.Op("bernoulli")
.Input("in", [x])
.Output("out")
.Attr("dtype", dtype)
.SetRandomSeed(seed)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.user_op_builder",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.id_util.UniqueStr"
] | [((926, 960), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""random.bernoulli"""'], {}), "('random.bernoulli')\n", (940, 960), False, 'from oneflow.python.oneflow_export import oneflow_export\n'), ((1186, 1217), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Bernoulli_"""'], {}), "('Bernoulli_')\n", (1203, 1217), True, 'import oneflow.python.framework.id_util as id_util\n'), ((1286, 1312), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (1306, 1312), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
config = flow.function_config()
def make_job(x_shape, b_shape, dtype=flow.float32):
config.use_xla_jit(False)
config.use_tensorrt(False)
@flow.global_function(config)
def bias_add_job(
x=flow.FixedTensorDef(x_shape, dtype=dtype),
bias=flow.FixedTensorDef(b_shape, dtype=dtype),
):
return flow.nn.bias_add(x, bias)
return bias_add_job
def make_xla_job(x_shape, b_shape, dtype=flow.float32):
config.use_xla_jit(True)
config.use_tensorrt(False)
@flow.global_function(config)
def xla_bias_add_job(
x=flow.FixedTensorDef(x_shape, dtype=dtype),
bias=flow.FixedTensorDef(b_shape, dtype=dtype),
):
return flow.nn.bias_add(x, bias)
return xla_bias_add_job
def make_trt_job(x_shape, b_shape, dtype=flow.float32):
config.use_xla_jit(False)
config.use_tensorrt(True)
@flow.global_function(config)
def trt_bias_add_job(
x=flow.FixedTensorDef(x_shape, dtype=dtype),
bias=flow.FixedTensorDef(b_shape, dtype=dtype),
):
return flow.nn.bias_add(x, bias)
return trt_bias_add_job
class TestBiasAdd(unittest.TestCase):
def _test_body(self, x, bias, dtype=np.float32):
f1 = make_job(x.shape, bias.shape, dtype=flow.float32)
f2 = make_xla_job(x.shape, bias.shape, dtype=flow.float32)
a = f1(x, bias).get()
b = f2(x, bias).get()
print("without xla: ", a)
print("with xla: ", b)
self.assertTrue(np.allclose(a.numpy(), b.numpy(), rtol=0.001, atol=1e-05))
flow.clear_default_session()
f3 = make_trt_job(x.shape, bias.shape, dtype=flow.float32)
c = f3(x, bias).get()
print("with tensorrt: ", c)
self.assertTrue(np.allclose(a.numpy(), c.numpy(), rtol=0.001, atol=1e-05))
flow.clear_default_session()
def _test_ones_body(self, x_shape, bias_shape, dtype=np.float32):
x = np.ones(x_shape, dtype=dtype)
b = np.ones(bias_shape, dtype=dtype)
self._test_body(x, b, dtype=dtype)
def _test_random_body(self, x_shape, bias_shape, dtype=np.float32):
x = np.random.random(x_shape).astype(dtype)
b = np.random.random(bias_shape).astype(dtype)
self._test_body(x, b, dtype=dtype)
def test_ones_input(self):
self._test_ones_body((1, 10), 10)
self._test_ones_body((2, 10, 2), 10)
self._test_ones_body((2, 5, 2, 2), 5)
def test_random_input(self):
self._test_random_body((1, 10), 10)
self._test_random_body((2, 10, 2), 10)
self._test_random_body((2, 5, 2, 2), 5)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.compatible.single_client.nn.bias_add",
"oneflow.compatible.single_client.function_config",
"oneflow.compatible.single_client.FixedTensorDef",
"oneflow.compatible.single_client.clear_default_session",
"oneflow.compatible.single_client.global_function"
] | [((740, 762), 'oneflow.compatible.single_client.function_config', 'flow.function_config', ([], {}), '()\n', (760, 762), True, 'from oneflow.compatible import single_client as flow\n'), ((884, 912), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (904, 912), True, 'from oneflow.compatible import single_client as flow\n'), ((1241, 1269), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1261, 1269), True, 'from oneflow.compatible import single_client as flow\n'), ((1606, 1634), 'oneflow.compatible.single_client.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (1626, 1634), True, 'from oneflow.compatible import single_client as flow\n'), ((3363, 3378), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3376, 3378), False, 'import unittest\n'), ((945, 986), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (964, 986), True, 'from oneflow.compatible import single_client as flow\n'), ((1001, 1042), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1020, 1042), True, 'from oneflow.compatible import single_client as flow\n'), ((1066, 1091), 'oneflow.compatible.single_client.nn.bias_add', 'flow.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (1082, 1091), True, 'from oneflow.compatible import single_client as flow\n'), ((1306, 1347), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1325, 1347), True, 'from oneflow.compatible import single_client as flow\n'), ((1362, 1403), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1381, 1403), True, 'from oneflow.compatible import 
single_client as flow\n'), ((1427, 1452), 'oneflow.compatible.single_client.nn.bias_add', 'flow.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (1443, 1452), True, 'from oneflow.compatible import single_client as flow\n'), ((1671, 1712), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1690, 1712), True, 'from oneflow.compatible import single_client as flow\n'), ((1727, 1768), 'oneflow.compatible.single_client.FixedTensorDef', 'flow.FixedTensorDef', (['b_shape'], {'dtype': 'dtype'}), '(b_shape, dtype=dtype)\n', (1746, 1768), True, 'from oneflow.compatible import single_client as flow\n'), ((1792, 1817), 'oneflow.compatible.single_client.nn.bias_add', 'flow.nn.bias_add', (['x', 'bias'], {}), '(x, bias)\n', (1808, 1817), True, 'from oneflow.compatible import single_client as flow\n'), ((2286, 2314), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2312, 2314), True, 'from oneflow.compatible import single_client as flow\n'), ((2539, 2567), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2565, 2567), True, 'from oneflow.compatible import single_client as flow\n'), ((2651, 2680), 'numpy.ones', 'np.ones', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2658, 2680), True, 'import numpy as np\n'), ((2693, 2725), 'numpy.ones', 'np.ones', (['bias_shape'], {'dtype': 'dtype'}), '(bias_shape, dtype=dtype)\n', (2700, 2725), True, 'import numpy as np\n'), ((2854, 2879), 'numpy.random.random', 'np.random.random', (['x_shape'], {}), '(x_shape)\n', (2870, 2879), True, 'import numpy as np\n'), ((2906, 2934), 'numpy.random.random', 'np.random.random', (['bias_shape'], {}), '(bias_shape)\n', (2922, 2934), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from contextlib import contextmanager
import oneflow.python.framework.distribute_context as distribute_ctx
from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate
import traceback
class Distribute(object):
def __init__(self):
pass
class AutoDistribute(Distribute):
def __init__(self):
Distribute.__init__(self)
class BroadcastDistribute(Distribute):
def __init__(self):
Distribute.__init__(self)
class SplitDistribute(Distribute):
def __init__(self, axis):
Distribute.__init__(self)
self.axis_ = axis
@property
def axis(self):
return self.axis_
@oneflow_export("distribute.mirrored_strategy")
@oneflow_deprecate()
def deprecated_mirrored_strategy():
print(
"WARNING:",
"oneflow.distribute.mirrored_strategy",
"will be removed in the future, use {} instead.".format(
"oneflow.scope.mirrored_view"
),
)
print(traceback.format_stack()[-2])
return DistributeMirroredStrategy()
@oneflow_export("scope.mirrored_view")
class DistributeMirroredStrategy(distribute_ctx.DistributeStrategy):
r"""Create a scope in mirrored view. All operators within the scope will be mirrored among diffierent accelerators.
Usage::
with oneflow.scope.mirrored_view():
...
"""
def __init__(self):
distribute_ctx.DistributeStrategy.__init__(self, True)
@oneflow_export("distribute.mirrored_strategy_enabled")
@oneflow_deprecate()
def deprecated_mirrored_strategy_enabled():
print(
"WARNING:",
"oneflow.distribute.mirrored_strategy_enabled",
"will be removed in the future, use {} instead.".format(
"oneflow.scope.mirrored_view_enabled"
),
)
print(traceback.format_stack()[-2])
return MirroredStrategyEnabled()
@oneflow_export("scope.mirrored_view_enabled")
def MirroredStrategyEnabled() -> bool:
r"""
Returns:
bool: `True` if mirrored strategy is enabled in current context where this function is called.
"""
return distribute_ctx.IsMirroredStrategyEnabled()
@oneflow_export("distribute.consistent_strategy")
@oneflow_deprecate()
def deprecated_consistent_strategy():
print(
"WARNING:",
"oneflow.distribute.consistent_strategy",
"will be removed in the future, use {} instead.".format(
"oneflow.scope.consistent_view"
),
)
print(traceback.format_stack()[-2])
return DistributeConsistentStrategy()
@oneflow_export("scope.consistent_view")
class DistributeConsistentStrategy(distribute_ctx.DistributeStrategy):
r"""Create a scope in consistent view. All operators within the scope will be automatically parallelized among diffierent accelerators for best performance and least data transfer.
Usage::
with oneflow.scope.consistent_view():
...
"""
def __init__(self):
distribute_ctx.DistributeStrategy.__init__(self, False)
@oneflow_export("distribute.consistent_strategy_enabled")
@oneflow_deprecate()
def deprecated_consistent_strategy_enabled():
print(
"WARNING:",
"oneflow.distribute.consistent_strategy_enabled",
"will be removed in the future, use {} instead.".format(
"oneflow.scope.consistent_view_enabled"
),
)
print(traceback.format_stack()[-2])
return ConsistentStrategyEnabled()
@oneflow_export("scope.consistent_view_enabled")
def ConsistentStrategyEnabled() -> bool:
r"""
Returns:
bool: `True` if consistent strategy is enabled in current context where this function is called.
"""
return distribute_ctx.IsConsistentStrategyEnabled()
@oneflow_export("distribute.split")
def split(axis: int) -> SplitDistribute:
r"""Generate a split scheme in which op will be splitted at `axis`.
Args:
axis (int): At `axis` the op will be splitted.
Returns:
SplitDistribute: Split scheme object, often required by `with_distribute` method of `Blob` or `oneflow.get_variable`.
Example::
weight = weight.with_distribute(distribute.split(1))
"""
assert type(axis) is int
assert str(axis) in _axis_str2split_axis_obj, "not a valid split. expected: [0, 11)"
return _axis_str2split_axis_obj[str(axis)]
@oneflow_export("distribute.broadcast")
def broadcast() -> BroadcastDistribute:
r"""Generate a broadcast scheme.
Returns:
BroadcastDistribute: Broadcast scheme object, often required by `with_distribute` method of `Blob` or `oneflow.get_variable`.
Example::
segment_ids = segment_ids.with_distribute(flow.distribute.broadcast())
"""
return _broadcast
@oneflow_export("distribute.auto")
def auto() -> AutoDistribute:
r"""Generate a broadcast scheme.
Returns:
AutoDistribute: Auto distribute scheme object, often required by `with_distribute` method of `Blob` or `oneflow.get_variable`.
"""
return _auto
@oneflow_export("distribute.assert_is_valid_distribute")
def assert_is_valid_distribute(distribute: Distribute) -> None:
assert isinstance(
distribute, Distribute
), """not a valid distribute policy.
expected: 1) oneflow.distribute.split(axis); 2) oneflow.distribute.broadcast(); 3) oneflow.distribute.auto()"""
_auto = AutoDistribute()
_broadcast = BroadcastDistribute()
_axis_str2split_axis_obj = dict()
for i in range(11):
class_name = "Split_Axis%d" % i
_axis_str2split_axis_obj[str(i)] = SplitDistribute(i)
| [
"oneflow.python.framework.distribute_context.IsConsistentStrategyEnabled",
"oneflow.python.framework.distribute_context.IsMirroredStrategyEnabled",
"oneflow.python.oneflow_export.oneflow_deprecate",
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.distribute_context.DistributeStrate... | [((1280, 1326), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.mirrored_strategy"""'], {}), "('distribute.mirrored_strategy')\n", (1294, 1326), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((1328, 1347), 'oneflow.python.oneflow_export.oneflow_deprecate', 'oneflow_deprecate', ([], {}), '()\n', (1345, 1347), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((1670, 1707), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""scope.mirrored_view"""'], {}), "('scope.mirrored_view')\n", (1684, 1707), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2070, 2124), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.mirrored_strategy_enabled"""'], {}), "('distribute.mirrored_strategy_enabled')\n", (2084, 2124), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2126, 2145), 'oneflow.python.oneflow_export.oneflow_deprecate', 'oneflow_deprecate', ([], {}), '()\n', (2143, 2145), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2489, 2534), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""scope.mirrored_view_enabled"""'], {}), "('scope.mirrored_view_enabled')\n", (2503, 2534), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2766, 2814), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.consistent_strategy"""'], {}), "('distribute.consistent_strategy')\n", (2780, 2814), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2816, 2835), 'oneflow.python.oneflow_export.oneflow_deprecate', 'oneflow_deprecate', ([], {}), '()\n', (2833, 2835), False, 'from oneflow.python.oneflow_export import 
oneflow_export, oneflow_deprecate\n'), ((3166, 3205), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""scope.consistent_view"""'], {}), "('scope.consistent_view')\n", (3180, 3205), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((3643, 3699), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.consistent_strategy_enabled"""'], {}), "('distribute.consistent_strategy_enabled')\n", (3657, 3699), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((3701, 3720), 'oneflow.python.oneflow_export.oneflow_deprecate', 'oneflow_deprecate', ([], {}), '()\n', (3718, 3720), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((4072, 4119), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""scope.consistent_view_enabled"""'], {}), "('scope.consistent_view_enabled')\n", (4086, 4119), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((4357, 4391), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.split"""'], {}), "('distribute.split')\n", (4371, 4391), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((4977, 5015), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.broadcast"""'], {}), "('distribute.broadcast')\n", (4991, 5015), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((5377, 5410), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.auto"""'], {}), "('distribute.auto')\n", (5391, 5410), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((5660, 5715), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""distribute.assert_is_valid_distribute"""'], {}), "('distribute.assert_is_valid_distribute')\n", (5674, 5715), 
False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2720, 2762), 'oneflow.python.framework.distribute_context.IsMirroredStrategyEnabled', 'distribute_ctx.IsMirroredStrategyEnabled', ([], {}), '()\n', (2760, 2762), True, 'import oneflow.python.framework.distribute_context as distribute_ctx\n'), ((4309, 4353), 'oneflow.python.framework.distribute_context.IsConsistentStrategyEnabled', 'distribute_ctx.IsConsistentStrategyEnabled', ([], {}), '()\n', (4351, 4353), True, 'import oneflow.python.framework.distribute_context as distribute_ctx\n'), ((2012, 2066), 'oneflow.python.framework.distribute_context.DistributeStrategy.__init__', 'distribute_ctx.DistributeStrategy.__init__', (['self', '(True)'], {}), '(self, True)\n', (2054, 2066), True, 'import oneflow.python.framework.distribute_context as distribute_ctx\n'), ((3584, 3639), 'oneflow.python.framework.distribute_context.DistributeStrategy.__init__', 'distribute_ctx.DistributeStrategy.__init__', (['self', '(False)'], {}), '(self, False)\n', (3626, 3639), True, 'import oneflow.python.framework.distribute_context as distribute_ctx\n'), ((1597, 1621), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (1619, 1621), False, 'import traceback\n'), ((2419, 2443), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (2441, 2443), False, 'import traceback\n'), ((3091, 3115), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (3113, 3115), False, 'import traceback\n'), ((4000, 4024), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (4022, 4024), False, 'import traceback\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n1d()
class TestMultiGraph(oneflow.unittest.TestCase):
def test_multi_graph(test_case):
relu_data = np.array([2.0, 1.0, 0.0, -1.0, -2.0])
relu_in = flow.tensor(relu_data, dtype=flow.float32)
MyRelu = flow.nn.ReLU()
relu_out_eager = MyRelu(relu_in)
class ReluGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.cc_relu = MyRelu
def build(self, x):
return self.cc_relu(x)
relu_g = ReluGraph()
relu_out_lazy = relu_g(relu_in)
test_case.assertTrue(
np.array_equal(relu_out_lazy.numpy(), relu_out_eager.numpy())
)
linear = flow.nn.Linear(3, 8, False)
linear = linear.to(flow.device("cuda"))
input_arr = np.array(
[
[-0.94630778, -0.83378579, -0.87060891],
[2.0289922, -0.28708987, -2.18369248],
[0.35217619, -0.67095644, -1.58943879],
[0.08086036, -1.81075924, 1.20752494],
[0.8901075, -0.49976737, -1.07153746],
[-0.44872912, -1.07275683, 0.06256855],
[-0.22556897, 0.74798368, 0.90416439],
[0.48339456, -2.32742195, -0.59321527],
],
dtype=np.float32,
)
np_weight = np.ones((3, 8)).astype(np.float32)
np_weight.fill(2.3)
linear_in = flow.tensor(input_arr, device=flow.device("cuda"))
flow.nn.init.constant_(linear.weight, 2.3)
linear_out_eager = linear(linear_in)
np_out = np.matmul(input_arr, np_weight)
test_case.assertTrue(
np.allclose(linear_out_eager.numpy(), np_out, 1e-05, 1e-05)
)
class LinearGraph(flow.nn.Graph):
def __init__(self):
super().__init__()
self.my_linear = linear
def build(self, x):
return self.my_linear(x)
linear_g = LinearGraph()
linear_out_lazy = linear_g(linear_in)
test_case.assertTrue(
np.array_equal(linear_out_lazy.numpy(), linear_out_eager.numpy())
)
relu_out_lazy = relu_g(relu_in)
linear_out_lazy = linear_g(linear_in)
test_case.assertTrue(
np.array_equal(relu_out_eager.numpy(), relu_out_lazy.numpy())
)
test_case.assertTrue(
np.array_equal(linear_out_eager.numpy(), linear_out_lazy.numpy())
)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.nn.Linear",
"oneflow.nn.init.constant_",
"oneflow.nn.ReLU",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor",
"oneflow.device"
] | [((762, 794), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (792, 794), True, 'import oneflow as flow\n'), ((702, 736), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (711, 736), False, 'import os\n'), ((3299, 3314), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3312, 3314), False, 'import unittest\n'), ((901, 938), 'numpy.array', 'np.array', (['[2.0, 1.0, 0.0, -1.0, -2.0]'], {}), '([2.0, 1.0, 0.0, -1.0, -2.0])\n', (909, 938), True, 'import numpy as np\n'), ((957, 999), 'oneflow.tensor', 'flow.tensor', (['relu_data'], {'dtype': 'flow.float32'}), '(relu_data, dtype=flow.float32)\n', (968, 999), True, 'import oneflow as flow\n'), ((1018, 1032), 'oneflow.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (1030, 1032), True, 'import oneflow as flow\n'), ((1494, 1521), 'oneflow.nn.Linear', 'flow.nn.Linear', (['(3)', '(8)', '(False)'], {}), '(3, 8, False)\n', (1508, 1521), True, 'import oneflow as flow\n'), ((1590, 1954), 'numpy.array', 'np.array', (['[[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987, -\n 2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]]'], {'dtype': 'np.float32'}), '([[-0.94630778, -0.83378579, -0.87060891], [2.0289922, -0.28708987,\n -2.18369248], [0.35217619, -0.67095644, -1.58943879], [0.08086036, -\n 1.81075924, 1.20752494], [0.8901075, -0.49976737, -1.07153746], [-\n 0.44872912, -1.07275683, 0.06256855], [-0.22556897, 0.74798368, \n 0.90416439], [0.48339456, -2.32742195, -0.59321527]], dtype=np.float32)\n', (1598, 1954), True, 'import numpy as np\n'), ((2276, 2318), 'oneflow.nn.init.constant_', 'flow.nn.init.constant_', (['linear.weight', '(2.3)'], {}), '(linear.weight, 2.3)\n', (2298, 2318), True, 'import oneflow as flow\n'), 
((2381, 2412), 'numpy.matmul', 'np.matmul', (['input_arr', 'np_weight'], {}), '(input_arr, np_weight)\n', (2390, 2412), True, 'import numpy as np\n'), ((1549, 1568), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (1560, 1568), True, 'import oneflow as flow\n'), ((2134, 2149), 'numpy.ones', 'np.ones', (['(3, 8)'], {}), '((3, 8))\n', (2141, 2149), True, 'import numpy as np\n'), ((2247, 2266), 'oneflow.device', 'flow.device', (['"""cuda"""'], {}), "('cuda')\n", (2258, 2266), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import threading
import oneflow.python.framework.local_blob as local_blob_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow_api
class FutureRemoteBlobs(object):
    """Abstract future over remote blobs that become available later.

    Subclasses implement get()/async_get()/SetResult(); Inited() flips a
    one-shot initialization flag and returns self so calls can be chained.
    """

    def __init__(self):
        # One-shot flag; Inited() may be called exactly once.
        self.inited_ = False

    def get(self):
        """Synchronously fetch the result (subclass responsibility)."""
        raise NotImplementedError

    def async_get(self, callback):
        """Fetch the result asynchronously (subclass responsibility)."""
        raise NotImplementedError

    def SetResult(self, remote_blobs):
        """Attach the remote blobs to be pulled (subclass responsibility)."""
        raise NotImplementedError

    def Inited(self):
        """Mark this future initialized; returns self for chaining."""
        assert self.inited_ is False
        self.inited_ = True
        return self
class LazyFutureRemoteBlobs(FutureRemoteBlobs):
    """Future for lazy-mode jobs: pulls every output remote blob
    asynchronously and hands the merged local blobs to the caller
    via get() or async_get().

    A threading.Condition guards finished_cnt_ (incremented once per
    completed pull) so user threads can block until every pull callback
    has fired.
    """

    def __init__(self, session):
        super().__init__()
        self.session_ = session
        # Guards finished_cnt_ and async_get_callback_ below.
        self.cond_var_ = threading.Condition()
        self.out_remote_blob_pullers_ = []
        # Number of leaf pullers whose async pull has completed.
        self.finished_cnt_ = 0
        # get()/async_get() may deliver the data only once.
        self.data_delivered_ = False
        self.async_get_callback_ = lambda: None

    # user api
    def get(self):
        """Block until every pull has finished, then return the local blob(s)."""
        assert self.inited_
        assert self.data_delivered_ == False
        self._Wait()
        self.data_delivered_ = True
        return self._TrySyncAndGetResultNdarray(self.out_remote_blob_pullers_)

    # user api
    def async_get(self, callback):
        """Invoke `callback(result)` once every pull has finished.

        If all pulls are already done, the callback runs immediately on
        this thread; otherwise it is stored and run by the last
        _FinishCallback invocation.
        """
        assert self.inited_
        assert self.data_delivered_ == False
        pullers_cnt = self._GetPullersCnt()

        def Callback():
            assert self.finished_cnt_ <= pullers_cnt
            if self.finished_cnt_ == pullers_cnt:
                callback(
                    self._TrySyncAndGetResultNdarray(self.out_remote_blob_pullers_)
                )

        try:
            self.cond_var_.acquire()
            if self.finished_cnt_ == pullers_cnt:
                Callback()
            else:
                self.async_get_callback_ = Callback
        finally:
            self.cond_var_.release()
        self.data_delivered_ = True

    def SetResult(self, out_remote_blobs):
        """Build pullers for `out_remote_blobs` and start all async pulls."""
        assert self.inited_ == False
        assert isinstance(self.out_remote_blob_pullers_, list)
        assert len(self.out_remote_blob_pullers_) == 0
        pullers = self._MakeRemoteBlobPullers(out_remote_blobs)
        self.out_remote_blob_pullers_ = pullers
        for puller in self._FlatConsistentBlobPullers(pullers):
            puller.AsyncPull(self._FinishCallback)
        return self

    def _FinishCallback(self):
        # Called once per completed pull. threading.Condition uses an RLock
        # by default, so the stored async_get callback may safely touch
        # state while the lock is held by this thread.
        self.cond_var_.acquire()
        self.finished_cnt_ += 1
        self.cond_var_.notify()
        self.async_get_callback_()
        self.cond_var_.release()

    def _Wait(self):
        # Block the calling thread until every leaf puller has finished.
        pullers_cnt = self._GetPullersCnt()
        self.cond_var_.acquire()
        while self.finished_cnt_ != pullers_cnt:
            self.cond_var_.wait()
        self.cond_var_.release()

    def _TrySyncAndGetResultNdarray(self, pullers):
        # Make sure any post-return callbacks registered on the session
        # have run before exposing the results to the user.
        if self.session_.HasAnyCallbackAfterFunctionReturn():
            self.session_.Sync()
        return self._GetResultLocalBlob(pullers)

    def _GetResultLocalBlob(self, pullers):
        # Mirror the nesting structure of `pullers` (puller / list / tuple
        # / dict), replacing each leaf puller with its pulled local blob.
        assert self.inited_
        if isinstance(pullers, _BlobPuller):
            return pullers.result
        if isinstance(pullers, (list, tuple)):
            return type(pullers)(self._GetResultLocalBlob(x) for x in pullers)
        if isinstance(pullers, dict):
            return {k: self._GetResultLocalBlob(v) for k, v in pullers.items()}
        raise NotImplementedError

    def _GetPullersCnt(self):
        # Count the leaf pullers reachable from the (possibly nested) result.
        cnt = 0
        for _ in self._FlatConsistentBlobPullers(self.out_remote_blob_pullers_):
            cnt += 1
        return cnt

    def _FlatConsistentBlobPullers(self, pullers):
        # Depth-first traversal yielding every leaf consistent-blob puller.
        if isinstance(pullers, _BlobPuller):
            for x in pullers.FlatConsistentBlobPullers():
                yield x
        elif isinstance(pullers, list) or isinstance(pullers, tuple):
            for elem in pullers:
                for x in self._FlatConsistentBlobPullers(elem):
                    yield x
        elif isinstance(pullers, dict):
            for _, v in pullers.items():
                for x in self._FlatConsistentBlobPullers(v):
                    yield x
        else:
            raise NotImplementedError

    def _MakeRemoteBlobPullers(self, out_remote_blobs):
        # Build a puller tree matching the structure of `out_remote_blobs`.
        if isinstance(out_remote_blobs, oneflow_api.ConsistentBlob):
            return _ConsistentBlobPuller(out_remote_blobs, self.session_)
        if isinstance(out_remote_blobs, oneflow_api.MirroredBlob):
            return _MirroredBlobPuller(out_remote_blobs, self.session_)
        if isinstance(out_remote_blobs, list) or isinstance(out_remote_blobs, tuple):
            return type(out_remote_blobs)(
                self._MakeRemoteBlobPullers(x) for x in out_remote_blobs
            )
        if isinstance(out_remote_blobs, dict):
            return {
                k: self._MakeRemoteBlobPullers(v) for k, v in out_remote_blobs.items()
            }
        raise NotImplementedError
class _BlobPuller(object):
def __init__(self, session):
self.session_ = session
def FlatConsistentBlobPullers(self):
raise NotImplementedError
@property
def result(self):
raise NotImplementedError
class _ConsistentBlobPuller(_BlobPuller):
    """Pulls a single consistent blob into a local blob."""

    def __init__(self, consistent_blob, session):
        super().__init__(session)
        self.consistent_blob_ = consistent_blob
        self.result_ = None  # filled in by the pull callback

    @property
    def result(self):
        """Pulled local blob; valid only after the async pull completed."""
        assert self.result_ is not None
        return self.result_

    def FlatConsistentBlobPullers(self):
        # This puller is itself a leaf.
        yield self

    def AsyncPull(self, pull_cb):
        """Schedule the pull; `pull_cb` fires once the local blob is ready."""

        def OnBlobReady(of_blob):
            self.result_ = local_blob_util.MakeLocalBlob(
                of_blob.CopyToNdarrayLists(), self.consistent_blob_
            )
            pull_cb()

        self.session_.AsyncPull(self.consistent_blob_.op_name, OnBlobReady)
class _MirroredBlobPuller(_BlobPuller):
    """Pulls a mirrored blob by pulling each of its consistent sub-blobs
    and lazily merging them into one local blob."""

    def __init__(self, mirrored_blob, session):
        super().__init__(session)
        self.mirrored_blob_ = mirrored_blob
        self.sub_pullers_ = tuple(
            _ConsistentBlobPuller(sub_blob, self.session_)
            for sub_blob in mirrored_blob.sub_consistent_blob_list
        )
        self.local_mirrored_blob_ = None  # cache for the merged result

    @property
    def result(self):
        """Merged local blob; computed once from the sub-pullers, then cached."""
        if self.local_mirrored_blob_ is None:
            sub_results = [puller.result for puller in self.sub_pullers_]
            self.local_mirrored_blob_ = local_blob_util.MergeLocalBlobs(
                sub_results, self.mirrored_blob_
            )
        return self.local_mirrored_blob_

    def FlatConsistentBlobPullers(self):
        # Every sub-puller is a leaf.
        yield from self.sub_pullers_
class EagerFutureRemoteBlobs(FutureRemoteBlobs):
    """Future for eager-mode jobs: results are already materialized, so
    both get() and async_get() resolve immediately through
    _EagerBlobGetter wrappers."""

    def __init__(self):
        super().__init__()
        self.blob_getters_ = None

    def get(self):
        """Return the local blob(s) resolved from the stored getters."""
        return self._GetResultLocalBlob(self.blob_getters_)

    def async_get(self, callback):
        """Immediately invoke `callback` with the local blob(s)."""
        assert callable(callback)
        callback(self._GetResultLocalBlob(self.blob_getters_))

    def SetResult(self, remote_blobs):
        """Wrap `remote_blobs` in getters; may be called only once."""
        assert self.inited_ is False
        assert self.blob_getters_ is None
        self.blob_getters_ = self._MakeRemoteBlobGetters(remote_blobs)
        return self

    def _MakeRemoteBlobGetters(self, remote_blobs):
        # Recursively mirror the container structure, wrapping each leaf
        # eager blob in a _EagerBlobGetter.
        if isinstance(remote_blobs, (list, tuple)):
            wrapped = (self._MakeRemoteBlobGetters(b) for b in remote_blobs)
            return type(remote_blobs)(wrapped)
        if isinstance(remote_blobs, dict):
            return {
                key: self._MakeRemoteBlobGetters(val)
                for key, val in remote_blobs.items()
            }
        if isinstance(remote_blobs, oneflow_api.EagerBlobTrait):
            return _EagerBlobGetter(remote_blobs)
        raise NotImplementedError

    def _GetResultLocalBlob(self, getter):
        # Recursively replace each _EagerBlobGetter leaf with its local blob.
        assert self.inited_
        if isinstance(getter, _EagerBlobGetter):
            return getter.result
        if isinstance(getter, (list, tuple)):
            return type(getter)(self._GetResultLocalBlob(g) for g in getter)
        if isinstance(getter, dict):
            return {key: self._GetResultLocalBlob(val) for key, val in getter.items()}
        raise NotImplementedError(type(getter))
class _EagerBlobGetter(object):
    """Lazily converts an eager blob into a local blob, caching the result."""

    def __init__(self, eager_blob):
        assert isinstance(eager_blob, oneflow_api.EagerBlobTrait)
        self.eager_blob_ = eager_blob
        self.local_tensor_ = None  # cache for the converted local blob

    @property
    def result(self):
        """Local blob view of the eager blob; converted once, then cached."""
        if self.local_tensor_ is None:
            self.local_tensor_ = local_blob_util.MakeLocalBlob4EagerBlob(self.eager_blob_)
        return self.local_tensor_
| [
"oneflow.python.framework.local_blob.MakeLocalBlob4EagerBlob",
"oneflow.python.framework.local_blob.MergeLocalBlobs"
] | [((1353, 1374), 'threading.Condition', 'threading.Condition', ([], {}), '()\n', (1372, 1374), False, 'import threading\n'), ((7036, 7105), 'oneflow.python.framework.local_blob.MergeLocalBlobs', 'local_blob_util.MergeLocalBlobs', (['local_blob_list', 'self.mirrored_blob_'], {}), '(local_blob_list, self.mirrored_blob_)\n', (7067, 7105), True, 'import oneflow.python.framework.local_blob as local_blob_util\n'), ((9180, 9237), 'oneflow.python.framework.local_blob.MakeLocalBlob4EagerBlob', 'local_blob_util.MakeLocalBlob4EagerBlob', (['self.eager_blob_'], {}), '(self.eager_blob_)\n', (9219, 9237), True, 'import oneflow.python.framework.local_blob as local_blob_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.unittest
from oneflow.test_utils.automated_test_util import *
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
class TestTensor(flow.unittest.TestCase):
    """Tests for constructing and manipulating global (consistent) tensors."""

    @flow.unittest.skip_unless_1n1d()
    def test_creating_global_tensor(test_case):
        """Global tensors can be built from a shape, a local tensor, or
        another global tensor."""
        placement = flow.placement("cuda", [0])
        sbp = flow.sbp.broadcast
        shape = (2, 3)
        # Shape -> GlobalTensor
        x = flow.Tensor(*shape, placement=placement, sbp=sbp)
        test_case.assertTrue(x.is_global)
        # LocalTensor -> GlobalTensor
        x = flow.Tensor(*shape, device="cpu")
        test_case.assertTrue(x.is_local)
        y = flow.Tensor(x, placement=placement, sbp=sbp)
        test_case.assertTrue(y.is_global)
        # GlobalTensor -> GlobalTensor
        z = flow.Tensor(y, placement=placement, sbp=sbp)
        test_case.assertTrue(z.is_global)
        # TODO: ndarray -> GlobalTensor
    @flow.unittest.skip_unless_1n1d()
    def test_construct_local_from_global_tensor(test_case):
        """Passing a device to flow.Tensor(global_tensor) yields a local tensor."""
        placement = flow.placement("cuda", [0])
        sbp = flow.sbp.broadcast
        shape = (2, 3)
        x = flow.Tensor(*shape, placement=placement, sbp=sbp)
        test_case.assertTrue(x.is_global)
        # GlobalTensor -> LocalTensor
        y = flow.Tensor(x, device="cpu")
        test_case.assertTrue(y.is_local)
        y = flow.Tensor(x, device="cuda")
        test_case.assertTrue(y.is_local)
    @flow.unittest.skip_unless_1n1d()
    def test_global_set_data(test_case):
        """Assigning .data keeps the tensor object but adopts the source's
        shape, placement, and sbp."""
        x_placement = flow.placement("cpu", [0])
        x_sbp = flow.sbp.broadcast
        x = flow.ones(2, 3, placement=x_placement, sbp=x_sbp)
        y_placement = flow.placement("cuda", [0])
        y_sbp = flow.sbp.split(0)
        y = flow.ones(4, 5, placement=y_placement, sbp=y_sbp)
        old_id = id(x)
        x.data = y
        test_case.assertEqual(old_id, id(x))
        test_case.assertTrue(x.shape == (4, 5))
        test_case.assertTrue(x.placement == y_placement)
        test_case.assertTrue(x.sbp[0] == y_sbp)
    @flow.unittest.skip_unless_1n1d()
    def test_global_tensor_autograd_related_methods(test_case):
        """requires_grad/is_leaf/retain_grad/backward behave consistently
        for global tensors created from local ones."""
        placement = flow.placement("cuda", [0])
        sbp = flow.sbp.split(0)
        shape = (2, 3, 4, 5)
        l_x = flow.Tensor(*shape)
        test_case.assertFalse(l_x.requires_grad)
        test_case.assertTrue(l_x.is_leaf)
        l_y = flow.Tensor(*shape)
        l_y.requires_grad = True
        test_case.assertTrue(l_y.requires_grad)
        test_case.assertTrue(l_y.is_leaf)
        x = l_x.to_global(placement=placement, sbp=sbp)
        test_case.assertTrue(x.is_leaf)
        y = l_y.to_global(placement=placement, sbp=sbp)
        test_case.assertFalse(y.is_leaf)
        z = x + y
        test_case.assertTrue(z.requires_grad)
        test_case.assertFalse(z.is_leaf)
        with flow.no_grad():
            m = x + y
        test_case.assertTrue(m.is_leaf)
        test_case.assertFalse(m.requires_grad)
        l_v = flow.Tensor(*shape)
        l_v.requires_grad = True
        v = l_v.to_global(placement=placement, sbp=sbp)
        z.retain_grad()
        w = v + z
        l_grad = flow.ones(*shape)
        grad = l_grad.to_global(placement=placement, sbp=sbp)
        w.backward(gradient=grad)
        test_case.assertTrue(
            np.allclose(l_v.grad.numpy(), np.ones(shape), atol=1e-4, rtol=1e-4)
        )
        test_case.assertTrue(
            np.allclose(l_y.grad.numpy(), np.ones(shape), atol=1e-4, rtol=1e-4)
        )
        test_case.assertTrue(
            np.allclose(
                z.grad.to_global(sbp=flow.sbp.broadcast).to_local().numpy(),
                np.ones(shape),
                atol=1e-4,
                rtol=1e-4,
            )
        )
        test_case.assertIsNone(l_x.grad)
    @flow.unittest.skip_unless_1n1d()
    def test_global_tensor_unsupported_property(test_case):
        """Local-only properties raise RuntimeError on global tensors."""
        shape = (2, 3)
        placement = flow.placement("cuda", [0])
        sbp = flow.sbp.split(0)
        a = flow.Tensor(*shape)
        b = a.to_global(placement=placement, sbp=sbp)
        test_case.assertTrue(b.is_global)
        with test_case.assertRaises(RuntimeError):
            b.device()
        with test_case.assertRaises(RuntimeError):
            b._tensor_buffer_shapes_and_dtypes
    @flow.unittest.skip_unless_1n4d()
    def test_global_tensor_2d_sbp_init(test_case):
        """Parameters with a 2-D placement and mixed sbp can be initialized
        in place with flow.nn.init."""
        V = 10
        H = 4
        S = 6
        P = flow.placement("cuda", [[0, 1], [2, 3]])
        wte = flow.nn.Parameter(
            flow.empty(
                (V, H),
                dtype=flow.float32,
                placement=P,
                sbp=[flow.sbp.broadcast, flow.sbp.split(0)],
            )
        )
        wpe = flow.nn.Parameter(
            flow.empty(
                (S, H),
                dtype=flow.float32,
                placement=P,
                sbp=[flow.sbp.broadcast, flow.sbp.broadcast],
            )
        )
        flow.nn.init.normal_(wte, std=0.02)
        flow.nn.init.normal_(wpe, std=0.02)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.Tensor",
"oneflow.nn.init.normal_",
"oneflow.unittest.skip_unless_1n4d",
"oneflow.sbp.split",
"oneflow.ones",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.empty",
"oneflow.placement",
"oneflow.no_grad"
] | [((890, 922), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (920, 922), True, 'import oneflow as flow\n'), ((1623, 1655), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1653, 1655), True, 'import oneflow as flow\n'), ((2133, 2165), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2163, 2165), True, 'import oneflow as flow\n'), ((2745, 2777), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2775, 2777), True, 'import oneflow as flow\n'), ((4503, 4535), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4533, 4535), True, 'import oneflow as flow\n'), ((5008, 5040), 'oneflow.unittest.skip_unless_1n4d', 'flow.unittest.skip_unless_1n4d', ([], {}), '()\n', (5038, 5040), True, 'import oneflow as flow\n'), ((5776, 5791), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5789, 5791), False, 'import unittest\n'), ((991, 1018), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), "('cuda', [0])\n", (1005, 1018), True, 'import oneflow as flow\n'), ((1120, 1169), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {'placement': 'placement', 'sbp': 'sbp'}), '(*shape, placement=placement, sbp=sbp)\n', (1131, 1169), True, 'import oneflow as flow\n'), ((1263, 1296), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {'device': '"""cpu"""'}), "(*shape, device='cpu')\n", (1274, 1296), True, 'import oneflow as flow\n'), ((1350, 1394), 'oneflow.Tensor', 'flow.Tensor', (['x'], {'placement': 'placement', 'sbp': 'sbp'}), '(x, placement=placement, sbp=sbp)\n', (1361, 1394), True, 'import oneflow as flow\n'), ((1489, 1533), 'oneflow.Tensor', 'flow.Tensor', (['y'], {'placement': 'placement', 'sbp': 'sbp'}), '(y, placement=placement, sbp=sbp)\n', (1500, 1533), True, 'import oneflow as flow\n'), ((1736, 1763), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), 
"('cuda', [0])\n", (1750, 1763), True, 'import oneflow as flow\n'), ((1832, 1881), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {'placement': 'placement', 'sbp': 'sbp'}), '(*shape, placement=placement, sbp=sbp)\n', (1843, 1881), True, 'import oneflow as flow\n'), ((1974, 2002), 'oneflow.Tensor', 'flow.Tensor', (['x'], {'device': '"""cpu"""'}), "(x, device='cpu')\n", (1985, 2002), True, 'import oneflow as flow\n'), ((2056, 2085), 'oneflow.Tensor', 'flow.Tensor', (['x'], {'device': '"""cuda"""'}), "(x, device='cuda')\n", (2067, 2085), True, 'import oneflow as flow\n'), ((2229, 2255), 'oneflow.placement', 'flow.placement', (['"""cpu"""', '[0]'], {}), "('cpu', [0])\n", (2243, 2255), True, 'import oneflow as flow\n'), ((2303, 2352), 'oneflow.ones', 'flow.ones', (['(2)', '(3)'], {'placement': 'x_placement', 'sbp': 'x_sbp'}), '(2, 3, placement=x_placement, sbp=x_sbp)\n', (2312, 2352), True, 'import oneflow as flow\n'), ((2375, 2402), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), "('cuda', [0])\n", (2389, 2402), True, 'import oneflow as flow\n'), ((2419, 2436), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2433, 2436), True, 'import oneflow as flow\n'), ((2449, 2498), 'oneflow.ones', 'flow.ones', (['(4)', '(5)'], {'placement': 'y_placement', 'sbp': 'y_sbp'}), '(4, 5, placement=y_placement, sbp=y_sbp)\n', (2458, 2498), True, 'import oneflow as flow\n'), ((2862, 2889), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), "('cuda', [0])\n", (2876, 2889), True, 'import oneflow as flow\n'), ((2904, 2921), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (2918, 2921), True, 'import oneflow as flow\n'), ((2965, 2984), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {}), '(*shape)\n', (2976, 2984), True, 'import oneflow as flow\n'), ((3091, 3110), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {}), '(*shape)\n', (3102, 3110), True, 'import oneflow as flow\n'), ((3689, 3708), 'oneflow.Tensor', 'flow.Tensor', 
(['*shape'], {}), '(*shape)\n', (3700, 3708), True, 'import oneflow as flow\n'), ((3859, 3876), 'oneflow.ones', 'flow.ones', (['*shape'], {}), '(*shape)\n', (3868, 3876), True, 'import oneflow as flow\n'), ((4640, 4667), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[0]'], {}), "('cuda', [0])\n", (4654, 4667), True, 'import oneflow as flow\n'), ((4682, 4699), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (4696, 4699), True, 'import oneflow as flow\n'), ((4712, 4731), 'oneflow.Tensor', 'flow.Tensor', (['*shape'], {}), '(*shape)\n', (4723, 4731), True, 'import oneflow as flow\n'), ((5148, 5188), 'oneflow.placement', 'flow.placement', (['"""cuda"""', '[[0, 1], [2, 3]]'], {}), "('cuda', [[0, 1], [2, 3]])\n", (5162, 5188), True, 'import oneflow as flow\n'), ((5663, 5698), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['wte'], {'std': '(0.02)'}), '(wte, std=0.02)\n', (5683, 5698), True, 'import oneflow as flow\n'), ((5707, 5742), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['wpe'], {'std': '(0.02)'}), '(wpe, std=0.02)\n', (5727, 5742), True, 'import oneflow as flow\n'), ((3548, 3562), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3560, 3562), True, 'import oneflow as flow\n'), ((5467, 5568), 'oneflow.empty', 'flow.empty', (['(S, H)'], {'dtype': 'flow.float32', 'placement': 'P', 'sbp': '[flow.sbp.broadcast, flow.sbp.broadcast]'}), '((S, H), dtype=flow.float32, placement=P, sbp=[flow.sbp.broadcast,\n flow.sbp.broadcast])\n', (5477, 5568), True, 'import oneflow as flow\n'), ((4046, 4060), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4053, 4060), True, 'import numpy as np\n'), ((4166, 4180), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4173, 4180), True, 'import numpy as np\n'), ((4362, 4376), 'numpy.ones', 'np.ones', (['shape'], {}), '(shape)\n', (4369, 4376), True, 'import numpy as np\n'), ((5377, 5394), 'oneflow.sbp.split', 'flow.sbp.split', (['(0)'], {}), '(0)\n', (5391, 5394), True, 'import oneflow 
as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
@unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
@flow.unittest.skip_unless_1n2d()
class TestManualSeedApi(flow.unittest.TestCase):
    """Tests for the seeding and RNG-state APIs across two CUDA devices."""

    def test_cuda_manual_seed_all(test_case):
        """Seeding all CUDA devices makes randn identical across devices."""
        flow.cuda.manual_seed_all(20)
        x = flow.randn(2, 4, device="cuda:0")
        y = flow.randn(2, 4, device="cuda:1")
        test_case.assertTrue(np.allclose(x.numpy(), y.numpy()))
    def test_cuda_manual_seed(test_case):
        """Seeding the current device yields the same sample on every rank."""
        flow.cuda.manual_seed(30)
        device = flow.device("cuda", flow.cuda.current_device())
        x = flow.randn(2, 4, device=device)
        tensor_list = [flow.zeros((2, 4), dtype=flow.int32) for _ in range(2)]
        # Gather both ranks' samples so each rank can compare them.
        flow.comm.all_gather(tensor_list, x)
        test_case.assertTrue(
            np.allclose(tensor_list[0].numpy(), tensor_list[1].numpy())
        )
    def test_manual_seed(test_case):
        """The global manual_seed also synchronizes CUDA generators."""
        flow.manual_seed(40)
        x = flow.randn(2, 4, device="cuda:0")
        y = flow.randn(2, 4, device="cuda:1")
        test_case.assertTrue(np.allclose(x.numpy(), y.numpy()))
    def test_set_get_rng_state(test_case):
        """get_rng_state returns exactly what set_rng_state was given."""
        x = flow.ByteTensor(5000)
        flow.set_rng_state(x)
        y = flow.get_rng_state()
        test_case.assertTrue(np.allclose(x.numpy(), y.numpy()))
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.cuda.current_device",
"oneflow.cuda.manual_seed_all",
"oneflow.set_rng_state",
"oneflow.ByteTensor",
"oneflow.unittest.skip_unless_1n2d",
"oneflow.cuda.manual_seed",
"oneflow.zeros",
"oneflow.randn",
"oneflow.manual_seed",
"oneflow.comm.all_gather",
"oneflow.get_rng_state"
] | [((764, 796), 'oneflow.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (794, 796), True, 'import oneflow as flow\n'), ((704, 738), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (713, 738), False, 'import os\n'), ((1969, 1984), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1982, 1984), False, 'import unittest\n'), ((900, 929), 'oneflow.cuda.manual_seed_all', 'flow.cuda.manual_seed_all', (['(20)'], {}), '(20)\n', (925, 929), True, 'import oneflow as flow\n'), ((942, 975), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {'device': '"""cuda:0"""'}), "(2, 4, device='cuda:0')\n", (952, 975), True, 'import oneflow as flow\n'), ((988, 1021), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {'device': '"""cuda:1"""'}), "(2, 4, device='cuda:1')\n", (998, 1021), True, 'import oneflow as flow\n'), ((1137, 1162), 'oneflow.cuda.manual_seed', 'flow.cuda.manual_seed', (['(30)'], {}), '(30)\n', (1158, 1162), True, 'import oneflow as flow\n'), ((1240, 1271), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {'device': 'device'}), '(2, 4, device=device)\n', (1250, 1271), True, 'import oneflow as flow\n'), ((1359, 1395), 'oneflow.comm.all_gather', 'flow.comm.all_gather', (['tensor_list', 'x'], {}), '(tensor_list, x)\n', (1379, 1395), True, 'import oneflow as flow\n'), ((1554, 1574), 'oneflow.manual_seed', 'flow.manual_seed', (['(40)'], {}), '(40)\n', (1570, 1574), True, 'import oneflow as flow\n'), ((1587, 1620), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {'device': '"""cuda:0"""'}), "(2, 4, device='cuda:0')\n", (1597, 1620), True, 'import oneflow as flow\n'), ((1633, 1666), 'oneflow.randn', 'flow.randn', (['(2)', '(4)'], {'device': '"""cuda:1"""'}), "(2, 4, device='cuda:1')\n", (1643, 1666), True, 'import oneflow as flow\n'), ((1787, 1808), 'oneflow.ByteTensor', 'flow.ByteTensor', (['(5000)'], {}), '(5000)\n', (1802, 1808), True, 'import oneflow as flow\n'), ((1817, 1838), 
'oneflow.set_rng_state', 'flow.set_rng_state', (['x'], {}), '(x)\n', (1835, 1838), True, 'import oneflow as flow\n'), ((1851, 1871), 'oneflow.get_rng_state', 'flow.get_rng_state', ([], {}), '()\n', (1869, 1871), True, 'import oneflow as flow\n'), ((1200, 1226), 'oneflow.cuda.current_device', 'flow.cuda.current_device', ([], {}), '()\n', (1224, 1226), True, 'import oneflow as flow\n'), ((1295, 1331), 'oneflow.zeros', 'flow.zeros', (['(2, 4)'], {'dtype': 'flow.int32'}), '((2, 4), dtype=flow.int32)\n', (1305, 1331), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.test_utils.automated_test_util import *
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestMaskedFill(flow.unittest.TestCase):
    """Autotest-driven comparisons of Tensor.masked_fill against PyTorch,
    covering random shapes, 0-dim tensors, broadcasting, and int/bool dtypes."""

    @autotest(check_graph=True)
    def test_flow_masked_fill_with_random_data(test_case):
        """masked_fill with a float value on matching-shape input and mask."""
        k1 = random(2, 6)
        k2 = random(2, 6)
        device = random_device()
        input = random_tensor(ndim=2, dim0=k1, dim1=k2).to(device)
        mask = random_tensor(ndim=2, dim0=k1, dim1=k2).to(device)
        value = random().to(float)
        return input.masked_fill(mask > 0, value)
    @autotest(check_graph=True)
    def test_flow_masked_fill_with_0dim_data(test_case):
        """masked_fill on scalar (0-dim) tensors."""
        k1 = random(2, 6)
        k2 = random(2, 6)
        device = random_device()
        input = random_tensor(ndim=0).to(device)
        mask = random_tensor(ndim=0).to(device)
        value = random().to(float)
        return input.masked_fill(mask > 0, value)
    @autotest(check_graph=True)
    def test_flow_masked_fill_broadcast_with_random_data(test_case):
        """masked_fill where input and mask broadcast against each other."""
        k1 = random(2, 6)
        k2 = random(2, 6)
        device = random_device()
        input = random_tensor(ndim=2, dim0=1, dim1=k2).to(device)
        mask = random_tensor(ndim=2, dim0=k1, dim1=1).to(device)
        value = random().to(float)
        return input.masked_fill(mask > 0, value)
    @autotest(check_graph=True)
    def test_flow_masked_fill_int_with_random_data(test_case):
        """masked_fill with an integer fill value."""
        k1 = random(2, 6)
        k2 = random(2, 6)
        device = random_device()
        input = random_tensor(ndim=2, dim0=k1, dim1=k2).to(device)
        mask = random_tensor(ndim=2, dim0=k1, dim1=k2).to(device)
        value = random().to(int)
        return input.masked_fill(mask > 0, value)
    @autotest(auto_backward=False, check_graph=False)
    def test_flow_masked_fill_bool_with_random_data(test_case):
        """masked_fill on bool tensors; no backward since bool is not differentiable."""
        k1 = random(2, 6)
        k2 = random(2, 6)
        device = random_device()
        input = random_tensor(ndim=2, dim0=k1, dim1=k2).to(
            device=device, dtype=torch.bool
        )
        mask = random_tensor(ndim=2, dim0=k1, dim1=k2).to(device)
        value = random().to(bool)
        return input.masked_fill(mask > 0, value)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((732, 764), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (762, 764), True, 'import oneflow as flow\n'), ((2863, 2878), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2876, 2878), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from test_util import GenArgList
import oneflow.typing as oft
import test_global_storage
def compare_reduce_any_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Run flow.math.reduce_any on random int8 data and assert the result
    matches tf.math.reduce_any within the given tolerances.

    Args:
        device_type: "gpu" or "cpu" placement for the OneFlow job.
        input_shape: shape of the random input array.
        axis: reduction axis spec forwarded to both frameworks.
        keepdims: whether reduced axes are kept with size 1.
        rtol, atol: tolerances for np.allclose.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.int8)
    @flow.global_function(function_config=func_config)
    def ReduceAnyJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.int8)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_any(x, axis=axis, keepdims=keepdims)
    x = np.random.rand(*input_shape).astype(np.int8)
    # OneFlow
    of_out = ReduceAnyJob(x).get()
    # TensorFlow
    tf_out = tf.math.reduce_any(x, axis=axis, keepdims=keepdims)
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
def test_reduce_any_func(test_case):
    """Sweep reduce_any over both devices, a 3-D shape, several axis
    choices, and both keepdims settings."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_any_with_tensorflow(*case)
def test_reduce_any_with_one_value_func(test_case):
    """Check reduce_any on a single-element input across devices and axes."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_any_with_tensorflow(*case)
def test_reduce_any_col_reduce(test_case):
    """Check reduce_any reducing along axis 0 of a tall matrix."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_any_with_tensorflow(*case)
def test_reduce_any_row_reduce(test_case):
    """Check reduce_any reducing along axis 1 of a wide matrix."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_any_with_tensorflow(*case)
def test_reduce_any_scalar(test_case):
    """Check reduce_any over all axes (scalar output)."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_any_with_tensorflow(*case)
def test_reduce_any_batch_axis_reduced(test_case):
    """Full reduce_any collapses split/batch axes; verify both become None."""
    flow.config.gpu_device_num(2)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    @flow.global_function(function_config=func_config)
    def Foo(x: oft.Numpy.Placeholder((10,), dtype=flow.int8)):
        y = flow.math.reduce_any(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)
    Foo(np.ndarray((10,), dtype=np.int8))
def compare_reduce_prod_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Run flow.math.reduce_prod on random float32 data and assert the
    result matches tf.math.reduce_prod within the given tolerances.

    Args:
        device_type: "gpu" or "cpu" placement for the OneFlow job.
        input_shape: shape of the random input array.
        axis: reduction axis spec forwarded to both frameworks.
        keepdims: whether reduced axes are kept with size 1.
        rtol, atol: tolerances for np.allclose.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    @flow.global_function(function_config=func_config)
    def ReduceProdJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float32)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_prod(x, axis=axis, keepdims=keepdims)
    x = np.random.rand(*input_shape).astype(np.float32)
    # OneFlow
    of_out = ReduceProdJob(x).get()
    # TensorFlow
    tf_out = tf.math.reduce_prod(x, axis=axis, keepdims=keepdims)
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
def test_reduce_prod_func(test_case):
    """Sweep reduce_prod over both devices, a 3-D shape, several axis
    choices, and both keepdims settings."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_prod_with_tensorflow(*case)
def test_reduce_prod_with_one_value_func(test_case):
    """Check reduce_prod on a single-element input across devices and axes."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_prod_with_tensorflow(*case)
def test_reduce_prod_col_reduce(test_case):
    """Check reduce_prod reducing along axis 0 of a tall matrix."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_prod_with_tensorflow(*case)
def test_reduce_prod_row_reduce(test_case):
    """Check reduce_prod reducing along axis 1 of a wide matrix."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_prod_with_tensorflow(*case)
def test_reduce_prod_scalar(test_case):
    """Check reduce_prod over all axes (scalar output)."""
    params = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for case in GenArgList(params):
        compare_reduce_prod_with_tensorflow(*case)
def test_reduce_prod_batch_axis_reduced(test_case):
    """Full reduce_prod collapses split/batch axes; verify both become None."""
    flow.config.gpu_device_num(2)
    func_config = flow.FunctionConfig()
    func_config.default_logical_view(flow.scope.consistent_view())
    @flow.global_function(function_config=func_config)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_prod(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)
    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_min_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Compare OneFlow reduce_min (forward and backward) against TensorFlow.

    Builds a OneFlow "train" job whose loss is reduce_min of the input plus a
    zero-initialized trainable variable (the variable exists only so that the
    job has something to differentiate).  `flow.watch`/`flow.watch_diff` stash
    the forward value and gradients into `test_global_storage`; those are then
    compared against tf.math.reduce_min under tf.GradientTape on the same
    random float32 data.

    Args:
        device_type: "gpu" or "cpu".
        input_shape: shape of the random input tensor.
        axis, keepdims: forwarded to both reduce_min implementations.
        rtol, atol: tolerances for the forward-output comparison.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float32)
    @flow.global_function(type="train", function_config=func_config)
    def ReduceMinJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            # Add a zero variable so `x` becomes part of the trainable graph;
            # values are unchanged (zeros_initializer).
            x += flow.get_variable(
                name="v1",
                shape=input_shape,
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            loss = flow.math.reduce_min(x, axis=axis, keepdims=keepdims)
            loss = flow.identity(loss)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)
            # Capture forward values and gradients for the comparison below.
            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
            return loss
    x = np.random.rand(*input_shape).astype(np.float32)
    # OneFlow
    of_out = ReduceMinJob(x).get()
    # TensorFlow
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(x)
        tf_out = tf.math.reduce_min(x, axis=axis, keepdims=keepdims)
    loss_diff = test_global_storage.Get("loss_diff")
    # Feed OneFlow's upstream loss gradient into TF so both backward passes
    # start from the same incoming gradient.
    tf_x_diff = tape.gradient(tf_out, x, loss_diff)
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
    assert np.allclose(
        test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5
    )
def test_reduce_min_func(test_case):
    """Sweep reduce_min over a 3-D input, several axis sets, both keepdims modes."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_min_with_tensorflow(*params)


def test_reduce_min_with_one_value_func(test_case):
    """reduce_min on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_min_with_tensorflow(*params)


def test_reduce_min_col_reduce(test_case):
    """reduce_min collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_min_with_tensorflow(*params)


def test_reduce_min_row_reduce(test_case):
    """reduce_min collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_min_with_tensorflow(*params)


def test_reduce_min_scalar(test_case):
    """reduce_min over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_min_with_tensorflow(*params)


def test_reduce_min_batch_axis_reduced(test_case):
    """A full reduce_min must clear split_axis and batch_axis under consistent view."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_min(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_all_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Compare OneFlow reduce_all against tf.math.reduce_all on the same int8 input.

    Bug fix: the original drew samples with np.random.rand (floats in [0, 1))
    and truncated them to int8, which always produced an all-zero tensor, so
    reduce_all was only ever exercised on all-False input.  Draw 0/1 integers
    instead so both True and False reductions actually occur.

    Args:
        device_type: "gpu" or "cpu".
        input_shape: shape of the random 0/1 int8 input tensor.
        axis, keepdims: forwarded to both reduce_all implementations.
        rtol, atol: tolerances for np.allclose on the (boolean) outputs.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.int8)

    @flow.global_function(function_config=func_config)
    def ReduceAllJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.int8)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_all(x, axis=axis, keepdims=keepdims)

    # 0/1 values so the reduction sees a genuine mix of truthy and falsy cells.
    x = np.random.randint(0, 2, size=input_shape).astype(np.int8)
    # OneFlow
    of_out = ReduceAllJob(x).get()
    # TensorFlow
    tf_out = tf.math.reduce_all(x, axis=axis, keepdims=keepdims)
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
def test_reduce_all_func(test_case):
    """Sweep reduce_all over a 3-D input, several axis sets, both keepdims modes."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_all_with_tensorflow(*params)


def test_reduce_all_with_one_value_func(test_case):
    """reduce_all on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_all_with_tensorflow(*params)


def test_reduce_all_col_reduce(test_case):
    """reduce_all collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_all_with_tensorflow(*params)


def test_reduce_all_row_reduce(test_case):
    """reduce_all collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_all_with_tensorflow(*params)


def test_reduce_all_scalar(test_case):
    """reduce_all over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_all_with_tensorflow(*params)


def test_reduce_all_batch_axis_reduced(test_case):
    """A full reduce_all must clear split_axis and batch_axis under consistent view."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,), dtype=flow.int8)):
        y = flow.math.reduce_all(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.int8))
def compare_reduce_sum_with_tensorflow(
    test_case, device_type, input_shape, axis, keepdims
):
    """Forward-only check: OneFlow reduce_sum matches tf.math.reduce_sum on int32 data."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    conf = flow.FunctionConfig()
    conf.default_data_type(flow.int32)

    @flow.global_function(function_config=conf)
    def ReduceSumJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.int32)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_sum(x, axis=axis, keepdims=keepdims)

    # Scale [0, 1) floats to [0, 100) before truncation so the int32 input
    # is not degenerate (all zeros).
    sample = (np.random.rand(*input_shape) * 100).astype(np.int32)
    of_result = ReduceSumJob(sample).get()
    tf_result = tf.math.reduce_sum(sample, axis=axis, keepdims=keepdims)
    test_case.assertTrue(np.allclose(of_result.numpy(), tf_result.numpy()))
def test_reduce_sum_func(test_case):
    """Sweep reduce_sum over a 3-D input, several axis sets, both keepdims modes."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_sum_with_tensorflow(test_case, *params)


def test_reduce_sum_with_one_value_func(test_case):
    """reduce_sum on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_sum_with_tensorflow(test_case, *params)


def test_reduce_sum_col_reduce(test_case):
    """reduce_sum collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_sum_with_tensorflow(test_case, *params)


def test_reduce_sum_row_reduce(test_case):
    """reduce_sum collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_sum_with_tensorflow(test_case, *params)


def test_reduce_sum_scalar(test_case):
    """reduce_sum over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_sum_with_tensorflow(test_case, *params)


def test_reduce_sum_batch_axis_reduced(test_case):
    """A full reduce_sum must clear split_axis and batch_axis under consistent view."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_sum(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_euclidean_norm_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Forward-only check: OneFlow reduce_euclidean_norm matches TF on random floats."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    conf = flow.FunctionConfig()
    conf.default_data_type(flow.float32)

    @flow.global_function(function_config=conf)
    def ReduceEuclideanNormJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_euclidean_norm(x, axis=axis, keepdims=keepdims)

    sample = np.random.rand(*input_shape).astype(np.float32)
    of_result = ReduceEuclideanNormJob(sample).get()
    tf_result = tf.math.reduce_euclidean_norm(sample, axis=axis, keepdims=keepdims)
    assert np.allclose(of_result.numpy(), tf_result.numpy(), rtol=rtol, atol=atol)
def test_reduce_euclidean_norm_func(test_case):
    """Sweep reduce_euclidean_norm over a 3-D input and several axis sets."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_euclidean_norm_with_tensorflow(*params)


def test_reduce_euclidean_norm_with_one_value_func(test_case):
    """reduce_euclidean_norm on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_euclidean_norm_with_tensorflow(*params)


def test_reduce_euclidean_norm_col_reduce(test_case):
    """reduce_euclidean_norm collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_euclidean_norm_with_tensorflow(*params)


def test_reduce_euclidean_norm_row_reduce(test_case):
    """reduce_euclidean_norm collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_euclidean_norm_with_tensorflow(*params)


def test_reduce_euclidean_norm_scalar(test_case):
    """reduce_euclidean_norm over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_euclidean_norm_with_tensorflow(*params)


def test_reduce_euclidean_norm_batch_axis_reduced(test_case):
    """A full reduce_euclidean_norm must clear split_axis and batch_axis."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_euclidean_norm(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_logsumexp_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Forward-only check: OneFlow reduce_logsumexp matches TF on random floats."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    conf = flow.FunctionConfig()
    conf.default_data_type(flow.float32)

    @flow.global_function(function_config=conf)
    def ReduceLogSumExpJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_logsumexp(x, axis=axis, keepdims=keepdims)

    sample = np.random.rand(*input_shape).astype(np.float32)
    of_result = ReduceLogSumExpJob(sample).get()
    tf_result = tf.math.reduce_logsumexp(sample, axis=axis, keepdims=keepdims)
    assert np.allclose(of_result.numpy(), tf_result.numpy(), rtol=rtol, atol=atol)
def test_reduce_logsumexp_func(test_case):
    """Sweep reduce_logsumexp over a 3-D input and several axis sets."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_logsumexp_with_tensorflow(*params)


def test_reduce_logsumexp_with_one_value_func(test_case):
    """reduce_logsumexp on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_logsumexp_with_tensorflow(*params)


def test_reduce_logsumexp_col_reduce(test_case):
    """reduce_logsumexp collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_logsumexp_with_tensorflow(*params)


def test_reduce_logsumexp_row_reduce(test_case):
    """reduce_logsumexp collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_logsumexp_with_tensorflow(*params)


def test_reduce_logsumexp_scalar(test_case):
    """reduce_logsumexp over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_logsumexp_with_tensorflow(*params)


def test_reduce_logsumexp_batch_axis_reduced(test_case):
    """A full reduce_logsumexp must clear split_axis and batch_axis."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_logsumexp(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_std_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Forward-only check: OneFlow reduce_std matches tf.math.reduce_std on random floats."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    conf = flow.FunctionConfig()
    conf.default_data_type(flow.float32)

    @flow.global_function(function_config=conf)
    def ReduceStdJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_std(x, axis=axis, keepdims=keepdims)

    sample = np.random.rand(*input_shape).astype(np.float32)
    of_result = ReduceStdJob(sample).get()
    tf_result = tf.math.reduce_std(sample, axis=axis, keepdims=keepdims)
    assert np.allclose(of_result.numpy(), tf_result.numpy(), rtol=rtol, atol=atol)
def test_reduce_std_func(test_case):
    """Sweep reduce_std over a 3-D input and several axis sets."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_std_with_tensorflow(*params)


def test_reduce_std_with_one_value_func(test_case):
    """reduce_std on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_std_with_tensorflow(*params)


def test_reduce_std_col_reduce(test_case):
    """reduce_std collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_std_with_tensorflow(*params)


def test_reduce_std_row_reduce(test_case):
    """reduce_std collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_std_with_tensorflow(*params)


def test_reduce_std_scalar(test_case):
    """reduce_std over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_std_with_tensorflow(*params)


def test_reduce_std_batch_axis_reduced(test_case):
    """A full reduce_std must clear split_axis and batch_axis under consistent view."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_std(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_variance_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Forward-only check: OneFlow reduce_variance matches TF on random floats."""
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    conf = flow.FunctionConfig()
    conf.default_data_type(flow.float32)

    @flow.global_function(function_config=conf)
    def ReduceVarianceJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            return flow.math.reduce_variance(x, axis=axis, keepdims=keepdims)

    sample = np.random.rand(*input_shape).astype(np.float32)
    of_result = ReduceVarianceJob(sample).get()
    tf_result = tf.math.reduce_variance(sample, axis=axis, keepdims=keepdims)
    assert np.allclose(of_result.numpy(), tf_result.numpy(), rtol=rtol, atol=atol)
def test_reduce_variance_func(test_case):
    """Sweep reduce_variance over a 3-D input and several axis sets."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_variance_with_tensorflow(*params)


def test_reduce_variance_with_one_value_func(test_case):
    """reduce_variance on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_variance_with_tensorflow(*params)


def test_reduce_variance_col_reduce(test_case):
    """reduce_variance collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_variance_with_tensorflow(*params)


def test_reduce_variance_row_reduce(test_case):
    """reduce_variance collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_variance_with_tensorflow(*params)


def test_reduce_variance_scalar(test_case):
    """reduce_variance over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_variance_with_tensorflow(*params)


def test_reduce_variance_batch_axis_reduced(test_case):
    """A full reduce_variance must clear split_axis and batch_axis."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_variance(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
def compare_reduce_max_with_tensorflow(
    device_type, input_shape, axis, keepdims, rtol=1e-5, atol=1e-5
):
    """Compare OneFlow reduce_max (forward and backward) against TensorFlow.

    Builds a OneFlow "train" job whose loss is reduce_max of the input plus a
    zero-initialized trainable variable (the variable exists only so that the
    job has something to differentiate).  `flow.watch`/`flow.watch_diff` stash
    the forward value and gradients into `test_global_storage`; those are then
    compared against tf.math.reduce_max under tf.GradientTape on the same
    random float32 data.

    Args:
        device_type: "gpu" or "cpu".
        input_shape: shape of the random input tensor.
        axis, keepdims: forwarded to both reduce_max implementations.
        rtol, atol: tolerances for the forward-output comparison.
    """
    assert device_type in ["gpu", "cpu"]
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    @flow.global_function(type="train", function_config=func_config)
    def ReduceMaxJob(x: oft.Numpy.Placeholder(input_shape, dtype=flow.float)):
        with flow.scope.placement(device_type, "0:0"):
            # Add a zero variable so `x` becomes part of the trainable graph;
            # values are unchanged (zeros_initializer).
            x += flow.get_variable(
                name="v1",
                shape=input_shape,
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            loss = flow.math.reduce_max(x, axis=axis, keepdims=keepdims)
            loss = flow.identity(loss)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0
            ).minimize(loss)
            # Capture forward values and gradients for the comparison below.
            flow.watch(x, test_global_storage.Setter("x"))
            flow.watch_diff(x, test_global_storage.Setter("x_diff"))
            flow.watch(loss, test_global_storage.Setter("loss"))
            flow.watch_diff(loss, test_global_storage.Setter("loss_diff"))
            return loss
    x = np.random.rand(*input_shape).astype(np.float32)
    # OneFlow
    of_out = ReduceMaxJob(x).get()
    # TensorFlow
    with tf.GradientTape(persistent=True) as tape:
        x = tf.Variable(x)
        tf_out = tf.math.reduce_max(x, axis=axis, keepdims=keepdims)
    loss_diff = test_global_storage.Get("loss_diff")
    # Feed OneFlow's upstream loss gradient into TF so both backward passes
    # start from the same incoming gradient.
    tf_x_diff = tape.gradient(tf_out, x, loss_diff)
    assert np.allclose(of_out.numpy(), tf_out.numpy(), rtol=rtol, atol=atol)
    assert np.allclose(
        test_global_storage.Get("x_diff"), tf_x_diff.numpy(), rtol=1e-5, atol=1e-5
    )
def test_reduce_max_func(test_case):
    """Sweep reduce_max over a 3-D input, several axis sets, both keepdims modes."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(64, 64, 64)]),
            ("axis", [None, [], [1], [0, 2]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_max_with_tensorflow(*params)


def test_reduce_max_with_one_value_func(test_case):
    """reduce_max on a single-element tensor."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1,)]),
            ("axis", [None, [], [0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_max_with_tensorflow(*params)


def test_reduce_max_col_reduce(test_case):
    """reduce_max collapsing the long leading (column) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_max_with_tensorflow(*params)


def test_reduce_max_row_reduce(test_case):
    """reduce_max collapsing the long trailing (row) dimension."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(25, 1024 * 1024)]),
            ("axis", [[1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_max_with_tensorflow(*params)


def test_reduce_max_scalar(test_case):
    """reduce_max over every axis, producing a scalar result."""
    cases = OrderedDict(
        [
            ("device_type", ["gpu", "cpu"]),
            ("input_shape", [(1024 * 64, 25)]),
            ("axis", [[0, 1]]),
            ("keepdims", [True, False]),
        ]
    )
    for params in GenArgList(cases):
        compare_reduce_max_with_tensorflow(*params)


def test_reduce_max_batch_axis_reduced(test_case):
    """A full reduce_max must clear split_axis and batch_axis under consistent view."""
    flow.config.gpu_device_num(2)
    conf = flow.FunctionConfig()
    conf.default_logical_view(flow.scope.consistent_view())

    @flow.global_function(function_config=conf)
    def Foo(x: oft.Numpy.Placeholder((10,))):
        y = flow.math.reduce_max(x)
        test_case.assertTrue(y.split_axis is None)
        test_case.assertTrue(y.batch_axis is None)

    Foo(np.ndarray((10,), dtype=np.float32))
| [
"oneflow.math.reduce_all",
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.clear_default_session",
"oneflow.math.reduce_sum",
"oneflow.config.gpu_device_num",
"oneflow.math.reduce_min",
"oneflow.math.reduce_std",
"oneflow.math.reduce_prod",
"oneflow.optimizer.Piecewis... | [((949, 977), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (975, 977), True, 'import oneflow as flow\n'), ((996, 1017), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1015, 1017), True, 'import oneflow as flow\n'), ((1069, 1118), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1089, 1118), True, 'import oneflow as flow\n'), ((1458, 1509), 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (1476, 1509), True, 'import tensorflow as tf\n'), ((1641, 1654), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1652, 1654), False, 'from collections import OrderedDict\n'), ((1848, 1868), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (1858, 1868), False, 'from test_util import GenArgList\n'), ((1988, 2001), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1999, 2001), False, 'from collections import OrderedDict\n'), ((2179, 2199), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2189, 2199), False, 'from test_util import GenArgList\n'), ((2310, 2323), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2321, 2323), False, 'from collections import OrderedDict\n'), ((2502, 2522), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2512, 2522), False, 'from test_util import GenArgList\n'), ((2633, 2646), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2644, 2646), False, 'from collections import OrderedDict\n'), ((2827, 2847), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2837, 2847), False, 'from test_util import GenArgList\n'), ((2954, 2967), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2965, 2967), False, 'from collections import OrderedDict\n'), ((3149, 
3169), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3159, 3169), False, 'from test_util import GenArgList\n'), ((3277, 3306), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (3303, 3306), True, 'import oneflow as flow\n'), ((3325, 3346), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3344, 3346), True, 'import oneflow as flow\n'), ((3420, 3469), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (3440, 3469), True, 'import oneflow as flow\n'), ((3872, 3900), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (3898, 3900), True, 'import oneflow as flow\n'), ((3919, 3940), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (3938, 3940), True, 'import oneflow as flow\n'), ((3995, 4044), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (4015, 4044), True, 'import oneflow as flow\n'), ((4393, 4445), 'tensorflow.math.reduce_prod', 'tf.math.reduce_prod', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (4412, 4445), True, 'import tensorflow as tf\n'), ((4578, 4591), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4589, 4591), False, 'from collections import OrderedDict\n'), ((4785, 4805), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4795, 4805), False, 'from test_util import GenArgList\n'), ((4927, 4940), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4938, 4940), False, 'from collections import OrderedDict\n'), ((5118, 5138), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5128, 5138), False, 'from test_util import GenArgList\n'), ((5251, 5264), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5262, 5264), False, 'from collections import 
OrderedDict\n'), ((5443, 5463), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5453, 5463), False, 'from test_util import GenArgList\n'), ((5576, 5589), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5587, 5589), False, 'from collections import OrderedDict\n'), ((5770, 5790), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5780, 5790), False, 'from test_util import GenArgList\n'), ((5899, 5912), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5910, 5912), False, 'from collections import OrderedDict\n'), ((6094, 6114), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6104, 6114), False, 'from test_util import GenArgList\n'), ((6224, 6253), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (6250, 6253), True, 'import oneflow as flow\n'), ((6272, 6293), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6291, 6293), True, 'import oneflow as flow\n'), ((6367, 6416), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (6387, 6416), True, 'import oneflow as flow\n'), ((6805, 6833), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (6831, 6833), True, 'import oneflow as flow\n'), ((6852, 6873), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (6871, 6873), True, 'import oneflow as flow\n'), ((6928, 6991), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (6948, 6991), True, 'import oneflow as flow\n'), ((8160, 8196), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (8183, 8196), False, 'import test_global_storage\n'), ((8493, 8506), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8504, 8506), False, 'from 
collections import OrderedDict\n'), ((8700, 8720), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (8710, 8720), False, 'from test_util import GenArgList\n'), ((8840, 8853), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (8851, 8853), False, 'from collections import OrderedDict\n'), ((9031, 9051), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9041, 9051), False, 'from test_util import GenArgList\n'), ((9162, 9175), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9173, 9175), False, 'from collections import OrderedDict\n'), ((9354, 9374), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9364, 9374), False, 'from test_util import GenArgList\n'), ((9485, 9498), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9496, 9498), False, 'from collections import OrderedDict\n'), ((9679, 9699), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (9689, 9699), False, 'from test_util import GenArgList\n'), ((9806, 9819), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (9817, 9819), False, 'from collections import OrderedDict\n'), ((10001, 10021), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (10011, 10021), False, 'from test_util import GenArgList\n'), ((10129, 10158), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (10155, 10158), True, 'import oneflow as flow\n'), ((10177, 10198), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10196, 10198), True, 'import oneflow as flow\n'), ((10272, 10321), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (10292, 10321), True, 'import oneflow as flow\n'), ((10709, 10737), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (10735, 10737), True, 'import oneflow as flow\n'), ((10756, 10777), 
'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (10775, 10777), True, 'import oneflow as flow\n'), ((10829, 10878), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (10849, 10878), True, 'import oneflow as flow\n'), ((11218, 11269), 'tensorflow.math.reduce_all', 'tf.math.reduce_all', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (11236, 11269), True, 'import tensorflow as tf\n'), ((11401, 11414), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11412, 11414), False, 'from collections import OrderedDict\n'), ((11608, 11628), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (11618, 11628), False, 'from test_util import GenArgList\n'), ((11748, 11761), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11759, 11761), False, 'from collections import OrderedDict\n'), ((11939, 11959), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (11949, 11959), False, 'from test_util import GenArgList\n'), ((12070, 12083), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12081, 12083), False, 'from collections import OrderedDict\n'), ((12262, 12282), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (12272, 12282), False, 'from test_util import GenArgList\n'), ((12393, 12406), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12404, 12406), False, 'from collections import OrderedDict\n'), ((12587, 12607), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (12597, 12607), False, 'from test_util import GenArgList\n'), ((12714, 12727), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (12725, 12727), False, 'from collections import OrderedDict\n'), ((12909, 12929), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (12919, 12929), False, 'from test_util import GenArgList\n'), 
((13037, 13066), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (13063, 13066), True, 'import oneflow as flow\n'), ((13085, 13106), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (13104, 13106), True, 'import oneflow as flow\n'), ((13180, 13229), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (13200, 13229), True, 'import oneflow as flow\n'), ((13620, 13648), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (13646, 13648), True, 'import oneflow as flow\n'), ((13667, 13688), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (13686, 13688), True, 'import oneflow as flow\n'), ((13741, 13790), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (13761, 13790), True, 'import oneflow as flow\n'), ((14140, 14191), 'tensorflow.math.reduce_sum', 'tf.math.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (14158, 14191), True, 'import tensorflow as tf\n'), ((14316, 14329), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14327, 14329), False, 'from collections import OrderedDict\n'), ((14523, 14543), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (14533, 14543), False, 'from test_util import GenArgList\n'), ((14674, 14687), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (14685, 14687), False, 'from collections import OrderedDict\n'), ((14865, 14885), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (14875, 14885), False, 'from test_util import GenArgList\n'), ((15007, 15020), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15018, 15020), False, 'from collections import OrderedDict\n'), ((15199, 15219), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', 
(15209, 15219), False, 'from test_util import GenArgList\n'), ((15341, 15354), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15352, 15354), False, 'from collections import OrderedDict\n'), ((15535, 15555), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (15545, 15555), False, 'from test_util import GenArgList\n'), ((15673, 15686), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (15684, 15686), False, 'from collections import OrderedDict\n'), ((15868, 15888), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (15878, 15888), False, 'from test_util import GenArgList\n'), ((16007, 16036), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (16033, 16036), True, 'import oneflow as flow\n'), ((16055, 16076), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (16074, 16076), True, 'import oneflow as flow\n'), ((16150, 16199), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (16170, 16199), True, 'import oneflow as flow\n'), ((16598, 16626), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (16624, 16626), True, 'import oneflow as flow\n'), ((16645, 16666), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (16664, 16666), True, 'import oneflow as flow\n'), ((16721, 16770), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (16741, 16770), True, 'import oneflow as flow\n'), ((17145, 17207), 'tensorflow.math.reduce_euclidean_norm', 'tf.math.reduce_euclidean_norm', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (17174, 17207), True, 'import tensorflow as tf\n'), ((17350, 17363), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17361, 17363), False, 'from collections import OrderedDict\n'), 
((17557, 17577), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (17567, 17577), False, 'from test_util import GenArgList\n'), ((17719, 17732), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (17730, 17732), False, 'from collections import OrderedDict\n'), ((17910, 17930), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (17920, 17930), False, 'from test_util import GenArgList\n'), ((18063, 18076), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18074, 18076), False, 'from collections import OrderedDict\n'), ((18255, 18275), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18265, 18275), False, 'from test_util import GenArgList\n'), ((18408, 18421), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18419, 18421), False, 'from collections import OrderedDict\n'), ((18602, 18622), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18612, 18622), False, 'from test_util import GenArgList\n'), ((18751, 18764), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (18762, 18764), False, 'from collections import OrderedDict\n'), ((18946, 18966), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (18956, 18966), False, 'from test_util import GenArgList\n'), ((19096, 19125), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (19122, 19125), True, 'import oneflow as flow\n'), ((19144, 19165), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (19163, 19165), True, 'import oneflow as flow\n'), ((19239, 19288), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (19259, 19288), True, 'import oneflow as flow\n'), ((19693, 19721), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (19719, 19721), True, 'import oneflow as flow\n'), ((19740, 19761), 
'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (19759, 19761), True, 'import oneflow as flow\n'), ((19816, 19865), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (19836, 19865), True, 'import oneflow as flow\n'), ((20227, 20284), 'tensorflow.math.reduce_logsumexp', 'tf.math.reduce_logsumexp', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (20251, 20284), True, 'import tensorflow as tf\n'), ((20422, 20435), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20433, 20435), False, 'from collections import OrderedDict\n'), ((20629, 20649), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20639, 20649), False, 'from test_util import GenArgList\n'), ((20781, 20794), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (20792, 20794), False, 'from collections import OrderedDict\n'), ((20972, 20992), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (20982, 20992), False, 'from test_util import GenArgList\n'), ((21115, 21128), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21126, 21128), False, 'from collections import OrderedDict\n'), ((21307, 21327), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21317, 21327), False, 'from test_util import GenArgList\n'), ((21450, 21463), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21461, 21463), False, 'from collections import OrderedDict\n'), ((21644, 21664), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21654, 21664), False, 'from test_util import GenArgList\n'), ((21783, 21796), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (21794, 21796), False, 'from collections import OrderedDict\n'), ((21978, 21998), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (21988, 21998), False, 'from test_util import 
GenArgList\n'), ((22118, 22147), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (22144, 22147), True, 'import oneflow as flow\n'), ((22166, 22187), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (22185, 22187), True, 'import oneflow as flow\n'), ((22261, 22310), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (22281, 22310), True, 'import oneflow as flow\n'), ((22704, 22732), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (22730, 22732), True, 'import oneflow as flow\n'), ((22751, 22772), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (22770, 22772), True, 'import oneflow as flow\n'), ((22827, 22876), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (22847, 22876), True, 'import oneflow as flow\n'), ((23220, 23271), 'tensorflow.math.reduce_std', 'tf.math.reduce_std', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (23238, 23271), True, 'import tensorflow as tf\n'), ((23403, 23416), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23414, 23416), False, 'from collections import OrderedDict\n'), ((23610, 23630), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23620, 23630), False, 'from test_util import GenArgList\n'), ((23750, 23763), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (23761, 23763), False, 'from collections import OrderedDict\n'), ((23941, 23961), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (23951, 23961), False, 'from test_util import GenArgList\n'), ((24072, 24085), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24083, 24085), False, 'from collections import OrderedDict\n'), ((24264, 24284), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), 
'(arg_dict)\n', (24274, 24284), False, 'from test_util import GenArgList\n'), ((24395, 24408), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24406, 24408), False, 'from collections import OrderedDict\n'), ((24589, 24609), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24599, 24609), False, 'from test_util import GenArgList\n'), ((24716, 24729), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (24727, 24729), False, 'from collections import OrderedDict\n'), ((24911, 24931), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (24921, 24931), False, 'from test_util import GenArgList\n'), ((25039, 25068), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (25065, 25068), True, 'import oneflow as flow\n'), ((25087, 25108), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (25106, 25108), True, 'import oneflow as flow\n'), ((25182, 25231), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (25202, 25231), True, 'import oneflow as flow\n'), ((25624, 25652), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (25650, 25652), True, 'import oneflow as flow\n'), ((25671, 25692), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (25690, 25692), True, 'import oneflow as flow\n'), ((25747, 25796), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (25767, 25796), True, 'import oneflow as flow\n'), ((26155, 26211), 'tensorflow.math.reduce_variance', 'tf.math.reduce_variance', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (26178, 26211), True, 'import tensorflow as tf\n'), ((26348, 26361), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26359, 26361), False, 'from collections import OrderedDict\n'), 
((26555, 26575), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (26565, 26575), False, 'from test_util import GenArgList\n'), ((26705, 26718), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (26716, 26718), False, 'from collections import OrderedDict\n'), ((26896, 26916), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (26906, 26916), False, 'from test_util import GenArgList\n'), ((27037, 27050), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27048, 27050), False, 'from collections import OrderedDict\n'), ((27229, 27249), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (27239, 27249), False, 'from test_util import GenArgList\n'), ((27370, 27383), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27381, 27383), False, 'from collections import OrderedDict\n'), ((27564, 27584), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (27574, 27584), False, 'from test_util import GenArgList\n'), ((27701, 27714), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (27712, 27714), False, 'from collections import OrderedDict\n'), ((27896, 27916), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (27906, 27916), False, 'from test_util import GenArgList\n'), ((28034, 28063), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (28060, 28063), True, 'import oneflow as flow\n'), ((28082, 28103), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (28101, 28103), True, 'import oneflow as flow\n'), ((28177, 28226), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (28197, 28226), True, 'import oneflow as flow\n'), ((28619, 28647), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (28645, 28647), True, 'import oneflow as flow\n'), ((28666, 28687), 
'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (28685, 28687), True, 'import oneflow as flow\n'), ((28740, 28803), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (28760, 28803), True, 'import oneflow as flow\n'), ((29972, 30008), 'test_global_storage.Get', 'test_global_storage.Get', (['"""loss_diff"""'], {}), "('loss_diff')\n", (29995, 30008), False, 'import test_global_storage\n'), ((30305, 30318), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30316, 30318), False, 'from collections import OrderedDict\n'), ((30512, 30532), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (30522, 30532), False, 'from test_util import GenArgList\n'), ((30652, 30665), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30663, 30665), False, 'from collections import OrderedDict\n'), ((30843, 30863), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (30853, 30863), False, 'from test_util import GenArgList\n'), ((30974, 30987), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (30985, 30987), False, 'from collections import OrderedDict\n'), ((31166, 31186), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (31176, 31186), False, 'from test_util import GenArgList\n'), ((31297, 31310), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31308, 31310), False, 'from collections import OrderedDict\n'), ((31491, 31511), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (31501, 31511), False, 'from test_util import GenArgList\n'), ((31618, 31631), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (31629, 31631), False, 'from collections import OrderedDict\n'), ((31813, 31833), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (31823, 31833), False, 'from test_util import GenArgList\n'), 
((31941, 31970), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(2)'], {}), '(2)\n', (31967, 31970), True, 'import oneflow as flow\n'), ((31989, 32010), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (32008, 32010), True, 'import oneflow as flow\n'), ((32084, 32133), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (32104, 32133), True, 'import oneflow as flow\n'), ((3384, 3412), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (3410, 3412), True, 'import oneflow as flow\n'), ((3545, 3568), 'oneflow.math.reduce_any', 'flow.math.reduce_any', (['x'], {}), '(x)\n', (3565, 3568), True, 'import oneflow as flow\n'), ((3680, 3712), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.int8'}), '((10,), dtype=np.int8)\n', (3690, 3712), True, 'import numpy as np\n'), ((6331, 6359), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (6357, 6359), True, 'import oneflow as flow\n'), ((6475, 6499), 'oneflow.math.reduce_prod', 'flow.math.reduce_prod', (['x'], {}), '(x)\n', (6496, 6499), True, 'import oneflow as flow\n'), ((6611, 6646), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (6621, 6646), True, 'import numpy as np\n'), ((8006, 8038), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (8021, 8038), True, 'import tensorflow as tf\n'), ((8060, 8074), 'tensorflow.Variable', 'tf.Variable', (['x'], {}), '(x)\n', (8071, 8074), True, 'import tensorflow as tf\n'), ((8092, 8143), 'tensorflow.math.reduce_min', 'tf.math.reduce_min', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (8110, 8143), True, 'import tensorflow as tf\n'), ((8358, 8391), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (8381, 8391), False, 
'import test_global_storage\n'), ((10236, 10264), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (10262, 10264), True, 'import oneflow as flow\n'), ((10380, 10403), 'oneflow.math.reduce_min', 'flow.math.reduce_min', (['x'], {}), '(x)\n', (10400, 10403), True, 'import oneflow as flow\n'), ((10515, 10550), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (10525, 10550), True, 'import numpy as np\n'), ((13144, 13172), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (13170, 13172), True, 'import oneflow as flow\n'), ((13305, 13328), 'oneflow.math.reduce_all', 'flow.math.reduce_all', (['x'], {}), '(x)\n', (13325, 13328), True, 'import oneflow as flow\n'), ((13440, 13472), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.int8'}), '((10,), dtype=np.int8)\n', (13450, 13472), True, 'import numpy as np\n'), ((16114, 16142), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (16140, 16142), True, 'import oneflow as flow\n'), ((16258, 16281), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['x'], {}), '(x)\n', (16278, 16281), True, 'import oneflow as flow\n'), ((16393, 16428), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (16403, 16428), True, 'import numpy as np\n'), ((19203, 19231), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (19229, 19231), True, 'import oneflow as flow\n'), ((19347, 19381), 'oneflow.math.reduce_euclidean_norm', 'flow.math.reduce_euclidean_norm', (['x'], {}), '(x)\n', (19378, 19381), True, 'import oneflow as flow\n'), ((19493, 19528), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (19503, 19528), True, 'import numpy as np\n'), ((22225, 22253), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (22251, 22253), True, 'import 
oneflow as flow\n'), ((22369, 22398), 'oneflow.math.reduce_logsumexp', 'flow.math.reduce_logsumexp', (['x'], {}), '(x)\n', (22395, 22398), True, 'import oneflow as flow\n'), ((22510, 22545), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (22520, 22545), True, 'import numpy as np\n'), ((25146, 25174), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (25172, 25174), True, 'import oneflow as flow\n'), ((25290, 25313), 'oneflow.math.reduce_std', 'flow.math.reduce_std', (['x'], {}), '(x)\n', (25310, 25313), True, 'import oneflow as flow\n'), ((25425, 25460), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (25435, 25460), True, 'import numpy as np\n'), ((28141, 28169), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (28167, 28169), True, 'import oneflow as flow\n'), ((28285, 28313), 'oneflow.math.reduce_variance', 'flow.math.reduce_variance', (['x'], {}), '(x)\n', (28310, 28313), True, 'import oneflow as flow\n'), ((28425, 28460), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (28435, 28460), True, 'import numpy as np\n'), ((29818, 29850), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {'persistent': '(True)'}), '(persistent=True)\n', (29833, 29850), True, 'import tensorflow as tf\n'), ((29872, 29886), 'tensorflow.Variable', 'tf.Variable', (['x'], {}), '(x)\n', (29883, 29886), True, 'import tensorflow as tf\n'), ((29904, 29955), 'tensorflow.math.reduce_max', 'tf.math.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (29922, 29955), True, 'import tensorflow as tf\n'), ((30170, 30203), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (30193, 30203), False, 'import test_global_storage\n'), ((32048, 32076), 'oneflow.scope.consistent_view', 
'flow.scope.consistent_view', ([], {}), '()\n', (32074, 32076), True, 'import oneflow as flow\n'), ((32192, 32215), 'oneflow.math.reduce_max', 'flow.math.reduce_max', (['x'], {}), '(x)\n', (32212, 32215), True, 'import oneflow as flow\n'), ((32327, 32362), 'numpy.ndarray', 'np.ndarray', (['(10,)'], {'dtype': 'np.float32'}), '((10,), dtype=np.float32)\n', (32337, 32362), True, 'import numpy as np\n'), ((1143, 1194), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.int8'}), '(input_shape, dtype=flow.int8)\n', (1164, 1194), True, 'import oneflow.typing as oft\n'), ((1210, 1250), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1230, 1250), True, 'import oneflow as flow\n'), ((1271, 1324), 'oneflow.math.reduce_any', 'flow.math.reduce_any', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (1291, 1324), True, 'import oneflow as flow\n'), ((1334, 1362), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (1348, 1362), True, 'import numpy as np\n'), ((3485, 3530), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {'dtype': 'flow.int8'}), '((10,), dtype=flow.int8)\n', (3506, 3530), True, 'import oneflow.typing as oft\n'), ((4070, 4124), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float32'}), '(input_shape, dtype=flow.float32)\n', (4091, 4124), True, 'import oneflow.typing as oft\n'), ((4140, 4180), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (4160, 4180), True, 'import oneflow as flow\n'), ((4201, 4255), 'oneflow.math.reduce_prod', 'flow.math.reduce_prod', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (4222, 4255), True, 'import oneflow as flow\n'), ((4265, 4293), 'numpy.random.rand', 'np.random.rand', 
(['*input_shape'], {}), '(*input_shape)\n', (4279, 4293), True, 'import numpy as np\n'), ((6432, 6460), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (6453, 6460), True, 'import oneflow.typing as oft\n'), ((7016, 7068), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (7037, 7068), True, 'import oneflow.typing as oft\n'), ((7084, 7124), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (7104, 7124), True, 'import oneflow as flow\n'), ((7345, 7398), 'oneflow.math.reduce_min', 'flow.math.reduce_min', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (7365, 7398), True, 'import oneflow as flow\n'), ((7418, 7437), 'oneflow.identity', 'flow.identity', (['loss'], {}), '(loss)\n', (7431, 7437), True, 'import oneflow as flow\n'), ((7883, 7911), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (7897, 7911), True, 'import numpy as np\n'), ((10337, 10365), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (10358, 10365), True, 'import oneflow.typing as oft\n'), ((10903, 10954), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.int8'}), '(input_shape, dtype=flow.int8)\n', (10924, 10954), True, 'import oneflow.typing as oft\n'), ((10970, 11010), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (10990, 11010), True, 'import oneflow as flow\n'), ((11031, 11084), 'oneflow.math.reduce_all', 'flow.math.reduce_all', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (11051, 11084), True, 'import oneflow as flow\n'), ((11094, 11122), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (11108, 
11122), True, 'import numpy as np\n'), ((13245, 13290), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {'dtype': 'flow.int8'}), '((10,), dtype=flow.int8)\n', (13266, 13290), True, 'import oneflow.typing as oft\n'), ((13815, 13867), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.int32'}), '(input_shape, dtype=flow.int32)\n', (13836, 13867), True, 'import oneflow.typing as oft\n'), ((13883, 13923), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (13903, 13923), True, 'import oneflow as flow\n'), ((13944, 13997), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (13964, 13997), True, 'import oneflow as flow\n'), ((16215, 16243), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (16236, 16243), True, 'import oneflow.typing as oft\n'), ((16805, 16857), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (16826, 16857), True, 'import oneflow.typing as oft\n'), ((16873, 16913), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (16893, 16913), True, 'import oneflow as flow\n'), ((16934, 16998), 'oneflow.math.reduce_euclidean_norm', 'flow.math.reduce_euclidean_norm', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (16965, 16998), True, 'import oneflow as flow\n'), ((17008, 17036), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (17022, 17036), True, 'import numpy as np\n'), ((19304, 19332), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (19325, 19332), True, 'import oneflow.typing as oft\n'), ((19896, 19948), 
'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (19917, 19948), True, 'import oneflow.typing as oft\n'), ((19964, 20004), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (19984, 20004), True, 'import oneflow as flow\n'), ((20025, 20084), 'oneflow.math.reduce_logsumexp', 'flow.math.reduce_logsumexp', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (20051, 20084), True, 'import oneflow as flow\n'), ((20094, 20122), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (20108, 20122), True, 'import numpy as np\n'), ((22326, 22354), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (22347, 22354), True, 'import oneflow.typing as oft\n'), ((22901, 22953), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (22922, 22953), True, 'import oneflow.typing as oft\n'), ((22969, 23009), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (22989, 23009), True, 'import oneflow as flow\n'), ((23030, 23083), 'oneflow.math.reduce_std', 'flow.math.reduce_std', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (23050, 23083), True, 'import oneflow as flow\n'), ((23093, 23121), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (23107, 23121), True, 'import numpy as np\n'), ((25247, 25275), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (25268, 25275), True, 'import oneflow.typing as oft\n'), ((25826, 25878), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (25847, 
25878), True, 'import oneflow.typing as oft\n'), ((25894, 25934), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (25914, 25934), True, 'import oneflow as flow\n'), ((25955, 26013), 'oneflow.math.reduce_variance', 'flow.math.reduce_variance', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (25980, 26013), True, 'import oneflow as flow\n'), ((26023, 26051), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (26037, 26051), True, 'import numpy as np\n'), ((28242, 28270), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (28263, 28270), True, 'import oneflow.typing as oft\n'), ((28828, 28880), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['input_shape'], {'dtype': 'flow.float'}), '(input_shape, dtype=flow.float)\n', (28849, 28880), True, 'import oneflow.typing as oft\n'), ((28896, 28936), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (28916, 28936), True, 'import oneflow as flow\n'), ((29157, 29210), 'oneflow.math.reduce_max', 'flow.math.reduce_max', (['x'], {'axis': 'axis', 'keepdims': 'keepdims'}), '(x, axis=axis, keepdims=keepdims)\n', (29177, 29210), True, 'import oneflow as flow\n'), ((29230, 29249), 'oneflow.identity', 'flow.identity', (['loss'], {}), '(loss)\n', (29243, 29249), True, 'import oneflow as flow\n'), ((29695, 29723), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (29709, 29723), True, 'import numpy as np\n'), ((32149, 32177), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(10,)'], {}), '((10,))\n', (32170, 32177), True, 'import oneflow.typing as oft\n'), ((7607, 7638), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (7633, 7638), False, 'import test_global_storage\n'), ((7671, 7707), 
'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (7697, 7707), False, 'import test_global_storage\n'), ((7738, 7772), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss"""'], {}), "('loss')\n", (7764, 7772), False, 'import test_global_storage\n'), ((7808, 7847), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (7834, 7847), False, 'import test_global_storage\n'), ((14008, 14036), 'numpy.random.rand', 'np.random.rand', (['*input_shape'], {}), '(*input_shape)\n', (14022, 14036), True, 'import numpy as np\n'), ((29419, 29450), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (29445, 29450), False, 'import test_global_storage\n'), ((29483, 29519), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (29509, 29519), False, 'import test_global_storage\n'), ((29550, 29584), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss"""'], {}), "('loss')\n", (29576, 29584), False, 'import test_global_storage\n'), ((29620, 29659), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""loss_diff"""'], {}), "('loss_diff')\n", (29646, 29659), False, 'import test_global_storage\n'), ((7286, 7310), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (7308, 7310), True, 'import oneflow as flow\n'), ((29098, 29122), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (29120, 29122), True, 'import oneflow as flow\n'), ((7486, 7541), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (7527, 7541), True, 'import oneflow as flow\n'), ((29298, 29353), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (29339, 29353), True, 'import oneflow 
as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
import oneflow.unittest
from collections import OrderedDict
from test_util import GenArgList
# Input shapes used by the loss tests, keyed by tensor rank (2-D through 5-D).
shapes = {2: (128, 8), 3: (16, 8, 64), 4: (16, 8, 32, 32), 5: (16, 8, 16, 16, 16)}
def compare_loss(device_type, dim, reduction, cls, data_generator):
    """Check that a loss module produces the same result in eager and graph mode.

    Builds the loss from ``cls``, evaluates it eagerly, wraps the same module
    in an ``nn.Graph``, and asserts both outputs are numerically close.
    """
    inputs, targets = data_generator(dim, device_type)
    loss_fn = cls(reduction=reduction).to(device_type)
    eager_out = loss_fn(inputs, targets)

    class LossGraph(flow.nn.Graph):
        # Thin nn.Graph wrapper around the eager loss module.
        def __init__(self) -> None:
            super().__init__()
            self.f = loss_fn

        def build(self, a, b):
            return self.f(a, b)

    lazy_out = LossGraph()(inputs, targets)
    assert np.allclose(eager_out.numpy(), lazy_out.numpy(), rtol=1.0e-5, atol=1.0e-5)
def generate_necessity_default(dim: int, device: str):
    """Return a pair of random float32 tensors with the shape registered for ``dim``."""
    shape = shapes[dim]
    x = flow.tensor(np.random.uniform(0, 1, shape), dtype=flow.float32, device=device)
    y = flow.tensor(np.random.uniform(0, 1, shape), dtype=flow.float32, device=device)
    return x, y
def generate_necessity_for_cross_entropy_or_nll_loss(dim: int, device: str):
    """Return a random (logits, integer-target) pair shaped for CrossEntropy/NLL losses.

    For rank > 2 the class axis (axis 1) is dropped from the target shape.
    """
    shape = shapes[dim]
    if dim == 2:
        y_shape = (shape[0],)
    else:
        y_shape = (shape[0], *shape[2:])
    x = flow.tensor(np.random.uniform(0, 1, shape), dtype=flow.float32, device=device)
    y = flow.tensor(np.random.randint(0, shape[1], y_shape), dtype=flow.int32, device=device)
    return x, y
@flow.unittest.skip_unless_1n1d()
class TestKLDivLossGraph(oneflow.unittest.TestCase):
    def test_kl_div_loss_graph(testcase):
        """Run KLDivLoss eager/graph consistency over every argument combination."""
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.KLDivLoss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
@flow.unittest.skip_unless_1n1d()
class TestSmoothL1LossGraph(oneflow.unittest.TestCase):
    def test_smooth_l1_loss_graph(testcase):
        """Run SmoothL1Loss eager/graph consistency over every argument combination."""
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.SmoothL1Loss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
@flow.unittest.skip_unless_1n1d()
class TestBCELossOrWithLogitsGraph(flow.unittest.TestCase):
    def test_bce_loss_graph(testcase):
        """Run BCELoss and BCEWithLogitsLoss eager/graph consistency checks."""
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.BCELoss, flow.nn.BCEWithLogitsLoss]),
                ("data_generator", [generate_necessity_default]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
@flow.unittest.skip_unless_1n1d()
class TestCrossEntropyOrNllLossGraph(flow.unittest.TestCase):
    def test_cross_entropy_loss_or_nll_loss_graph(testcase):
        """Run CrossEntropyLoss and NLLLoss eager/graph consistency checks."""
        arg_dict = OrderedDict(
            [
                ("device_type", ["cuda", "cpu"]),
                ("dim", [2, 3, 4, 5]),
                ("reduction", ["sum", "mean"]),
                ("cls", [flow.nn.CrossEntropyLoss, flow.nn.NLLLoss]),
                ("data_generator", [generate_necessity_for_cross_entropy_or_nll_loss]),
            ]
        )
        for arg in GenArgList(arg_dict):
            compare_loss(*arg)
if __name__ == "__main__":
    # Discover and run all loss graph test cases defined above.
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d",
"oneflow.tensor"
] | [((2041, 2073), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2071, 2073), True, 'import oneflow as flow\n'), ((2526, 2558), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2556, 2558), True, 'import oneflow as flow\n'), ((3020, 3052), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3050, 3052), True, 'import oneflow as flow\n'), ((3534, 3566), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3564, 3566), True, 'import oneflow as flow\n'), ((1429, 1459), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (1446, 1459), True, 'import numpy as np\n'), ((1471, 1501), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (1488, 1501), True, 'import numpy as np\n'), ((1510, 1562), 'oneflow.tensor', 'flow.tensor', (['x_np'], {'dtype': 'flow.float32', 'device': 'device'}), '(x_np, dtype=flow.float32, device=device)\n', (1521, 1562), True, 'import oneflow as flow\n'), ((1571, 1623), 'oneflow.tensor', 'flow.tensor', (['y_np'], {'dtype': 'flow.float32', 'device': 'device'}), '(y_np, dtype=flow.float32, device=device)\n', (1582, 1623), True, 'import oneflow as flow\n'), ((1820, 1850), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'shape'], {}), '(0, 1, shape)\n', (1837, 1850), True, 'import numpy as np\n'), ((1862, 1901), 'numpy.random.randint', 'np.random.randint', (['(0)', 'shape[1]', 'y_shape'], {}), '(0, shape[1], y_shape)\n', (1879, 1901), True, 'import numpy as np\n'), ((1910, 1962), 'oneflow.tensor', 'flow.tensor', (['x_np'], {'dtype': 'flow.float32', 'device': 'device'}), '(x_np, dtype=flow.float32, device=device)\n', (1921, 1962), True, 'import oneflow as flow\n'), ((1971, 2021), 'oneflow.tensor', 'flow.tensor', (['y_np'], {'dtype': 'flow.int32', 'device': 'device'}), '(y_np, dtype=flow.int32, 
device=device)\n', (1982, 2021), True, 'import oneflow as flow\n'), ((4123, 4138), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4136, 4138), False, 'import unittest\n'), ((2188, 2201), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2199, 2201), False, 'from collections import OrderedDict\n'), ((2470, 2490), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2480, 2490), False, 'from test_util import GenArgList\n'), ((2679, 2692), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (2690, 2692), False, 'from collections import OrderedDict\n'), ((2964, 2984), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (2974, 2984), False, 'from test_util import GenArgList\n'), ((3171, 3184), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3182, 3184), False, 'from collections import OrderedDict\n'), ((3478, 3498), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3488, 3498), False, 'from test_util import GenArgList\n'), ((3709, 3722), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3720, 3722), False, 'from collections import OrderedDict\n'), ((4037, 4057), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4047, 4057), False, 'from test_util import GenArgList\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow
from oneflow.framework.docstr.utils import add_docstr
add_docstr(
oneflow.index_select,
"""
input.index_select(dim, index) -> Tensor
The interface is consistent with PyTorch.
The documentation is referenced from: https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch/#torchindex_select
Select values along an axis specified by `dim`.
:attr:`index` must be an Int32 Tensor with 1-D.
:attr:`dim` must be in the range of input Dimensions.
value of :attr:`index` must be in the range of the dim-th of input.
Note that ``input`` and ``index`` do not broadcast against each other.
Args:
input (Tensor): the source tensor
dim (int): the axis along which to index
index (Tensor): the 1-D tensor containing the indices to index
For example:
.. code-block:: python
>>> import oneflow as flow
>>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)
>>> input
tensor([[1, 2, 3],
[4, 5, 6]], dtype=oneflow.int32)
>>> index = flow.tensor([0,1], dtype=flow.int32)
>>> output = flow.index_select(input, 1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
>>> output = input.index_select(1, index)
>>> output
tensor([[1, 2],
[4, 5]], dtype=oneflow.int32)
""",
)
| [
"oneflow.framework.docstr.utils.add_docstr"
] | [((660, 2025), 'oneflow.framework.docstr.utils.add_docstr', 'add_docstr', (['oneflow.index_select', '"""\n input.index_select(dim, index) -> Tensor\n\n The interface is consistent with PyTorch. \n The documentation is referenced from: https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch/#torchindex_select\n\n Select values along an axis specified by `dim`.\n\n :attr:`index` must be an Int32 Tensor with 1-D.\n :attr:`dim` must be in the range of input Dimensions.\n value of :attr:`index` must be in the range of the dim-th of input.\n Note that ``input`` and ``index`` do not broadcast against each other. \n \n Args:\n input (Tensor): the source tensor\n dim (int): the axis along which to index\n index (Tensor): the 1-D tensor containing the indices to index\n \n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)\n >>> input \n tensor([[1, 2, 3],\n [4, 5, 6]], dtype=oneflow.int32)\n >>> index = flow.tensor([0,1], dtype=flow.int32)\n >>> output = flow.index_select(input, 1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n >>> output = input.index_select(1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n """'], {}), '(oneflow.index_select,\n """\n input.index_select(dim, index) -> Tensor\n\n The interface is consistent with PyTorch. \n The documentation is referenced from: https://pytorch-cn.readthedocs.io/zh/latest/package_references/torch/#torchindex_select\n\n Select values along an axis specified by `dim`.\n\n :attr:`index` must be an Int32 Tensor with 1-D.\n :attr:`dim` must be in the range of input Dimensions.\n value of :attr:`index` must be in the range of the dim-th of input.\n Note that ``input`` and ``index`` do not broadcast against each other. 
\n \n Args:\n input (Tensor): the source tensor\n dim (int): the axis along which to index\n index (Tensor): the 1-D tensor containing the indices to index\n \n For example:\n\n .. code-block:: python\n \n >>> import oneflow as flow\n >>> input = flow.tensor([[1,2,3],[4,5,6]], dtype=flow.int32)\n >>> input \n tensor([[1, 2, 3],\n [4, 5, 6]], dtype=oneflow.int32)\n >>> index = flow.tensor([0,1], dtype=flow.int32)\n >>> output = flow.index_select(input, 1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n >>> output = input.index_select(1, index)\n >>> output\n tensor([[1, 2],\n [4, 5]], dtype=oneflow.int32)\n """\n )\n', (670, 2025), False, 'from oneflow.framework.docstr.utils import add_docstr\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import random
import oneflow as flow
import oneflow.unittest
from test_util import generate_graph
@flow.unittest.skip_unless_1n1d()
class TestEyeGraph(oneflow.unittest.TestCase):
    def test_eye_graph(test_case):
        """flow.eye must produce identical results in eager and graph mode."""
        rows = random.randint(1, 10)
        cols = random.randint(1, 10)

        def eye_fn():
            return flow.eye(rows, cols)

        eager_result = eye_fn()
        lazy_result = generate_graph(eye_fn)()
        test_case.assertTrue(np.array_equal(eager_result.numpy(), lazy_result.numpy()))
if __name__ == "__main__":
    # Run the eye graph test via the standard unittest runner.
    unittest.main()
| [
"oneflow.eye",
"oneflow.unittest.skip_unless_1n1d"
] | [((726, 758), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (756, 758), True, 'import oneflow as flow\n'), ((1160, 1175), 'unittest.main', 'unittest.main', ([], {}), '()\n', (1173, 1175), False, 'import unittest\n'), ((853, 874), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (867, 874), False, 'import random\n'), ((887, 908), 'random.randint', 'random.randint', (['(1)', '(10)'], {}), '(1, 10)\n', (901, 908), False, 'import random\n'), ((997, 1019), 'test_util.generate_graph', 'generate_graph', (['eye_fn'], {}), '(eye_fn)\n', (1011, 1019), False, 'from test_util import generate_graph\n'), ((935, 949), 'oneflow.eye', 'flow.eye', (['n', 'm'], {}), '(n, m)\n', (943, 949), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import numpy as np
import oneflow as flow
import oneflow.typing as tp
# Enable the legacy model I/O mode before any lazy global function below is defined.
flow.config.enable_legacy_model_io(True)
def flow_upsample(x, input_shape, size, data_format, interpolation):
    """Run 2-D upsampling on ``x`` through a lazy OneFlow global function.

    Args:
        x: numpy input array; the job's placeholder is built from ``x.shape``.
        input_shape: kept for interface compatibility; NOTE(review): this
            argument is not used — the job is built from ``x.shape`` instead.
        size: upsample scale factors, e.g. ``(2, 2)``.
        data_format: layout string, ``"NCHW"`` or ``"NHWC"``.
        interpolation: interpolation mode, e.g. ``"bilinear"``.

    Returns:
        The upsampled result as returned by the global function.
    """

    def make_job(input_shape, size, data_format, interpolation, dtype=flow.float32):
        # Build a predict job pinned to the "cambricon" device.
        config = flow.function_config()
        config.default_placement_scope(flow.scope.placement("cambricon", "0:0"))

        @flow.global_function(type="predict", function_config=config)
        # Fix: ``dtype`` was previously accepted but ignored; it is now passed
        # through to the placeholder so non-float32 jobs can be built.
        def upsample_job(x: tp.Numpy.Placeholder(input_shape, dtype=dtype)) -> tp.Numpy:
            return flow.layers.upsample_2d(
                x, size=size, data_format=data_format, interpolation=interpolation
            )

        return upsample_job

    upsample_fakedev_job = make_job(
        x.shape, size, data_format, interpolation, dtype=flow.float32
    )
    y = upsample_fakedev_job(x)
    return y
def _compare_with_np(test_case, input_shape, size, data_format, interpolation):
    # Smoke test: run a fixed 1x3x3x1 input through flow_upsample and print
    # the result. Despite the name, no numpy reference is computed and nothing
    # is asserted via ``test_case`` yet — TODO: add a real numpy comparison.
    x = np.array([[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]])
    flow_res = flow_upsample(x, input_shape, size, data_format, interpolation)
    print(flow_res)
@flow.unittest.skip_unless_1n1d()
class TestUpsample(flow.unittest.TestCase):
    # Single-device smoke test for bilinear NHWC 2x2 upsampling.
    def test_upsample(test_case):
        _compare_with_np(test_case, (1, 3, 3, 1), (2, 2), "NHWC", "bilinear")
if __name__ == "__main__":
    # Run the upsample smoke test via the standard unittest runner.
    unittest.main()
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.placement",
"oneflow.function_config",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.config.enable_legacy_model_io",
"oneflow.layers.upsample_2d"
] | [((1311, 1351), 'oneflow.config.enable_legacy_model_io', 'flow.config.enable_legacy_model_io', (['(True)'], {}), '(True)\n', (1345, 1351), True, 'import oneflow as flow\n'), ((2361, 2393), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2391, 2393), True, 'import oneflow as flow\n'), ((2195, 2258), 'numpy.array', 'np.array', (['[[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]]'], {}), '([[[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]])\n', (2203, 2258), True, 'import numpy as np\n'), ((2583, 2598), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2596, 2598), False, 'import unittest\n'), ((1525, 1547), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (1545, 1547), True, 'import oneflow as flow\n'), ((1639, 1699), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""predict"""', 'function_config': 'config'}), "(type='predict', function_config=config)\n", (1659, 1699), True, 'import oneflow as flow\n'), ((1587, 1627), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cambricon"""', '"""0:0"""'], {}), "('cambricon', '0:0')\n", (1607, 1627), True, 'import oneflow as flow\n'), ((1795, 1890), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', (['x'], {'size': 'size', 'data_format': 'data_format', 'interpolation': 'interpolation'}), '(x, size=size, data_format=data_format,\n interpolation=interpolation)\n', (1818, 1890), True, 'import oneflow as flow\n'), ((1728, 1761), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['input_shape'], {}), '(input_shape)\n', (1748, 1761), True, 'import oneflow.typing as tp\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
from typing import Sequence
from functools import reduce
import operator
def infer_shape(x, shape):
    """Resolve an at-most-one ``-1`` placeholder in ``shape`` against ``x``.

    Args:
        x: object exposing ``.shape`` (e.g. a tensor) whose element count must
            be compatible with ``shape``.
        shape: target shape (list or tuple of ints) where at most one entry
            may be ``-1``, meaning "infer this dimension".

    Returns:
        A new list with the ``-1`` entry (if any) replaced by the inferred
        dimension.  The input ``shape`` is never mutated — the previous
        in-place write corrupted ``Reshape.shape`` after the first forward
        pass, breaking later calls with differently sized inputs.

    Raises:
        AssertionError: if the element counts are incompatible.
    """
    shape = list(shape)  # copy: callers (e.g. Reshape.forward) reuse their shape list
    dim_index_need_infer = shape.index(-1) if shape.count(-1) == 1 else None
    in_elem_cnt = reduce(operator.mul, x.shape, 1)
    out_elem_cnt = reduce(operator.mul, shape, 1)
    if dim_index_need_infer is not None:
        # out_elem_cnt is negative here because of the -1 placeholder; use
        # exact integer division instead of float division to avoid precision
        # loss on large element counts.
        known_elem_cnt = abs(out_elem_cnt)
        assert in_elem_cnt % known_elem_cnt == 0
        shape[dim_index_need_infer] = in_elem_cnt // known_elem_cnt
    else:
        assert in_elem_cnt == out_elem_cnt
    return shape
class Reshape(Module):
    """Module wrapper around the builtin ``reshape`` op.

    Accepts a target shape containing at most one ``-1`` entry, which is
    resolved against the input tensor's shape at call time.
    """

    def __init__(self, shape: Sequence[int]) -> None:
        super().__init__()
        assert isinstance(shape, (tuple, list))
        shape = list(shape)
        assert all(dim == -1 or dim > 0 for dim in shape)
        assert shape.count(-1) <= 1
        builder = flow.builtin_op("reshape").Input("in").Output("out")
        self._op = builder.Attr("shape", shape).Build()
        self.shape = shape

    def forward(self, x):
        resolved = infer_shape(x, self.shape)
        return self._op(x, shape=resolved)[0]
@oneflow_export("reshape")
@register_tensor_op("reshape")
@experimental_api
def reshape_op(x, shape: Sequence[int] = None):
"""This operator reshapes a Tensor.
We can set one dimension in `shape` as `-1`, the operator will infer the complete shape.
Args:
x: A Tensor.
shape: Shape of the output tensor.
Returns:
A Tensor has the same type as `x`.
For example:
.. code-block:: python
>>> import numpy as np
>>> import oneflow.experimental as flow
>>> flow.enable_eager_execution()
>>> x = np.array(
... [[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]]
... ).astype(np.float32)
>>> input = flow.Tensor(x)
>>> y = flow.reshape(input, shape=[2, 2, 2, -1]).numpy().shape
>>> print(y)
(2, 2, 2, 2)
"""
return Reshape(shape=shape)(x)
if __name__ == "__main__":
    import doctest
    # Run the doctest examples embedded in this module's docstrings.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.builtin_op",
"oneflow.python.oneflow_export.oneflow_export"
] | [((1935, 1960), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""reshape"""'], {}), "('reshape')\n", (1949, 1960), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1962, 1991), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""reshape"""'], {}), "('reshape')\n", (1980, 1991), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((992, 1024), 'functools.reduce', 'reduce', (['operator.mul', 'x.shape', '(1)'], {}), '(operator.mul, x.shape, 1)\n', (998, 1024), False, 'from functools import reduce\n'), ((1044, 1074), 'functools.reduce', 'reduce', (['operator.mul', 'shape', '(1)'], {}), '(operator.mul, shape, 1)\n', (1050, 1074), False, 'from functools import reduce\n'), ((2876, 2912), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2891, 2912), False, 'import doctest\n'), ((1640, 1666), 'oneflow.builtin_op', 'flow.builtin_op', (['"""reshape"""'], {}), "('reshape')\n", (1655, 1666), True, 'import oneflow as flow\n')] |
import numpy as np
import os
import random
from collections import deque
import oneflow as flow
import oneflow.typing as tp
from threading import Thread, Lock
import threading
# Hyper Parameters:
FRAME_PER_ACTION = 1  # choose a fresh action every this many timesteps
GAMMA = 0.99 # decay rate of past observations (discount factor)
OBSERVE = 100. # timesteps to observe before training
EXPLORE = 200000. # frames over which to anneal epsilon
FINAL_EPSILON = 0. # 0.001 # final value of epsilon
INITIAL_EPSILON = 0. # 0.005 # starting value of epsilon
MAX_REPLAY_MEMORY = 50000 # number of previous transitions to remember
BATCH_SIZE = 32 # size of minibatch
UPDATE_TIME = 100  # sync QNet -> QNetT (and log progress) every this many timesteps
ACTIONS_NUM = 2 # two actions
DEVICE_TAG = "gpu"  # device type passed to flow.scope.placement
DEVICE_NUM = 1  # number of devices used in the placement scope
def dataPrep(data):
    """Center an HWC frame stack and convert it to NCHW layout.

    Input is (64, 64, 4) or (64, 64, 1); the constant 128 is subtracted
    from every channel, then axes are permuted and a leading batch axis
    is added, giving shape (1, C, 64, 64).
    """
    if data.shape[2] > 1:
        offset = np.array([128, 128, 128, 128]).reshape(1, 1, 4)
    else:
        offset = np.array([128]).reshape(1, 1, 1)
    centered = data - offset
    # hwc -> chw in a single permutation, then prepend the batch axis
    chw = np.transpose(centered, (2, 0, 1))
    return chw[np.newaxis, ...]
# get QNet parameters
def getQNetParams(var_name_prefix: str = "QNet",
                  is_train: bool = True):
    """Create (or fetch) the eight variables of a Q network.

    Returns them in the fixed order:
    conv1_w, conv1_b, conv2_w, conv2_b, fc1_w, fc1_b, fc2_w, fc2_b.
    Variable names are "<prefix><layer>_weight" / "<prefix><layer>_bias".
    """
    weight_init = flow.variance_scaling_initializer(scale = 1.0, mode = "fan_in", distribution = "truncated_normal", data_format = "NCHW")
    bias_init = flow.constant_initializer(value = 0.)
    # (name suffix, weight shape, bias shape) for every layer, in order.
    layer_specs = [
        ("_conv1", (32, 4, 3, 3), (32,)),
        ("_conv2", (32, 32, 3, 3), (32,)),
        ("_fc1", (512, 32 * 16 * 16), (512,)),
        ("_fc2", (ACTIONS_NUM, 512), (ACTIONS_NUM,)),
    ]
    params = []
    for suffix, weight_shape, bias_shape in layer_specs:
        params.append(flow.get_variable(
            var_name_prefix + suffix + "_weight",
            shape = weight_shape,
            dtype = flow.float32,
            initializer = weight_init,
            trainable = is_train
        ))
        params.append(flow.get_variable(
            var_name_prefix + suffix + "_bias",
            shape = bias_shape,
            dtype = flow.float32,
            initializer = bias_init,
            trainable = is_train
        ))
    return tuple(params)
def createOfQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32),
                 var_name_prefix: str = "QNet",
                 is_train: bool = True) -> tp.Numpy:
    """Build the Q-network graph: two conv blocks followed by two FC layers.

    Returns the ACTIONS_NUM-wide Q-value logits for each batch element.
    """
    (conv1_w, conv1_b, conv2_w, conv2_b,
     fc1_w, fc1_b, fc2_w, fc2_b) = getQNetParams(var_name_prefix = var_name_prefix, is_train = is_train)

    def _conv_block(feature, weight, bias, index):
        # conv -> bias -> batch-norm -> relu -> 2x2 max-pool; op names must
        # match the original graph ("conv<i>_bn", "pool<i>").
        y = flow.nn.compat_conv2d(
            feature,
            weight,
            strides = [1, 1],
            padding = "same",
            data_format = "NCHW"
        )
        y = flow.nn.bias_add(y, bias, "NCHW")
        y = flow.layers.batch_normalization(inputs = y, axis = 1, name = "conv%d_bn" % index)
        y = flow.nn.relu(y)
        return flow.nn.max_pool2d(y, 2, 2, "VALID", "NCHW", name = "pool%d" % index)

    pool1 = _conv_block(input_image, conv1_w, conv1_b, 1)
    pool2 = _conv_block(pool1, conv2_w, conv2_b, 2)
    # pool2.shape = (32, 32, 16, 16); flatten to (32, 32 * 16 * 16)
    flat = flow.reshape(pool2, (BATCH_SIZE, -1))
    hidden = flow.matmul(a = flat, b = fc1_w, transpose_b = True)
    hidden = flow.nn.bias_add(hidden, fc1_b)
    hidden = flow.layers.batch_normalization(inputs = hidden, axis = 1, name = "fc1_bn")
    hidden = flow.nn.relu(hidden)
    logits = flow.matmul(a = hidden, b = fc2_w, transpose_b = True)
    return flow.nn.bias_add(logits, fc2_b)
def get_train_config():
    """Return the FunctionConfig used by the training job (float32, consistent view)."""
    config = flow.FunctionConfig()
    config.default_data_type(flow.float32)
    config.default_logical_view(flow.scope.consistent_view())
    return config
def get_predict_config():
    """Return the FunctionConfig used by the predict jobs (float32, consistent view)."""
    config = flow.FunctionConfig()
    config.default_data_type(flow.float32)
    config.default_logical_view(flow.scope.consistent_view())
    return config
@flow.global_function("train", get_train_config())
def trainQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32),
              y_input: tp.Numpy.Placeholder((BATCH_SIZE,), dtype = flow.float32),
              action_input: tp.Numpy.Placeholder((BATCH_SIZE, 2), dtype = flow.float32)):
    """One Adam step (lr=2e-4) minimizing the squared TD error of QNet."""
    with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
        q_values = createOfQNet(input_image, var_name_prefix = "QNet", is_train = True)
        # Q value of the action actually taken (action_input is one-hot).
        taken_q = flow.math.reduce_sum(q_values * action_input, axis = 1)
        loss = flow.math.reduce_mean(flow.math.square(y_input - taken_q))
        scheduler = flow.optimizer.PiecewiseConstantScheduler([], [0.0002])
        flow.optimizer.Adam(scheduler, beta1 = 0.9).minimize(loss)
@flow.global_function("predict", get_predict_config())
def predictQNet(input_image: tp.Numpy.Placeholder((BATCH_SIZE, 4, 64, 64), dtype = flow.float32)) -> tp.Numpy:
    """Forward pass of the target network (QNetT); no gradients are produced."""
    with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
        logits = createOfQNet(input_image, var_name_prefix = "QNetT", is_train = False)
    return logits
# copy QNet parameters to QNetT
@flow.global_function("predict", get_predict_config())
def copyQNetToQnetT():
    """Copy every online-network (QNet) variable into the target network (QNetT)."""
    with flow.scope.placement(DEVICE_TAG, "0:0-%d" % (DEVICE_NUM - 1)):
        online_params = getQNetParams(var_name_prefix = "QNet", is_train = True)
        target_params = getQNetParams(var_name_prefix = "QNetT", is_train = False)
        # Assign pairwise, in the fixed order returned by getQNetParams.
        for target_var, online_var in zip(target_params, online_params):
            flow.assign(target_var, online_var)
class OfBrainDQN:
    """Deep Q-Network agent built on the OneFlow global functions above.

    Owns the replay memory and the epsilon-greedy exploration state, and
    runs the gradient-update loop in a background thread once enough
    transitions have been observed.
    """
    def __init__(self, args):
        # init replay memory (deque so old transitions pop cheaply from the left)
        self.replayMemory = deque()
        # init some parameters
        self.timeStep = 0
        self.epsilon = INITIAL_EPSILON
        # OneFlow global functions defined at module level
        self.trainQNet = trainQNet
        self.predictQNet = predictQNet
        self.copyQNetToQnetT = copyQNetToQnetT
        self.check_point_dir = args.checkpoints_path
        self.pretrain_models = args.pretrain_models
        self.check_point = flow.train.CheckPoint()
        # Load pretrained weights when a path is given, otherwise random init.
        if self.pretrain_models != '':
            self.check_point.load(self.pretrain_models)
        else:
            self.check_point.init()
        # Locks shared with the background training thread: they guard
        # timeStep, the predict/copy jobs, and the replay memory respectively.
        self.time_step_mutex = Lock()
        self.predict_QNet_mutex = Lock()
        self.replay_memory_mutex = Lock()
        self.train_thread = Thread(target = self.trainQNetwork)
        self.thread_started = False
    def trainQNetwork(self):
        """Background training loop: sample a minibatch, compute TD targets,
        run one optimizer step, and periodically checkpoint / sync QNetT.

        Runs forever; assumes the replay memory already holds at least
        BATCH_SIZE transitions (the thread is only started after OBSERVE
        steps in setPerception).
        """
        while True:
            self.replay_memory_mutex.acquire()
            # Step 1: obtain random minibatch from replay memory
            minibatch = random.sample(self.replayMemory, BATCH_SIZE)
            self.replay_memory_mutex.release()
            # Unpack the (state, action, reward, next_state, terminal) tuples.
            # state_batch.shape = (BATCH_SIZE, 4, 64, 64)
            state_batch = np.squeeze([data[0] for data in minibatch])
            action_batch = np.squeeze([data[1] for data in minibatch])
            reward_batch = np.squeeze([data[2] for data in minibatch])
            next_state_batch = np.squeeze([data[3] for data in minibatch])
            # Step 2: calculate y_batch = r + GAMMA * max_a' Q_target(s', a')
            self.predict_QNet_mutex.acquire()
            Qvalue_batch = self.predictQNet(next_state_batch)
            self.predict_QNet_mutex.release()
            terminal = np.squeeze([data[4] for data in minibatch])
            y_batch = reward_batch.astype(np.float32)
            terminal_false = terminal == False
            # NOTE(review): this condition tests the mask's length, not whether
            # any entry is True, so it is effectively always true for a
            # non-empty batch; the masked assignment below is what matters.
            if (terminal_false).shape[0] > 0:
                y_batch[terminal_false] += (GAMMA * np.max(Qvalue_batch, axis=1))[terminal_false]
            # do forward, backward and update parameters
            self.trainQNet(state_batch, y_batch, action_batch)
            self.time_step_mutex.acquire()
            localTimeStep = self.timeStep
            self.time_step_mutex.release()
            # save network every 100 iterations
            if localTimeStep % 100 == 0:
                if not os.path.exists(self.check_point_dir):
                    os.mkdir(self.check_point_dir)
                save_path = '%s/network-dqn_of_%d' % (self.check_point_dir, localTimeStep)
                if not os.path.exists(save_path):
                    self.check_point.save(save_path)
            # refresh the target network QNetT every UPDATE_TIME steps
            if localTimeStep % UPDATE_TIME == 0:
                self.predict_QNet_mutex.acquire()
                self.copyQNetToQnetT()
                self.predict_QNet_mutex.release()
    def setInitState(self, observation):
        """Initialize currentState by stacking the first frame four times."""
        # temp.shape = (1, 4, 64, 64)
        temp = dataPrep(np.stack((observation, observation, observation, observation), axis = 2))
        self.currentState = temp
    def setPerception(self, nextObservation, action, reward, terminal):
        """Record one transition, start/feed the trainer, advance timeStep."""
        # discard the oldest frame of currentState and append nextObservation
        # newState.shape = (1, 4, 64, 64)
        newState = np.append(self.currentState[:, 1:, :, :], dataPrep(nextObservation), axis = 1)
        self.replay_memory_mutex.acquire()
        self.replayMemory.append(
            (self.currentState.astype(np.float32), action.astype(np.float32), reward, newState.astype(np.float32), terminal))
        self.replay_memory_mutex.release()
        # bound the replay memory by evicting the oldest transition
        if len(self.replayMemory) > MAX_REPLAY_MEMORY:
            self.replayMemory.popleft()
        if self.timeStep > OBSERVE and not self.thread_started:
            # Train the network (background thread, started exactly once)
            self.train_thread.start()
            self.thread_started = True
        # print info
        state = ""
        if self.timeStep <= OBSERVE:
            state = "observe"
        elif self.timeStep > OBSERVE and self.timeStep <= OBSERVE + EXPLORE:
            state = "explore"
        else:
            state = "train"
        if self.timeStep % UPDATE_TIME == 0:
            print("TIMESTEP", self.timeStep, "/ STATE", state, "/ EPSILON", self.epsilon)
        self.currentState = newState
        self.time_step_mutex.acquire()
        self.timeStep += 1
        self.time_step_mutex.release()
    def getAction(self):
        """Pick an action epsilon-greedily from QNetT's Q values.

        Returns a one-hot numpy vector of length ACTIONS_NUM.
        """
        # The predict job expects a full batch, so tile the single state.
        input_images = np.repeat(self.currentState, BATCH_SIZE, axis = 0).astype(np.float32)
        self.predict_QNet_mutex.acquire()
        Qvalue = np.squeeze(self.predictQNet(input_images))
        self.predict_QNet_mutex.release()
        # all rows are identical copies of the same state; keep the first
        Qvalue = Qvalue[0]
        action = np.zeros(ACTIONS_NUM)
        action_index = 0
        if self.timeStep % FRAME_PER_ACTION == 0:
            if random.random() <= self.epsilon:
                # explore: random action
                action_index = random.randrange(ACTIONS_NUM)
                action[action_index] = 1
            else:
                # exploit: greedy action
                action_index = np.argmax(Qvalue)
                action[action_index] = 1
        else:
            action[0] = 1 # do nothing
        # anneal epsilon linearly from INITIAL_EPSILON to FINAL_EPSILON
        if self.epsilon > FINAL_EPSILON and self.timeStep > OBSERVE:
            self.epsilon -= (INITIAL_EPSILON - FINAL_EPSILON) / EXPLORE
        return action
"oneflow.variance_scaling_initializer",
"oneflow.matmul",
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.constant_initializer",
"oneflow.math.reduce_sum",
"oneflow.nn.compat_conv2d",
"oneflow.math.square",
"oneflow.assign",
"oneflow.optimizer.PiecewiseConstantSchedul... | [((1030, 1053), 'numpy.swapaxes', 'np.swapaxes', (['data', '(0)', '(2)'], {}), '(data, 0, 2)\n', (1041, 1053), True, 'import numpy as np\n'), ((1065, 1088), 'numpy.swapaxes', 'np.swapaxes', (['data', '(1)', '(2)'], {}), '(data, 1, 2)\n', (1076, 1088), True, 'import numpy as np\n'), ((1100, 1128), 'numpy.expand_dims', 'np.expand_dims', (['data'], {'axis': '(0)'}), '(data, axis=0)\n', (1114, 1128), True, 'import numpy as np\n'), ((1345, 1462), 'oneflow.variance_scaling_initializer', 'flow.variance_scaling_initializer', ([], {'scale': '(1.0)', 'mode': '"""fan_in"""', 'distribution': '"""truncated_normal"""', 'data_format': '"""NCHW"""'}), "(scale=1.0, mode='fan_in', distribution=\n 'truncated_normal', data_format='NCHW')\n", (1378, 1462), True, 'import oneflow as flow\n'), ((1482, 1518), 'oneflow.constant_initializer', 'flow.constant_initializer', ([], {'value': '(0.0)'}), '(value=0.0)\n', (1507, 1518), True, 'import oneflow as flow\n'), ((1567, 1718), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + conv_prefix + '_weight')"], {'shape': '(32, 4, 3, 3)', 'dtype': 'flow.float32', 'initializer': 'weight_init', 'trainable': 'is_train'}), "(var_name_prefix + conv_prefix + '_weight', shape=(32, 4, \n 3, 3), dtype=flow.float32, initializer=weight_init, trainable=is_train)\n", (1584, 1718), True, 'import oneflow as flow\n'), ((1793, 1931), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + conv_prefix + '_bias')"], {'shape': '(32,)', 'dtype': 'flow.float32', 'initializer': 'bias_init', 'trainable': 'is_train'}), "(var_name_prefix + conv_prefix + '_bias', shape=(32,),\n dtype=flow.float32, initializer=bias_init, trainable=is_train)\n", (1810, 1931), True, 'import oneflow as flow\n'), ((2029, 2180), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + conv_prefix + '_weight')"], {'shape': '(32, 32, 3, 3)', 'dtype': 'flow.float32', 'initializer': 'weight_init', 'trainable': 
'is_train'}), "(var_name_prefix + conv_prefix + '_weight', shape=(32, 32,\n 3, 3), dtype=flow.float32, initializer=weight_init, trainable=is_train)\n", (2046, 2180), True, 'import oneflow as flow\n'), ((2256, 2394), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + conv_prefix + '_bias')"], {'shape': '(32,)', 'dtype': 'flow.float32', 'initializer': 'bias_init', 'trainable': 'is_train'}), "(var_name_prefix + conv_prefix + '_bias', shape=(32,),\n dtype=flow.float32, initializer=bias_init, trainable=is_train)\n", (2273, 2394), True, 'import oneflow as flow\n'), ((2486, 2640), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + fc_prefix + '_weight')"], {'shape': '(512, 32 * 16 * 16)', 'dtype': 'flow.float32', 'initializer': 'weight_init', 'trainable': 'is_train'}), "(var_name_prefix + fc_prefix + '_weight', shape=(512, 32 *\n 16 * 16), dtype=flow.float32, initializer=weight_init, trainable=is_train)\n", (2503, 2640), True, 'import oneflow as flow\n'), ((2714, 2851), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + fc_prefix + '_bias')"], {'shape': '(512,)', 'dtype': 'flow.float32', 'initializer': 'bias_init', 'trainable': 'is_train'}), "(var_name_prefix + fc_prefix + '_bias', shape=(512,),\n dtype=flow.float32, initializer=bias_init, trainable=is_train)\n", (2731, 2851), True, 'import oneflow as flow\n'), ((2943, 3101), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + fc_prefix + '_weight')"], {'shape': '(ACTIONS_NUM, 512)', 'dtype': 'flow.float32', 'initializer': 'weight_init', 'trainable': 'is_train'}), "(var_name_prefix + fc_prefix + '_weight', shape=(\n ACTIONS_NUM, 512), dtype=flow.float32, initializer=weight_init,\n trainable=is_train)\n", (2960, 3101), True, 'import oneflow as flow\n'), ((3170, 3316), 'oneflow.get_variable', 'flow.get_variable', (["(var_name_prefix + fc_prefix + '_bias')"], {'shape': '(ACTIONS_NUM,)', 'dtype': 'flow.float32', 'initializer': 'bias_init', 'trainable': 
'is_train'}), "(var_name_prefix + fc_prefix + '_bias', shape=(ACTIONS_NUM\n ,), dtype=flow.float32, initializer=bias_init, trainable=is_train)\n", (3187, 3316), True, 'import oneflow as flow\n'), ((3874, 3979), 'oneflow.nn.compat_conv2d', 'flow.nn.compat_conv2d', (['input_image', 'conv1_weight'], {'strides': '[1, 1]', 'padding': '"""same"""', 'data_format': '"""NCHW"""'}), "(input_image, conv1_weight, strides=[1, 1], padding=\n 'same', data_format='NCHW')\n", (3895, 3979), True, 'import oneflow as flow\n'), ((4039, 4082), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['conv1', 'conv1_bias', '"""NCHW"""'], {}), "(conv1, conv1_bias, 'NCHW')\n", (4055, 4082), True, 'import oneflow as flow\n'), ((4095, 4165), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'conv1', 'axis': '(1)', 'name': '"""conv1_bn"""'}), "(inputs=conv1, axis=1, name='conv1_bn')\n", (4126, 4165), True, 'import oneflow as flow\n'), ((4184, 4203), 'oneflow.nn.relu', 'flow.nn.relu', (['conv1'], {}), '(conv1)\n', (4196, 4203), True, 'import oneflow as flow\n'), ((4217, 4279), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv1', '(2)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool1"""'}), "(conv1, 2, 2, 'VALID', 'NCHW', name='pool1')\n", (4235, 4279), True, 'import oneflow as flow\n'), ((4295, 4393), 'oneflow.nn.compat_conv2d', 'flow.nn.compat_conv2d', (['pool1', 'conv2_weight'], {'strides': '[1, 1]', 'padding': '"""same"""', 'data_format': '"""NCHW"""'}), "(pool1, conv2_weight, strides=[1, 1], padding='same',\n data_format='NCHW')\n", (4316, 4393), True, 'import oneflow as flow\n'), ((4454, 4497), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['conv2', 'conv2_bias', '"""NCHW"""'], {}), "(conv2, conv2_bias, 'NCHW')\n", (4470, 4497), True, 'import oneflow as flow\n'), ((4510, 4580), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'conv2', 'axis': '(1)', 'name': '"""conv2_bn"""'}), "(inputs=conv2, axis=1, 
name='conv2_bn')\n", (4541, 4580), True, 'import oneflow as flow\n'), ((4599, 4618), 'oneflow.nn.relu', 'flow.nn.relu', (['conv2'], {}), '(conv2)\n', (4611, 4618), True, 'import oneflow as flow\n'), ((4636, 4698), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['conv2', '(2)', '(2)', '"""VALID"""', '"""NCHW"""'], {'name': '"""pool2"""'}), "(conv2, 2, 2, 'VALID', 'NCHW', name='pool2')\n", (4654, 4698), True, 'import oneflow as flow\n'), ((4800, 4837), 'oneflow.reshape', 'flow.reshape', (['pool2', '(BATCH_SIZE, -1)'], {}), '(pool2, (BATCH_SIZE, -1))\n', (4812, 4837), True, 'import oneflow as flow\n'), ((4848, 4908), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'pool2_flatten', 'b': 'fc1_weight', 'transpose_b': '(True)'}), '(a=pool2_flatten, b=fc1_weight, transpose_b=True)\n', (4859, 4908), True, 'import oneflow as flow\n'), ((4925, 4956), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['fc1', 'fc1_bias'], {}), '(fc1, fc1_bias)\n', (4941, 4956), True, 'import oneflow as flow\n'), ((4967, 5033), 'oneflow.layers.batch_normalization', 'flow.layers.batch_normalization', ([], {'inputs': 'fc1', 'axis': '(1)', 'name': '"""fc1_bn"""'}), "(inputs=fc1, axis=1, name='fc1_bn')\n", (4998, 5033), True, 'import oneflow as flow\n'), ((5050, 5067), 'oneflow.nn.relu', 'flow.nn.relu', (['fc1'], {}), '(fc1)\n', (5062, 5067), True, 'import oneflow as flow\n'), ((5079, 5129), 'oneflow.matmul', 'flow.matmul', ([], {'a': 'fc1', 'b': 'fc2_weight', 'transpose_b': '(True)'}), '(a=fc1, b=fc2_weight, transpose_b=True)\n', (5090, 5129), True, 'import oneflow as flow\n'), ((5146, 5177), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['fc2', 'fc2_bias'], {}), '(fc2, fc2_bias)\n', (5162, 5177), True, 'import oneflow as flow\n'), ((5237, 5258), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5256, 5258), True, 'import oneflow as flow\n'), ((5442, 5463), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (5461, 5463), True, 'import oneflow as flow\n'), ((799, 829), 
'numpy.array', 'np.array', (['[128, 128, 128, 128]'], {}), '([128, 128, 128, 128])\n', (807, 829), True, 'import numpy as np\n'), ((899, 914), 'numpy.array', 'np.array', (['[128]'], {}), '([128])\n', (907, 914), True, 'import numpy as np\n'), ((3505, 3570), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 4, 64, 64)'], {'dtype': 'flow.float32'}), '((BATCH_SIZE, 4, 64, 64), dtype=flow.float32)\n', (3525, 3570), True, 'import oneflow.typing as tp\n'), ((5344, 5372), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (5370, 5372), True, 'import oneflow as flow\n'), ((5549, 5577), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (5575, 5577), True, 'import oneflow as flow\n'), ((5681, 5746), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 4, 64, 64)'], {'dtype': 'flow.float32'}), '((BATCH_SIZE, 4, 64, 64), dtype=flow.float32)\n', (5701, 5746), True, 'import oneflow.typing as tp\n'), ((5773, 5828), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE,)'], {'dtype': 'flow.float32'}), '((BATCH_SIZE,), dtype=flow.float32)\n', (5793, 5828), True, 'import oneflow.typing as tp\n'), ((5860, 5917), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 2)'], {'dtype': 'flow.float32'}), '((BATCH_SIZE, 2), dtype=flow.float32)\n', (5880, 5917), True, 'import oneflow.typing as tp\n'), ((5931, 5992), 'oneflow.scope.placement', 'flow.scope.placement', (['DEVICE_TAG', "('0:0-%d' % (DEVICE_NUM - 1))"], {}), "(DEVICE_TAG, '0:0-%d' % (DEVICE_NUM - 1))\n", (5951, 5992), True, 'import oneflow as flow\n'), ((6096, 6144), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['(out * action_input)'], {'axis': '(1)'}), '(out * action_input, axis=1)\n', (6116, 6144), True, 'import oneflow as flow\n'), ((6480, 6545), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', (['(BATCH_SIZE, 4, 64, 64)'], {'dtype': 'flow.float32'}), 
'((BATCH_SIZE, 4, 64, 64), dtype=flow.float32)\n', (6500, 6545), True, 'import oneflow.typing as tp\n'), ((6571, 6632), 'oneflow.scope.placement', 'flow.scope.placement', (['DEVICE_TAG', "('0:0-%d' % (DEVICE_NUM - 1))"], {}), "(DEVICE_TAG, '0:0-%d' % (DEVICE_NUM - 1))\n", (6591, 6632), True, 'import oneflow as flow\n'), ((6858, 6919), 'oneflow.scope.placement', 'flow.scope.placement', (['DEVICE_TAG', "('0:0-%d' % (DEVICE_NUM - 1))"], {}), "(DEVICE_TAG, '0:0-%d' % (DEVICE_NUM - 1))\n", (6878, 6919), True, 'import oneflow as flow\n'), ((7316, 7359), 'oneflow.assign', 'flow.assign', (['p_conv1_weight', 't_conv1_weight'], {}), '(p_conv1_weight, t_conv1_weight)\n', (7327, 7359), True, 'import oneflow as flow\n'), ((7368, 7407), 'oneflow.assign', 'flow.assign', (['p_conv1_bias', 't_conv1_bias'], {}), '(p_conv1_bias, t_conv1_bias)\n', (7379, 7407), True, 'import oneflow as flow\n'), ((7416, 7459), 'oneflow.assign', 'flow.assign', (['p_conv2_weight', 't_conv2_weight'], {}), '(p_conv2_weight, t_conv2_weight)\n', (7427, 7459), True, 'import oneflow as flow\n'), ((7468, 7507), 'oneflow.assign', 'flow.assign', (['p_conv2_bias', 't_conv2_bias'], {}), '(p_conv2_bias, t_conv2_bias)\n', (7479, 7507), True, 'import oneflow as flow\n'), ((7516, 7555), 'oneflow.assign', 'flow.assign', (['p_fc1_weight', 't_fc1_weight'], {}), '(p_fc1_weight, t_fc1_weight)\n', (7527, 7555), True, 'import oneflow as flow\n'), ((7564, 7599), 'oneflow.assign', 'flow.assign', (['p_fc1_bias', 't_fc1_bias'], {}), '(p_fc1_bias, t_fc1_bias)\n', (7575, 7599), True, 'import oneflow as flow\n'), ((7608, 7647), 'oneflow.assign', 'flow.assign', (['p_fc2_weight', 't_fc2_weight'], {}), '(p_fc2_weight, t_fc2_weight)\n', (7619, 7647), True, 'import oneflow as flow\n'), ((7656, 7691), 'oneflow.assign', 'flow.assign', (['p_fc2_bias', 't_fc2_bias'], {}), '(p_fc2_bias, t_fc2_bias)\n', (7667, 7691), True, 'import oneflow as flow\n'), ((7798, 7805), 'collections.deque', 'deque', ([], {}), '()\n', (7803, 7805), False, 'from 
collections import deque\n'), ((8158, 8181), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (8179, 8181), True, 'import oneflow as flow\n'), ((8359, 8365), 'threading.Lock', 'Lock', ([], {}), '()\n', (8363, 8365), False, 'from threading import Thread, Lock\n'), ((8400, 8406), 'threading.Lock', 'Lock', ([], {}), '()\n', (8404, 8406), False, 'from threading import Thread, Lock\n'), ((8442, 8448), 'threading.Lock', 'Lock', ([], {}), '()\n', (8446, 8448), False, 'from threading import Thread, Lock\n'), ((8477, 8510), 'threading.Thread', 'Thread', ([], {'target': 'self.trainQNetwork'}), '(target=self.trainQNetwork)\n', (8483, 8510), False, 'from threading import Thread, Lock\n'), ((12413, 12434), 'numpy.zeros', 'np.zeros', (['ACTIONS_NUM'], {}), '(ACTIONS_NUM)\n', (12421, 12434), True, 'import numpy as np\n'), ((6184, 6220), 'oneflow.math.square', 'flow.math.square', (['(y_input - Q_Action)'], {}), '(y_input - Q_Action)\n', (6200, 6220), True, 'import oneflow as flow\n'), ((8744, 8788), 'random.sample', 'random.sample', (['self.replayMemory', 'BATCH_SIZE'], {}), '(self.replayMemory, BATCH_SIZE)\n', (8757, 8788), False, 'import random\n'), ((8921, 8964), 'numpy.squeeze', 'np.squeeze', (['[data[0] for data in minibatch]'], {}), '([data[0] for data in minibatch])\n', (8931, 8964), True, 'import numpy as np\n'), ((8992, 9035), 'numpy.squeeze', 'np.squeeze', (['[data[1] for data in minibatch]'], {}), '([data[1] for data in minibatch])\n', (9002, 9035), True, 'import numpy as np\n'), ((9063, 9106), 'numpy.squeeze', 'np.squeeze', (['[data[2] for data in minibatch]'], {}), '([data[2] for data in minibatch])\n', (9073, 9106), True, 'import numpy as np\n'), ((9138, 9181), 'numpy.squeeze', 'np.squeeze', (['[data[3] for data in minibatch]'], {}), '([data[3] for data in minibatch])\n', (9148, 9181), True, 'import numpy as np\n'), ((9401, 9444), 'numpy.squeeze', 'np.squeeze', (['[data[4] for data in minibatch]'], {}), '([data[4] for data in minibatch])\n', 
(9411, 9444), True, 'import numpy as np\n'), ((10642, 10712), 'numpy.stack', 'np.stack', (['(observation, observation, observation, observation)'], {'axis': '(2)'}), '((observation, observation, observation, observation), axis=2)\n', (10650, 10712), True, 'import numpy as np\n'), ((12153, 12201), 'numpy.repeat', 'np.repeat', (['self.currentState', 'BATCH_SIZE'], {'axis': '(0)'}), '(self.currentState, BATCH_SIZE, axis=0)\n', (12162, 12201), True, 'import numpy as np\n'), ((12525, 12540), 'random.random', 'random.random', ([], {}), '()\n', (12538, 12540), False, 'import random\n'), ((12589, 12618), 'random.randrange', 'random.randrange', (['ACTIONS_NUM'], {}), '(ACTIONS_NUM)\n', (12605, 12618), False, 'import random\n'), ((12709, 12726), 'numpy.argmax', 'np.argmax', (['Qvalue'], {}), '(Qvalue)\n', (12718, 12726), True, 'import numpy as np\n'), ((6301, 6363), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[learning_rate]'], {}), '([], [learning_rate])\n', (6342, 6363), True, 'import oneflow as flow\n'), ((10053, 10089), 'os.path.exists', 'os.path.exists', (['self.check_point_dir'], {}), '(self.check_point_dir)\n', (10067, 10089), False, 'import os\n'), ((10111, 10141), 'os.mkdir', 'os.mkdir', (['self.check_point_dir'], {}), '(self.check_point_dir)\n', (10119, 10141), False, 'import os\n'), ((10256, 10281), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (10270, 10281), False, 'import os\n'), ((9644, 9672), 'numpy.max', 'np.max', (['Qvalue_batch'], {'axis': '(1)'}), '(Qvalue_batch, axis=1)\n', (9650, 9672), True, 'import numpy as np\n')] |
"""
Logic:
1. AudioDataLoader generate a minibatch from AudioDataset, the size of this
minibatch is AudioDataLoader's batchsize. For now, we always set
AudioDataLoader's batchsize as 1. The real minibatch size we care about is
set in AudioDataset's __init__(...). So actually, we generate the
information of one minibatch in AudioDataset.
2. After AudioDataLoader getting one minibatch from AudioDataset,
AudioDataLoader calls its collate_fn(batch) to process this minibatch.
"""
import json
import numpy as np
import oneflow as flow
import oneflow.utils.data as data
import kaldi_io
from utils import IGNORE_ID, pad_list
class AudioDataset(data.Dataset):
    """Dataset whose items are pre-computed minibatches of utterances.

    Each element returned by ``__getitem__`` is a whole minibatch (a list
    of ``(utt_id, info)`` pairs), so the surrounding DataLoader should use
    batch_size=1.

    TODO: this is a little HACK now, put batch_size here now.
    remove batch_size to dataloader later.
    """
    def __init__(self, data_json_path, batch_size, max_length_in, max_length_out,
                 num_batches=0, batch_frames=0):
        # From: espnet/src/asr/asr_utils.py: make_batchset()
        """
        Args:
            data_json_path: espnet/espnet json format file.
            batch_size: max utterances per minibatch (method 1).
            max_length_in: input length beyond which batches shrink.
            max_length_out: output length beyond which batches shrink.
            num_batches: for debug. only use num_batches minibatch but not all.
            batch_frames: if > 0, batch by total frame count (method 2).
        """
        super(AudioDataset, self).__init__()
        with open(data_json_path, 'rb') as f:
            # renamed from `data` to avoid shadowing the module-level
            # `oneflow.utils.data` import
            utts = json.load(f)['utts']
        # sort it by input lengths (long to short)
        sorted_data = sorted(utts.items(), key=lambda item: int(
            item[1]['input'][0]['shape'][0]), reverse=True)
        total = len(sorted_data)
        minibatch = []
        # Method 1: Generate minibatch based on batch_size
        # i.e. each batch contains #batch_size utterances
        if batch_frames == 0:
            start = 0
            # `while start < total` (rather than `while True`) also handles
            # an empty dataset without raising IndexError
            while start < total:
                ilen = int(sorted_data[start][1]['input'][0]['shape'][0])
                olen = int(sorted_data[start][1]['output'][0]['shape'][0])
                # shrink the batch when utterances are long
                factor = max(int(ilen / max_length_in), int(olen / max_length_out))
                b = max(1, int(batch_size / (1 + factor)))
                end = min(total, start + b)
                minibatch.append(sorted_data[start:end])
                start = end
        # Method 2: Generate minibatch based on batch_frames
        # i.e. each batch contains approximately #batch_frames frames
        else:
            print("NOTE: Generate minibatch based on batch_frames.")
            print("i.e. each batch contains approximately #batch_frames frames")
            start = 0
            while start < total:
                total_frames = 0
                end = start
                while total_frames < batch_frames and end < total:
                    total_frames += int(sorted_data[end][1]['input'][0]['shape'][0])
                    end += 1
                # Guard against a zero-progress iteration (e.g. a
                # non-positive batch_frames), which previously looped forever.
                if end == start:
                    end = start + 1
                minibatch.append(sorted_data[start:end])
                start = end
        if num_batches > 0:
            minibatch = minibatch[:num_batches]
        self.minibatch = minibatch

    def __getitem__(self, index):
        """Return the pre-built minibatch at `index`."""
        return self.minibatch[index]

    def __len__(self):
        """Number of minibatches (not utterances)."""
        return len(self.minibatch)
class AudioDataLoader(data.DataLoader):
    """DataLoader whose collate_fn applies LFR stacking and padding.

    NOTE: just use batchsize=1 here, so drop_last=True makes no sense here.
    """

    def __init__(self, *args, LFR_m=1, LFR_n=1, **kwargs):
        super(AudioDataLoader, self).__init__(*args, **kwargs)
        # Swap in the LFR-aware collate function after construction so the
        # LFR settings need not flow through DataLoader's own signature.
        lfr_collate = LFRCollate(LFR_m=LFR_m, LFR_n=LFR_n)
        self.collate_fn = lfr_collate
class LFRCollate(object):
    """Callable that forwards (LFR_m, LFR_n) to _collate_fn.

    Exists because DataLoader's collate_fn takes only the batch argument.
    """

    def __init__(self, LFR_m=1, LFR_n=1):
        self.LFR_m = LFR_m
        self.LFR_n = LFR_n

    def __call__(self, batch):
        return _collate_fn(batch, LFR_m=self.LFR_m, LFR_n=self.LFR_n)
# From: espnet/src/asr/asr_pytorch.py: CustomConverter:__call__
def _collate_fn(batch, LFR_m=1, LFR_n=1):
    """Pad one AudioDataset minibatch into batched tensors.

    Args:
        batch: list, len(batch) = 1. See AudioDataset.__getitem__()
    Returns:
        xs_pad: N x Ti x D float32 tensor of padded input features
        ilens : N tensor of original input lengths
        ys_pad: N x To tensor of padded target ids (IGNORE_ID padding)
    """
    # The DataLoader batch size is 1; the real minibatch is batch[0].
    assert len(batch) == 1
    xs, ys = load_inputs_and_targets(batch[0], LFR_m=LFR_m, LFR_n=LFR_n)
    # lengths of the (unpadded) input sequences
    lengths = np.array([feat.shape[0] for feat in xs])
    xs_pad = pad_list([flow.tensor(feat).to(dtype=flow.float32) for feat in xs], 0)
    ilens = flow.tensor(lengths)
    ys_pad = pad_list([flow.tensor(ids) for ids in ys], IGNORE_ID)
    return xs_pad, ilens, ys_pad
# ------------------------------ utils ------------------------------------
def load_inputs_and_targets(batch, LFR_m=1, LFR_n=1):
    # From: espnet/src/asr/asr_utils.py: load_inputs_and_targets
    """Load acoustic features and target token-id sequences for one minibatch."""
    feats = [kaldi_io.read_mat(entry[1]['input'][0]['feat']) for entry in batch]
    targets = [entry[1]['output'][0]['tokenid'].split() for entry in batch]
    if LFR_m != 1 or LFR_n != 1:
        # Apply low-frame-rate stacking/skipping to every feature matrix.
        feats = [build_LFR_features(feat, LFR_m, LFR_n) for feat in feats]
    # Keep only samples with a non-empty target, ordered by decreasing input
    # length (Python's sort is stable, so ties keep their original order).
    keep = [i for i in range(len(feats)) if len(targets[i]) > 0]
    keep.sort(key=lambda i: -len(feats[i]))
    if len(keep) != len(feats):
        print("warning: Target sequences include empty tokenid")
    xs = [feats[i] for i in keep]
    ys = [np.fromiter(map(int, targets[i]), dtype=np.int64) for i in keep]
    return xs, ys
def build_LFR_features(inputs, m, n):
    """
    Actually, this implements stacking frames and skipping frames.
    if m = 1 and n = 1, just return the origin features.
    if m = 1 and n > 1, it works like skipping.
    if m > 1 and n = 1, it works like stacking but only support right frames.
    if m > 1 and n > 1, it works like LFR.
    Args:
        inputs_batch: inputs is T x D np.ndarray
        m: number of frames to stack
        n: number of frames to skip
    """
    T = inputs.shape[0]
    T_lfr = int(np.ceil(T / n))
    stacked = []
    for i in range(T_lfr):
        start = i * n
        if m <= T - start:
            # Enough right context: stack m consecutive frames into one row.
            stacked.append(np.hstack(inputs[start:start + m]))
        else:
            # Near the end: stack what remains and repeat the last frame
            # until the window holds m frames.
            frame = np.hstack(inputs[start:])
            for _ in range(m - (T - start)):
                frame = np.hstack((frame, inputs[-1]))
            stacked.append(frame)
    return np.vstack(stacked)
| [
"oneflow.utils.data.items",
"oneflow.tensor"
] | [((4378, 4412), 'numpy.array', 'np.array', (['[x.shape[0] for x in xs]'], {}), '([x.shape[0] for x in xs])\n', (4386, 4412), True, 'import numpy as np\n'), ((4548, 4566), 'oneflow.tensor', 'flow.tensor', (['ilens'], {}), '(ilens)\n', (4559, 4566), True, 'import oneflow as flow\n'), ((6587, 6608), 'numpy.vstack', 'np.vstack', (['LFR_inputs'], {}), '(LFR_inputs)\n', (6596, 6608), True, 'import numpy as np\n'), ((4931, 4974), 'kaldi_io.read_mat', 'kaldi_io.read_mat', (["b[1]['input'][0]['feat']"], {}), "(b[1]['input'][0]['feat'])\n", (4948, 4974), False, 'import kaldi_io\n'), ((6205, 6219), 'numpy.ceil', 'np.ceil', (['(T / n)'], {}), '(T / n)\n', (6212, 6219), True, 'import numpy as np\n'), ((1375, 1387), 'oneflow.utils.data.items', 'data.items', ([], {}), '()\n', (1385, 1387), True, 'import oneflow.utils.data as data\n'), ((4590, 4604), 'oneflow.tensor', 'flow.tensor', (['y'], {}), '(y)\n', (4601, 4604), True, 'import oneflow as flow\n'), ((6417, 6442), 'numpy.hstack', 'np.hstack', (['inputs[i * n:]'], {}), '(inputs[i * n:])\n', (6426, 6442), True, 'import numpy as np\n'), ((1273, 1285), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1282, 1285), False, 'import json\n'), ((6305, 6339), 'numpy.hstack', 'np.hstack', (['inputs[i * n:i * n + m]'], {}), '(inputs[i * n:i * n + m])\n', (6314, 6339), True, 'import numpy as np\n'), ((6508, 6538), 'numpy.hstack', 'np.hstack', (['(frame, inputs[-1])'], {}), '((frame, inputs[-1]))\n', (6517, 6538), True, 'import numpy as np\n'), ((4481, 4495), 'oneflow.tensor', 'flow.tensor', (['x'], {}), '(x)\n', (4492, 4495), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import List, Optional, Tuple
import oneflow as flow
from oneflow.framework.tensor import Tensor
from oneflow.nn.module import Module
class Embedding(Module):
    """A simple lookup table that stores embeddings of a fixed dictionary and size.
    This module is often used to store word embeddings and retrieve them using indices.
    The input to the module is a list of indices, and the output is the corresponding
    word embeddings.
    Args:
        num_embeddings (int): size of the dictionary of embeddings
        embedding_dim (int): the size of each embedding vector
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
                                     therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
                                     i.e. it remains as a fixed "pad". For a newly constructed Embedding,
                                     the embedding vector at :attr:`padding_idx` will default to all zeros,
                                     but can be updated to another value to be used as the padding vector.
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> indices = flow.tensor([[1, 2, 4, 5], [4, 3, 2, 9]], dtype=flow.int)
        >>> m = flow.nn.Embedding(10, 3)
        >>> y = m(indices)
    """

    def __init__(
        self,
        num_embeddings: int,
        embedding_dim: int,
        padding_idx: Optional[int] = None,
        max_norm: Optional[float] = None,
        norm_type: Optional[float] = None,
        scale_grad_by_freq: bool = False,
        sparse: bool = False,
        _weight: Optional[Tensor] = None,
    ):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Validate padding_idx; a negative value counts from the end of the table.
        if padding_idx is not None:
            if padding_idx > 0:
                assert padding_idx < num_embeddings, "Padding_idx must be within num_embeddings"
            elif padding_idx < 0:
                assert padding_idx >= -num_embeddings, "Padding_idx must be within num_embeddings"
                padding_idx = num_embeddings + padding_idx
        self.padding_idx = padding_idx
        # The following torch-style options are accepted but not implemented here.
        assert max_norm is None, "Not support max_norm yet!"
        assert norm_type is None, "Not support norm_type yet!"
        assert scale_grad_by_freq is False, "Not support scale_grad_by_freq=True yet!"
        assert sparse is False, "Not support sparse=True yet!"
        if _weight is None:
            # Allocate a fresh table and initialize it via reset_parameters().
            self.weight = flow.nn.Parameter(Tensor(num_embeddings, embedding_dim))
            self.reset_parameters()
        else:
            assert list(_weight.shape) == [
                num_embeddings,
                embedding_dim,
            ], "Shape of weight does not match num_embeddings and embedding_dim"
            self.weight = flow.nn.Parameter(_weight)
        self.sparse = sparse

    def reset_parameters(self) -> None:
        # Standard-normal init, then force the padding row (if any) to zeros.
        flow.nn.init.normal_(self.weight)
        self._fill_padding_idx_with_zero()

    def _fill_padding_idx_with_zero(self) -> None:
        if self.padding_idx is None:
            return
        with flow.no_grad():
            self.weight[self.padding_idx].fill_(0)

    def forward(self, indices):
        # An embedding lookup is a row gather along axis 0 of the weight table.
        return flow._C.gather(self.weight, indices, axis=0)
def embedding(
    input,
    weight,
    padding_idx=None,
    max_norm=None,
    norm_type=None,
    scale_grad_by_freq=False,
    sparse=False,
):
    r"""A simple lookup table that looks up embeddings in a fixed dictionary and size.
    This module is often used to retrieve word embeddings using indices.
    The input to the module is a list of indices, and the embedding matrix,
    and the output is the corresponding word embeddings.
    See :class:`oneflow.nn.Embedding` for more details.
    Args:
        input (LongTensor): Tensor containing indices into the embedding matrix
        weight (Tensor): The embedding matrix with number of rows equal to the maximum possible index + 1,
            and number of columns equal to the embedding size
        padding_idx (int, optional): If specified, the entries at :attr:`padding_idx` do not contribute to the gradient;
            therefore, the embedding vector at :attr:`padding_idx` is not updated during training,
            i.e. it remains as a fixed "pad".
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import oneflow.nn.functional as F
        >>> # a batch of 2 samples of 4 indices each
        >>> input = flow.tensor([[1,2,4,5],[4,3,2,9]])
        >>> # an embedding matrix containing 10 tensors of size 3
        >>> embedding_matrix = flow.rand(10, 3)
        >>> output = F.embedding(input, embedding_matrix)
        >>> output.shape
        oneflow.Size([2, 4, 3])
        >>> # example with padding_idx
        >>> input = flow.tensor([[0,2,0,5]])
        >>> output = F.embedding(input, embedding_matrix, padding_idx=0)
        >>> output.shape
        oneflow.Size([1, 4, 3])
    """
    # Unsupported torch-style options are rejected up front.
    assert max_norm is None, "Not support max_norm yet!"
    assert norm_type is None, "Not support norm_type yet!"
    assert scale_grad_by_freq is False, "Not support scale_grad_by_freq=True yet!"
    assert sparse is False, "Not support sparse=True yet!"
    if padding_idx is not None:
        # NOTE: mutates `weight` in place so the padding row embeds to zeros.
        weight[padding_idx].fill_(0)
    # The lookup itself is a row gather along axis 0 of the weight matrix.
    return flow._C.gather(weight, input, axis=0)
if __name__ == "__main__":
    import doctest

    # Run the doctest examples embedded in this module's docstrings;
    # raise_on_error aborts on the first failing example.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.nn.init.normal_",
"oneflow.framework.tensor.Tensor",
"oneflow.nn.Parameter",
"oneflow._C.gather",
"oneflow.no_grad"
] | [((6209, 6246), 'oneflow._C.gather', 'flow._C.gather', (['weight', 'input'], {'axis': '(0)'}), '(weight, input, axis=0)\n', (6223, 6246), True, 'import oneflow as flow\n'), ((6315, 6351), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (6330, 6351), False, 'import doctest\n'), ((3736, 3769), 'oneflow.nn.init.normal_', 'flow.nn.init.normal_', (['self.weight'], {}), '(self.weight)\n', (3756, 3769), True, 'import oneflow as flow\n'), ((4041, 4085), 'oneflow._C.gather', 'flow._C.gather', (['self.weight', 'indices'], {'axis': '(0)'}), '(self.weight, indices, axis=0)\n', (4055, 4085), True, 'import oneflow as flow\n'), ((3631, 3657), 'oneflow.nn.Parameter', 'flow.nn.Parameter', (['_weight'], {}), '(_weight)\n', (3648, 3657), True, 'import oneflow as flow\n'), ((3328, 3365), 'oneflow.framework.tensor.Tensor', 'Tensor', (['num_embeddings', 'embedding_dim'], {}), '(num_embeddings, embedding_dim)\n', (3334, 3365), False, 'from oneflow.framework.tensor import Tensor\n'), ((3923, 3937), 'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (3935, 3937), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
import math
def add_optimizer_args(parser):
    """Register optimizer and LR-schedule command-line flags on `parser`.

    Args:
        parser: an argparse.ArgumentParser (or compatible) instance.
    Returns:
        The same parser, with an 'optimizer parameters' argument group added.
    """
    group = parser.add_argument_group('optimizer parameters',
                     'entire group applies only to optimizer parameters')
    group.add_argument("--optimizer", type=str, default="sgd", help="sgd, adam, rmsprop")
    group.add_argument("--learning_rate", type=float, default=0.001)
    group.add_argument("--wd", type=float, default=1.0/32768, help="weight decay")
    group.add_argument("--momentum", type=float, default=0.875, help="momentum")
    group.add_argument('--lr_decay', type=str, default='cosine', help='cosine, step, polynomial, exponential, None')
    # Fix: the float/int defaults below were given as *strings* ('0.94', '0.9',
    # '1'), silently relying on argparse coercing string defaults through
    # `type`. Numeric defaults express the same values explicitly.
    group.add_argument('--lr_decay_rate', type=float, default=0.94, help='exponential learning decay rate')
    group.add_argument('--lr_decay_epochs', type=int, default=2, help='exponential learning rate decay every n epochs')
    group.add_argument('--warmup_epochs', type=int, default=5,
                       help='the epochs to warmp-up lr to scaled large-batch value')
    group.add_argument('--decay_rate', type=float, default=0.9, help='decay rate of RMSProp')
    group.add_argument('--epsilon', type=float, default=1.0, help='epsilon')
    group.add_argument('--gradient_clipping', type=float, default=0.0, help='gradient clipping')
    return parser
def set_up_optimizer(loss, args):
    """Build the LR schedule and optimizer from `args` and minimize `loss`.

    Side effects only: constructs a legacy-OneFlow optimizer and calls
    .minimize(loss) on it; nothing is returned.
    """
    # Global batch size = devices * per-device batch; batches are counted in
    # epochs of args.num_examples samples.
    total_device_num = args.num_nodes * args.gpu_num_per_node
    train_batch_size = total_device_num * args.batch_size_per_device
    batches_per_epoch = math.ceil(args.num_examples / train_batch_size)
    warmup_batches = batches_per_epoch * args.warmup_epochs
    num_train_batches = batches_per_epoch * args.num_epochs
    decay_batches = num_train_batches - warmup_batches
    exponential_decay_batches = batches_per_epoch * args.lr_decay_epochs
    # Linear warmup from 0 over the warmup window (None disables warmup).
    warmup = flow.optimizer.warmup.linear(warmup_batches, 0) if warmup_batches > 0 else None
    # Global-norm gradient clipping, enabled only for a positive threshold.
    grad_clipping = flow.optimizer.grad_clipping.by_global_norm(args.gradient_clipping) if args.gradient_clipping > 0.0 else None
    # Learning-rate scheduler selection; an unrecognized value falls through to
    # a constant LR (single piecewise segment with scale 1.0).
    if args.lr_decay == 'cosine':
        # CosineScheduler
        lr_scheduler = flow.optimizer.CosineScheduler(
            base_lr=args.learning_rate,
            steps = decay_batches,
            warmup=warmup
        )
    elif args.lr_decay == 'step':
        # PiecewiseScalingScheduler: NOTE(review) boundaries are hard-coded to
        # epochs 30/60/80 regardless of args.num_epochs — confirm intended.
        lr_scheduler = flow.optimizer.PiecewiseScalingScheduler(
            base_lr=args.learning_rate,
            boundaries=[30, 60, 80],
            scale=[0.1, 0.01, 0.001],
            warmup=warmup
        )
    elif args.lr_decay == 'polynomial':
        # PolynomialSchduler (sic — the misspelling is the actual API name)
        lr_scheduler = flow.optimizer.PolynomialSchduler(
            base_lr=args.learning_rate,
            steps=decay_batches,
            end_learning_rate=0.00001,
            power=1.0,
            cycle=False,
            warmup=warmup
        )
    elif args.lr_decay == 'exponential':
        # ExponentialScheduler
        lr_scheduler = flow.optimizer.ExponentialScheduler(
            base_lr=args.learning_rate,
            steps=exponential_decay_batches,
            decay_rate=args.lr_decay_rate,
            staircase=False,
            warmup=warmup
        )
    else:
        lr_scheduler = flow.optimizer.PiecewiseScalingScheduler(
            base_lr=args.learning_rate,
            boundaries=[args.num_epochs],
            scale=[1.0],
            warmup=warmup
        )
    # Dynamic loss scaling is only needed for mixed-precision training.
    loss_scale_policy = None
    if args.use_fp16:
        loss_scale_policy = flow.optimizer.loss_scale.dynamic_loss_scale(increment_period=2000);
    # Optimizer selection; each branch constructs and immediately minimizes.
    if args.optimizer=='sgd':
        print("Optimizer: SGD")
        flow.optimizer.SGD(lr_scheduler,
            momentum=args.momentum if args.momentum>0 else None,
            grad_clipping = grad_clipping,
            loss_scale_policy=loss_scale_policy
        ).minimize(loss)
    elif args.optimizer=='adam':
        # A weight decay in (0, 1) selects decoupled AdamW; otherwise plain Adam.
        if args.wd > 0 and args.wd < 1.0 :
            print("Optimizer: AdamW")
            flow.optimizer.AdamW(
                lr_scheduler = lr_scheduler,
                weight_decay = args.wd,
                weight_decay_excludes='_bn-',
                grad_clipping = grad_clipping,
                epsilon=args.epsilon,
                loss_scale_policy=loss_scale_policy
            ).minimize(loss)
        else:
            print("Optimizer: Adam")
            flow.optimizer.Adam(lr_scheduler=lr_scheduler,
                grad_clipping=grad_clipping,
                epsilon=args.epsilon,
                loss_scale_policy=loss_scale_policy
            ).minimize(loss)
    elif args.optimizer=='rmsprop':
        print("Optimizer: RMSProp")
        flow.optimizer.RMSProp(lr_scheduler=lr_scheduler,
            decay_rate=args.decay_rate,
            epsilon=args.epsilon,
            loss_scale_policy=loss_scale_policy
        ).minimize(loss)
if __name__ == '__main__':
    # Smoke test: build the full argument parser (which includes the optimizer
    # group) and echo the resulting configuration.
    import config as configs
    parser = configs.get_parser()
    args = parser.parse_args()
    configs.print_args(args)
| [
"oneflow.optimizer.PolynomialSchduler",
"oneflow.optimizer.AdamW",
"oneflow.optimizer.warmup.linear",
"oneflow.optimizer.CosineScheduler",
"oneflow.optimizer.loss_scale.dynamic_loss_scale",
"oneflow.optimizer.grad_clipping.by_global_norm",
"oneflow.optimizer.RMSProp",
"oneflow.optimizer.Adam",
"onef... | [((2109, 2156), 'math.ceil', 'math.ceil', (['(args.num_examples / train_batch_size)'], {}), '(args.num_examples / train_batch_size)\n', (2118, 2156), False, 'import math\n'), ((5656, 5676), 'config.get_parser', 'configs.get_parser', ([], {}), '()\n', (5674, 5676), True, 'import config as configs\n'), ((5712, 5736), 'config.print_args', 'configs.print_args', (['args'], {}), '(args)\n', (5730, 5736), True, 'import config as configs\n'), ((2448, 2495), 'oneflow.optimizer.warmup.linear', 'flow.optimizer.warmup.linear', (['warmup_batches', '(0)'], {}), '(warmup_batches, 0)\n', (2476, 2495), True, 'import oneflow as flow\n'), ((2579, 2646), 'oneflow.optimizer.grad_clipping.by_global_norm', 'flow.optimizer.grad_clipping.by_global_norm', (['args.gradient_clipping'], {}), '(args.gradient_clipping)\n', (2622, 2646), True, 'import oneflow as flow\n'), ((2811, 2910), 'oneflow.optimizer.CosineScheduler', 'flow.optimizer.CosineScheduler', ([], {'base_lr': 'args.learning_rate', 'steps': 'decay_batches', 'warmup': 'warmup'}), '(base_lr=args.learning_rate, steps=\n decay_batches, warmup=warmup)\n', (2841, 2910), True, 'import oneflow as flow\n'), ((4232, 4299), 'oneflow.optimizer.loss_scale.dynamic_loss_scale', 'flow.optimizer.loss_scale.dynamic_loss_scale', ([], {'increment_period': '(2000)'}), '(increment_period=2000)\n', (4276, 4299), True, 'import oneflow as flow\n'), ((3049, 3187), 'oneflow.optimizer.PiecewiseScalingScheduler', 'flow.optimizer.PiecewiseScalingScheduler', ([], {'base_lr': 'args.learning_rate', 'boundaries': '[30, 60, 80]', 'scale': '[0.1, 0.01, 0.001]', 'warmup': 'warmup'}), '(base_lr=args.learning_rate,\n boundaries=[30, 60, 80], scale=[0.1, 0.01, 0.001], warmup=warmup)\n', (3089, 3187), True, 'import oneflow as flow\n'), ((3337, 3493), 'oneflow.optimizer.PolynomialSchduler', 'flow.optimizer.PolynomialSchduler', ([], {'base_lr': 'args.learning_rate', 'steps': 'decay_batches', 'end_learning_rate': '(1e-05)', 'power': '(1.0)', 'cycle': '(False)', 
'warmup': 'warmup'}), '(base_lr=args.learning_rate, steps=\n decay_batches, end_learning_rate=1e-05, power=1.0, cycle=False, warmup=\n warmup)\n', (3370, 3493), True, 'import oneflow as flow\n'), ((4372, 4535), 'oneflow.optimizer.SGD', 'flow.optimizer.SGD', (['lr_scheduler'], {'momentum': '(args.momentum if args.momentum > 0 else None)', 'grad_clipping': 'grad_clipping', 'loss_scale_policy': 'loss_scale_policy'}), '(lr_scheduler, momentum=args.momentum if args.momentum > \n 0 else None, grad_clipping=grad_clipping, loss_scale_policy=\n loss_scale_policy)\n', (4390, 4535), True, 'import oneflow as flow\n'), ((3672, 3841), 'oneflow.optimizer.ExponentialScheduler', 'flow.optimizer.ExponentialScheduler', ([], {'base_lr': 'args.learning_rate', 'steps': 'exponential_decay_batches', 'decay_rate': 'args.lr_decay_rate', 'staircase': '(False)', 'warmup': 'warmup'}), '(base_lr=args.learning_rate, steps=\n exponential_decay_batches, decay_rate=args.lr_decay_rate, staircase=\n False, warmup=warmup)\n', (3707, 3841), True, 'import oneflow as flow\n'), ((3936, 4066), 'oneflow.optimizer.PiecewiseScalingScheduler', 'flow.optimizer.PiecewiseScalingScheduler', ([], {'base_lr': 'args.learning_rate', 'boundaries': '[args.num_epochs]', 'scale': '[1.0]', 'warmup': 'warmup'}), '(base_lr=args.learning_rate,\n boundaries=[args.num_epochs], scale=[1.0], warmup=warmup)\n', (3976, 4066), True, 'import oneflow as flow\n'), ((4713, 4909), 'oneflow.optimizer.AdamW', 'flow.optimizer.AdamW', ([], {'lr_scheduler': 'lr_scheduler', 'weight_decay': 'args.wd', 'weight_decay_excludes': '"""_bn-"""', 'grad_clipping': 'grad_clipping', 'epsilon': 'args.epsilon', 'loss_scale_policy': 'loss_scale_policy'}), "(lr_scheduler=lr_scheduler, weight_decay=args.wd,\n weight_decay_excludes='_bn-', grad_clipping=grad_clipping, epsilon=args\n .epsilon, loss_scale_policy=loss_scale_policy)\n", (4733, 4909), True, 'import oneflow as flow\n'), ((5096, 5234), 'oneflow.optimizer.Adam', 'flow.optimizer.Adam', ([], 
{'lr_scheduler': 'lr_scheduler', 'grad_clipping': 'grad_clipping', 'epsilon': 'args.epsilon', 'loss_scale_policy': 'loss_scale_policy'}), '(lr_scheduler=lr_scheduler, grad_clipping=grad_clipping,\n epsilon=args.epsilon, loss_scale_policy=loss_scale_policy)\n', (5115, 5234), True, 'import oneflow as flow\n'), ((5388, 5529), 'oneflow.optimizer.RMSProp', 'flow.optimizer.RMSProp', ([], {'lr_scheduler': 'lr_scheduler', 'decay_rate': 'args.decay_rate', 'epsilon': 'args.epsilon', 'loss_scale_policy': 'loss_scale_policy'}), '(lr_scheduler=lr_scheduler, decay_rate=args.\n decay_rate, epsilon=args.epsilon, loss_scale_policy=loss_scale_policy)\n', (5410, 5529), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# NOTE: this is an order-sensitive module-initialization script for the
# single-client compatibility layer; every statement below has side effects
# and the sequence must not be reordered.
from __future__ import absolute_import
import oneflow._oneflow_internal
# Re-export core native types at module level.
Size = oneflow._oneflow_internal.Size
device = oneflow._oneflow_internal.device
placement = oneflow._oneflow_internal.placement
no_grad = oneflow._oneflow_internal.autograd.no_grad
# Define dtype aliases at the beginning of oneflow init via locals() so the
# names don't shadow builtins (float, int, ...) during this module's body.
locals()["dtype"] = oneflow._oneflow_internal.dtype
locals()["char"] = oneflow._oneflow_internal.char
locals()["float16"] = oneflow._oneflow_internal.float16
locals()["half"] = oneflow._oneflow_internal.float16
locals()["float32"] = oneflow._oneflow_internal.float32
locals()["float"] = oneflow._oneflow_internal.float
locals()["double"] = oneflow._oneflow_internal.double
locals()["float64"] = oneflow._oneflow_internal.float64
locals()["int8"] = oneflow._oneflow_internal.int8
locals()["int"] = oneflow._oneflow_internal.int32
locals()["int32"] = oneflow._oneflow_internal.int32
locals()["int64"] = oneflow._oneflow_internal.int64
locals()["long"] = oneflow._oneflow_internal.int64
locals()["uint8"] = oneflow._oneflow_internal.uint8
locals()["record"] = oneflow._oneflow_internal.record
locals()["tensor_buffer"] = oneflow._oneflow_internal.tensor_buffer
from oneflow.core.job.job_set_pb2 import ConfigProto
from oneflow.core.job.job_conf_pb2 import JobConfigProto
from oneflow.compatible.single_client.python.framework import session_util
from oneflow.compatible.single_client.python.framework import session_context
from oneflow.compatible.single_client.python.framework import env_util
# Tear down any environment left over from the multi-client init path.
oneflow._oneflow_internal.DestroyEnv()
import time
# sleep to prevent glog raising "File exists"
time.sleep(1)
del time
# Single-client mode: open a default session eagerly.
oneflow._oneflow_internal.SetIsMultiClient(False)
session_context.OpenDefaultSession(
    session_util.Session(oneflow._oneflow_internal.NewSessionId())
)
oneflow._oneflow_internal.EnableEagerEnvironment(False)
# Scrub helper names so they don't leak into the public namespace.
del env_util
del session_util
del session_context
import oneflow.compatible.single_client.python_gen.__export_symbols__
import oneflow.compatible.single_client.python.framework.c_api_util
# register ForeignCallback
from oneflow.compatible.single_client.python.framework import register_python_callback
from oneflow.compatible.single_client.python.framework import python_callback
oneflow._oneflow_internal.RegisterForeignCallbackOnlyOnce(
    python_callback.global_python_callback
)
del python_callback
del register_python_callback
# register Watcher
from oneflow.compatible.single_client.python.framework import watcher
oneflow._oneflow_internal.RegisterWatcherOnlyOnce(watcher._global_watcher)
del watcher
# register BoxingUtil
from oneflow.compatible.single_client.python.eager import boxing_util
oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce(
    boxing_util._global_boxing_util
)
del boxing_util
# register RegisterPyKernels
from oneflow.compatible.single_client.python.ops.util import custom_op_module
oneflow._oneflow_internal.RegisterPyKernels(
    custom_op_module._python_kernel_reg.kernels_
)
del custom_op_module
from oneflow.compatible.single_client.python.framework import register_class_method_util
register_class_method_util.RegisterMethod4Class()
del register_class_method_util
INVALID_SPLIT_AXIS = oneflow._oneflow_internal.INVALID_SPLIT_AXIS
import atexit
from oneflow.compatible.single_client.python.framework.session_context import (
    TryCloseAllSession,
)
# Make sure all sessions are closed when the interpreter exits.
atexit.register(TryCloseAllSession)
del TryCloseAllSession
del atexit
import sys
# Wrap sys.exit so a nonzero exit code aborts the distributed master first.
__original_exit__ = sys.exit
def custom_exit(returncode):
    if returncode != 0:
        import oneflow
        oneflow._oneflow_internal.MasterSendAbort()
    __original_exit__(returncode)
sys.exit = custom_exit
del custom_exit
del sys
del absolute_import
| [
"oneflow._oneflow_internal.SetIsMultiClient",
"oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce",
"oneflow._oneflow_internal.RegisterPyKernels",
"oneflow._oneflow_internal.MasterSendAbort",
"oneflow.compatible.single_client.python.framework.register_class_method_util.RegisterMethod4Class",
... | [((2091, 2129), 'oneflow._oneflow_internal.DestroyEnv', 'oneflow._oneflow_internal.DestroyEnv', ([], {}), '()\n', (2127, 2129), False, 'import oneflow\n'), ((2189, 2202), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (2199, 2202), False, 'import time\n'), ((2213, 2262), 'oneflow._oneflow_internal.SetIsMultiClient', 'oneflow._oneflow_internal.SetIsMultiClient', (['(False)'], {}), '(False)\n', (2255, 2262), False, 'import oneflow\n'), ((2368, 2423), 'oneflow._oneflow_internal.EnableEagerEnvironment', 'oneflow._oneflow_internal.EnableEagerEnvironment', (['(False)'], {}), '(False)\n', (2416, 2423), False, 'import oneflow\n'), ((2810, 2912), 'oneflow._oneflow_internal.RegisterForeignCallbackOnlyOnce', 'oneflow._oneflow_internal.RegisterForeignCallbackOnlyOnce', (['python_callback.global_python_callback'], {}), '(python_callback.\n global_python_callback)\n', (2867, 2912), False, 'import oneflow\n'), ((3054, 3128), 'oneflow._oneflow_internal.RegisterWatcherOnlyOnce', 'oneflow._oneflow_internal.RegisterWatcherOnlyOnce', (['watcher._global_watcher'], {}), '(watcher._global_watcher)\n', (3103, 3128), False, 'import oneflow\n'), ((3235, 3336), 'oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce', 'oneflow._oneflow_internal.deprecated.RegisterBoxingUtilOnlyOnce', (['boxing_util._global_boxing_util'], {}), '(boxing_util\n ._global_boxing_util)\n', (3298, 3336), False, 'import oneflow\n'), ((3463, 3557), 'oneflow._oneflow_internal.RegisterPyKernels', 'oneflow._oneflow_internal.RegisterPyKernels', (['custom_op_module._python_kernel_reg.kernels_'], {}), '(custom_op_module.\n _python_kernel_reg.kernels_)\n', (3506, 3557), False, 'import oneflow\n'), ((3671, 3720), 'oneflow.compatible.single_client.python.framework.register_class_method_util.RegisterMethod4Class', 'register_class_method_util.RegisterMethod4Class', ([], {}), '()\n', (3718, 3720), False, 'from oneflow.compatible.single_client.python.framework import register_class_method_util\n'), ((3941, 
3976), 'atexit.register', 'atexit.register', (['TryCloseAllSession'], {}), '(TryCloseAllSession)\n', (3956, 3976), False, 'import atexit\n'), ((2324, 2364), 'oneflow._oneflow_internal.NewSessionId', 'oneflow._oneflow_internal.NewSessionId', ([], {}), '()\n', (2362, 2364), False, 'import oneflow\n'), ((4141, 4184), 'oneflow._oneflow_internal.MasterSendAbort', 'oneflow._oneflow_internal.MasterSendAbort', ([], {}), '()\n', (4182, 4184), False, 'import oneflow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
class Sinh(Module):
    """Module wrapper around the elementwise hyperbolic-sine kernel."""

    def __init__(self) -> None:
        super(Sinh, self).__init__()

    def forward(self, x):
        # Delegate directly to the functional sinh op.
        return flow.F.sinh(x)
def sinh_op(x):
    """Returns a new tensor with the hyperbolic sine of the elements of :attr:`input`.
    .. math::
        \\text{out}_{i} = \\sinh(\\text{input}_{i})
    Args:
        input (Tensor): the input tensor.
    For example:
    .. code-block:: python
        >>> import numpy as np
        >>> import oneflow as flow
        >>> x1 = flow.Tensor(np.array([1, 2, 3]))
        >>> x2 = flow.Tensor(np.array([1.53123589,0.54242598,0.15117185]))
        >>> x3 = flow.Tensor(np.array([1,0,-1]))
        >>> flow.sinh(x1).numpy()
        array([ 1.1752012, 3.6268604, 10.017875 ], dtype=float32)
        >>> flow.sinh(x2).numpy()
        array([2.20381 , 0.5694193, 0.1517483], dtype=float32)
        >>> flow.sinh(x3).numpy()
        array([ 1.1752012, 0. , -1.1752012], dtype=float32)
    """
    # Functional entry point: build the module and apply it.
    module = Sinh()
    return module(x)
@register_tensor_op("sinh")
def sinh_op_tensor(x):
    """
    sinh() -> Tensor
    See :func:`oneflow.sinh`
    """
    # Tensor-method flavor; delegates to the same Sinh module.
    op = Sinh()
    return op(x)
if __name__ == "__main__":
    import doctest

    # Verify the docstring examples above; stop at the first failure.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.framework.tensor.register_tensor_op",
"oneflow.F.sinh"
] | [((1691, 1717), 'oneflow.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""sinh"""'], {}), "('sinh')\n", (1709, 1717), False, 'from oneflow.framework.tensor import register_tensor_op\n'), ((1884, 1920), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (1899, 1920), False, 'import doctest\n'), ((829, 843), 'oneflow.F.sinh', 'flow.F.sinh', (['x'], {}), '(x)\n', (840, 843), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
def masked_select_op(input, mask):
    """
    Returns a new 1-D tensor which indexes the input tensor according to the boolean mask mask which is a BoolTensor(In oneFlow BoolTensor is replaced by Int8Tensor).
    The shapes of the mask tensor and the input tensor don’t need to match, but they must be broadcastable.
    Args:
        input (Tensor): the input tensor.
        mask (Tensor): the tensor containing the binary mask to index with
    For example:
    .. code-block:: python
        >>> import oneflow as flow
        >>> import numpy as np
        >>> input = flow.tensor(np.array([[-0.4620, 0.3139], [0.3898, -0.7197], [0.0478, -0.1657]]), dtype=flow.float32)
        >>> mask = input.gt(0.05)
        >>> out = flow.masked_select(input, mask)
        >>> out
        tensor([0.3139, 0.3898], dtype=oneflow.float32)
    """
    assert len(input.shape) == len(mask.shape), \
        "The dim of masked_select module's inputs can not match, please check!"
    assert input.is_global == mask.is_global, (
        "input tensor is %s tensor, but mask is %s tensor"
        % (
            "global" if input.is_global else "local",
            "global" if mask.is_global else "local",
        )
    )
    # Zero out the unselected entries, locate the surviving coordinates, gather
    # them, and flatten to the 1-D result.
    # NOTE(review): because argwhere keeps only nonzero products, a selected
    # element whose value is exactly 0 is dropped — matches current behavior.
    masked = flow._C.mul(input, mask)
    coords = flow.argwhere(masked)
    selected = flow._C.gather_nd(masked, coords)
    return selected.flatten()
if __name__ == "__main__":
    import doctest

    # Run the docstring example above; stop at the first failure.
    doctest.testmod(raise_on_error=True)
| [
"oneflow.argwhere",
"oneflow._C.gather_nd",
"oneflow._C.mul"
] | [((1915, 1939), 'oneflow._C.mul', 'flow._C.mul', (['input', 'mask'], {}), '(input, mask)\n', (1926, 1939), True, 'import oneflow as flow\n'), ((1955, 1973), 'oneflow.argwhere', 'flow.argwhere', (['res'], {}), '(res)\n', (1968, 1973), True, 'import oneflow as flow\n'), ((1991, 2022), 'oneflow._C.gather_nd', 'flow._C.gather_nd', (['res', 'indices'], {}), '(res, indices)\n', (2008, 2022), True, 'import oneflow as flow\n'), ((2109, 2145), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (2124, 2145), False, 'import doctest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
from typing import Optional, Sequence, Tuple, Union, List
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.job.initializer_conf_pb2 as initializer_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.remote_blob as remote_blob_util
from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate
import oneflow._oneflow_internal
import traceback
@oneflow_export("data.ImagePreprocessor")
class ImagePreprocessor(object):
def __init__(self, preprocessor: str) -> None:
assert isinstance(preprocessor, str)
if preprocessor.lower() != "bgr2rgb" and preprocessor.lower() != "mirror":
raise ValueError('preprocessor must be "bgr2rgb" or "mirror".')
self.preprocessor = preprocessor
def is_rgb(self) -> bool:
return self.preprocessor.lower() == "bgr2rgb"
def is_mirror(self) -> bool:
return self.preprocessor.lower() == "mirror"
@oneflow_export("data.ImageResizePreprocessor")
class ImageResizePreprocessor(object):
def __init__(self, width: int, height: int) -> None:
assert isinstance(width, int)
assert isinstance(height, int)
self.width = width
self.height = height
@oneflow_export("data.ImageCodec")
class ImageCodec(object):
def __init__(
self,
image_preprocessors: Optional[
Sequence[Union[ImagePreprocessor, ImageResizePreprocessor,]]
] = None,
) -> None:
if isinstance(image_preprocessors, (list, tuple)):
self.image_preprocessors = list(image_preprocessors)
else:
self.image_preprocessors = []
def color_space(self) -> str:
for img_preprocessor in self.image_preprocessors:
if (
isinstance(img_preprocessor, ImagePreprocessor)
and img_preprocessor.is_rgb()
):
return "RGB"
return "BGR"
def do_mirror(self) -> bool:
for img_preprocessor in self.image_preprocessors:
if (
isinstance(img_preprocessor, ImagePreprocessor)
and img_preprocessor.is_mirror()
):
return True
return False
def do_resize(self):
for img_preprocessor in self.image_preprocessors:
if isinstance(img_preprocessor, ImageResizePreprocessor):
return (True, img_preprocessor.width, img_preprocessor.height)
return (False, -1, -1)
@oneflow_export("data.RawCodec")
class RawCodec(object):
def __init__(self, auto_zero_padding: bool = False) -> None:
self.auto_zero_padding = auto_zero_padding
@oneflow_export("data.NormByChannelPreprocessor")
class NormByChannelPreprocessor(object):
def __init__(
self,
mean_values: Union[List[float], Tuple[float]],
std_values: Union[List[float], Tuple[float]] = (1.0, 1.0, 1.0),
data_format: str = "channels_last",
) -> None:
assert isinstance(mean_values, (list, tuple))
assert isinstance(std_values, (list, tuple))
assert isinstance(data_format, str)
self.mean_values = mean_values
self.std_values = std_values
self.data_format = data_format
def output_layout(self) -> str:
if self.data_format == "channels_last":
return "NHWC"
else:
return "NCHW"
@oneflow_export("data.BlobConf")
class BlobConf(object):
def __init__(
self,
name: str,
shape: Sequence[int],
dtype: flow.dtype,
codec: Union[ImageCodec, RawCodec],
preprocessors: Optional[Sequence[Union[NormByChannelPreprocessor,]]] = None,
) -> None:
assert isinstance(name, str)
assert isinstance(shape, (list, tuple))
self.name = name
self.shape = shape
self.dtype = dtype
self.codec = codec
if isinstance(preprocessors, (list, tuple)):
self.preprocessors = list(preprocessors)
else:
self.preprocessors = []
def decode_blob(
self, input_blob: oneflow._oneflow_internal.BlobDesc, batch_size: int
) -> oneflow._oneflow_internal.BlobDesc:
if isinstance(self.codec, ImageCodec):
color_space = self.codec.color_space()
image = flow.data.ofrecord_image_decoder(
input_blob=input_blob, blob_name=self.name, color_space=color_space
)
coin_flip = None
if self.codec.do_mirror():
coin_flip = flow.random.coin_flip(batch_size)
do_resize, width, height = self.codec.do_resize()
if do_resize:
assert width > 0 and height > 0
image, _, _ = flow.image.resize(
image=image, target_size=(width, height)
)
else:
assert len(self.shape) >= 2
image, _, _ = flow.image.resize(
image=image, target_size=(self.shape[0], self.shape[1])
)
for preprocess in self.preprocessors:
image = flow.image.crop_mirror_normalize(
input_blob=image,
mirror_blob=coin_flip,
color_space=color_space,
output_layout=preprocess.output_layout(),
mean=preprocess.mean_values,
std=preprocess.std_values,
output_dtype=self.dtype,
)
return image
elif isinstance(self.codec, RawCodec):
raw = flow.data.ofrecord_raw_decoder(
input_blob=input_blob,
blob_name=self.name,
shape=self.shape,
dtype=self.dtype,
auto_zero_padding=self.codec.auto_zero_padding,
)
return raw
else:
raise NotImplementedError
@oneflow_export("data.decode_ofrecord")
@oneflow_deprecate()
def decode_ofrecord(
ofrecord_dir: str,
blobs: Sequence[BlobConf],
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
shuffle: bool = False,
buffer_size: int = 1024,
name: str = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc]:
print(
"WARNING:",
"oneflow.data.decode_ofrecord is deprecated, and NOT work in eager mode, please use: \n",
" 1) ofrecord = oneflow.data.ofrecord_reader(...) to read ofrecord; \n",
" 2) image = oneflow.data.ofrecord_image_decoder(...) to decode image; \n",
" 3) raw = oneflow.data.ofrecord_raw_decoder(...) to decode raw data like label; \n",
traceback.format_stack()[-2],
)
assert not flow.eager_execution_enabled()
ofrecord = flow.data.ofrecord_reader(
ofrecord_dir=ofrecord_dir,
batch_size=batch_size,
data_part_num=data_part_num,
part_name_prefix=part_name_prefix,
part_name_suffix_length=part_name_suffix_length,
random_shuffle=shuffle,
shuffle_buffer_size=buffer_size,
name=name,
)
result_blob_list = []
for blob_conf in blobs:
result_blob_list.append(
blob_conf.decode_blob(input_blob=ofrecord, batch_size=batch_size)
)
return tuple(result_blob_list)
@oneflow_export("data.ofrecord_loader")
def ofrecord_loader(
ofrecord_dir: str,
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
shuffle: bool = False,
shuffle_buffer_size: int = 1024,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
print(
"WARNING:",
"oneflow.data.ofrecord_loader is deprecated, and NOT work in eager mode, please use: \n",
" ofrecord = oneflow.data.ofrecord_reader(...) to read ofrecord; \n",
traceback.format_stack()[-2],
)
return flow.data.ofrecord_reader(
ofrecord_dir=ofrecord_dir,
batch_size=batch_size,
data_part_num=data_part_num,
part_name_prefix=part_name_prefix,
part_name_suffix_length=part_name_suffix_length,
random_shuffle=shuffle,
shuffle_buffer_size=shuffle_buffer_size,
name=name,
)
@oneflow_export("data.ofrecord_reader")
def ofrecord_reader(
ofrecord_dir: str,
batch_size: int = 1,
data_part_num: int = 1,
part_name_prefix: str = "part-",
part_name_suffix_length: int = -1,
random_shuffle: bool = False,
shuffle_buffer_size: int = 1024,
shuffle_after_epoch: bool = False,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Get ofrecord object from ofrecord dataset.
Args:
ofrecord_dir (str): Path to ofrecord dataset.
batch_size (int, optional): Batch size. Defaults to 1.
data_part_num (int, optional): Number of dataset's partitions. Defaults to 1.
part_name_prefix (str, optional): Prefix of dataset's parition file. Defaults to "part-".
part_name_suffix_length (int, optional): Total length of padded suffix number , -1 means no padding. eg: 3 for `part-001`. Defaults to -1.
random_shuffle (bool, optional): Determines records shuffled or not. Defaults to False.
shuffle_buffer_size (int, optional): Shuffle buffer size. Defaults to 1024.
shuffle_after_epoch (bool, optional): Shuffled or not after each epoch. Defaults to False.
name (Optional[str], optional): Optional name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
from typing import Tuple
@flow.global_function(type="predict")
def ofrecord_reader_job() -> Tuple[tp.Numpy, tp.Numpy]:
batch_size = 16
with flow.scope.placement("cpu", "0:0"):
# our ofrecord file path is "./dataset/part-0"
ofrecord = flow.data.ofrecord_reader(
"./dataset/",
batch_size=batch_size,
data_part_num=1,
part_name_suffix_length=-1,
part_name_prefix='part-',
random_shuffle=True,
shuffle_after_epoch=True,
)
# image shape is (28*28, )
image = flow.data.OFRecordRawDecoder(
ofrecord, "images", shape=(784, ), dtype=flow.int32
)
# label shape is (1, )
label = flow.data.OFRecordRawDecoder(
ofrecord, "labels", shape=(1, ), dtype=flow.int32
)
return image, label
if __name__ == "__main__":
images, labels = ofrecord_reader_job()
print("In per batch, images shape is", images.shape)
print("In per batch, labels shape is", labels.shape)
# In per batch, images shape is (16, 784)
# In per batch, labels shape is (16, 1)
"""
if name is None:
name = id_util.UniqueStr("OFRecord_Reader_")
return (
flow.user_op_builder(name)
.Op("OFRecordReader")
.Output("out")
.Attr("data_dir", ofrecord_dir)
.Attr("data_part_num", data_part_num)
.Attr("batch_size", batch_size)
.Attr("part_name_prefix", part_name_prefix)
.Attr("random_shuffle", random_shuffle)
.Attr("shuffle_buffer_size", shuffle_buffer_size)
.Attr("shuffle_after_epoch", shuffle_after_epoch)
.Attr("part_name_suffix_length", part_name_suffix_length)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("data.decode_random")
def decode_random(
shape: Sequence[int],
dtype: flow.dtype,
batch_size: int = 1,
initializer: Optional[initializer_conf_util.InitializerConf] = None,
tick: Optional[oneflow._oneflow_internal.BlobDesc] = None,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
op_conf = op_conf_util.OperatorConf()
if name is None:
name = id_util.UniqueStr("DecodeRandom_")
assert isinstance(name, str)
op_conf.name = name
assert isinstance(shape, (list, tuple))
op_conf.decode_random_conf.shape.dim.extend(shape)
assert dtype is not None
setattr(
op_conf.decode_random_conf,
"data_type",
oneflow._oneflow_internal.deprecated.GetProtoDtype4OfDtype(dtype),
)
op_conf.decode_random_conf.batch_size = batch_size
if initializer is not None:
op_conf.decode_random_conf.data_initializer.CopyFrom(initializer)
else:
op_conf.decode_random_conf.data_initializer.CopyFrom(
flow.random_uniform_initializer()
)
if tick:
op_conf.decode_random_conf.tick = tick.unique_name
op_conf.decode_random_conf.out = "out"
lbi = logical_blob_id_util.LogicalBlobId()
lbi.op_name = op_conf.name
lbi.blob_name = "out"
interpret_util.ConsistentForward(op_conf)
return remote_blob_util.RemoteBlob(lbi)
@oneflow_export(
    "data.image_decoder_random_crop_resize", "data.ImageDecoderRandomCropResize"
)
def image_decoder_random_crop_resize(
    input_blob: oneflow._oneflow_internal.BlobDesc,
    target_width: int,
    target_height: int,
    num_attempts: Optional[int] = None,
    seed: Optional[int] = None,
    random_area: Optional[Sequence[float]] = None,
    random_aspect_ratio: Optional[Sequence[float]] = None,
    num_workers: Optional[int] = None,
    warmup_size: Optional[int] = None,
    max_num_pixels: Optional[int] = None,
    name: Optional[str] = None,
) -> Tuple[oneflow._oneflow_internal.BlobDesc]:
    """Build a fused image decode + random crop + resize operator.

    Optional arguments are written into the op conf only when provided,
    so the operator's own defaults apply otherwise.
    """
    if name is None:
        name = id_util.UniqueStr("ImageDecoderRandomCropResize_")
    op_conf = op_conf_util.OperatorConf()
    op_conf.name = name
    # "in" is a Python keyword, hence setattr instead of attribute syntax.
    setattr(op_conf.image_decoder_random_crop_resize_conf, "in", input_blob.unique_name)
    op_conf.image_decoder_random_crop_resize_conf.out = "out"
    op_conf.image_decoder_random_crop_resize_conf.target_width = target_width
    op_conf.image_decoder_random_crop_resize_conf.target_height = target_height
    if num_attempts is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_attempts = num_attempts
    if seed is not None:
        op_conf.image_decoder_random_crop_resize_conf.seed = seed
    if random_area is not None:
        # random_area is an inclusive (min, max) pair.
        assert len(random_area) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_area_min = random_area[0]
        op_conf.image_decoder_random_crop_resize_conf.random_area_max = random_area[1]
    if random_aspect_ratio is not None:
        # random_aspect_ratio is an inclusive (min, max) pair.
        assert len(random_aspect_ratio) == 2
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_min = random_aspect_ratio[
            0
        ]
        op_conf.image_decoder_random_crop_resize_conf.random_aspect_ratio_max = random_aspect_ratio[
            1
        ]
    if num_workers is not None:
        op_conf.image_decoder_random_crop_resize_conf.num_workers = num_workers
    if warmup_size is not None:
        op_conf.image_decoder_random_crop_resize_conf.warmup_size = warmup_size
    if max_num_pixels is not None:
        op_conf.image_decoder_random_crop_resize_conf.max_num_pixels = max_num_pixels
    interpret_util.Forward(op_conf)
    lbi = logical_blob_id_util.LogicalBlobId()
    lbi.op_name = op_conf.name
    lbi.blob_name = "out"
    return remote_blob_util.RemoteBlob(lbi)
@oneflow_export("data.onerec_reader")
def onerec_reader(
files,
batch_size=1,
random_shuffle=False,
shuffle_mode="instance",
shuffle_buffer_size=1024,
shuffle_after_epoch=False,
verify_example=True,
name=None,
):
assert isinstance(files, (list, tuple))
if name is None:
name = id_util.UniqueStr("OneRecReader_")
return (
flow.user_op_builder(name)
.Op("OneRecReader")
.Output("out")
.Attr("files", files)
.Attr("batch_size", batch_size)
.Attr("random_shuffle", random_shuffle)
.Attr("shuffle_mode", shuffle_mode)
.Attr("shuffle_buffer_size", shuffle_buffer_size)
.Attr("shuffle_after_epoch", shuffle_after_epoch)
.Attr("verify_example", verify_example)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
| [
"oneflow.core.register.logical_blob_id_pb2.LogicalBlobId",
"oneflow.data.ofrecord_image_decoder",
"oneflow.eager_execution_enabled",
"oneflow.random.coin_flip",
"oneflow.python.framework.remote_blob.RemoteBlob",
"oneflow.data.ofrecord_raw_decoder",
"oneflow.data.ofrecord_reader",
"oneflow.python.onefl... | [((1221, 1261), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ImagePreprocessor"""'], {}), "('data.ImagePreprocessor')\n", (1235, 1261), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((1767, 1813), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ImageResizePreprocessor"""'], {}), "('data.ImageResizePreprocessor')\n", (1781, 1813), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((2046, 2079), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ImageCodec"""'], {}), "('data.ImageCodec')\n", (2060, 2079), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((3301, 3332), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.RawCodec"""'], {}), "('data.RawCodec')\n", (3315, 3332), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((3476, 3524), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.NormByChannelPreprocessor"""'], {}), "('data.NormByChannelPreprocessor')\n", (3490, 3524), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((4204, 4235), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.BlobConf"""'], {}), "('data.BlobConf')\n", (4218, 4235), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((6733, 6771), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.decode_ofrecord"""'], {}), "('data.decode_ofrecord')\n", (6747, 6771), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((6773, 6792), 'oneflow.python.oneflow_export.oneflow_deprecate', 'oneflow_deprecate', ([], {}), '()\n', (6790, 6792), False, 'from oneflow.python.oneflow_export import oneflow_export, 
oneflow_deprecate\n'), ((8175, 8213), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ofrecord_loader"""'], {}), "('data.ofrecord_loader')\n", (8189, 8213), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((9129, 9167), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.ofrecord_reader"""'], {}), "('data.ofrecord_reader')\n", (9143, 9167), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((12633, 12669), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.decode_random"""'], {}), "('data.decode_random')\n", (12647, 12669), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((14030, 14126), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.image_decoder_random_crop_resize"""', '"""data.ImageDecoderRandomCropResize"""'], {}), "('data.image_decoder_random_crop_resize',\n 'data.ImageDecoderRandomCropResize')\n", (14044, 14126), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((16427, 16463), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""data.onerec_reader"""'], {}), "('data.onerec_reader')\n", (16441, 16463), False, 'from oneflow.python.oneflow_export import oneflow_export, oneflow_deprecate\n'), ((7632, 7900), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', ([], {'ofrecord_dir': 'ofrecord_dir', 'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'part_name_prefix': 'part_name_prefix', 'part_name_suffix_length': 'part_name_suffix_length', 'random_shuffle': 'shuffle', 'shuffle_buffer_size': 'buffer_size', 'name': 'name'}), '(ofrecord_dir=ofrecord_dir, batch_size=batch_size,\n data_part_num=data_part_num, part_name_prefix=part_name_prefix,\n part_name_suffix_length=part_name_suffix_length, random_shuffle=shuffle,\n shuffle_buffer_size=buffer_size, 
name=name)\n', (7657, 7900), True, 'import oneflow as flow\n'), ((8790, 9066), 'oneflow.data.ofrecord_reader', 'flow.data.ofrecord_reader', ([], {'ofrecord_dir': 'ofrecord_dir', 'batch_size': 'batch_size', 'data_part_num': 'data_part_num', 'part_name_prefix': 'part_name_prefix', 'part_name_suffix_length': 'part_name_suffix_length', 'random_shuffle': 'shuffle', 'shuffle_buffer_size': 'shuffle_buffer_size', 'name': 'name'}), '(ofrecord_dir=ofrecord_dir, batch_size=batch_size,\n data_part_num=data_part_num, part_name_prefix=part_name_prefix,\n part_name_suffix_length=part_name_suffix_length, random_shuffle=shuffle,\n shuffle_buffer_size=shuffle_buffer_size, name=name)\n', (8815, 9066), True, 'import oneflow as flow\n'), ((12986, 13013), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (13011, 13013), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((13842, 13878), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (13876, 13878), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((13941, 13982), 'oneflow.python.framework.interpret_util.ConsistentForward', 'interpret_util.ConsistentForward', (['op_conf'], {}), '(op_conf)\n', (13973, 13982), True, 'import oneflow.python.framework.interpret_util as interpret_util\n'), ((13994, 14026), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (14021, 14026), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((14750, 14777), 'oneflow.core.operator.op_conf_pb2.OperatorConf', 'op_conf_util.OperatorConf', ([], {}), '()\n', (14775, 14777), True, 'import oneflow.core.operator.op_conf_pb2 as op_conf_util\n'), ((16244, 16275), 'oneflow.python.framework.interpret_util.Forward', 'interpret_util.Forward', (['op_conf'], {}), '(op_conf)\n', (16266, 16275), True, 'import 
oneflow.python.framework.interpret_util as interpret_util\n'), ((16286, 16322), 'oneflow.core.register.logical_blob_id_pb2.LogicalBlobId', 'logical_blob_id_util.LogicalBlobId', ([], {}), '()\n', (16320, 16322), True, 'import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util\n'), ((16391, 16423), 'oneflow.python.framework.remote_blob.RemoteBlob', 'remote_blob_util.RemoteBlob', (['lbi'], {}), '(lbi)\n', (16418, 16423), True, 'import oneflow.python.framework.remote_blob as remote_blob_util\n'), ((7585, 7615), 'oneflow.eager_execution_enabled', 'flow.eager_execution_enabled', ([], {}), '()\n', (7613, 7615), True, 'import oneflow as flow\n'), ((12004, 12041), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OFRecord_Reader_"""'], {}), "('OFRecord_Reader_')\n", (12021, 12041), True, 'import oneflow.python.framework.id_util as id_util\n'), ((13051, 13085), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""DecodeRandom_"""'], {}), "('DecodeRandom_')\n", (13068, 13085), True, 'import oneflow.python.framework.id_util as id_util\n'), ((14684, 14734), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""ImageDecoderRandomCropResize_"""'], {}), "('ImageDecoderRandomCropResize_')\n", (14701, 14734), True, 'import oneflow.python.framework.id_util as id_util\n'), ((16752, 16786), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""OneRecReader_"""'], {}), "('OneRecReader_')\n", (16769, 16786), True, 'import oneflow.python.framework.id_util as id_util\n'), ((5124, 5229), 'oneflow.data.ofrecord_image_decoder', 'flow.data.ofrecord_image_decoder', ([], {'input_blob': 'input_blob', 'blob_name': 'self.name', 'color_space': 'color_space'}), '(input_blob=input_blob, blob_name=self.name,\n color_space=color_space)\n', (5156, 5229), True, 'import oneflow as flow\n'), ((7534, 7558), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (7556, 7558), False, 'import 
traceback\n'), ((8742, 8766), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (8764, 8766), False, 'import traceback\n'), ((13671, 13704), 'oneflow.random_uniform_initializer', 'flow.random_uniform_initializer', ([], {}), '()\n', (13702, 13704), True, 'import oneflow as flow\n'), ((5352, 5385), 'oneflow.random.coin_flip', 'flow.random.coin_flip', (['batch_size'], {}), '(batch_size)\n', (5373, 5385), True, 'import oneflow as flow\n'), ((5553, 5612), 'oneflow.image.resize', 'flow.image.resize', ([], {'image': 'image', 'target_size': '(width, height)'}), '(image=image, target_size=(width, height))\n', (5570, 5612), True, 'import oneflow as flow\n'), ((5743, 5817), 'oneflow.image.resize', 'flow.image.resize', ([], {'image': 'image', 'target_size': '(self.shape[0], self.shape[1])'}), '(image=image, target_size=(self.shape[0], self.shape[1]))\n', (5760, 5817), True, 'import oneflow as flow\n'), ((6401, 6568), 'oneflow.data.ofrecord_raw_decoder', 'flow.data.ofrecord_raw_decoder', ([], {'input_blob': 'input_blob', 'blob_name': 'self.name', 'shape': 'self.shape', 'dtype': 'self.dtype', 'auto_zero_padding': 'self.codec.auto_zero_padding'}), '(input_blob=input_blob, blob_name=self.name,\n shape=self.shape, dtype=self.dtype, auto_zero_padding=self.codec.\n auto_zero_padding)\n', (6431, 6568), True, 'import oneflow as flow\n'), ((16809, 16835), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (16829, 16835), True, 'import oneflow as flow\n'), ((12064, 12090), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (12084, 12090), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import oneflow
import oneflow.python.framework.blob_desc as blob_desc
import oneflow.python.framework.c_api_util as c_api_util
import oneflow.python.framework.placement_context as placement_ctx
import oneflow.python.framework.blob_trait as blob_trait
from oneflow.python.framework.dtype import convert_proto_dtype_to_oneflow_dtype
import oneflow.python.framework.dtype as dtype_util
import oneflow.python.lib.core.enable_if as enable_if
import oneflow.python.framework.hob as hob
import oneflow.python.eager.eager_blob_util as eager_blob_util
import oneflow.python.eager.blob_register as blob_register_util
import oneflow.python.eager.blob_cache as blob_cache_util
import oneflow.python.eager.vm_util as vm_util
import oneflow.python.eager.gradient_util as gradient_util
import oneflow.python.eager.boxing_util as boxing_util
import oneflow.python.framework.op_arg_util as op_arg_util
import oneflow.core.job.placement_pb2 as placement_pb
import traceback
import sys
# Process-wide registry mapping logical blob names to eager blob objects.
blob_register = blob_register_util.GetDefaultBlobRegister()
def RemoteBlob(lbi, **kw):
    """Construct the remote-blob wrapper appropriate for the current mode."""
    # enable_if.unique selects whichever constructor's hob condition holds.
    constructor = enable_if.unique([EagerLogicalBlob, LazyRemoteBlob])
    return constructor(lbi, **kw)
@enable_if.condition(hob.in_global_mode & hob.eager_execution_enabled)
def EagerLogicalBlob(lbi, **kw):
    """Create the eager blob wrapper (mirrored or consistent) for ``lbi``."""
    job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    if c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn):
        blob_cls = EagerMirroredBlob
    else:
        blob_cls = EagerConsistentBlob
    return blob_cls(lbi, **kw)
@enable_if.condition(~hob.eager_execution_enabled)
def LazyRemoteBlob(lbi, **kw):
    """Create the lazy blob wrapper (mirrored or consistent) for ``lbi``."""
    job_name = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
    lbn = lbi.op_name + "/" + lbi.blob_name
    mirrored = c_api_util.JobBuildAndInferCtx_IsMirroredBlob(job_name, lbn)
    return (LazyMirroredBlob if mirrored else LazyConsistentBlob)(lbi, **kw)
class BlobDef(
    blob_desc.BlobDesc, blob_trait.BlobOperatorTrait, blob_trait.BlobHeaderTrait
):
    """Common base for lazy/eager remote-blob wrappers.

    Subclasses implement the abstract properties below; this base caches
    the current job name and lazily computes the parallel size.
    """

    def __init__(self, lbi, **kw):
        blob_desc.BlobDesc.__init__(self, lbi, **kw)
        self.job_name_ = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
        # 0 means "not computed yet"; see parallel_size below.
        self.parallel_size_ = 0

    @property
    def batch_axis(self):
        raise NotImplementedError

    @property
    def split_axis(self):
        raise NotImplementedError

    @property
    def disable_boxing(self):
        raise NotImplementedError

    @property
    def parallel_conf(self):
        raise NotImplementedError

    @property
    def parallel_size(self):
        """Number of devices in this blob's placement (computed once, cached)."""
        if self.parallel_size_ == 0:
            self.parallel_size_ = placement_ctx.GetParallelSize(
                placement_ctx.MakeMachineId2DeviceIdList(self.parallel_conf)
            )
        return self.parallel_size_

    def with_distribute(self, distribute):
        """Return a fresh remote blob for the same lbi with ``distribute`` set."""
        oneflow.distribute.assert_is_valid_distribute(distribute)
        ret = RemoteBlob(self.lbi_)
        ret.distribute_ = distribute
        return ret

    def with_gradient_distribute(self, distribute):
        """Return a parallel-cast of this blob with the given gradient distribute."""
        return oneflow.parallel_cast(self, gradient_distribute=distribute)
class ConsistentBlob(BlobDef):
    """Marker base class for consistent-view blobs."""

    def __init__(self, *args, **kwargs):
        BlobDef.__init__(self, *args, **kwargs)
class LazyConsistentBlob(ConsistentBlob):
    """Consistent blob under lazy (graph) execution.

    All metadata queries are forwarded to the JobBuildAndInferCtx of the
    job this blob belongs to.
    """

    def __init__(self, lbi, **kw):
        ConsistentBlob.__init__(self, lbi, **kw)
        self.job_name_ = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()

    @property
    def shape(self):
        if oneflow.scope.mirrored_view_enabled():
            # Accessing a consistent blob's shape from mirrored view is
            # probably a mistake; warn with the offending stack frame.
            print(
                "WARNING:",
                "You access a consistent blob shape in mirrored view, there may be problems,",
                "you should add 'x = flow.cast_to_current_logical_view(x)'.",
                file=sys.stderr,
            )
            print(traceback.format_stack()[-2])
        return c_api_util.JobBuildAndInferCtx_GetStaticShape(self.job_name_, self.lbn_)

    @property
    def dtype(self):
        return convert_proto_dtype_to_oneflow_dtype(
            c_api_util.JobBuildAndInferCtx_GetDataType(self.job_name_, self.lbn_)
        )

    @property
    def batch_axis(self):
        return c_api_util.JobBuildAndInferCtx_GetBatchAxis(self.job_name_, self.lbn_)

    @property
    def split_axis(self):
        return c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView(
            self.job_name_, self.lbn_
        )

    @property
    def is_dynamic(self):
        return c_api_util.JobBuildAndInferCtx_IsDynamic(self.job_name_, self.lbn_)

    @property
    def disable_boxing(self):
        return c_api_util.JobBuildAndInferCtx_DisableBoxing(self.job_name_, self.lbn_)

    @property
    def is_tensor_list(self):
        return c_api_util.JobBuildAndInferCtx_IsTensorList(self.job_name_, self.lbn_)

    @property
    def parallel_conf(self):
        return c_api_util.JobBuildAndInferCtx_GetParallelConfFromProducerView(
            self.job_name_, self.lbn_
        )

    def IdenticalTo(self, rhs):
        """True iff ``rhs`` matches this blob on every piece of metadata."""
        return (
            self.unique_name == rhs.unique_name
            and self.shape == rhs.shape
            and self.batch_axis == rhs.batch_axis
            and self.split_axis == rhs.split_axis
            and self.is_dynamic == rhs.is_dynamic
            and self.disable_boxing == rhs.disable_boxing
            and self.is_tensor_list == rhs.is_tensor_list
            and self.parallel_conf == rhs.parallel_conf
        )
class MirroredBlob(BlobDef):
    """Marker base class for mirrored-view blobs."""

    def __init__(self, *args, **kwargs):
        BlobDef.__init__(self, *args, **kwargs)
class LazyMirroredBlob(MirroredBlob):
    """Mirrored blob under lazy (graph) execution.

    Holds one LazyConsistentBlob per mirrored sub-lbi; metadata queries go
    through the MirroredBlob* JobBuildAndInferCtx entry points.
    """

    def __init__(self, lbi, **kw):
        MirroredBlob.__init__(self, lbi, **kw)
        self.job_name_ = c_api_util.JobBuildAndInferCtx_GetCurrentJobName()
        self.sub_consistent_blob_list_ = []
        lbn = self.logical_blob_name
        num_sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi(
            self.job_name_, lbn
        )
        for i in range(num_sub_lbi):
            sub_lbi = c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi(
                self.job_name_, lbn, i
            )
            consistent_blob = LazyConsistentBlob(sub_lbi)
            self.sub_consistent_blob_list_.append(consistent_blob)

    @property
    def sub_consistent_blob_list(self):
        return self.sub_consistent_blob_list_

    @property
    def shape(self):
        if oneflow.scope.consistent_view_enabled():
            # Fix: a missing comma previously fused the two message fragments
            # below into one run-on string ("...problems,you should...");
            # keep them as separate print args like LazyConsistentBlob.shape.
            print(
                "WARNING:",
                "You access a mirrored blob shape in consistent view, there may be problems,",
                "you should add 'x = flow.cast_to_current_logical_view(x)'.",
                file=sys.stderr,
            )
            print(traceback.format_stack()[-2])
        return c_api_util.JobBuildAndInferCtx_MirroredBlobGetStaticShape(
            self.job_name_, self.lbn_
        )

    @property
    def dtype(self):
        return convert_proto_dtype_to_oneflow_dtype(
            c_api_util.JobBuildAndInferCtx_MirroredBlobGetDataType(
                self.job_name_, self.lbn_
            )
        )

    @property
    def batch_axis(self):
        return c_api_util.JobBuildAndInferCtx_MirroredBlobGetBatchAxis(
            self.job_name_, self.lbn_
        )

    @property
    def split_axis(self):
        return c_api_util.JobBuildAndInferCtx_MirroredBlobGetSplitAxisFromProducerView(
            self.job_name_, self.lbn_
        )

    @property
    def is_dynamic(self):
        return c_api_util.JobBuildAndInferCtx_MirroredBlobIsDynamic(
            self.job_name_, self.lbn_
        )

    @property
    def disable_boxing(self):
        # Mirrored blobs never participate in boxing.
        return True

    @property
    def is_tensor_list(self):
        return c_api_util.JobBuildAndInferCtx_MirroredBlobIsTensorList(
            self.job_name_, self.lbn_
        )

    @property
    def parallel_conf(self):
        return c_api_util.JobBuildAndInferCtx_MirroredBlobGetParallelConfFromProducerView(
            self.job_name_, self.lbn_
        )
class EagerBlobTrait(object):
def numpy_size(self):
return self.blob_object.parallel_desc_symbol.parallel_num
def numpy_list_size(self):
return self.blob_object.parallel_desc_symbol.parallel_num
    def numpy(self, rank=None):
        """Fetch this blob as a numpy ndarray.

        With rank=None and a single device, returns that device's data;
        with multiple devices the blob must be static (not dynamic, not a
        tensor list) and the consistent value is fetched.  An explicit
        rank selects one mirrored component.
        """
        if rank is None:
            if self.numpy_size() == 1:
                return self._NumpyAt(0)
            else:
                assert not self.is_dynamic
                assert not self.is_tensor_list
                return self._Numpy()
        else:
            return self._NumpyAt(rank)
    def numpy_list(self, rank=None):
        """Fetch a dynamic tensor-list blob as a list of ndarrays.

        Returns the full per-rank list when rank is None, otherwise the
        component for the given rank (0 <= rank < parallel_num).
        """
        assert self.is_tensor_list
        assert self.is_dynamic
        mirrored_list = self._NumpyMirroredList()
        if rank is None:
            return mirrored_list
        else:
            # NOTE(review): ``self.blob_object_`` looks inconsistent with the
            # ``blob_object`` property used elsewhere in this class — confirm
            # a subclass actually sets this attribute.
            parallel_num = self.blob_object_.parallel_desc_symbol.parallel_num
            assert rank >= 0
            assert rank < parallel_num
            assert len(mirrored_list) == parallel_num
            return mirrored_list[rank]
@property
def sub_consistent_blob_list(self):
raise NotImplementedError
@property
def shape(self):
return self.blob_object.op_arg_blob_attr.shape
@property
def dtype(self):
ret = self.blob_object.op_arg_blob_attr.dtype
assert issubclass(ret, dtype_util.dtype)
return ret
@property
def batch_axis(self):
opt_batch_axis = self.blob_object.op_arg_blob_attr.batch_axis
if opt_batch_axis.HasField("value"):
return opt_batch_axis.value
else:
return None
@property
def split_axis(self):
sbp_parallel = self.blob_object.op_arg_parallel_attr.sbp_parallel
if sbp_parallel.HasField("split_parallel"):
return sbp_parallel.split_parallel.axis
elif sbp_parallel.HasField("broadcast_parallel"):
return None
elif sbp_parallel.HasField("partial_sum_parallel"):
return None
else:
raise NotImplementedError
@property
def is_dynamic(self):
return self.blob_object.op_arg_blob_attr.is_dynamic
@property
def disable_boxing(self):
return True
@property
def is_tensor_list(self):
return self.blob_object.op_arg_blob_attr.is_tensor_list
@property
def parallel_conf(self):
return self.blob_object.parallel_desc_symbol.parallel_conf
def __del__(self):
blob_register.CloseRegisteredBlobAccess(self.logical_blob_name)
def _Init(self, blob_object):
access = blob_register.OpenRegisteredBlobAccess(
self.logical_blob_name, blob_object
)
self.registered_blob_access_ = access
self.sub_consistent_blob_list_ = []
@property
def blob_object(self):
return self.registered_blob_access_.blob_object
def _NumpyAt(self, rank):
assert self.is_tensor_list is not True
assert rank >= 0
assert rank < self.blob_object.parallel_desc_symbol.parallel_num
ndarray_list = self._NumpyMirroredList()
return ndarray_list[rank]
def _Numpy(self):
assert self.is_tensor_list is not True
def FetchBlobNumpy(blob_object):
consistent_blob_name = None
def BoxingToSingleDevice(builder):
parallel_conf = placement_pb.ParallelConf()
parallel_conf.device_tag = blob_object.parallel_desc_symbol.device_tag
parallel_conf.device_name.append("{}:{}".format(0, 0))
tmp_parallel_desc_symbol = builder.GetParallelDescSymbol(parallel_conf)
tmp_op_arg_parallel_attr = op_arg_util.OpArgParallelAttribute(
tmp_parallel_desc_symbol,
blob_object.op_arg_parallel_attr.sbp_parallel,
blob_object.op_arg_parallel_attr.opt_mirrored_parallel,
)
with oneflow.scope.placement(
self.parallel_conf.device_tag, list(self.parallel_conf.device_name)
):
tmp_blob_object = boxing_util.BoxingTo(
builder, blob_object, tmp_op_arg_parallel_attr
)
nonlocal consistent_blob_name
consistent_blob_name = "{}-consistent".format(self.logical_blob_name)
if not blob_register.HasObject4BlobName(consistent_blob_name):
blob_register.SetObject4BlobName(
consistent_blob_name, tmp_blob_object
)
vm_util.LogicalRun(BoxingToSingleDevice)
return eager_blob_util.EagerPhysicalBlob(consistent_blob_name).numpy()
blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
return blob_cache.GetCachedNumpy(FetchBlobNumpy)
def _NumpyMirroredList(self):
physical_blob_objects = []
def UnpackLogicalBlobToPhysicalBlobs(builder):
nonlocal physical_blob_objects
physical_blob_objects = builder.UnpackLogicalBlobToPhysicalBlobs(
self.blob_object
)
def GetPhyBlobNumpy(i, phy_blob_object):
name = "{}/{}".format(self.logical_blob_name, i)
blob_register.SetObject4BlobName(name, phy_blob_object)
return (
eager_blob_util.EagerPhysicalBlob(name).numpy_list()
if self.is_tensor_list
else eager_blob_util.EagerPhysicalBlob(name).numpy()
)
def FetchBlobNumpyMirroredList(blob_object):
vm_util.LogicalRun(UnpackLogicalBlobToPhysicalBlobs)
return [
GetPhyBlobNumpy(i, phy_blob_object)
for i, phy_blob_object in enumerate(physical_blob_objects)
]
blob_cache = blob_cache_util.FindOrCreateBlobCache(self.blob_object)
return blob_cache.GetCachedNumpyMirroredList(FetchBlobNumpyMirroredList)
def IdenticalTo(self, rhs):
return (
self.blob_object.op_arg_blob_attr == rhs.blob_object.op_arg_blob_attr
and self.blob_object.op_arg_parallel_attr
== rhs.blob_object.op_arg_parallel_attr
)
class EagerConsistentBlob(EagerBlobTrait, ConsistentBlob):
    """Consistent blob with eager-mode access: ConsistentBlob metadata plus
    EagerBlobTrait numpy fetching, backed by ``blob_object``."""

    def __init__(self, lbi, blob_object=None, **kw):
        ConsistentBlob.__init__(self, lbi, **kw)
        self._Init(blob_object)
class EagerMirroredBlob(EagerBlobTrait, MirroredBlob):
    """Mirrored blob with eager-mode access: MirroredBlob metadata plus
    EagerBlobTrait numpy fetching, backed by ``blob_object``."""

    def __init__(self, lbi, blob_object=None, **kw):
        MirroredBlob.__init__(self, lbi, **kw)
        self._Init(blob_object)
| [
"oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetStaticShape",
"oneflow.core.job.placement_pb2.ParallelConf",
"oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobIsTensorList",
"oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetStaticShape",
"oneflow.python.f... | [((1614, 1657), 'oneflow.python.eager.blob_register.GetDefaultBlobRegister', 'blob_register_util.GetDefaultBlobRegister', ([], {}), '()\n', (1655, 1657), True, 'import oneflow.python.eager.blob_register as blob_register_util\n'), ((1779, 1848), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(hob.in_global_mode & hob.eager_execution_enabled)'], {}), '(hob.in_global_mode & hob.eager_execution_enabled)\n', (1798, 1848), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((2164, 2213), 'oneflow.python.lib.core.enable_if.condition', 'enable_if.condition', (['(~hob.eager_execution_enabled)'], {}), '(~hob.eager_execution_enabled)\n', (2183, 2213), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1697, 1749), 'oneflow.python.lib.core.enable_if.unique', 'enable_if.unique', (['[EagerLogicalBlob, LazyRemoteBlob]'], {}), '([EagerLogicalBlob, LazyRemoteBlob])\n', (1713, 1749), True, 'import oneflow.python.lib.core.enable_if as enable_if\n'), ((1897, 1947), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetCurrentJobName', 'c_api_util.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (1945, 1947), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((1999, 2059), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (2044, 2059), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((2260, 2310), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetCurrentJobName', 'c_api_util.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2308, 2310), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((2397, 2457), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsMirroredBlob', 'c_api_util.JobBuildAndInferCtx_IsMirroredBlob', (['job_name', 'lbn'], {}), '(job_name, lbn)\n', (2442, 2457), True, 'import 
oneflow.python.framework.c_api_util as c_api_util\n'), ((2672, 2716), 'oneflow.python.framework.blob_desc.BlobDesc.__init__', 'blob_desc.BlobDesc.__init__', (['self', 'lbi'], {}), '(self, lbi, **kw)\n', (2699, 2716), True, 'import oneflow.python.framework.blob_desc as blob_desc\n'), ((2742, 2792), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetCurrentJobName', 'c_api_util.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (2790, 2792), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((3456, 3513), 'oneflow.distribute.assert_is_valid_distribute', 'oneflow.distribute.assert_is_valid_distribute', (['distribute'], {}), '(distribute)\n', (3501, 3513), False, 'import oneflow\n'), ((3674, 3733), 'oneflow.parallel_cast', 'oneflow.parallel_cast', (['self'], {'gradient_distribute': 'distribute'}), '(self, gradient_distribute=distribute)\n', (3695, 3733), False, 'import oneflow\n'), ((4009, 4059), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetCurrentJobName', 'c_api_util.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (4057, 4059), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((4107, 4144), 'oneflow.scope.mirrored_view_enabled', 'oneflow.scope.mirrored_view_enabled', ([], {}), '()\n', (4142, 4144), False, 'import oneflow\n'), ((4476, 4548), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetStaticShape', 'c_api_util.JobBuildAndInferCtx_GetStaticShape', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (4521, 4548), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((4786, 4856), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetBatchAxis', 'c_api_util.JobBuildAndInferCtx_GetBatchAxis', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (4829, 4856), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((4913, 5003), 
'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView', 'c_api_util.JobBuildAndInferCtx_GetSplitAxisFromProducerView', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_,\n self.lbn_)\n', (4972, 5003), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((5078, 5145), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsDynamic', 'c_api_util.JobBuildAndInferCtx_IsDynamic', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (5118, 5145), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((5206, 5277), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_DisableBoxing', 'c_api_util.JobBuildAndInferCtx_DisableBoxing', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (5250, 5277), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((5338, 5408), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_IsTensorList', 'c_api_util.JobBuildAndInferCtx_IsTensorList', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (5381, 5408), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((5468, 5562), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetParallelConfFromProducerView', 'c_api_util.JobBuildAndInferCtx_GetParallelConfFromProducerView', (['self.job_name_', 'self.lbn_'], {}), '(self.\n job_name_, self.lbn_)\n', (5530, 5562), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((6317, 6367), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetCurrentJobName', 'c_api_util.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (6365, 6367), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((6471, 6547), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetNumSubLbi', (['self.job_name_', 'lbn'], {}), '(self.job_name_, lbn)\n', (6526, 6547), True, 'import 
oneflow.python.framework.c_api_util as c_api_util\n'), ((7009, 7048), 'oneflow.scope.consistent_view_enabled', 'oneflow.scope.consistent_view_enabled', ([], {}), '()\n', (7046, 7048), False, 'import oneflow\n'), ((7379, 7467), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetStaticShape', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetStaticShape', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_,\n self.lbn_)\n', (7436, 7467), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((7765, 7851), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetBatchAxis', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetBatchAxis', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_,\n self.lbn_)\n', (7820, 7851), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((7926, 8029), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetSplitAxisFromProducerView', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetSplitAxisFromProducerView', (['self.job_name_', 'self.lbn_'], {}), '(self\n .job_name_, self.lbn_)\n', (7997, 8029), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8103, 8182), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobIsDynamic', 'c_api_util.JobBuildAndInferCtx_MirroredBlobIsDynamic', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (8155, 8182), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8330, 8416), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobIsTensorList', 'c_api_util.JobBuildAndInferCtx_MirroredBlobIsTensorList', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_,\n self.lbn_)\n', (8385, 8416), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((8494, 8600), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetParallelConfFromProducerView', 
'c_api_util.JobBuildAndInferCtx_MirroredBlobGetParallelConfFromProducerView', (['self.job_name_', 'self.lbn_'], {}), '(self\n .job_name_, self.lbn_)\n', (8568, 8600), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((13335, 13390), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (13372, 13390), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((14436, 14491), 'oneflow.python.eager.blob_cache.FindOrCreateBlobCache', 'blob_cache_util.FindOrCreateBlobCache', (['self.blob_object'], {}), '(self.blob_object)\n', (14473, 14491), True, 'import oneflow.python.eager.blob_cache as blob_cache_util\n'), ((4650, 4719), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_GetDataType', 'c_api_util.JobBuildAndInferCtx_GetDataType', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self.lbn_)\n', (4692, 4719), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((6629, 6705), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetSubLbi', (['self.job_name_', 'lbn', 'i'], {}), '(self.job_name_, lbn, i)\n', (6681, 6705), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((7587, 7673), 'oneflow.python.framework.c_api_util.JobBuildAndInferCtx_MirroredBlobGetDataType', 'c_api_util.JobBuildAndInferCtx_MirroredBlobGetDataType', (['self.job_name_', 'self.lbn_'], {}), '(self.job_name_, self\n .lbn_)\n', (7641, 7673), True, 'import oneflow.python.framework.c_api_util as c_api_util\n'), ((13189, 13229), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['BoxingToSingleDevice'], {}), '(BoxingToSingleDevice)\n', (13207, 13229), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((14199, 14251), 'oneflow.python.eager.vm_util.LogicalRun', 'vm_util.LogicalRun', (['UnpackLogicalBlobToPhysicalBlobs'], {}), 
'(UnpackLogicalBlobToPhysicalBlobs)\n', (14217, 14251), True, 'import oneflow.python.eager.vm_util as vm_util\n'), ((3294, 3354), 'oneflow.python.framework.placement_context.MakeMachineId2DeviceIdList', 'placement_ctx.MakeMachineId2DeviceIdList', (['self.parallel_conf'], {}), '(self.parallel_conf)\n', (3334, 3354), True, 'import oneflow.python.framework.placement_context as placement_ctx\n'), ((11961, 11988), 'oneflow.core.job.placement_pb2.ParallelConf', 'placement_pb.ParallelConf', ([], {}), '()\n', (11986, 11988), True, 'import oneflow.core.job.placement_pb2 as placement_pb\n'), ((12278, 12451), 'oneflow.python.framework.op_arg_util.OpArgParallelAttribute', 'op_arg_util.OpArgParallelAttribute', (['tmp_parallel_desc_symbol', 'blob_object.op_arg_parallel_attr.sbp_parallel', 'blob_object.op_arg_parallel_attr.opt_mirrored_parallel'], {}), '(tmp_parallel_desc_symbol, blob_object.\n op_arg_parallel_attr.sbp_parallel, blob_object.op_arg_parallel_attr.\n opt_mirrored_parallel)\n', (12312, 12451), True, 'import oneflow.python.framework.op_arg_util as op_arg_util\n'), ((4431, 4455), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (4453, 4455), False, 'import traceback\n'), ((7334, 7358), 'traceback.format_stack', 'traceback.format_stack', ([], {}), '()\n', (7356, 7358), False, 'import traceback\n'), ((12712, 12780), 'oneflow.python.eager.boxing_util.BoxingTo', 'boxing_util.BoxingTo', (['builder', 'blob_object', 'tmp_op_arg_parallel_attr'], {}), '(builder, blob_object, tmp_op_arg_parallel_attr)\n', (12732, 12780), True, 'import oneflow.python.eager.boxing_util as boxing_util\n'), ((13249, 13304), 'oneflow.python.eager.eager_blob_util.EagerPhysicalBlob', 'eager_blob_util.EagerPhysicalBlob', (['consistent_blob_name'], {}), '(consistent_blob_name)\n', (13282, 13304), True, 'import oneflow.python.eager.eager_blob_util as eager_blob_util\n'), ((13958, 13997), 'oneflow.python.eager.eager_blob_util.EagerPhysicalBlob', 'eager_blob_util.EagerPhysicalBlob', 
(['name'], {}), '(name)\n', (13991, 13997), True, 'import oneflow.python.eager.eager_blob_util as eager_blob_util\n'), ((14071, 14110), 'oneflow.python.eager.eager_blob_util.EagerPhysicalBlob', 'eager_blob_util.EagerPhysicalBlob', (['name'], {}), '(name)\n', (14104, 14110), True, 'import oneflow.python.eager.eager_blob_util as eager_blob_util\n')] |
import oneflow as flow
import oneflow.nn as nn
class GLU(nn.Module):
    """Sigmoid-gated activation: returns ``x * sigmoid(x)``."""

    def __init__(self):
        super(GLU, self).__init__()

    def forward(self, input):
        gate = flow.sigmoid(input)
        return input * gate
class PixelShuffle(nn.Module):
    """1-D pixel shuffle: trades channels for width on a ``(n, c, w)`` input,
    viewing it as ``(n, c // upscale_factor, w * upscale_factor)``.
    """

    def __init__(self, upscale_factor):
        super(PixelShuffle, self).__init__()
        self.upscale_factor = upscale_factor

    def forward(self, input):
        """Return ``input`` reshaped with channels divided and width
        multiplied by ``upscale_factor``.
        """
        # BUGFIX/generalization: the factor was hard-coded to 2, silently
        # ignoring ``upscale_factor``. Behavior is unchanged for the
        # previously implicit factor of 2.
        n = input.shape[0]
        c_out = input.shape[1] // self.upscale_factor
        w_new = input.shape[2] * self.upscale_factor
        return input.view(n, c_out, w_new)
class ResidualLayer(nn.Module):
    """1-D gated residual block: two parallel conv+instance-norm branches
    combined by a sigmoid gate, projected back to ``in_channels`` and added
    to the input. Note: all convolutions use stride 1; the ``stride``
    argument is accepted but not applied, matching the original interface.
    """

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(ResidualLayer, self).__init__()

        def conv_norm(c_in, c_out):
            # Conv1d (stride fixed at 1) followed by affine instance norm.
            return nn.Sequential(
                nn.Conv1d(
                    in_channels=c_in,
                    out_channels=c_out,
                    kernel_size=kernel_size,
                    stride=1,
                    padding=padding,
                ),
                nn.InstanceNorm1d(num_features=c_out, affine=True),
            )

        self.conv1d_layer = conv_norm(in_channels, out_channels)
        self.conv_layer_gates = conv_norm(in_channels, out_channels)
        self.conv1d_out_layer = conv_norm(out_channels, in_channels)

    def forward(self, input):
        # Gated linear unit over the two parallel branches.
        gated = self.conv1d_layer(input) * flow.sigmoid(self.conv_layer_gates(input))
        # Project back and add the residual connection.
        return input + self.conv1d_out_layer(gated)
class downSample_Generator(nn.Module):
    """Gated 2-D downsampling block: a conv+instance-norm branch multiplied
    by the sigmoid of a parallel conv+instance-norm gate branch."""

    def __init__(self, in_channels, out_channels, kernel_size, stride, padding):
        super(downSample_Generator, self).__init__()

        def branch():
            # Identical conv+norm stack used for both value and gate paths.
            return nn.Sequential(
                nn.Conv2d(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=kernel_size,
                    stride=stride,
                    padding=padding,
                ),
                nn.InstanceNorm2d(num_features=out_channels, affine=True),
            )

        self.convLayer = branch()
        self.convLayer_gates = branch()

    def forward(self, input):
        gate = flow.sigmoid(self.convLayer_gates(input))
        return self.convLayer(input) * gate
class Generator(nn.Module):
    """Voice-conversion style generator: 2-D gated conv front end, two 2-D
    downsampling blocks, a 2D->1D reshape bottleneck with six gated residual
    blocks, a 1D->2D reshape, two pixel-shuffle upsampling blocks, and a
    final 2-D conv back to one channel.
    """

    def __init__(self):
        super(Generator, self).__init__()
        # Initial 2-D conv pair: value path and its sigmoid gate (GLU-style).
        self.conv1 = nn.Conv2d(
            in_channels=1,
            out_channels=128,
            kernel_size=(5, 15),
            stride=(1, 1),
            padding=(2, 7),
        )
        self.conv1_gates = nn.Conv2d(
            in_channels=1,
            out_channels=128,
            kernel_size=(5, 15),
            stride=1,
            padding=(2, 7),
        )
        # 2-D downsampling (each halves spatial dims via stride 2).
        self.downSample1 = downSample_Generator(
            in_channels=128, out_channels=256, kernel_size=5, stride=2, padding=2
        )
        self.downSample2 = downSample_Generator(
            in_channels=256, out_channels=256, kernel_size=5, stride=2, padding=2
        )
        # 2D -> 1D bottleneck: 2304 = 256 channels x 9 rows after the
        # reshape performed in forward().
        self.conv2dto1dLayer = nn.Sequential(
            nn.Conv1d(
                in_channels=2304, out_channels=256, kernel_size=1, stride=1, padding=0
            ),
            nn.InstanceNorm1d(num_features=256, affine=True),
        )
        # Six gated residual blocks operating on the 1-D bottleneck.
        self.residualLayer1 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        self.residualLayer2 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        self.residualLayer3 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        self.residualLayer4 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        self.residualLayer5 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        self.residualLayer6 = ResidualLayer(
            in_channels=256, out_channels=512, kernel_size=3, stride=1, padding=1
        )
        # 1D -> 2D expansion back to 2304 (= 256 x 9) channels.
        self.conv1dto2dLayer = nn.Sequential(
            nn.Conv1d(
                in_channels=256, out_channels=2304, kernel_size=1, stride=1, padding=0
            ),
            nn.InstanceNorm1d(num_features=2304, affine=True),
        )
        # Pixel-shuffle upsampling blocks (see upSample below).
        self.upSample1 = self.upSample(
            in_channels=256, out_channels=1024, kernel_size=5, stride=1, padding=2
        )
        self.upSample2 = self.upSample(
            in_channels=256, out_channels=512, kernel_size=5, stride=1, padding=2
        )
        self.lastConvLayer = nn.Conv2d(
            in_channels=128,
            out_channels=1,
            kernel_size=(5, 15),
            stride=(1, 1),
            padding=(2, 7),
        )

    def downSample(self, in_channels, out_channels, kernel_size, stride, padding):
        """Build a conv+norm+GLU 1-D downsampling stack.

        NOTE(review): the stack is also assigned to ``self.ConvLayer`` as a
        side effect, which registers it as a submodule; not called anywhere
        in this class's __init__ — confirm whether it is used externally.
        """
        self.ConvLayer = nn.Sequential(
            nn.Conv1d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
            ),
            nn.InstanceNorm1d(num_features=out_channels, affine=True),
            GLU(),
        )
        return self.ConvLayer

    def upSample(self, in_channels, out_channels, kernel_size, stride, padding):
        """Build a conv + PixelShuffle(2) + norm + GLU 2-D upsampling stack.

        NOTE(review): each call overwrites ``self.convLayer``; the returned
        stack is what __init__ stores as upSample1/upSample2, so after
        __init__ ``self.convLayer`` aliases the upSample2 stack.
        """
        self.convLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
            ),
            # PixelShuffle(2) quarters the channels, hence out_channels // 4
            # in the following norm.
            nn.PixelShuffle(upscale_factor=2),
            nn.InstanceNorm2d(num_features=out_channels // 4, affine=True),
            GLU(),
        )
        return self.convLayer

    def forward(self, input):
        # Add a singleton channel dim: (n, h, w) -> (n, 1, h, w).
        input = input.unsqueeze(1)
        # GLU-style gated first conv.
        conv1 = self.conv1(input) * flow.sigmoid(self.conv1_gates(input))
        # 2-D downsampling.
        downsample1 = self.downSample1(conv1)
        downsample2 = self.downSample2(downsample1)
        # 2D -> 1D: fold the 256x9 feature map into 2304 channels.
        reshape2dto1d = downsample2.view(downsample2.size(0), 2304, 1, -1)
        reshape2dto1d = reshape2dto1d.squeeze(2)
        conv2dto1d_layer = self.conv2dto1dLayer(reshape2dto1d)
        # Six residual blocks in sequence.
        residual_layer_1 = self.residualLayer1(conv2dto1d_layer)
        residual_layer_2 = self.residualLayer2(residual_layer_1)
        residual_layer_3 = self.residualLayer3(residual_layer_2)
        residual_layer_4 = self.residualLayer4(residual_layer_3)
        residual_layer_5 = self.residualLayer5(residual_layer_4)
        residual_layer_6 = self.residualLayer6(residual_layer_5)
        # 1D -> 2D: unfold 2304 channels back into a 256x9 feature map.
        conv1dto2d_layer = self.conv1dto2dLayer(residual_layer_6)
        reshape1dto2d = conv1dto2d_layer.unsqueeze(2)
        reshape1dto2d = reshape1dto2d.view(reshape1dto2d.size(0), 256, 9, -1)
        # Pixel-shuffle upsampling and final projection to one channel.
        upsample_layer_1 = self.upSample1(reshape1dto2d)
        upsample_layer_2 = self.upSample2(upsample_layer_1)
        output = self.lastConvLayer(upsample_layer_2)
        # Drop the channel dim: (n, 1, h, w) -> (n, h, w).
        output = output.squeeze(1)
        return output
# PatchGAN
class Discriminator(nn.Module):
    """PatchGAN discriminator: gated 2-D conv stem, stride-2 downsampling
    stacks, and a final 1x3 conv producing a patch-wise real/fake map."""

    def __init__(self):
        super(Discriminator, self).__init__()
        # Stem: 2-D conv followed by the sigmoid gate activation.
        self.convLayer1 = nn.Sequential(
            nn.Conv2d(
                in_channels=1,
                out_channels=128,
                kernel_size=(3, 3),
                stride=(1, 1),
                padding=(1, 1),
            ),
            GLU(),
        )
        # Stride-2 downsampling stacks (conv + instance norm + GLU).
        self.downSample1 = self.downSample(
            in_channels=128,
            out_channels=256,
            kernel_size=(3, 3),
            stride=(2, 2),
            padding=1,
        )
        self.downSample2 = self.downSample(
            in_channels=256,
            out_channels=512,
            kernel_size=(3, 3),
            stride=[2, 2],
            padding=1,
        )
        self.downSample3 = self.downSample(
            in_channels=512,
            out_channels=1024,
            kernel_size=[3, 3],
            stride=[2, 2],
            padding=1,
        )
        # NOTE(review): downSample4 is constructed here but never applied in
        # forward() below — confirm whether it should run between
        # downsample3 and outputConvLayer (its 1024->1024, kernel (1, 5)
        # shape suggests it was meant to). Left as-is to preserve behavior
        # and checkpoint compatibility.
        self.downSample4 = self.downSample(
            in_channels=1024,
            out_channels=1024,
            kernel_size=[1, 5],
            stride=(1, 1),
            padding=(0, 2),
        )
        # Final 1x3 conv to a single-channel patch map.
        self.outputConvLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=1024,
                out_channels=1,
                kernel_size=(1, 3),
                stride=[1, 1],
                padding=[0, 1],
            )
        )

    def downSample(self, in_channels, out_channels, kernel_size, stride, padding):
        """Build one conv + instance-norm + GLU downsampling stack."""
        convLayer = nn.Sequential(
            nn.Conv2d(
                in_channels=in_channels,
                out_channels=out_channels,
                kernel_size=kernel_size,
                stride=stride,
                padding=padding,
            ),
            nn.InstanceNorm2d(num_features=out_channels, affine=True),
            GLU(),
        )
        return convLayer

    def forward(self, input):
        # input has shape [batch_size, num_features, time]
        # discriminator requires shape [batchSize, 1, num_features, time]
        input = input.unsqueeze(1)
        conv_layer_1 = self.convLayer1(input)
        downsample1 = self.downSample1(conv_layer_1)
        downsample2 = self.downSample2(downsample1)
        downsample3 = self.downSample3(downsample2)
        # NOTE(review): self.downSample4 is skipped here — see __init__.
        output = flow.sigmoid(self.outputConvLayer(downsample3))
        return output
| [
"oneflow.nn.InstanceNorm2d",
"oneflow.nn.InstanceNorm1d",
"oneflow.nn.Conv1d",
"oneflow.sigmoid",
"oneflow.nn.PixelShuffle",
"oneflow.nn.Conv2d"
] | [((3158, 3257), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': '(5, 15)', 'stride': '(1, 1)', 'padding': '(2, 7)'}), '(in_channels=1, out_channels=128, kernel_size=(5, 15), stride=(1, \n 1), padding=(2, 7))\n', (3167, 3257), True, 'import oneflow.nn as nn\n'), ((3352, 3445), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': '(5, 15)', 'stride': '(1)', 'padding': '(2, 7)'}), '(in_channels=1, out_channels=128, kernel_size=(5, 15), stride=1,\n padding=(2, 7))\n', (3361, 3445), True, 'import oneflow.nn as nn\n'), ((5535, 5634), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(128)', 'out_channels': '(1)', 'kernel_size': '(5, 15)', 'stride': '(1, 1)', 'padding': '(2, 7)'}), '(in_channels=128, out_channels=1, kernel_size=(5, 15), stride=(1, \n 1), padding=(2, 7))\n', (5544, 5634), True, 'import oneflow.nn as nn\n'), ((185, 204), 'oneflow.sigmoid', 'flow.sigmoid', (['input'], {}), '(input)\n', (197, 204), True, 'import oneflow as flow\n'), ((757, 875), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (766, 875), True, 'import oneflow.nn as nn\n'), ((979, 1036), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (996, 1036), True, 'import oneflow.nn as nn\n'), ((1108, 1226), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (1117, 1226), True, 'import oneflow.nn as nn\n'), ((1330, 1387), 
'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (1347, 1387), True, 'import oneflow.nn as nn\n'), ((1459, 1577), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'out_channels', 'out_channels': 'in_channels', 'kernel_size': 'kernel_size', 'stride': '(1)', 'padding': 'padding'}), '(in_channels=out_channels, out_channels=in_channels, kernel_size=\n kernel_size, stride=1, padding=padding)\n', (1468, 1577), True, 'import oneflow.nn as nn\n'), ((1681, 1737), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'in_channels', 'affine': '(True)'}), '(num_features=in_channels, affine=True)\n', (1698, 1737), True, 'import oneflow.nn as nn\n'), ((1918, 1945), 'oneflow.sigmoid', 'flow.sigmoid', (['h1_gates_norm'], {}), '(h1_gates_norm)\n', (1930, 1945), True, 'import oneflow as flow\n'), ((2254, 2377), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (2263, 2377), True, 'import oneflow.nn as nn\n'), ((2481, 2538), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (2498, 2538), True, 'import oneflow.nn as nn\n'), ((2608, 2731), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (2617, 2731), True, 'import oneflow.nn as nn\n'), ((2835, 2892), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), 
'(num_features=out_channels, affine=True)\n', (2852, 2892), True, 'import oneflow.nn as nn\n'), ((3910, 3995), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(2304)', 'out_channels': '(256)', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=2304, out_channels=256, kernel_size=1, stride=1,\n padding=0)\n', (3919, 3995), True, 'import oneflow.nn as nn\n'), ((4035, 4083), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': '(256)', 'affine': '(True)'}), '(num_features=256, affine=True)\n', (4052, 4083), True, 'import oneflow.nn as nn\n'), ((5027, 5112), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': '(256)', 'out_channels': '(2304)', 'kernel_size': '(1)', 'stride': '(1)', 'padding': '(0)'}), '(in_channels=256, out_channels=2304, kernel_size=1, stride=1,\n padding=0)\n', (5036, 5112), True, 'import oneflow.nn as nn\n'), ((5152, 5201), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': '(2304)', 'affine': '(True)'}), '(num_features=2304, affine=True)\n', (5169, 5201), True, 'import oneflow.nn as nn\n'), ((5837, 5960), 'oneflow.nn.Conv1d', 'nn.Conv1d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (5846, 5960), True, 'import oneflow.nn as nn\n'), ((6064, 6121), 'oneflow.nn.InstanceNorm1d', 'nn.InstanceNorm1d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (6081, 6121), True, 'import oneflow.nn as nn\n'), ((6317, 6440), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (6326, 6440), 
True, 'import oneflow.nn as nn\n'), ((6544, 6577), 'oneflow.nn.PixelShuffle', 'nn.PixelShuffle', ([], {'upscale_factor': '(2)'}), '(upscale_factor=2)\n', (6559, 6577), True, 'import oneflow.nn as nn\n'), ((6591, 6653), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': '(out_channels // 4)', 'affine': '(True)'}), '(num_features=out_channels // 4, affine=True)\n', (6608, 6653), True, 'import oneflow.nn as nn\n'), ((8248, 8346), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1)', 'out_channels': '(128)', 'kernel_size': '(3, 3)', 'stride': '(1, 1)', 'padding': '(1, 1)'}), '(in_channels=1, out_channels=128, kernel_size=(3, 3), stride=(1, 1\n ), padding=(1, 1))\n', (8257, 8346), True, 'import oneflow.nn as nn\n'), ((9366, 9465), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': '(1024)', 'out_channels': '(1)', 'kernel_size': '(1, 3)', 'stride': '[1, 1]', 'padding': '[0, 1]'}), '(in_channels=1024, out_channels=1, kernel_size=(1, 3), stride=[1, \n 1], padding=[0, 1])\n', (9375, 9465), True, 'import oneflow.nn as nn\n'), ((9697, 9820), 'oneflow.nn.Conv2d', 'nn.Conv2d', ([], {'in_channels': 'in_channels', 'out_channels': 'out_channels', 'kernel_size': 'kernel_size', 'stride': 'stride', 'padding': 'padding'}), '(in_channels=in_channels, out_channels=out_channels, kernel_size=\n kernel_size, stride=stride, padding=padding)\n', (9706, 9820), True, 'import oneflow.nn as nn\n'), ((9924, 9981), 'oneflow.nn.InstanceNorm2d', 'nn.InstanceNorm2d', ([], {'num_features': 'out_channels', 'affine': '(True)'}), '(num_features=out_channels, affine=True)\n', (9941, 9981), True, 'import oneflow.nn as nn\n')] |
import oneflow as flow
from quantization_ops.q_module import QModule, QParam
__all__ = ["QConvBN"]
class QConvBN(QModule):
def __init__(self, conv_module, bn_module, qi=True, qo=True, quantization_bit=8, quantization_scheme='symmetric', quantization_formula='google', per_layer_quantization=True):
super(QConvBN, self).__init__(qi=qi, qo=qo, quantization_bit=quantization_bit, quantization_scheme=quantization_scheme,
quantization_formula=quantization_formula, per_layer_quantization=per_layer_quantization)
self.quantization_bit = quantization_bit
self.quantization_scheme = quantization_scheme
self.quantization_formula = quantization_formula
self.per_layer_quantization = per_layer_quantization
self.conv_module = conv_module
self.bn_module = bn_module
self.qw = QParam(quantization_bit=quantization_bit, quantization_scheme=quantization_scheme,
quantization_formula=quantization_formula, per_layer_quantization=per_layer_quantization)
self.quantization = flow.nn.Quantization(
quantization_bit=32, quantization_scheme="affine", quantization_formula="google")
def fold_bn(self, mean, std):
if self.bn_module.affine:
gamma_ = self.bn_module.weight / std
weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
if self.conv_module.bias is not None:
bias = gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
else:
bias = self.bn_module.bias - gamma_ * mean
else:
gamma_ = 1 / std
weight = self.conv_module.weight * gamma_
if self.conv_module.bias is not None:
bias = gamma_ * self.conv_module.bias - gamma_ * mean
else:
bias = -gamma_ * mean
return weight, bias
def forward(self, x):
if hasattr(self, 'qi'):
self.qi.update(x)
x = self.qi.fake_quantize_tensor(x)
if self.training:
y = flow.F.conv2d(x, self.conv_module.weight, self.conv_module.bias,
stride=self.conv_module.stride,
padding=self.conv_module.padding,
dilation=self.conv_module.dilation,
groups=self.conv_module.groups)
y = y.permute(1, 0, 2, 3) # NCHW -> CNHW
y = y.view(self.conv_module.out_channels, -1) # CNHW -> C,NHW
mean = y.mean(1).detach()
var = y.var(1).detach()
self.bn_module.running_mean = \
self.bn_module.momentum * self.bn_module.running_mean + \
(1 - self.bn_module.momentum) * mean
self.bn_module.running_var = \
self.bn_module.momentum * self.bn_module.running_var + \
(1 - self.bn_module.momentum) * var
else:
mean = flow.Tensor(self.bn_module.running_mean)
var = flow.Tensor(self.bn_module.running_var)
std = flow.sqrt(var + self.bn_module.eps)
weight, bias = self.fold_bn(mean, std)
self.qw.update(weight.data)
x = flow.F.conv2d(x, self.qw.fake_quantize_tensor(weight), bias,
stride=self.conv_module.stride,
padding=self.conv_module.padding, dilation=self.conv_module.dilation,
groups=self.conv_module.groups)
if hasattr(self, 'qo'):
self.qo.update(x)
x = self.qo.fake_quantize_tensor(x)
return x
def freeze(self, qi=None, qo=None):
if hasattr(self, 'qi') and qi is not None:
raise ValueError('qi has been provided in init function.')
if not hasattr(self, 'qi') and qi is None:
raise ValueError('qi is not existed, should be provided.')
if hasattr(self, 'qo') and qo is not None:
raise ValueError('qo has been provided in init function.')
if not hasattr(self, 'qo') and qo is None:
raise ValueError('qo is not existed, should be provided.')
if qi is not None:
self.qi = qi
if qo is not None:
self.qo = qo
self.M = self.qw.scale.numpy() * self.qi.scale.numpy() / self.qo.scale.numpy()
weight, bias = self.fold_bn(self.bn_module.running_mean, self.bn_module.running_var)
self.conv_module.weight = flow.nn.Parameter(
self.qw.quantize_tensor(weight) - self.qw.zero_point)
self.conv_module.bias = flow.nn.Parameter(self.quantization(
bias, self.qi.scale * self.qw.scale, flow.Tensor([0])))
| [
"oneflow.Tensor",
"oneflow.F.conv2d",
"oneflow.sqrt",
"oneflow.nn.Quantization"
] | [((875, 1056), 'quantization_ops.q_module.QParam', 'QParam', ([], {'quantization_bit': 'quantization_bit', 'quantization_scheme': 'quantization_scheme', 'quantization_formula': 'quantization_formula', 'per_layer_quantization': 'per_layer_quantization'}), '(quantization_bit=quantization_bit, quantization_scheme=\n quantization_scheme, quantization_formula=quantization_formula,\n per_layer_quantization=per_layer_quantization)\n', (881, 1056), False, 'from quantization_ops.q_module import QModule, QParam\n'), ((1101, 1207), 'oneflow.nn.Quantization', 'flow.nn.Quantization', ([], {'quantization_bit': '(32)', 'quantization_scheme': '"""affine"""', 'quantization_formula': '"""google"""'}), "(quantization_bit=32, quantization_scheme='affine',\n quantization_formula='google')\n", (1121, 1207), True, 'import oneflow as flow\n'), ((3148, 3183), 'oneflow.sqrt', 'flow.sqrt', (['(var + self.bn_module.eps)'], {}), '(var + self.bn_module.eps)\n', (3157, 3183), True, 'import oneflow as flow\n'), ((2149, 2357), 'oneflow.F.conv2d', 'flow.F.conv2d', (['x', 'self.conv_module.weight', 'self.conv_module.bias'], {'stride': 'self.conv_module.stride', 'padding': 'self.conv_module.padding', 'dilation': 'self.conv_module.dilation', 'groups': 'self.conv_module.groups'}), '(x, self.conv_module.weight, self.conv_module.bias, stride=\n self.conv_module.stride, padding=self.conv_module.padding, dilation=\n self.conv_module.dilation, groups=self.conv_module.groups)\n', (2162, 2357), True, 'import oneflow as flow\n'), ((3034, 3074), 'oneflow.Tensor', 'flow.Tensor', (['self.bn_module.running_mean'], {}), '(self.bn_module.running_mean)\n', (3045, 3074), True, 'import oneflow as flow\n'), ((3093, 3132), 'oneflow.Tensor', 'flow.Tensor', (['self.bn_module.running_var'], {}), '(self.bn_module.running_var)\n', (3104, 3132), True, 'import oneflow as flow\n'), ((4710, 4726), 'oneflow.Tensor', 'flow.Tensor', (['[0]'], {}), '([0])\n', (4721, 4726), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
import oneflow as flow
import oneflow.typing as oft
@flow.unittest.num_nodes_required(2)
def test_multi_node_dynamic_binary_split_concat_empty(test_case):
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_placement_scope(flow.scope.placement("cpu", "0:0"))
func_config.default_data_type(flow.float)
flow.config.machine_num(2)
flow.config.gpu_device_num(1)
@flow.global_function(function_config=func_config)
def DynamicBinaryJob(x: oft.ListNumpy.Placeholder((20,))):
print("in_shape: ", x.shape)
with flow.scope.placement("cpu", "0:0"):
out_list = flow.experimental.dynamic_binary_split(
x, base_shift=4, out_num=6
)
id_out_list = []
for out_blob in out_list:
print("out_shape: ", out_blob.shape)
id_out_list.append(flow.identity(out_blob))
with flow.scope.placement("cpu", "1:0"):
out1 = flow.experimental.dynamic_binary_concat(id_out_list, x)
print("concat_shape: ", out1.shape)
with flow.scope.placement("cpu", "0:0"):
out2 = flow.identity(out1)
print("return_shape: ", out2.shape)
return out2
size = [0, 5, 10, 15, 20]
data = []
for i in size:
data.append(np.ones((i,), dtype=np.float32))
for i in range(5):
ret = DynamicBinaryJob([data[i]]).get().numpy_list()[0]
print(ret)
test_case.assertTrue(np.array_equal(ret, data[i]))
| [
"oneflow.config.machine_num",
"oneflow.global_function",
"oneflow.FunctionConfig",
"oneflow.experimental.dynamic_binary_split",
"oneflow.identity",
"oneflow.scope.placement",
"oneflow.experimental.dynamic_binary_concat",
"oneflow.config.gpu_device_num",
"oneflow.unittest.num_nodes_required",
"onef... | [((664, 699), 'oneflow.unittest.num_nodes_required', 'flow.unittest.num_nodes_required', (['(2)'], {}), '(2)\n', (696, 699), True, 'import oneflow as flow\n'), ((784, 805), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (803, 805), True, 'import oneflow as flow\n'), ((997, 1023), 'oneflow.config.machine_num', 'flow.config.machine_num', (['(2)'], {}), '(2)\n', (1020, 1023), True, 'import oneflow as flow\n'), ((1028, 1057), 'oneflow.config.gpu_device_num', 'flow.config.gpu_device_num', (['(1)'], {}), '(1)\n', (1054, 1057), True, 'import oneflow as flow\n'), ((1064, 1113), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1084, 1113), True, 'import oneflow as flow\n'), ((843, 869), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (867, 869), True, 'import oneflow as flow\n'), ((911, 945), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (931, 945), True, 'import oneflow as flow\n'), ((1142, 1174), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['(20,)'], {}), '((20,))\n', (1167, 1174), True, 'import oneflow.typing as oft\n'), ((1227, 1261), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (1247, 1261), True, 'import oneflow as flow\n'), ((1286, 1352), 'oneflow.experimental.dynamic_binary_split', 'flow.experimental.dynamic_binary_split', (['x'], {'base_shift': '(4)', 'out_num': '(6)'}), '(x, base_shift=4, out_num=6)\n', (1324, 1352), True, 'import oneflow as flow\n'), ((1576, 1610), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""1:0"""'], {}), "('cpu', '1:0')\n", (1596, 1610), True, 'import oneflow as flow\n'), ((1631, 1686), 'oneflow.experimental.dynamic_binary_concat', 'flow.experimental.dynamic_binary_concat', (['id_out_list', 'x'], {}), '(id_out_list, x)\n', (1670, 1686), 
True, 'import oneflow as flow\n'), ((1748, 1782), 'oneflow.scope.placement', 'flow.scope.placement', (['"""cpu"""', '"""0:0"""'], {}), "('cpu', '0:0')\n", (1768, 1782), True, 'import oneflow as flow\n'), ((1803, 1822), 'oneflow.identity', 'flow.identity', (['out1'], {}), '(out1)\n', (1816, 1822), True, 'import oneflow as flow\n'), ((1975, 2006), 'numpy.ones', 'np.ones', (['(i,)'], {'dtype': 'np.float32'}), '((i,), dtype=np.float32)\n', (1982, 2006), True, 'import numpy as np\n'), ((2143, 2171), 'numpy.array_equal', 'np.array_equal', (['ret', 'data[i]'], {}), '(ret, data[i])\n', (2157, 2171), True, 'import numpy as np\n'), ((1538, 1561), 'oneflow.identity', 'flow.identity', (['out_blob'], {}), '(out_blob)\n', (1551, 1561), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
from collections import OrderedDict
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
import test_global_storage
from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type
import oneflow.typing as tp
def _random_input(x_shape):
x = np.random.standard_normal(x_shape).astype(np.float32)
return x
def diag_grad_np(input_tensor, dim, output, grad):
input_shape = input_tensor.shape
output_shape = output.shape
grad_output = np.zeros(input_shape)
if len(input_shape) == 1:
stride1 = 1
stride0 = output_shape[1]
beg = stride1 * dim if dim >= 0 else stride0 * abs(dim)
for i in range(input_shape[0]):
if i > 0:
beg += stride1 + stride0
if dim >= 0:
grad_output[i] = grad[i][int(beg % stride0)]
if dim < 0:
grad_output[i] = grad[int((beg - i) / stride0)][i]
return grad_output
else:
stride1 = 1
stride01 = input_shape[1]
beg = stride1 * dim if dim >= 0 else stride01 * abs(dim)
for i in range(output.shape[0]):
if i > 0:
beg += stride1 + stride01
if dim >= 0:
grad_output[i][int(beg % stride01)] = grad[i]
if dim < 0:
stride02 = input_shape[0]
grad_output[int(beg / stride02)][i] = grad[i]
return grad_output
def compare_with_np(device_type, input_tensor, dim, dtype):
assert device_type in ["gpu", "cpu"]
flow.clear_default_session()
func_config = flow.FunctionConfig()
func_config.default_logical_view(flow.scope.mirrored_view())
func_config.default_placement_scope(flow.scope.placement(device_type, "0:0"))
output_np = np.diag(input_tensor, dim)
output_shape = output_np.shape
input_shape = input_tensor.shape
output_dtype = output_np.dtype
grad = np.random.random(output_shape).astype(output_dtype)
@flow.global_function(type="train", function_config=func_config)
def diag_job(
input_tensor: tp.Numpy.Placeholder(shape=(input_shape), dtype=flow.float),
) -> tp.Numpy:
input_var = flow.get_variable(
"input_tensor",
shape=(input_shape),
dtype=flow.float,
initializer=flow.zeros_initializer(),
trainable=True,
)
input_tensor = input_tensor + input_var
input_tensor = flow.cast_to_current_logical_view(input_tensor)
input_tensor = flow.cast(input_tensor, type_name_to_flow_type[dtype])
output = flow.diag(input_tensor, dim)
if (
output.dtype == flow.int64
or output.dtype == flow.int8
or output.dtype == flow.int32
):
output = flow.cast(output, flow.float)
flow.optimizer.Adam(
flow.optimizer.PiecewiseConstantScheduler([], [1e-4])
).minimize(output)
flow.watch(input_tensor, test_global_storage.Setter("x"))
flow.watch_diff(input_tensor, test_global_storage.Setter("x_diff"))
flow.watch(output, test_global_storage.Setter("output"))
flow.watch_diff(output, test_global_storage.Setter("output_diff"))
return output
# OneFlow
check_point = flow.train.CheckPoint()
check_point.init()
output_of = diag_job(input_tensor)
output_diff = test_global_storage.Get("output_diff").astype(dtype)
x_diff_of = test_global_storage.Get("x_diff").astype(dtype)
# np
x_diff_np = diag_grad_np(input_tensor, dim, output_np, output_diff)
assert np.allclose(output_of, output_np)
assert np.allclose(x_diff_of, x_diff_np)
def test_fun(device_type, input_shape, dim, dtype):
input_tensor = np.random.random(input_shape).astype(np.float32)
input_tensor = input_tensor.reshape(input_shape).astype(dtype)
compare_with_np(device_type, input_tensor, dim, dtype)
@flow.unittest.skip_unless_1n1d()
class TestCast(flow.unittest.TestCase):
def test_diag_1D_cpu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu"]
arg_dict["input_shape"] = [(3)]
arg_dict["dim"] = [0, 2, -3]
arg_dict["dtype"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
test_fun(*arg)
def test_diag_2D_cpu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu"]
arg_dict["input_shape"] = [(3, 3)]
arg_dict["dim"] = [1, -1, 0]
arg_dict["dtype"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
test_fun(*arg)
def test_diag_1D_gpu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["input_shape"] = [(3)]
arg_dict["dim"] = [0, 2, -3]
arg_dict["dtype"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
test_fun(*arg)
def test_diag_2D_gpu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["gpu"]
arg_dict["input_shape"] = [(3, 3)]
arg_dict["dim"] = [1, -1, 0]
arg_dict["dtype"] = ["float32", "double"]
for arg in GenArgList(arg_dict):
test_fun(*arg)
def test_diag_int_cpu(test_case):
arg_dict = OrderedDict()
arg_dict["device_type"] = ["cpu"]
arg_dict["input_shape"] = [(3)]
arg_dict["dim"] = [-2]
arg_dict["dtype"] = ["int64", "int8", "int32"]
for arg in GenArgList(arg_dict):
test_fun(*arg)
if __name__ == "__main__":
unittest.main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.scope.mirrored_view",
"oneflow.cast_to_current_logical_view",
"oneflow.optimizer.PiecewiseConstantScheduler",
"oneflow.FunctionConfig",
"oneflow.zeros_initializer",
"oneflow.global_func... | [((4568, 4600), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4598, 4600), True, 'import oneflow as flow\n'), ((1118, 1139), 'numpy.zeros', 'np.zeros', (['input_shape'], {}), '(input_shape)\n', (1126, 1139), True, 'import numpy as np\n'), ((2183, 2211), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2209, 2211), True, 'import oneflow as flow\n'), ((2230, 2251), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2249, 2251), True, 'import oneflow as flow\n'), ((2416, 2442), 'numpy.diag', 'np.diag', (['input_tensor', 'dim'], {}), '(input_tensor, dim)\n', (2423, 2442), True, 'import numpy as np\n'), ((2619, 2682), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2639, 2682), True, 'import oneflow as flow\n'), ((3923, 3946), 'oneflow.train.CheckPoint', 'flow.train.CheckPoint', ([], {}), '()\n', (3944, 3946), True, 'import oneflow as flow\n'), ((4238, 4271), 'numpy.allclose', 'np.allclose', (['output_of', 'output_np'], {}), '(output_of, output_np)\n', (4249, 4271), True, 'import numpy as np\n'), ((4283, 4316), 'numpy.allclose', 'np.allclose', (['x_diff_of', 'x_diff_np'], {}), '(x_diff_of, x_diff_np)\n', (4294, 4316), True, 'import numpy as np\n'), ((6219, 6234), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6232, 6234), False, 'import unittest\n'), ((2289, 2315), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (2313, 2315), True, 'import oneflow as flow\n'), ((2357, 2397), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2377, 2397), True, 'import oneflow as flow\n'), ((3093, 3140), 'oneflow.cast_to_current_logical_view', 'flow.cast_to_current_logical_view', (['input_tensor'], {}), '(input_tensor)\n', (3126, 3140), True, 'import oneflow 
as flow\n'), ((3164, 3218), 'oneflow.cast', 'flow.cast', (['input_tensor', 'type_name_to_flow_type[dtype]'], {}), '(input_tensor, type_name_to_flow_type[dtype])\n', (3173, 3218), True, 'import oneflow as flow\n'), ((3236, 3264), 'oneflow.diag', 'flow.diag', (['input_tensor', 'dim'], {}), '(input_tensor, dim)\n', (3245, 3264), True, 'import oneflow as flow\n'), ((4697, 4710), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4708, 4710), False, 'from collections import OrderedDict\n'), ((4899, 4919), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4909, 4919), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5005, 5018), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5016, 5018), False, 'from collections import OrderedDict\n'), ((5210, 5230), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5220, 5230), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5316, 5329), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5327, 5329), False, 'from collections import OrderedDict\n'), ((5518, 5538), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5528, 5538), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5624, 5637), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5635, 5637), False, 'from collections import OrderedDict\n'), ((5829, 5849), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5839, 5849), False, 'from test_util import GenArgList, type_name_to_flow_type, type_name_to_np_type\n'), ((5936, 5949), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5947, 5949), False, 'from collections import OrderedDict\n'), ((6137, 6157), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (6147, 6157), False, 'from test_util import GenArgList, 
type_name_to_flow_type, type_name_to_np_type\n'), ((911, 945), 'numpy.random.standard_normal', 'np.random.standard_normal', (['x_shape'], {}), '(x_shape)\n', (936, 945), True, 'import numpy as np\n'), ((2561, 2591), 'numpy.random.random', 'np.random.random', (['output_shape'], {}), '(output_shape)\n', (2577, 2591), True, 'import numpy as np\n'), ((2723, 2780), 'oneflow.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_shape', 'dtype': 'flow.float'}), '(shape=input_shape, dtype=flow.float)\n', (2743, 2780), True, 'import oneflow.typing as tp\n'), ((3432, 3461), 'oneflow.cast', 'flow.cast', (['output', 'flow.float'], {}), '(output, flow.float)\n', (3441, 3461), True, 'import oneflow as flow\n'), ((3618, 3649), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (3644, 3649), False, 'import test_global_storage\n'), ((3689, 3725), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (3715, 3725), False, 'import test_global_storage\n'), ((3754, 3790), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""output"""'], {}), "('output')\n", (3780, 3790), False, 'import test_global_storage\n'), ((3824, 3865), 'test_global_storage.Setter', 'test_global_storage.Setter', (['"""output_diff"""'], {}), "('output_diff')\n", (3850, 3865), False, 'import test_global_storage\n'), ((4027, 4065), 'test_global_storage.Get', 'test_global_storage.Get', (['"""output_diff"""'], {}), "('output_diff')\n", (4050, 4065), False, 'import test_global_storage\n'), ((4096, 4129), 'test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (4119, 4129), False, 'import test_global_storage\n'), ((4390, 4419), 'numpy.random.random', 'np.random.random', (['input_shape'], {}), '(input_shape)\n', (4406, 4419), True, 'import numpy as np\n'), ((2957, 2981), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2979, 2981), True, 'import 
oneflow as flow\n'), ((3503, 3558), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (3544, 3558), True, 'import oneflow as flow\n')] |
import sys
import math
import numpy as np
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from utils.data_utils import flip, sinc, act_fun
class SincConv_fast(nn.Module):
"""Sinc-based convolution
Parameters
----------
in_channels : `int`
Number of input channels. Must be 1.
out_channels : `int`
Number of filters.
kernel_size : `int`
Filter length.
sample_rate : `int`, optional
Sample rate. Defaults to 16000.
Usage
-----
See `torch.nn.Conv1d`
Reference
---------
<NAME>, <NAME>,
"Speaker Recognition from raw waveform with SincNet".
https://arxiv.org/abs/1808.00158
"""
@staticmethod
def to_mel(hz):
return 2595 * np.log10(1 + hz / 700)
@staticmethod
def to_hz(mel):
return 700 * (10 ** (mel / 2595) - 1)
def __init__(
self,
out_channels,
kernel_size,
sample_rate=16000,
in_channels=1,
stride=1,
padding=0,
dilation=1,
bias=False,
groups=1,
min_low_hz=50,
min_band_hz=50,
):
super(SincConv_fast, self).__init__()
if in_channels != 1:
msg = (
"SincConv only support one input channel (here, in_channels = {%i})"
% (in_channels)
)
raise ValueError(msg)
self.out_channels = out_channels
self.kernel_size = kernel_size
# Forcing the filters to be odd (i.e, perfectly symmetrics)
if kernel_size % 2 == 0:
self.kernel_size = self.kernel_size + 1
self.stride = stride
self.padding = padding
self.dilation = dilation
if bias:
raise ValueError("SincConv does not support bias.")
if groups > 1:
raise ValueError("SincConv does not support groups.")
self.sample_rate = sample_rate
self.min_low_hz = min_low_hz
self.min_band_hz = min_band_hz
# initialize filterbanks such that they are equally spaced in Mel scale
low_hz = 30
high_hz = self.sample_rate / 2 - (self.min_low_hz + self.min_band_hz)
mel = np.linspace(
self.to_mel(low_hz), self.to_mel(high_hz), self.out_channels + 1
)
hz = self.to_hz(mel)
# filter lower frequency (out_channels, 1)
self.low_hz_ = nn.Parameter(flow.Tensor(hz[:-1]).reshape(-1, 1))
# filter frequency band (out_channels, 1)
self.band_hz_ = nn.Parameter(flow.Tensor(np.diff(hz)).reshape(-1, 1))
# Hamming window
n_lin = flow.Tensor(
np.linspace(0, (self.kernel_size / 2) - 1, int((self.kernel_size / 2)))
)
self.window_ = 0.54 - 0.46 * flow.cos(2 * math.pi * n_lin / self.kernel_size)
# (1, kernel_size/2)
n = (self.kernel_size - 1) / 2.0
self.n_ = (
2
* math.pi
* flow.Tensor(
np.arange(-n, 0).reshape(1, -1) / self.sample_rate, dtype=flow.float32
)
)
def forward(self, waveforms):
"""
Parameters
----------
waveforms : `torch.Tensor` (batch_size, 1, n_samples)
Batch of waveforms.
Returns
-------
features : `torch.Tensor` (batch_size, out_channels, n_samples_out)
Batch of sinc filters activations.
"""
self.n_ = self.n_.to(waveforms.device)
self.window_ = self.window_.to(waveforms.device)
low = self.min_low_hz + flow.abs(self.low_hz_)
high = flow.clamp(
low + self.min_band_hz + flow.abs(self.band_hz_),
self.min_low_hz,
self.sample_rate / 2,
)
band = (high - low)[:, 0]
f_times_t_low = flow.matmul(low, self.n_)
f_times_t_high = flow.matmul(high, self.n_)
band_pass_left = (
(flow.sin(f_times_t_high) - flow.sin(f_times_t_low)) / (self.n_ / 2)
) * self.window_
band_pass_center = 2 * band.reshape(-1, 1)
band_pass_right = flow.flip(band_pass_left, dims=[1])
band_pass = flow.cat([band_pass_left, band_pass_center, band_pass_right], dim=1)
band_pass = band_pass / (2 * band[:, None])
self.filters = (band_pass).reshape(self.out_channels, 1, self.kernel_size)
output = F.conv1d(
waveforms,
self.filters,
stride=[self.stride],
padding=[self.padding],
dilation=[self.dilation],
bias=None,
groups=1,
)
return output
class LayerNorm(nn.Module):
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.gamma = nn.Parameter(flow.ones(features))
self.beta = nn.Parameter(flow.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.gamma * (x - mean) / (std + self.eps) + self.beta
class MLP(nn.Module):
def __init__(self, options):
super(MLP, self).__init__()
self.input_dim = int(options["input_dim"])
self.fc_lay = options["fc_lay"]
self.fc_drop = options["fc_drop"]
self.fc_use_batchnorm = options["fc_use_batchnorm"]
self.fc_use_laynorm = options["fc_use_laynorm"]
self.fc_use_laynorm_inp = options["fc_use_laynorm_inp"]
self.fc_use_batchnorm_inp = options["fc_use_batchnorm_inp"]
self.fc_act = options["fc_act"]
self.wx = nn.ModuleList([])
self.bn = nn.ModuleList([])
self.ln = nn.ModuleList([])
self.act = nn.ModuleList([])
self.drop = nn.ModuleList([])
# input layer normalization
if self.fc_use_laynorm_inp:
self.ln0 = LayerNorm(self.input_dim)
# input batch normalization
if self.fc_use_batchnorm_inp:
self.bn0 = nn.BatchNorm1d([self.input_dim], momentum=0.05)
self.N_fc_lay = len(self.fc_lay)
current_input = self.input_dim
# Initialization of hidden layers
for i in range(self.N_fc_lay):
# dropout
self.drop.append(nn.Dropout(p=self.fc_drop[i]))
# activation
self.act.append(act_fun(self.fc_act[i]))
add_bias = True
# layer norm initialization
self.ln.append(LayerNorm(self.fc_lay[i]))
self.bn.append(nn.BatchNorm1d(self.fc_lay[i], momentum=0.05))
if self.fc_use_laynorm[i] or self.fc_use_batchnorm[i]:
add_bias = False
# Linear operations
self.wx.append(nn.Linear(current_input, self.fc_lay[i], bias=add_bias))
# weight initialization
self.wx[i].weight = nn.Parameter(
flow.Tensor(self.fc_lay[i], current_input).uniform_(
-np.sqrt(0.01 / (current_input + self.fc_lay[i])),
np.sqrt(0.01 / (current_input + self.fc_lay[i])),
)
)
self.wx[i].bias = nn.Parameter(flow.zeros(self.fc_lay[i]))
current_input = self.fc_lay[i]
def forward(self, x):
# Applying Layer/Batch Norm
if bool(self.fc_use_laynorm_inp):
x = self.ln0((x))
if bool(self.fc_use_batchnorm_inp):
x = self.bn0((x))
for i in range(self.N_fc_lay):
if self.fc_act[i] != "linear":
if self.fc_use_laynorm[i]:
x = self.drop[i](self.act[i](self.ln[i](self.wx[i](x))))
if self.fc_use_batchnorm[i]:
x = self.drop[i](self.act[i](self.bn[i](self.wx[i](x))))
if (
self.fc_use_batchnorm[i] == False
and self.fc_use_laynorm[i] == False
):
x = self.drop[i](self.act[i](self.wx[i](x)))
else:
if self.fc_use_laynorm[i]:
x = self.drop[i](self.ln[i](self.wx[i](x)))
if self.fc_use_batchnorm[i]:
x = self.drop[i](self.bn[i](self.wx[i](x)))
if (
self.fc_use_batchnorm[i] == False
and self.fc_use_laynorm[i] == False
):
x = self.drop[i](self.wx[i](x))
return x
| [
"oneflow.cat",
"oneflow.matmul",
"oneflow.flip",
"oneflow.sin",
"oneflow.nn.functional.conv1d",
"oneflow.nn.BatchNorm1d",
"oneflow.nn.Linear",
"oneflow.cos",
"oneflow.abs",
"oneflow.zeros",
"oneflow.ones",
"oneflow.nn.Dropout",
"oneflow.Tensor",
"oneflow.nn.ModuleList"
] | [((3835, 3860), 'oneflow.matmul', 'flow.matmul', (['low', 'self.n_'], {}), '(low, self.n_)\n', (3846, 3860), True, 'import oneflow as flow\n'), ((3886, 3912), 'oneflow.matmul', 'flow.matmul', (['high', 'self.n_'], {}), '(high, self.n_)\n', (3897, 3912), True, 'import oneflow as flow\n'), ((4124, 4159), 'oneflow.flip', 'flow.flip', (['band_pass_left'], {'dims': '[1]'}), '(band_pass_left, dims=[1])\n', (4133, 4159), True, 'import oneflow as flow\n'), ((4181, 4249), 'oneflow.cat', 'flow.cat', (['[band_pass_left, band_pass_center, band_pass_right]'], {'dim': '(1)'}), '([band_pass_left, band_pass_center, band_pass_right], dim=1)\n', (4189, 4249), True, 'import oneflow as flow\n'), ((4405, 4536), 'oneflow.nn.functional.conv1d', 'F.conv1d', (['waveforms', 'self.filters'], {'stride': '[self.stride]', 'padding': '[self.padding]', 'dilation': '[self.dilation]', 'bias': 'None', 'groups': '(1)'}), '(waveforms, self.filters, stride=[self.stride], padding=[self.\n padding], dilation=[self.dilation], bias=None, groups=1)\n', (4413, 4536), True, 'import oneflow.nn.functional as F\n'), ((5607, 5624), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (5620, 5624), True, 'import oneflow.nn as nn\n'), ((5643, 5660), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (5656, 5660), True, 'import oneflow.nn as nn\n'), ((5679, 5696), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (5692, 5696), True, 'import oneflow.nn as nn\n'), ((5716, 5733), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (5729, 5733), True, 'import oneflow.nn as nn\n'), ((5754, 5771), 'oneflow.nn.ModuleList', 'nn.ModuleList', (['[]'], {}), '([])\n', (5767, 5771), True, 'import oneflow.nn as nn\n'), ((768, 790), 'numpy.log10', 'np.log10', (['(1 + hz / 700)'], {}), '(1 + hz / 700)\n', (776, 790), True, 'import numpy as np\n'), ((3590, 3612), 'oneflow.abs', 'flow.abs', (['self.low_hz_'], {}), '(self.low_hz_)\n', (3598, 3612), True, 'import oneflow as 
flow\n'), ((4799, 4818), 'oneflow.ones', 'flow.ones', (['features'], {}), '(features)\n', (4808, 4818), True, 'import oneflow as flow\n'), ((4853, 4873), 'oneflow.zeros', 'flow.zeros', (['features'], {}), '(features)\n', (4863, 4873), True, 'import oneflow as flow\n'), ((5992, 6039), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['[self.input_dim]'], {'momentum': '(0.05)'}), '([self.input_dim], momentum=0.05)\n', (6006, 6039), True, 'import oneflow.nn as nn\n'), ((2792, 2840), 'oneflow.cos', 'flow.cos', (['(2 * math.pi * n_lin / self.kernel_size)'], {}), '(2 * math.pi * n_lin / self.kernel_size)\n', (2800, 2840), True, 'import oneflow as flow\n'), ((3678, 3701), 'oneflow.abs', 'flow.abs', (['self.band_hz_'], {}), '(self.band_hz_)\n', (3686, 3701), True, 'import oneflow as flow\n'), ((6256, 6285), 'oneflow.nn.Dropout', 'nn.Dropout', ([], {'p': 'self.fc_drop[i]'}), '(p=self.fc_drop[i])\n', (6266, 6285), True, 'import oneflow.nn as nn\n'), ((6341, 6364), 'utils.data_utils.act_fun', 'act_fun', (['self.fc_act[i]'], {}), '(self.fc_act[i])\n', (6348, 6364), False, 'from utils.data_utils import flip, sinc, act_fun\n'), ((6517, 6562), 'oneflow.nn.BatchNorm1d', 'nn.BatchNorm1d', (['self.fc_lay[i]'], {'momentum': '(0.05)'}), '(self.fc_lay[i], momentum=0.05)\n', (6531, 6562), True, 'import oneflow.nn as nn\n'), ((6725, 6780), 'oneflow.nn.Linear', 'nn.Linear', (['current_input', 'self.fc_lay[i]'], {'bias': 'add_bias'}), '(current_input, self.fc_lay[i], bias=add_bias)\n', (6734, 6780), True, 'import oneflow.nn as nn\n'), ((7150, 7176), 'oneflow.zeros', 'flow.zeros', (['self.fc_lay[i]'], {}), '(self.fc_lay[i])\n', (7160, 7176), True, 'import oneflow as flow\n'), ((2440, 2460), 'oneflow.Tensor', 'flow.Tensor', (['hz[:-1]'], {}), '(hz[:-1])\n', (2451, 2460), True, 'import oneflow as flow\n'), ((3954, 3978), 'oneflow.sin', 'flow.sin', (['f_times_t_high'], {}), '(f_times_t_high)\n', (3962, 3978), True, 'import oneflow as flow\n'), ((3981, 4004), 'oneflow.sin', 'flow.sin', 
(['f_times_t_low'], {}), '(f_times_t_low)\n', (3989, 4004), True, 'import oneflow as flow\n'), ((7025, 7073), 'numpy.sqrt', 'np.sqrt', (['(0.01 / (current_input + self.fc_lay[i]))'], {}), '(0.01 / (current_input + self.fc_lay[i]))\n', (7032, 7073), True, 'import numpy as np\n'), ((2577, 2588), 'numpy.diff', 'np.diff', (['hz'], {}), '(hz)\n', (2584, 2588), True, 'import numpy as np\n'), ((6881, 6923), 'oneflow.Tensor', 'flow.Tensor', (['self.fc_lay[i]', 'current_input'], {}), '(self.fc_lay[i], current_input)\n', (6892, 6923), True, 'import oneflow as flow\n'), ((6955, 7003), 'numpy.sqrt', 'np.sqrt', (['(0.01 / (current_input + self.fc_lay[i]))'], {}), '(0.01 / (current_input + self.fc_lay[i]))\n', (6962, 7003), True, 'import numpy as np\n'), ((3011, 3027), 'numpy.arange', 'np.arange', (['(-n)', '(0)'], {}), '(-n, 0)\n', (3020, 3027), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import oneflow.unittest
@flow.unittest.skip_unless_1n1d()
class TestLogicalReduce(flow.unittest.TestCase):
    """Autotest suite comparing flow's logical reductions (all/any) with torch."""

    @autotest(auto_backward=False)
    def test_all_with_random_data(test_case):
        dev = random_device()
        axis = random(1, 4).to(int)
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.all(inp, axis)

    @autotest(auto_backward=False)
    def test_all_bool_input_with_random_data(test_case):
        dev = random_device()
        axis = random(1, 4).to(int)
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(
            dev, dtype=torch.bool
        )
        return torch.all(inp, axis)

    @autotest(auto_backward=False, check_graph=True)
    def test_any_with_random_data(test_case):
        dev = random_device()
        axis = random(1, 4).to(int)
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.any(inp, axis)

    @autotest(auto_backward=False)
    def test_any_bool_input_with_random_data(test_case):
        dev = random_device()
        axis = random(1, 4).to(int)
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(
            dev, dtype=torch.bool
        )
        return torch.any(inp, axis)

    @autotest(auto_backward=False, check_graph=True)
    def test_scalar_reduce_all_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.all(inp)

    @autotest(auto_backward=False)
    def test_scalar_reduce_any_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.any(inp)

    @autotest(auto_backward=False)
    def test_matrix_row_all_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=2, dtype=float, requires_grad=False).to(dev)
        return torch.all(inp, 1)

    @autotest(auto_backward=False)
    def test_matrix_row_any_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=2, dtype=float, requires_grad=False).to(dev)
        return torch.any(inp, 1)

    @autotest(auto_backward=False)
    def test_matrix_col_all_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=2, dtype=float, requires_grad=False).to(dev)
        return torch.all(inp, 0)

    @autotest(auto_backward=False)
    def test_matrix_col_any_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=2, dtype=float, requires_grad=False).to(dev)
        return torch.any(inp, 0)

    @autotest(auto_backward=False)
    def test_all_keepdim_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.all(inp, 1, keepdim=True)

    @autotest(auto_backward=False)
    def test_any_keepdim_with_random_data(test_case):
        dev = random_device()
        inp = random_tensor(ndim=4, dtype=float, requires_grad=False).to(dev)
        return torch.any(inp, 1, keepdim=True)
# Standard unittest entry point when this file is run as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.unittest.skip_unless_1n1d"
] | [((819, 851), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (849, 851), True, 'import oneflow as flow\n'), ((4029, 4044), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4042, 4044), False, 'import unittest\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from oneflow.python.ops.array_ops import zeros
from test_util import (
GenArgList,
FlattenArray,
Array2Numpy,
Index2Coordinate,
)
def _np_replication_pad2d_grad(src, dest, padding):
    """Numpy reference for the backward pass of ReplicationPad2d.

    Each padded output location maps back to the edge pixel it was replicated
    from, so the gradient of an input pixel is the count of output pixels that
    replicated it (ones are used as the upstream gradient).

    Args:
        src: padded (output-shaped) array — only its shape is used.
        dest: unpadded (input-shaped) array — only its shape is used.
        padding: [left, right, top, bottom] pad sizes.

    Returns:
        Accumulated gradient array of ``dest.shape`` (int32).
    """
    # NCHW axis indices.
    c_idx, h_idx, w_idx = 1, 2, 3
    pad_left = padding[0]
    pad_right = padding[1]
    pad_top = padding[2]
    pad_bottom = padding[3]
    dx_height, dx_width = dest.shape[h_idx], dest.shape[w_idx]
    dy_height, dy_width = src.shape[h_idx], src.shape[w_idx]
    # Upstream gradient is all ones; destination accumulates counts.
    numpy_src = np.ones(src.shape, np.int32)
    numpy_dest = np.zeros(dest.shape, np.int32)
    array_src = FlattenArray(numpy_src)
    array_dest = FlattenArray(numpy_dest)
    src_num = src.shape[c_idx] * src.shape[h_idx] * src.shape[w_idx]
    dest_num = dest.shape[c_idx] * dest.shape[h_idx] * dest.shape[w_idx]
    elements_num = src.shape[0] * src_num
    for iter_n in range(elements_num):
        coords = Index2Coordinate(iter_n, src.shape)
        n, c, i, j = coords[0], coords[c_idx], coords[h_idx], coords[w_idx]
        ip_x = ip_y = 0
        # Clamp the padded column j back into the original width range.
        if j < pad_left:
            ip_x = pad_left
        elif j >= pad_left and j < (dx_width + pad_left):
            ip_x = j
        else:
            ip_x = dx_width + pad_left - 1
        # Clamp the padded row i back into the original height range.
        if i < pad_top:
            ip_y = pad_top
        elif i >= pad_top and i < (dx_height + pad_top):
            ip_y = i
        else:
            ip_y = dx_height + pad_top - 1
        # Shift from padded coordinates to unpadded input coordinates.
        ip_x = ip_x - pad_left
        ip_y = ip_y - pad_top
        src_index = n * src_num + c * dy_width * dy_height + i * dy_width + j
        dest_index = n * dest_num + c * dx_width * dx_height + ip_y * dx_width + ip_x
        array_dest[dest_index] += array_src[src_index]
    numpy_dest = Array2Numpy(array_dest, dest.shape)
    return numpy_dest
def _test_ReplicationPad2d(test_case, shape, padding, device):
    """Check flow.nn.ReplicationPad2d forward and backward against numpy.

    Forward is compared with ``np.pad(..., mode="edge")``; backward with the
    numpy reference ``_np_replication_pad2d_grad``.

    Args:
        test_case: the unittest.TestCase driving the assertions.
        shape: NCHW input shape.
        padding: int, or a 4-element tuple/list (left, right, top, bottom).
        device: "cpu" or "cuda".

    Raises:
        ValueError: if ``padding`` is neither an int nor a 4-element tuple/list.
    """
    np_input = np.random.random(shape).astype(np.float32)
    of_input = flow.Tensor(
        np_input, dtype=flow.float32, device=flow.device(device), requires_grad=True
    )
    if isinstance(padding, int):
        np_boundary = ((0, 0), (0, 0), (padding, padding), (padding, padding))
        boundary = [padding, padding, padding, padding]
    # BUGFIX: was `(tuple, int)`, which rejected 4-element lists; ints are
    # already handled above, so this branch is for tuple/list paddings.
    elif isinstance(padding, (tuple, list)) and len(padding) == 4:
        # (left, right, top, bottom) -> np.pad wants ((top, bottom), (left, right)).
        np_boundary = (
            (0, 0),
            (0, 0),
            (padding[2], padding[3]),
            (padding[0], padding[1]),
        )
        boundary = [padding[0], padding[1], padding[2], padding[3]]
    else:
        raise ValueError("padding must be int or list or tuple!")
    layer = flow.nn.ReplicationPad2d(padding=padding)
    of_out = layer(of_input)
    np_out = np.pad(np_input, np_boundary, mode="edge")
    test_case.assertTrue(np.allclose(of_out.numpy(), np_out, 1e-5, 1e-5))
    of_out = of_out.sum()
    of_out.backward()
    np_out_grad = _np_replication_pad2d_grad(np_out, np_input, boundary)
    test_case.assertTrue(np.allclose(of_input.grad.numpy(), np_out_grad, 1e-3, 1e-3))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestReplicationPad2dModule(flow.unittest.TestCase):
    """Parameterized sweep of ReplicationPad2d over shapes, paddings, devices."""

    def test_ReplicationPad2d(test_case):
        cases = OrderedDict()
        cases["shape"] = [(1, 2, 3, 4), (8, 3, 4, 4)]
        cases["padding"] = [(2), (1, 1, 2, 2)]
        cases["device"] = ["cpu", "cuda"]
        for combo in GenArgList(cases):
            _test_ReplicationPad2d(test_case, *combo)
# Standard unittest entry point when this file is run as a script.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.device",
"oneflow.experimental.nn.ReplicationPad2d",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((1180, 1208), 'numpy.ones', 'np.ones', (['src.shape', 'np.int32'], {}), '(src.shape, np.int32)\n', (1187, 1208), True, 'import numpy as np\n'), ((1226, 1256), 'numpy.zeros', 'np.zeros', (['dest.shape', 'np.int32'], {}), '(dest.shape, np.int32)\n', (1234, 1256), True, 'import numpy as np\n'), ((1273, 1296), 'test_util.FlattenArray', 'FlattenArray', (['numpy_src'], {}), '(numpy_src)\n', (1285, 1296), False, 'from test_util import GenArgList, FlattenArray, Array2Numpy, Index2Coordinate\n'), ((1314, 1338), 'test_util.FlattenArray', 'FlattenArray', (['numpy_dest'], {}), '(numpy_dest)\n', (1326, 1338), False, 'from test_util import GenArgList, FlattenArray, Array2Numpy, Index2Coordinate\n'), ((2391, 2426), 'test_util.Array2Numpy', 'Array2Numpy', (['array_dest', 'dest.shape'], {}), '(array_dest, dest.shape)\n', (2402, 2426), False, 'from test_util import GenArgList, FlattenArray, Array2Numpy, Index2Coordinate\n'), ((3230, 3271), 'oneflow.experimental.nn.ReplicationPad2d', 'flow.nn.ReplicationPad2d', ([], {'padding': 'padding'}), '(padding=padding)\n', (3254, 3271), True, 'import oneflow.experimental as flow\n'), ((3314, 3356), 'numpy.pad', 'np.pad', (['np_input', 'np_boundary'], {'mode': '"""edge"""'}), "(np_input, np_boundary, mode='edge')\n", (3320, 3356), True, 'import numpy as np\n'), ((4168, 4183), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4181, 4183), False, 'import unittest\n'), ((1580, 1615), 'test_util.Index2Coordinate', 'Index2Coordinate', (['iter_n', 'src.shape'], {}), '(iter_n, src.shape)\n', (1596, 1615), False, 'from test_util import GenArgList, FlattenArray, Array2Numpy, Index2Coordinate\n'), ((3875, 3888), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3886, 3888), False, 'from collections import OrderedDict\n'), ((4061, 4081), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4071, 4081), False, 'from test_util import GenArgList, FlattenArray, Array2Numpy, Index2Coordinate\n'), ((3667, 3710), 
'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3708, 3710), True, 'import oneflow.experimental as flow\n'), ((2529, 2552), 'numpy.random.random', 'np.random.random', (['shape'], {}), '(shape)\n', (2545, 2552), True, 'import numpy as np\n'), ((2645, 2664), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2656, 2664), True, 'import oneflow.experimental as flow\n')] |
import oneflow as flow
from oneflow import nn
import libai
def cosine_similarity(x, y, dim=-1):
    """Cosine similarity of x and y along ``dim`` (no epsilon guard: a
    zero-norm input divides by zero)."""
    dot = flow.sum(x * y, dim=dim)
    denom = flow.linalg.norm(x, dim=dim) * flow.linalg.norm(y, dim=dim)
    return dot / denom
class MLPLayer(nn.Module):
    """Dense + tanh projection head (hidden_size -> hidden_size)."""

    def __init__(self, cfg):
        super().__init__()
        self.dense = libai.layers.Linear(
            cfg.hidden_size, cfg.hidden_size, bias=True, parallel="row", layer_idx=-1
        )
        self.activation = libai.layers.build_activation("tanh")

    def forward(self, features):
        hidden = self.dense(features)
        return self.activation(hidden)
| [
"oneflow.sum",
"oneflow.linalg.norm"
] | [((110, 134), 'oneflow.sum', 'flow.sum', (['(x * y)'], {'dim': 'dim'}), '(x * y, dim=dim)\n', (118, 134), True, 'import oneflow as flow\n'), ((305, 404), 'libai.layers.Linear', 'libai.layers.Linear', (['cfg.hidden_size', 'cfg.hidden_size'], {'bias': '(True)', 'parallel': '"""row"""', 'layer_idx': '(-1)'}), "(cfg.hidden_size, cfg.hidden_size, bias=True, parallel=\n 'row', layer_idx=-1)\n", (324, 404), False, 'import libai\n'), ((448, 485), 'libai.layers.build_activation', 'libai.layers.build_activation', (['"""tanh"""'], {}), "('tanh')\n", (477, 485), False, 'import libai\n'), ((138, 166), 'oneflow.linalg.norm', 'flow.linalg.norm', (['x'], {'dim': 'dim'}), '(x, dim=dim)\n', (154, 166), True, 'import oneflow as flow\n'), ((169, 197), 'oneflow.linalg.norm', 'flow.linalg.norm', (['y'], {'dim': 'dim'}), '(y, dim=dim)\n', (185, 197), True, 'import oneflow as flow\n')] |
import oneflow as flow
import numpy as np
def conv2d_layer(
    name,
    input,
    out_channel,
    kernel_size = 3,
    strides = 1,
    padding = "SAME", # or [[], [], [], []]
    data_format = "NCHW",
    dilation_rate = 1,
    use_bias = True,
    weight_initializer = flow.random_normal_initializer(mean = 0.0, stddev = 0.02),
    bias_initializer = flow.zeros_initializer(),
    trainable = True,
    reuse = True
):
    """2D convolution with optional bias.

    Creates (or reuses) a ``name + "_weight"`` variable of shape
    (out_channel, in_channel, k, k) and, when ``use_bias``, a
    ``name + "_bias"`` variable of shape (out_channel,).
    """
    kernel = flow.get_variable(
        name + "_weight",
        shape=(out_channel, input.shape[1], kernel_size, kernel_size),
        dtype=input.dtype,
        initializer=weight_initializer,
        trainable=trainable,
        reuse=reuse,
    )
    out = flow.nn.conv2d(input, kernel, strides, padding, data_format, dilation_rate)
    if not use_bias:
        return out
    bias = flow.get_variable(
        name + "_bias",
        shape=(out_channel,),
        dtype=input.dtype,
        initializer=bias_initializer,
        trainable=trainable,
    )
    return flow.nn.bias_add(out, bias, data_format)
def upsampleConvLayer(
    input,
    name_prefix,
    channel,
    kernel_size,
    hw_scale = (2, 2),
    data_format = "NCHW",
    interpolation = "bilinear",
    # interpolation = "nearest",
    trainable = True):
    """Upsample-then-convolve block (a common substitute for transposed conv)."""
    up = flow.layers.upsample_2d(
        input,
        size=hw_scale,
        data_format=data_format,
        interpolation=interpolation,
        name=name_prefix + "_%s" % interpolation,
    )
    return conv2d_layer(
        name_prefix + "_conv",
        up,
        channel,
        kernel_size=kernel_size,
        strides=1,
        trainable=trainable,
    )
def deconv(input, out_channel, name_prefix, kernel_size = 4, strides = [2, 2], trainable = True, reuse = True):
    """Transposed 2D convolution that upsamples H and W by ``strides``."""
    kernel = flow.get_variable(
        name_prefix + "_weight",
        shape=(input.shape[1], out_channel, kernel_size, kernel_size),
        dtype=flow.float,
        initializer=flow.random_normal_initializer(mean=0.0, stddev=0.02),
        trainable=trainable,
        reuse=reuse,
    )
    # Output spatial size is the input size scaled by the stride in each dim.
    target_shape = (
        input.shape[0],
        out_channel,
        input.shape[2] * strides[0],
        input.shape[3] * strides[1],
    )
    return flow.nn.conv2d_transpose(
        input,
        kernel,
        strides=strides,
        padding="SAME",
        output_shape=target_shape,
    )
def norm_layer(input, name, norm_type="instance", trainable = True, reuse = True):
    # Always applies instance normalization; the `name`, `norm_type`,
    # `trainable` and `reuse` arguments are currently ignored — presumably
    # only "instance" norm is supported here (verify before relying on
    # norm_type="batch" etc. doing anything).
    return flow.nn.InstanceNorm2d(input, eps=1e-5, affine=False)
def ResnetBlock(input, name_prefix, dim, norm_type="instance",
                use_dropout=False, trainable=True, reuse=True):
    """Residual block: pad-conv-norm-relu (-dropout) pad-conv-norm, plus skip."""
    h = flow.reflection_pad2d(input, padding=[1, 1, 1, 1])
    h = conv2d_layer(name_prefix + "_conv1", h, dim, kernel_size=3, padding="VALID", trainable=trainable, reuse=reuse)
    h = norm_layer(h, name_prefix + "_norm1", norm_type=norm_type, trainable=trainable, reuse=reuse)
    h = flow.nn.relu(h)
    if use_dropout:
        h = flow.nn.dropout(h, rate=0.5)
    h = flow.reflection_pad2d(h, padding=[1, 1, 1, 1])
    h = conv2d_layer(name_prefix + "_conv2", h, dim, kernel_size=3, padding="VALID", trainable=trainable, reuse=reuse)
    h = norm_layer(h, name_prefix + "_norm2", norm_type=norm_type, trainable=trainable, reuse=reuse)
    return input + h
def GlobalGenerator(input, var_name_prefix, output_nc, ngf=64, n_downsampling=3,
    n_blocks=9, norm_type="instance", trainable=True, reuse=True, return_before_final_conv=False):
    """pix2pixHD global generator: downsample, resnet blocks, upsample.

    Args:
        input: NCHW input tensor.
        var_name_prefix: variable namespace for this generator's weights.
        output_nc: number of output channels of the final conv.
        ngf: base channel width.
        n_downsampling: number of stride-2 down/upsampling stages.
        n_blocks: number of residual blocks at the bottleneck.
        return_before_final_conv: if True, skip the final pad/conv/tanh and
            return the feature map (used by LocalEnhancer to fuse features).

    Returns:
        The generated image (tanh output) or, when requested, the pre-final
        feature map.
    """
    with flow.scope.namespace(var_name_prefix):
        out = flow.reflection_pad2d(input, padding=[3, 3, 3, 3])
        out = conv2d_layer("conv1", out, ngf, kernel_size=7, padding="VALID", trainable=trainable, reuse=reuse)
        out = norm_layer(out, "norm1", norm_type=norm_type, trainable=trainable, reuse=reuse)
        out = flow.nn.relu(out)
        ### downsample
        for i in range(n_downsampling):
            mult = 2**i
            out = conv2d_layer("conv_downsample_%d" % i, out, ngf * mult * 2,
                kernel_size=3, strides=2, padding="SAME", trainable=trainable, reuse=reuse)
            out = norm_layer(out, "norm_downsample_%d" % i, norm_type=norm_type, trainable=trainable, reuse=reuse)
            out = flow.nn.relu(out)
        ### resnet blocks
        mult = 2**n_downsampling
        for i in range(n_blocks):
            # BUGFIX: was `norm_type=norm_layer` (passed the function object
            # instead of the norm-type string).
            out = ResnetBlock(out, "resblock_%d" % i, ngf * mult, norm_type=norm_type, trainable=trainable, reuse=reuse)
        ### upsample
        for i in range(n_downsampling):
            mult = 2**(n_downsampling - i)
            out = deconv(out, int(ngf * mult / 2), "deconv_%d" % i, kernel_size=3, strides=[2, 2], trainable=trainable, reuse=reuse)
            out = norm_layer(out, "norm_upsample_%d" % i, norm_type=norm_type, trainable=trainable, reuse=reuse)
            out = flow.nn.relu(out)
        if return_before_final_conv:
            return out
        out = flow.reflection_pad2d(out, padding=[3, 3, 3, 3])
        out = conv2d_layer("conv_last", out, output_nc, kernel_size=7, padding="VALID", trainable=trainable, reuse=reuse)
        out = flow.math.tanh(out)
        return out
def LocalEnhancer(input, output_nc, ngf=32, n_downsample_global=3, n_blocks_global=9,
    n_blocks_local=3, norm_type="instance",
    trainable=True, reuse=True, train_global_generator=True):
    """pix2pixHD local enhancer: a full-resolution branch fused with the
    global generator's coarse features.

    When ``train_global_generator`` is True, the input is downsampled 2x, fed
    through GlobalGenerator (up to its final conv), and the resulting feature
    map is added to the local branch after its downsampling stage.
    """
    output_prev = 0
    if train_global_generator:
        with flow.scope.placement("gpu", "0:1"):
            input_downsampled = flow.nn.max_pool2d(input, 3, 2, "SAME")
            ### output at coarsest level, get rid of final convolution layers
            output_prev = GlobalGenerator(input_downsampled, "G1", output_nc,
                ngf=ngf*2, n_downsampling=n_downsample_global, n_blocks=n_blocks_global,
                norm_type=norm_type, trainable=trainable, reuse=reuse, return_before_final_conv=True)
    with flow.scope.namespace("G2"):
        with flow.scope.placement("gpu", "0:1"):
            ### downsample
            out = flow.reflection_pad2d(input, padding=[3, 3, 3, 3])
            out = conv2d_layer("conv1", out, ngf, kernel_size=7, padding="VALID", trainable=trainable, reuse=reuse)
            out = norm_layer(out, "norm1", norm_type=norm_type, trainable=trainable, reuse=reuse)
            out = flow.nn.relu(out)
            out = conv2d_layer("conv_downsample", out, ngf*2,
                kernel_size=3, strides=2, padding="SAME", trainable=trainable, reuse=reuse)
            out = norm_layer(out, "norm_downsample", norm_type=norm_type, trainable=trainable, reuse=reuse)
            out = flow.nn.relu(out)
            if train_global_generator:
                # Fuse the coarse global features into the local branch.
                out = out + output_prev
        with flow.scope.placement("gpu", "0:2"): # for cityscapes
            ### residual blocks
            for i in range(n_blocks_local):
                # BUGFIX: was `norm_type=norm_layer` (passed the function
                # object instead of the norm-type string).
                out = ResnetBlock(out, "resblock_%d" % i, ngf * 2, norm_type=norm_type, trainable=trainable, reuse=reuse)
            ### upsample
            out = deconv(out, ngf, "deconv", kernel_size=3, strides=[2, 2], trainable=trainable, reuse=reuse)
            out = norm_layer(out, "norm_upsample", norm_type=norm_type, trainable=trainable, reuse=reuse)
            out = flow.nn.relu(out)
            ### final convolution
            out = flow.reflection_pad2d(out, padding=[3, 3, 3, 3])
            out = conv2d_layer("conv_last", out, output_nc, kernel_size=7, padding="VALID", trainable=trainable, reuse=reuse)
            out = flow.math.tanh(out)
    return out
def define_G(input, output_nc, ngf, netG, n_downsample_global=3, n_blocks_global=9,
    n_blocks_local=3, norm_type='instance', trainable=True, reuse=True, train_global_generator=True):
    """Generator factory. ``netG`` selects 'global' or 'local'.

    Raises:
        NotImplementedError: for 'encoder' or any unknown ``netG`` value.
    """
    if netG == 'global':
        netG = GlobalGenerator(input, "G1", output_nc,
            ngf=ngf, n_downsampling=n_downsample_global, n_blocks=n_blocks_global,
            norm_type=norm_type, trainable=trainable, reuse=reuse)
    elif netG == 'local':
        netG = LocalEnhancer(input, output_nc, ngf=ngf, n_downsample_global=n_downsample_global,
            n_blocks_global=n_blocks_global, n_blocks_local=n_blocks_local, norm_type=norm_type,
            trainable=trainable, reuse=reuse, train_global_generator=train_global_generator)
    elif netG == 'encoder':
        # BUGFIX: `raise('...')` raises a str, which is itself a TypeError in
        # Python 3; raise a proper exception instead.
        raise NotImplementedError('generator not implemented!')
    else:
        raise NotImplementedError('generator not implemented!')
    return netG
def MultiscaleRecLoss(fake, real, num_D=3):
    """Sum of L1 losses between fake/real image pyramids; each pyramid level
    is obtained by 3x3 stride-2 average pooling."""
    loss = 0
    real_lvl, fake_lvl = real, fake
    for level in range(num_D):
        loss = flow.nn.L1Loss(real_lvl, fake_lvl) + loss
        if level != (num_D - 1):
            real_lvl = flow.nn.avg_pool2d(real_lvl, 3, 2, "SAME")
            fake_lvl = flow.nn.avg_pool2d(fake_lvl, 3, 2, "SAME")
    return loss
def MultiscaleDiscriminator(input, ndf=64, n_layers=3, norm_type="instance",
    use_sigmoid=False, num_D=3, trainable=True, reuse=True):
    """Run ``num_D`` PatchGAN discriminators on an avg-pooled image pyramid.

    Returns a list of per-scale feature lists (one inner list per
    NLayerDiscriminator call).
    """
    with flow.scope.namespace("Multiscale_"):
        pyramid_level = input
        outputs = []
        for d_idx in range(num_D):
            feats = NLayerDiscriminator(pyramid_level, "D_%d" % d_idx, ndf=ndf, n_layers=n_layers,
                norm_type=norm_type, use_sigmoid=use_sigmoid,
                trainable=trainable, reuse=reuse)
            outputs.append(feats)
            if d_idx != (num_D - 1):
                pyramid_level = flow.nn.avg_pool2d(pyramid_level, 3, 2, "SAME")
    return outputs
def NLayerDiscriminator(input, name_prefix, ndf=64, n_layers=3, norm_type="instance",
    use_sigmoid=False, trainable=True, reuse=True):
    """PatchGAN discriminator. Returns the list of intermediate feature maps
    (used for feature-matching losses), ending with the 1-channel patch
    logits (plus a sigmoid output when ``use_sigmoid``)."""
    with flow.scope.namespace(name_prefix):
        feats = []
        kernel_size = 4
        padw = int(np.ceil((kernel_size-1.0)/2))
        pads = [[0, 0], [0, 0], [padw, padw], [padw, padw]]
        # First conv has no norm layer.
        h = conv2d_layer("conv_0", input, ndf, kernel_size=kernel_size, strides=2,
            padding=pads, trainable=trainable, reuse=reuse)
        h = flow.nn.leaky_relu(h, 0.2)
        feats.append(h)
        width = ndf
        for layer in range(1, n_layers):
            width = min(width * 2, 512)
            h = conv2d_layer("conv_downsample_%d" % layer, h, width, kernel_size=kernel_size,
                strides=2, padding=pads, trainable=trainable, reuse=reuse)
            h = norm_layer(h, "norm_downsample_%d" % layer, norm_type=norm_type, trainable=trainable, reuse=reuse)
            h = flow.nn.leaky_relu(h, 0.2)
            feats.append(h)
        width = min(width * 2, 512)
        h = conv2d_layer("conv_1", h, width, kernel_size=kernel_size, strides=1,
            padding=pads, trainable=trainable, reuse=reuse)
        h = norm_layer(h, "norm_1", norm_type=norm_type, trainable=trainable, reuse=reuse)
        h = flow.nn.leaky_relu(h, 0.2)
        feats.append(h)
        # 1-channel patch logits.
        h = conv2d_layer("last_conv", h, 1, kernel_size=kernel_size, strides=1,
            padding=pads, trainable=trainable, reuse=reuse)
        feats.append(h)
        if use_sigmoid:
            h = flow.math.sigmoid(h)
            feats.append(h)
        return feats
def GANLoss(input, target_is_real, use_lsgan=True, target_real_label=1.0, target_fake_label=0.0):
    """GAN loss summed over the outputs of a multiscale discriminator.

    Args:
        input: list of per-scale feature lists (as returned by
            MultiscaleDiscriminator); only the last entry of each inner
            list — the patch logits — is used.
        target_is_real: whether the target labels are real (1.0) or fake (0.0).
        use_lsgan: MSE loss (LSGAN) if True, else BCE loss.

    Returns:
        The summed loss over all scales.
    """
    assert isinstance(input[0], list)
    # The label choice is loop-invariant; hoist it.
    label = target_real_label if target_is_real else target_fake_label
    loss = 0
    # Idiom fix: iterate the scales directly instead of indexing by range(len(...)).
    for scale_feats in input:
        pred = scale_feats[-1]
        target = flow.constant_like(pred, label)
        if use_lsgan:
            loss = flow.nn.MSELoss(pred, target) + loss
        else:
            loss = flow.nn.BCELoss(pred, target) + loss
    return loss
| [
"oneflow.nn.dropout",
"oneflow.nn.avg_pool2d",
"oneflow.math.tanh",
"oneflow.nn.leaky_relu",
"oneflow.reflection_pad2d",
"oneflow.layers.upsample_2d",
"oneflow.nn.conv2d",
"oneflow.nn.BCELoss",
"oneflow.nn.MSELoss",
"oneflow.scope.namespace",
"oneflow.zeros_initializer",
"oneflow.nn.InstanceNo... | [((276, 329), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (306, 329), True, 'import oneflow as flow\n'), ((358, 382), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (380, 382), True, 'import oneflow as flow\n'), ((518, 662), 'oneflow.get_variable', 'flow.get_variable', (["(name + '_weight')"], {'shape': 'weight_shape', 'dtype': 'input.dtype', 'initializer': 'weight_initializer', 'trainable': 'trainable', 'reuse': 'reuse'}), "(name + '_weight', shape=weight_shape, dtype=input.dtype,\n initializer=weight_initializer, trainable=trainable, reuse=reuse)\n", (535, 662), True, 'import oneflow as flow\n'), ((736, 811), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'weight', 'strides', 'padding', 'data_format', 'dilation_rate'], {}), '(input, weight, strides, padding, data_format, dilation_rate)\n', (750, 811), True, 'import oneflow as flow\n'), ((1367, 1512), 'oneflow.layers.upsample_2d', 'flow.layers.upsample_2d', (['input'], {'size': 'hw_scale', 'data_format': 'data_format', 'interpolation': 'interpolation', 'name': "(name_prefix + '_%s' % interpolation)"}), "(input, size=hw_scale, data_format=data_format,\n interpolation=interpolation, name=name_prefix + '_%s' % interpolation)\n", (1390, 1512), True, 'import oneflow as flow\n'), ((2081, 2263), 'oneflow.nn.conv2d_transpose', 'flow.nn.conv2d_transpose', (['input', 'weight'], {'strides': 'strides', 'padding': '"""SAME"""', 'output_shape': '(input.shape[0], out_channel, input.shape[2] * strides[0], input.shape[3] *\n strides[1])'}), "(input, weight, strides=strides, padding='SAME',\n output_shape=(input.shape[0], out_channel, input.shape[2] * strides[0],\n input.shape[3] * strides[1]))\n", (2105, 2263), True, 'import oneflow as flow\n'), ((2438, 2492), 'oneflow.nn.InstanceNorm2d', 'flow.nn.InstanceNorm2d', (['input'], {'eps': '(1e-05)', 'affine': '(False)'}), '(input, 
eps=1e-05, affine=False)\n', (2460, 2492), True, 'import oneflow as flow\n'), ((2631, 2681), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['input'], {'padding': '[1, 1, 1, 1]'}), '(input, padding=[1, 1, 1, 1])\n', (2652, 2681), True, 'import oneflow as flow\n'), ((2920, 2937), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (2932, 2937), True, 'import oneflow as flow\n'), ((3014, 3062), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '[1, 1, 1, 1]'}), '(out, padding=[1, 1, 1, 1])\n', (3035, 3062), True, 'import oneflow as flow\n'), ((844, 973), 'oneflow.get_variable', 'flow.get_variable', (["(name + '_bias')"], {'shape': '(out_channel,)', 'dtype': 'input.dtype', 'initializer': 'bias_initializer', 'trainable': 'trainable'}), "(name + '_bias', shape=(out_channel,), dtype=input.dtype,\n initializer=bias_initializer, trainable=trainable)\n", (861, 973), True, 'import oneflow as flow\n'), ((1065, 1108), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output', 'bias', 'data_format'], {}), '(output, bias, data_format)\n', (1081, 1108), True, 'import oneflow as flow\n'), ((2972, 3002), 'oneflow.nn.dropout', 'flow.nn.dropout', (['out'], {'rate': '(0.5)'}), '(out, rate=0.5)\n', (2987, 3002), True, 'import oneflow as flow\n'), ((3522, 3559), 'oneflow.scope.namespace', 'flow.scope.namespace', (['var_name_prefix'], {}), '(var_name_prefix)\n', (3542, 3559), True, 'import oneflow as flow\n'), ((3575, 3625), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['input'], {'padding': '[3, 3, 3, 3]'}), '(input, padding=[3, 3, 3, 3])\n', (3596, 3625), True, 'import oneflow as flow\n'), ((3846, 3863), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (3858, 3863), True, 'import oneflow as flow\n'), ((4998, 5046), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '[3, 3, 3, 3]'}), '(out, padding=[3, 3, 3, 3])\n', (5019, 5046), True, 'import oneflow as flow\n'), ((5183, 5202), 'oneflow.math.tanh', 
'flow.math.tanh', (['out'], {}), '(out)\n', (5197, 5202), True, 'import oneflow as flow\n'), ((6065, 6091), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""G2"""'], {}), "('G2')\n", (6085, 6091), True, 'import oneflow as flow\n'), ((9382, 9417), 'oneflow.scope.namespace', 'flow.scope.namespace', (['"""Multiscale_"""'], {}), "('Multiscale_')\n", (9402, 9417), True, 'import oneflow as flow\n'), ((10164, 10197), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name_prefix'], {}), '(name_prefix)\n', (10184, 10197), True, 'import oneflow as flow\n'), ((10531, 10559), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['out', '(0.2)'], {}), '(out, 0.2)\n', (10549, 10559), True, 'import oneflow as flow\n'), ((11347, 11375), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['out', '(0.2)'], {}), '(out, 0.2)\n', (11365, 11375), True, 'import oneflow as flow\n'), ((1952, 2005), 'oneflow.random_normal_initializer', 'flow.random_normal_initializer', ([], {'mean': '(0.0)', 'stddev': '(0.02)'}), '(mean=0.0, stddev=0.02)\n', (1982, 2005), True, 'import oneflow as flow\n'), ((4276, 4293), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (4288, 4293), True, 'import oneflow as flow\n'), ((4896, 4913), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (4908, 4913), True, 'import oneflow as flow\n'), ((5521, 5555), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:1"""'], {}), "('gpu', '0:1')\n", (5541, 5555), True, 'import oneflow as flow\n'), ((5593, 5632), 'oneflow.nn.max_pool2d', 'flow.nn.max_pool2d', (['input', '(3)', '(2)', '"""SAME"""'], {}), "(input, 3, 2, 'SAME')\n", (5611, 5632), True, 'import oneflow as flow\n'), ((6110, 6144), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:1"""'], {}), "('gpu', '0:1')\n", (6130, 6144), True, 'import oneflow as flow\n'), ((6211, 6261), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['input'], {'padding': '[3, 3, 3, 3]'}), '(input, padding=[3, 3, 3, 
3])\n', (6232, 6261), True, 'import oneflow as flow\n'), ((6506, 6523), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (6518, 6523), True, 'import oneflow as flow\n'), ((6834, 6851), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (6846, 6851), True, 'import oneflow as flow\n'), ((6958, 6992), 'oneflow.scope.placement', 'flow.scope.placement', (['"""gpu"""', '"""0:2"""'], {}), "('gpu', '0:2')\n", (6978, 6992), True, 'import oneflow as flow\n'), ((7498, 7515), 'oneflow.nn.relu', 'flow.nn.relu', (['out'], {}), '(out)\n', (7510, 7515), True, 'import oneflow as flow\n'), ((7577, 7625), 'oneflow.reflection_pad2d', 'flow.reflection_pad2d', (['out'], {'padding': '[3, 3, 3, 3]'}), '(out, padding=[3, 3, 3, 3])\n', (7598, 7625), True, 'import oneflow as flow\n'), ((7778, 7797), 'oneflow.math.tanh', 'flow.math.tanh', (['out'], {}), '(out)\n', (7792, 7797), True, 'import oneflow as flow\n'), ((8945, 8995), 'oneflow.nn.L1Loss', 'flow.nn.L1Loss', (['real_downsampled', 'fake_downsampled'], {}), '(real_downsampled, fake_downsampled)\n', (8959, 8995), True, 'import oneflow as flow\n'), ((9061, 9111), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['real_downsampled', '(3)', '(2)', '"""SAME"""'], {}), "(real_downsampled, 3, 2, 'SAME')\n", (9079, 9111), True, 'import oneflow as flow\n'), ((9143, 9193), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['fake_downsampled', '(3)', '(2)', '"""SAME"""'], {}), "(fake_downsampled, 3, 2, 'SAME')\n", (9161, 9193), True, 'import oneflow as flow\n'), ((10260, 10292), 'numpy.ceil', 'np.ceil', (['((kernel_size - 1.0) / 2)'], {}), '((kernel_size - 1.0) / 2)\n', (10267, 10292), True, 'import numpy as np\n'), ((10990, 11018), 'oneflow.nn.leaky_relu', 'flow.nn.leaky_relu', (['out', '(0.2)'], {}), '(out, 0.2)\n', (11008, 11018), True, 'import oneflow as flow\n'), ((11630, 11652), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['out'], {}), '(out)\n', (11647, 11652), True, 'import oneflow as flow\n'), ((11930, 11981), 
'oneflow.constant_like', 'flow.constant_like', (['input[i][-1]', 'target_real_label'], {}), '(input[i][-1], target_real_label)\n', (11948, 11981), True, 'import oneflow as flow\n'), ((12017, 12068), 'oneflow.constant_like', 'flow.constant_like', (['input[i][-1]', 'target_fake_label'], {}), '(input[i][-1], target_fake_label)\n', (12035, 12068), True, 'import oneflow as flow\n'), ((9851, 9902), 'oneflow.nn.avg_pool2d', 'flow.nn.avg_pool2d', (['input_downsampled', '(3)', '(2)', '"""SAME"""'], {}), "(input_downsampled, 3, 2, 'SAME')\n", (9869, 9902), True, 'import oneflow as flow\n'), ((12110, 12147), 'oneflow.nn.MSELoss', 'flow.nn.MSELoss', (['input[i][-1]', 'target'], {}), '(input[i][-1], target)\n', (12125, 12147), True, 'import oneflow as flow\n'), ((12188, 12225), 'oneflow.nn.BCELoss', 'flow.nn.BCELoss', (['input[i][-1]', 'target'], {}), '(input[i][-1], target)\n', (12203, 12225), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
r""""Contains definitions of the methods used by the _BaseDataLoaderIter workers to
collate samples fetched from dataset into Tensor(s).
These **needs** to be in global scope since Py2 doesn't support serializing
static methods.
"""
import re
import collections
import oneflow as flow
# Types treated as "string-like" (Python 3 replacement for basestring).
string_classes = (str, bytes)
# numpy dtype kinds that cannot be converted to tensors: bytes (S),
# legacy string (a), unicode (U) and object (O) arrays.
np_str_obj_array_pattern = re.compile(r"[SaUO]")
def default_convert(data):
    r"""Converts each NumPy array data field into a tensor, recursing through
    mappings, namedtuples and sequences."""
    elem_type = type(data)
    if isinstance(data, (flow.Tensor, flow._oneflow_internal.Tensor)):
        return data
    elif (
        elem_type.__module__ == "numpy"
        and elem_type.__name__ != "str_"
        and elem_type.__name__ != "string_"
    ):
        # ndarrays of strings/objects cannot become tensors; pass them through.
        if (
            elem_type.__name__ == "ndarray"
            and np_str_obj_array_pattern.search(data.dtype.str) is not None
        ):
            return data
        return flow.tensor(data)
    elif isinstance(data, collections.abc.Mapping):
        return {key: default_convert(value) for key, value in data.items()}
    elif isinstance(data, tuple) and hasattr(data, "_fields"):  # namedtuple
        return elem_type(*(default_convert(field) for field in data))
    elif isinstance(data, collections.abc.Sequence) and not isinstance(
        data, string_classes
    ):
        return [default_convert(item) for item in data]
    else:
        # NOTE: pytorch just return data here, and not raise any exception!
        raise TypeError(default_convert_err_msg_format.format(elem_type))
# Error raised when default_collate meets a batch element of an unsupported type.
default_collate_err_msg_format = (
    "default_collate: batch must contain tensors, numpy arrays, numbers, "
    "dicts or lists; found {}"
)
# Error raised when default_convert meets data of an unsupported type.
default_convert_err_msg_format = (
    "default_convert: batch must contain tensors, numpy arrays, numbers, "
    "dicts or lists; found {}"
)
def default_collate(batch):
    r"""Puts each data field into a tensor with outer dimension batch size.

    Dispatches on the type of the first element (all elements are assumed to
    share it); containers are collated recursively.  Branch order matters:
    str/bytes must be handled before the generic Sequence branch, since they
    are Sequences too.
    """
    elem = batch[0]
    elem_type = type(elem)
    if isinstance(elem, (flow.Tensor, flow._oneflow_internal.Tensor)):
        # TODO: tensor.storage()._new_shared(numel)
        return flow._C.stack(batch, dim=0)
    elif (
        elem_type.__module__ == "numpy"
        and elem_type.__name__ != "str_"
        and elem_type.__name__ != "string_"
    ):
        if elem_type.__name__ == "ndarray" or elem_type.__name__ == "memmap":
            # array of string classes and object
            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
            # Convert each ndarray to a tensor, then collate the tensors.
            return default_collate([flow.tensor(b) for b in batch])
        elif elem.shape == ():  # scalars
            return flow.tensor(batch)
        # Other numpy types fall through to the final TypeError below.
    elif isinstance(elem, float):
        # Python floats are collated as a float64 tensor.
        return flow.tensor(batch, dtype=flow.float64)
    elif isinstance(elem, int):
        return flow.tensor(batch)
    elif isinstance(elem, string_classes):
        # Strings/bytes are returned untouched.
        return batch
    elif isinstance(elem, collections.abc.Mapping):
        return {key: default_collate([d[key] for d in batch]) for key in elem}
    elif isinstance(elem, tuple) and hasattr(elem, "_fields"):  # namedtuple
        return elem_type(*(default_collate(samples) for samples in zip(*batch)))
    elif isinstance(elem, collections.abc.Sequence):
        # check to make sure that the elements in batch have consistent size
        # (NOTE: the genexp's ``elem`` shadows the outer ``elem`` but does not
        # leak in Python 3).
        it = iter(batch)
        elem_size = len(next(it))
        if not all(len(elem) == elem_size for elem in it):
            raise RuntimeError("each element in list of batch should be of equal size")
        transposed = zip(*batch)
        return [default_collate(samples) for samples in transposed]
    raise TypeError(default_collate_err_msg_format.format(elem_type))
| [
"oneflow._C.stack",
"oneflow.tensor"
] | [((937, 957), 're.compile', 're.compile', (['"""[SaUO]"""'], {}), "('[SaUO]')\n", (947, 957), False, 'import re\n'), ((2714, 2741), 'oneflow._C.stack', 'flow._C.stack', (['batch'], {'dim': '(0)'}), '(batch, dim=0)\n', (2727, 2741), True, 'import oneflow as flow\n'), ((1539, 1556), 'oneflow.tensor', 'flow.tensor', (['data'], {}), '(data)\n', (1550, 1556), True, 'import oneflow as flow\n'), ((3369, 3407), 'oneflow.tensor', 'flow.tensor', (['batch'], {'dtype': 'flow.float64'}), '(batch, dtype=flow.float64)\n', (3380, 3407), True, 'import oneflow as flow\n'), ((3301, 3319), 'oneflow.tensor', 'flow.tensor', (['batch'], {}), '(batch)\n', (3312, 3319), True, 'import oneflow as flow\n'), ((3455, 3473), 'oneflow.tensor', 'flow.tensor', (['batch'], {}), '(batch)\n', (3466, 3473), True, 'import oneflow as flow\n'), ((3208, 3222), 'oneflow.tensor', 'flow.tensor', (['b'], {}), '(b)\n', (3219, 3222), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import unittest
import numpy as np
import oneflow as flow
import oneflow.typing as oft
# Shared job configuration for every test job below; default dtype float32.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
def multi_square_sum(
    x, name=None,
):
    """Wrap the "multi_square_sum" user op: reduce a list of blobs to the
    sum of all their squared elements.

    Args:
        x: list of input blobs bound to the op's "x" slot.
        name: optional op name; defaults to "MultiSquareSum".

    Returns:
        The op's single output blob "y".
    """
    op_name = "MultiSquareSum" if name is None else name
    builder = flow.user_op_builder(op_name).Op("multi_square_sum")
    builder = builder.Input("x", x).Output("y")
    op = builder.Build()
    return op.InferAndTryRun().RemoteBlobList()[0]
def _check(test_case, xs, y):
ref_y = np.sum(np.array([np.sum(x ** 2) for x in xs]))
test_case.assertTrue(np.allclose(y, ref_y))
def _run_test(test_case, x, n, dtype, device):
    """Build a lazy-mode job feeding ``n`` shifted copies of ``x`` to the
    multi_square_sum op and compare its output against the numpy reference."""
    # A fresh session is required before (re)defining a global function.
    flow.clear_default_session()
    @flow.global_function(function_config=func_config)
    def multi_square_sum_job(x: oft.Numpy.Placeholder(x.shape, dtype=dtype)):
        with flow.scope.placement(device, "0:0"):
            # n inputs: x, x + 0.1, x + 0.2, ... reduced by the op.
            xs = [x + 0.1 * i for i in range(n)]
            return multi_square_sum(xs)
    y = multi_square_sum_job(x).get()
    # Recreate the same shifted inputs for the numpy-side check.
    _check(test_case, [(x + 0.1 * i).astype(np.float32) for i in range(n)], y.numpy())
@flow.unittest.skip_unless_1n1d()
class TestMultiSquareSum(flow.unittest.TestCase):
    """Exercises multi_square_sum for several input-list lengths."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_multi_square_sum_random_gpu(test_case):
        x = np.random.rand(3, 4, 5).astype(np.float32)
        # NOTE(review): n=5 ran twice in the original (likely copy-paste);
        # preserved so coverage stays identical.
        for n in (5, 5, 88, 64):
            _run_test(test_case, x, n, flow.float32, "gpu")

    def test_multi_square_sum_random_cpu(test_case):
        x = np.random.rand(3, 4, 5).astype(np.float32)
        _run_test(test_case, x, 5, flow.float32, "cpu")
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.global_function",
"oneflow.typing.Numpy.Placeholder",
"oneflow.FunctionConfig",
"oneflow.scope.placement",
"oneflow.clear_default_session",
"oneflow.user_op_builder",
"oneflow.unittest.skip_unless_1n1d"
] | [((743, 764), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (762, 764), True, 'import oneflow as flow\n'), ((1720, 1752), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1750, 1752), True, 'import oneflow as flow\n'), ((1289, 1317), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1315, 1317), True, 'import oneflow as flow\n'), ((1324, 1373), 'oneflow.global_function', 'flow.global_function', ([], {'function_config': 'func_config'}), '(function_config=func_config)\n', (1344, 1373), True, 'import oneflow as flow\n'), ((2415, 2430), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2428, 2430), False, 'import unittest\n'), ((1213, 1234), 'numpy.allclose', 'np.allclose', (['y', 'ref_y'], {}), '(y, ref_y)\n', (1224, 1234), True, 'import numpy as np\n'), ((1824, 1858), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (1833, 1858), False, 'import os\n'), ((1406, 1449), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x.shape'], {'dtype': 'dtype'}), '(x.shape, dtype=dtype)\n', (1427, 1449), True, 'import oneflow.typing as oft\n'), ((1465, 1500), 'oneflow.scope.placement', 'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (1485, 1500), True, 'import oneflow as flow\n'), ((1158, 1172), 'numpy.sum', 'np.sum', (['(x ** 2)'], {}), '(x ** 2)\n', (1164, 1172), True, 'import numpy as np\n'), ((1948, 1971), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (1962, 1971), True, 'import numpy as np\n'), ((2283, 2306), 'numpy.random.rand', 'np.random.rand', (['(3)', '(4)', '(5)'], {}), '(3, 4, 5)\n', (2297, 2306), True, 'import numpy as np\n'), ((874, 942), 'oneflow.user_op_builder', 'flow.user_op_builder', (["(name if name is not None else 'MultiSquareSum')"], {}), "(name if name is not None else 'MultiSquareSum')\n", (894, 942), True, 'import oneflow as flow\n')] 
|
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
from oneflow.test_utils.automated_test_util import *
from oneflow.test_utils.test_util import GenArgList
import oneflow as flow
import numpy as np
def __check(test_case, input, dim, keepdim, device):
    """Compare flow.amax against the equivalent np.amax reduction."""
    actual = flow.amax(input, dim=dim, keepdim=keepdim)
    if type(dim) is tuple and len(dim) == 0:
        # numpy treats axis=() as "no reduction", so an empty dim tuple is
        # mapped to None (full reduction) for the reference — presumably
        # matching how flow.amax handles (); confirm against the op.
        dim = None
    np_out = np.amax(input.numpy(), axis=dim, keepdims=keepdim)
    test_case.assertTrue(np.allclose(actual.numpy(), np_out, rtol=0.0001, atol=1e-05))
def _test_amax_with_negative_dim(test_case, device):
    """amax along one randomly chosen negative axis."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    axis = random(-4, 0).to(int).value()
    keep = random_bool().value()
    __check(test_case, x, axis, keep, device)
def _test_amax_with_positive_dim(test_case, device):
    """amax along one randomly chosen non-negative axis."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    axis = random(0, 4).to(int).value()
    keep = random_bool().value()
    __check(test_case, x, axis, keep, device)
def _test_amax_with_multiple_axes(test_case, device):
    """amax over a randomly sized set of distinct axes."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    num_axes = random(1, 4).to(int).value()
    # Duplicates collapse in the set, so 1..3 distinct axes are used.
    axes = {random(0, 4).to(int).value() for _ in range(num_axes)}
    keep = random_bool().value()
    __check(test_case, x, tuple(axes), keep, device)
def _test_amax_with_empty_dim(test_case, device):
    """amax with dim=None, i.e. a full reduction over every axis."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    keep = random_bool().value()
    __check(test_case, x, None, keep, device)
def _test_amax_keepdim(test_case, device):
    """amax along a random axis with keepdim forced on."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    axis = random(-4, 4).to(int).value()
    __check(test_case, x, axis, True, device)
def _test_amax_not_keepdim(test_case, device):
    """amax along a random axis with keepdim forced off."""
    x = flow.tensor(
        np.random.randn(3, 5, 6, 8), dtype=flow.float32, device=flow.device(device)
    )
    axis = random(-4, 4).to(int).value()
    __check(test_case, x, axis, False, device)
@flow.unittest.skip_unless_1n1d()
class TestAmax(flow.unittest.TestCase):
    """flow.amax coverage: numpy-reference cases plus autotest-vs-torch."""

    def test_amax(test_case):
        # Run every hand-written numpy-reference scenario on both devices.
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_amax_with_negative_dim,
            _test_amax_with_positive_dim,
            _test_amax_with_multiple_axes,
            _test_amax_with_empty_dim,
            _test_amax_keepdim,
            _test_amax_not_keepdim,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        for arg in GenArgList(arg_dict):
            arg[0](test_case, *arg[1:])

    @autotest()
    def test_amax_with_random_data_single_dim(test_case):
        # autotest compares oneflow against torch for a single random dim.
        device = random_device()
        ndim = random(1, 6).to(int)
        x = random_tensor(ndim=ndim).to(device)
        y = torch.amax(x, dim=random(0, ndim), keepdim=random().to(bool))
        return y

    @autotest()
    def test_amax_with_random_data_empty_dim(test_case):
        # dim=None: full reduction over every axis.
        device = random_device()
        ndim = random(1, 6).to(int)
        x = random_tensor(ndim=ndim).to(device)
        y = torch.amax(x, dim=None, keepdim=random().to(bool))
        return y

    @autotest()
    def test_amax_with_random_data_multi_dims(test_case):
        device = random_device()
        ndim = random(2, 6).to(int)
        x = random_tensor(ndim=ndim).to(device)
        # Collect a random set of distinct axes to reduce over.
        dim = set()
        for _ in range(random(1, ndim).to(int).value()):
            dim.add(random(0, ndim).to(int).value())
        y = torch.amax(x, dim=tuple(dim), keepdim=random().to(bool))
        return y
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.amax",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.test_utils.test_util.GenArgList",
"oneflow.device"
] | [((2936, 2968), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (2966, 2968), True, 'import oneflow as flow\n'), ((859, 901), 'oneflow.amax', 'flow.amax', (['input'], {'dim': 'dim', 'keepdim': 'keepdim'}), '(input, dim=dim, keepdim=keepdim)\n', (868, 901), True, 'import oneflow as flow\n'), ((4470, 4485), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4483, 4485), False, 'import unittest\n'), ((1218, 1245), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (1233, 1245), True, 'import numpy as np\n'), ((1516, 1543), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (1531, 1543), True, 'import numpy as np\n'), ((1814, 1841), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (1829, 1841), True, 'import numpy as np\n'), ((2215, 2242), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (2230, 2242), True, 'import numpy as np\n'), ((2464, 2491), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (2479, 2491), True, 'import numpy as np\n'), ((2739, 2766), 'numpy.random.randn', 'np.random.randn', (['(3)', '(5)', '(6)', '(8)'], {}), '(3, 5, 6, 8)\n', (2754, 2766), True, 'import numpy as np\n'), ((3058, 3071), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3069, 3071), False, 'from collections import OrderedDict\n'), ((3413, 3433), 'oneflow.test_utils.test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3423, 3433), False, 'from oneflow.test_utils.test_util import GenArgList\n'), ((1274, 1293), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1285, 1293), True, 'import oneflow as flow\n'), ((1572, 1591), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (1583, 1591), True, 'import oneflow as flow\n'), ((1870, 1889), 'oneflow.device', 
'flow.device', (['device'], {}), '(device)\n', (1881, 1889), True, 'import oneflow as flow\n'), ((2271, 2290), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2282, 2290), True, 'import oneflow as flow\n'), ((2520, 2539), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2531, 2539), True, 'import oneflow as flow\n'), ((2795, 2814), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2806, 2814), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
from test_util import GenArgList
import oneflow as flow
import oneflow.unittest
def _test_embedding_impl(test_case, device):
    """Check nn.Embedding forward lookup and weight gradient on ``device``.

    The 10x3 table is seeded with a fixed weight matrix; the expected forward
    output and weight gradient below are hard-coded to match it.
    """
    weight = np.array(
        [
            [0.68258786, 0.6957856, 1.1829041],
            [1.0154, -1.0616943, 0.50303376],
            [0.29679507, 0.65562993, 1.0424724],
            [-0.42980736, -0.35347632, -0.15600166],
            [0.6763601, -0.24286619, -2.0873115],
            [-0.13371214, -0.5589277, 1.9173933],
            [0.08762296, 1.0264007, -0.67938024],
            [0.32019204, -0.26137325, -1.3534237],
            [-1.1555519, -0.67776406, 0.27372134],
            [1.0615997, -0.59715784, 1.9855849],
        ],
        dtype=np.float32,
    )
    # Rows of ``weight`` gathered for indices [[1, 2, 4, 5], [4, 3, 2, 9]].
    output = np.array(
        [
            [
                [1.0154, -1.0616943, 0.50303376],
                [0.29679507, 0.65562993, 1.0424724],
                [0.6763601, -0.24286619, -2.0873115],
                [-0.13371214, -0.5589277, 1.9173933],
            ],
            [
                [0.6763601, -0.24286619, -2.0873115],
                [-0.42980736, -0.35347632, -0.15600166],
                [0.29679507, 0.65562993, 1.0424724],
                [1.0615997, -0.59715784, 1.9855849],
            ],
        ],
        dtype=np.float32,
    )
    indices = flow.tensor(
        [[1, 2, 4, 5], [4, 3, 2, 9]],
        dtype=flow.int,
        device=flow.device(device),
        requires_grad=False,
    )
    m = flow.nn.Embedding(10, 3, _weight=flow.Tensor(weight))
    m = m.to(device)
    y = m(indices)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-05, 1e-05))
    y = y.sum()
    y.backward()
    # d(sum)/d(weight): each row's gradient is its gather count -- rows 2 and
    # 4 appear twice, rows 1, 3, 5, 9 once, the rest never.
    weight_grad_np = [
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
    ]
    test_case.assertTrue(
        np.allclose(m.weight.grad.numpy(), weight_grad_np, 1e-05, 1e-05)
    )
def _test_embedding_functional_impl(test_case, device):
    """Same check as _test_embedding_impl, via flow.nn.functional.embedding.

    Here the gradient flows into the explicit ``weight`` tensor instead of a
    module parameter.
    """
    weight_ = np.array(
        [
            [0.68258786, 0.6957856, 1.1829041],
            [1.0154, -1.0616943, 0.50303376],
            [0.29679507, 0.65562993, 1.0424724],
            [-0.42980736, -0.35347632, -0.15600166],
            [0.6763601, -0.24286619, -2.0873115],
            [-0.13371214, -0.5589277, 1.9173933],
            [0.08762296, 1.0264007, -0.67938024],
            [0.32019204, -0.26137325, -1.3534237],
            [-1.1555519, -0.67776406, 0.27372134],
            [1.0615997, -0.59715784, 1.9855849],
        ],
        dtype=np.float32,
    )
    weight = flow.Tensor(weight_)
    weight = weight.to(device)
    weight.requires_grad = True
    # Rows of ``weight`` gathered for indices [[1, 2, 4, 5], [4, 3, 2, 9]].
    output = np.array(
        [
            [
                [1.0154, -1.0616943, 0.50303376],
                [0.29679507, 0.65562993, 1.0424724],
                [0.6763601, -0.24286619, -2.0873115],
                [-0.13371214, -0.5589277, 1.9173933],
            ],
            [
                [0.6763601, -0.24286619, -2.0873115],
                [-0.42980736, -0.35347632, -0.15600166],
                [0.29679507, 0.65562993, 1.0424724],
                [1.0615997, -0.59715784, 1.9855849],
            ],
        ],
        dtype=np.float32,
    )
    indices = flow.tensor(
        [[1, 2, 4, 5], [4, 3, 2, 9]],
        dtype=flow.int,
        device=flow.device(device),
        requires_grad=False,
    )
    y = flow.nn.functional.embedding(indices, weight)
    test_case.assertTrue(np.allclose(y.numpy(), output, 1e-05, 1e-05))
    y = y.sum()
    y.backward()
    # Gradient equals each row's gather count (see indices above).
    weight_grad_np = [
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [1.0, 1.0, 1.0],
        [2.0, 2.0, 2.0],
        [1.0, 1.0, 1.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [0.0, 0.0, 0.0],
        [1.0, 1.0, 1.0],
    ]
    test_case.assertTrue(np.allclose(weight.grad.numpy(), weight_grad_np, 1e-05, 1e-05))
@flow.unittest.skip_unless_1n1d()
class TestEmbedding(flow.unittest.TestCase):
    """Runs the module and functional embedding checks on every device."""

    def test_embedding(test_case):
        devices = OrderedDict(device=["cpu", "cuda"])
        for args in GenArgList(devices):
            _test_embedding_impl(test_case, *args)
            _test_embedding_functional_impl(test_case, *args)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.Tensor",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.nn.functional.embedding",
"oneflow.device"
] | [((4656, 4688), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4686, 4688), True, 'import oneflow as flow\n'), ((804, 1234), 'numpy.array', 'np.array', (['[[0.68258786, 0.6957856, 1.1829041], [1.0154, -1.0616943, 0.50303376], [\n 0.29679507, 0.65562993, 1.0424724], [-0.42980736, -0.35347632, -\n 0.15600166], [0.6763601, -0.24286619, -2.0873115], [-0.13371214, -\n 0.5589277, 1.9173933], [0.08762296, 1.0264007, -0.67938024], [\n 0.32019204, -0.26137325, -1.3534237], [-1.1555519, -0.67776406, \n 0.27372134], [1.0615997, -0.59715784, 1.9855849]]'], {'dtype': 'np.float32'}), '([[0.68258786, 0.6957856, 1.1829041], [1.0154, -1.0616943, \n 0.50303376], [0.29679507, 0.65562993, 1.0424724], [-0.42980736, -\n 0.35347632, -0.15600166], [0.6763601, -0.24286619, -2.0873115], [-\n 0.13371214, -0.5589277, 1.9173933], [0.08762296, 1.0264007, -0.67938024\n ], [0.32019204, -0.26137325, -1.3534237], [-1.1555519, -0.67776406, \n 0.27372134], [1.0615997, -0.59715784, 1.9855849]], dtype=np.float32)\n', (812, 1234), True, 'import numpy as np\n'), ((1377, 1729), 'numpy.array', 'np.array', (['[[[1.0154, -1.0616943, 0.50303376], [0.29679507, 0.65562993, 1.0424724], [\n 0.6763601, -0.24286619, -2.0873115], [-0.13371214, -0.5589277, \n 1.9173933]], [[0.6763601, -0.24286619, -2.0873115], [-0.42980736, -\n 0.35347632, -0.15600166], [0.29679507, 0.65562993, 1.0424724], [\n 1.0615997, -0.59715784, 1.9855849]]]'], {'dtype': 'np.float32'}), '([[[1.0154, -1.0616943, 0.50303376], [0.29679507, 0.65562993, \n 1.0424724], [0.6763601, -0.24286619, -2.0873115], [-0.13371214, -\n 0.5589277, 1.9173933]], [[0.6763601, -0.24286619, -2.0873115], [-\n 0.42980736, -0.35347632, -0.15600166], [0.29679507, 0.65562993, \n 1.0424724], [1.0615997, -0.59715784, 1.9855849]]], dtype=np.float32)\n', (1385, 1729), True, 'import numpy as np\n'), ((2748, 3178), 'numpy.array', 'np.array', (['[[0.68258786, 0.6957856, 1.1829041], [1.0154, -1.0616943, 0.50303376], [\n 0.29679507, 
0.65562993, 1.0424724], [-0.42980736, -0.35347632, -\n 0.15600166], [0.6763601, -0.24286619, -2.0873115], [-0.13371214, -\n 0.5589277, 1.9173933], [0.08762296, 1.0264007, -0.67938024], [\n 0.32019204, -0.26137325, -1.3534237], [-1.1555519, -0.67776406, \n 0.27372134], [1.0615997, -0.59715784, 1.9855849]]'], {'dtype': 'np.float32'}), '([[0.68258786, 0.6957856, 1.1829041], [1.0154, -1.0616943, \n 0.50303376], [0.29679507, 0.65562993, 1.0424724], [-0.42980736, -\n 0.35347632, -0.15600166], [0.6763601, -0.24286619, -2.0873115], [-\n 0.13371214, -0.5589277, 1.9173933], [0.08762296, 1.0264007, -0.67938024\n ], [0.32019204, -0.26137325, -1.3534237], [-1.1555519, -0.67776406, \n 0.27372134], [1.0615997, -0.59715784, 1.9855849]], dtype=np.float32)\n', (2756, 3178), True, 'import numpy as np\n'), ((3321, 3341), 'oneflow.Tensor', 'flow.Tensor', (['weight_'], {}), '(weight_)\n', (3332, 3341), True, 'import oneflow as flow\n'), ((3418, 3770), 'numpy.array', 'np.array', (['[[[1.0154, -1.0616943, 0.50303376], [0.29679507, 0.65562993, 1.0424724], [\n 0.6763601, -0.24286619, -2.0873115], [-0.13371214, -0.5589277, \n 1.9173933]], [[0.6763601, -0.24286619, -2.0873115], [-0.42980736, -\n 0.35347632, -0.15600166], [0.29679507, 0.65562993, 1.0424724], [\n 1.0615997, -0.59715784, 1.9855849]]]'], {'dtype': 'np.float32'}), '([[[1.0154, -1.0616943, 0.50303376], [0.29679507, 0.65562993, \n 1.0424724], [0.6763601, -0.24286619, -2.0873115], [-0.13371214, -\n 0.5589277, 1.9173933]], [[0.6763601, -0.24286619, -2.0873115], [-\n 0.42980736, -0.35347632, -0.15600166], [0.29679507, 0.65562993, \n 1.0424724], [1.0615997, -0.59715784, 1.9855849]]], dtype=np.float32)\n', (3426, 3770), True, 'import numpy as np\n'), ((4135, 4180), 'oneflow.nn.functional.embedding', 'flow.nn.functional.embedding', (['indices', 'weight'], {}), '(indices, weight)\n', (4163, 4180), True, 'import oneflow as flow\n'), ((5032, 5047), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5045, 5047), False, 'import unittest\n'), 
((4788, 4801), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4799, 4801), False, 'from collections import OrderedDict\n'), ((4866, 4886), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4876, 4886), False, 'from test_util import GenArgList\n'), ((2030, 2049), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (2041, 2049), True, 'import oneflow as flow\n'), ((2127, 2146), 'oneflow.Tensor', 'flow.Tensor', (['weight'], {}), '(weight)\n', (2138, 2146), True, 'import oneflow as flow\n'), ((4071, 4090), 'oneflow.device', 'flow.device', (['device'], {}), '(device)\n', (4082, 4090), True, 'import oneflow as flow\n')] |
import oneflow as flow
import oneflow.nn as nn
import oneflow.nn.functional as F
from flowvision.layers.blocks import ConvBnAct
from flowvision.layers.helpers import make_divisible
class LinearBnAct(nn.Sequential):
    """``Linear`` followed by optional ``BatchNorm1d`` and activation.

    Submodules keep the names "fc", "bn" and "act" so state dicts stay
    compatible with the original layout.
    """

    def __init__(
        self,
        in_features: int,
        out_features: int,
        act_layer: nn.Module = nn.ReLU,
        fc: nn.Module = nn.Linear,
        normalization: nn.Module = nn.BatchNorm1d,
        bias: bool = False,
    ):
        super().__init__()
        stages = [("fc", fc(in_features, out_features, bias=bias))]
        if normalization:
            stages.append(("bn", normalization(out_features)))
        if act_layer:
            stages.append(("act", act_layer()))
        for stage_name, module in stages:
            self.add_module(stage_name, module)
class BamChannelAttn(nn.Module):
    """BAM channel-attention branch: global average pool -> MLP -> gate."""

    def __init__(
        self,
        channels,
        rd_ratio=1.0 / 16,
        rd_channels=None,
        rd_divisor=1,
        act_layer=nn.ReLU,
        gate_layer=nn.Sigmoid,
        num_layers=2,
        mlp_bias=False,
    ):
        super(BamChannelAttn, self).__init__()
        if not rd_channels:
            # Derive the bottleneck width from the reduction ratio.
            rd_channels = make_divisible(
                channels * rd_ratio, rd_divisor, round_limit=0.0
            )
        self.fc_layers = nn.Sequential()
        for idx in range(num_layers):
            # Only the first layer maps from the full channel count.
            in_feats = channels if idx == 0 else rd_channels
            self.fc_layers.add_module(
                "fc_bn_act_%d" % idx,
                LinearBnAct(in_feats, rd_channels, act_layer, bias=mlp_bias),
            )
        self.fc_layers.add_module(
            "fc_out", nn.Linear(rd_channels, channels, bias=mlp_bias)
        )
        self.gate = gate_layer()

    def forward(self, x):
        b, c, _, _ = x.shape
        pooled = x.mean((2, 3))
        x_attn = self.gate(self.fc_layers(pooled)).view(b, c, 1, 1)
        return x * x_attn.expand_as(x)
class BamSpatialAttn(nn.Module):
    """BAM spatial-attention branch: dilated 3x3 conv stack -> 1x1 conv -> gate.

    NOTE(review): the ``dilation`` parameter is accepted but unused -- the
    conv stack hard-codes padding=4 / dilation=4.  Kept as-is to preserve
    behavior.
    """

    def __init__(
        self,
        channels,
        rd_ratio=1.0 / 16,
        rd_channels=None,
        rd_divisor=1,
        act_layer=nn.ReLU,
        gate_layer=nn.Sigmoid,
        num_layers=1,
        dilation=4,
        mlp_bias=False,
    ):
        super(BamSpatialAttn, self).__init__()
        if not rd_channels:
            # Derive the bottleneck width from the reduction ratio.
            rd_channels = make_divisible(
                channels * rd_ratio, rd_divisor, round_limit=0.0
            )
        self.conv_layers = nn.Sequential()
        for idx in range(num_layers):
            # Only the first conv maps from the full channel count.
            in_ch = channels if idx == 0 else rd_channels
            self.conv_layers.add_module(
                "conv_bn_act_%d" % idx,
                ConvBnAct(
                    in_ch,
                    rd_channels,
                    act_layer,
                    kernel_size=3,
                    padding=4,
                    dilation=4,
                    bias=mlp_bias,
                ),
            )
        self.conv_layers.add_module(
            "conv_final", nn.Conv2d(rd_channels, 1, kernel_size=1, bias=mlp_bias)
        )
        self.gate = gate_layer()

    def forward(self, x):
        # The unpack also asserts a 4-D (b, c, h, w) input.
        b, c, _, _ = x.shape
        x_attn = self.gate(self.conv_layers(x))
        return x * x_attn.expand_as(x)
class BAMModule(nn.Module):
    """Bottleneck Attention Module combining channel and spatial branches.

    NOTE(review): both branches return ``x`` already scaled by their gate and
    the products are multiplied before the final sigmoid; this differs from
    the paper's additive combination of raw attention maps and is preserved
    as implemented.
    """

    def __init__(
        self,
        channels,
        rd_ratio=1.0 / 16,
        rd_channels=None,
        rd_divisor=1,
        act_layer=nn.ReLU,
        gate_layer=nn.Sigmoid,
        num_channel_attn_layers=2,
        num_spatial_attn_layers=2,
        mlp_bias=False,
    ):
        super(BAMModule, self).__init__()
        self.channel_att = BamChannelAttn(
            channels,
            rd_ratio=rd_ratio,
            rd_channels=rd_channels,
            rd_divisor=rd_divisor,
            act_layer=act_layer,
            gate_layer=gate_layer,
            num_layers=num_channel_attn_layers,
            mlp_bias=mlp_bias,
        )
        # BUG FIX: these arguments were previously passed positionally, which
        # slotted ``mlp_bias`` into BamSpatialAttn's 8th parameter
        # (``dilation``) and silently left its ``mlp_bias`` at the default.
        # Keyword arguments route every value to the intended parameter;
        # default behavior is unchanged (the conv stack hard-codes dilation=4).
        self.spatial_att = BamSpatialAttn(
            channels,
            rd_ratio=rd_ratio,
            rd_channels=rd_channels,
            rd_divisor=rd_divisor,
            act_layer=act_layer,
            gate_layer=gate_layer,
            num_layers=num_spatial_attn_layers,
            mlp_bias=mlp_bias,
        )

    def forward(self, x):
        """Scale ``x`` by ``1 + sigmoid(channel_att(x) * spatial_att(x))``."""
        x_attn = 1 + F.sigmoid(self.channel_att(x) * self.spatial_att(x))
        return x * x_attn
| [
"oneflow.nn.Sequential",
"oneflow.nn.Conv2d",
"oneflow.nn.Linear"
] | [((1214, 1229), 'oneflow.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (1227, 1229), True, 'import oneflow.nn as nn\n'), ((2509, 2524), 'oneflow.nn.Sequential', 'nn.Sequential', ([], {}), '()\n', (2522, 2524), True, 'import oneflow.nn as nn\n'), ((1094, 1158), 'flowvision.layers.helpers.make_divisible', 'make_divisible', (['(channels * rd_ratio)', 'rd_divisor'], {'round_limit': '(0.0)'}), '(channels * rd_ratio, rd_divisor, round_limit=0.0)\n', (1108, 1158), False, 'from flowvision.layers.helpers import make_divisible\n'), ((1733, 1780), 'oneflow.nn.Linear', 'nn.Linear', (['rd_channels', 'channels'], {'bias': 'mlp_bias'}), '(rd_channels, channels, bias=mlp_bias)\n', (1742, 1780), True, 'import oneflow.nn as nn\n'), ((2387, 2451), 'flowvision.layers.helpers.make_divisible', 'make_divisible', (['(channels * rd_ratio)', 'rd_divisor'], {'round_limit': '(0.0)'}), '(channels * rd_ratio, rd_divisor, round_limit=0.0)\n', (2401, 2451), False, 'from flowvision.layers.helpers import make_divisible\n'), ((3496, 3551), 'oneflow.nn.Conv2d', 'nn.Conv2d', (['rd_channels', '(1)'], {'kernel_size': '(1)', 'bias': 'mlp_bias'}), '(rd_channels, 1, kernel_size=1, bias=mlp_bias)\n', (3505, 3551), True, 'import oneflow.nn as nn\n'), ((2691, 2791), 'flowvision.layers.blocks.ConvBnAct', 'ConvBnAct', (['channels', 'rd_channels', 'act_layer'], {'kernel_size': '(3)', 'padding': '(4)', 'dilation': '(4)', 'bias': 'mlp_bias'}), '(channels, rd_channels, act_layer, kernel_size=3, padding=4,\n dilation=4, bias=mlp_bias)\n', (2700, 2791), False, 'from flowvision.layers.blocks import ConvBnAct\n'), ((3123, 3226), 'flowvision.layers.blocks.ConvBnAct', 'ConvBnAct', (['rd_channels', 'rd_channels', 'act_layer'], {'kernel_size': '(3)', 'padding': '(4)', 'dilation': '(4)', 'bias': 'mlp_bias'}), '(rd_channels, rd_channels, act_layer, kernel_size=3, padding=4,\n dilation=4, bias=mlp_bias)\n', (3132, 3226), False, 'from flowvision.layers.blocks import ConvBnAct\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from oneflow.compatible import single_client as flow
import numpy as np
from oneflow.compatible.single_client import typing as tp
from test_util import GenArgList
import unittest
from collections import OrderedDict
from typing import Dict
import os
def _compare_swish_with_np(input_shape, beta, device_type, machine_ids, device_counts):
    """Compare flow.nn.swish (forward and input gradient) against numpy.

    Builds a lazy-mode training job so that watch_diff can capture the
    gradient of swish w.r.t. its input and check it in a callback.
    """
    input_1 = np.random.random(size=input_shape).astype(np.float32)
    assert device_type in ["cpu", "gpu"]
    # Fresh session + device configuration before defining the job.
    flow.clear_default_session()
    if device_type == "cpu":
        flow.config.cpu_device_num(device_counts)
    else:
        flow.config.gpu_device_num(device_counts)
    func_config = flow.FunctionConfig()
    func_config.default_placement_scope(flow.scope.placement(device_type, machine_ids))
    def np_swish(input, beta):
        # Reference forward: f(x) = x * sigmoid(beta * x).
        def np_sigmoid(sigmoid_input):
            return 1 / (1 + np.exp(-sigmoid_input))
        return input * np_sigmoid(beta * input)
    np_out_swish = np_swish(input_1, beta)
    def np_diff(input, beta):
        # We only test input_1 diff
        # Reference gradient: f'(x) = beta*f(x) + sigmoid(beta*x)*(1 - beta*f(x)).
        def np_sigmoid(sigmoid_input):
            return 1 / (1 + np.exp(-sigmoid_input))
        _fx = input * np_sigmoid(beta * input)
        return beta * _fx + (1 - beta * _fx) * np_sigmoid(beta * input)
    _np_grad = np_diff(input_1, beta)
    def assert_prediction_grad(blob: tp.Numpy):
        # Invoked by watch_diff with the gradient flowing into x_var.
        assert np.allclose(blob, _np_grad)
    @flow.global_function(
        type="train", function_config=func_config,
    )
    def oneflow_swish(
        of_input_1: tp.Numpy.Placeholder(shape=input_1.shape),
    ) -> tp.Numpy:
        with flow.scope.placement(device_type, "0:0"):
            # A zero variable is added so the graph has a trainable input
            # whose gradient equals d(swish)/d(input).
            v = flow.get_variable(
                shape=input_1.shape,
                dtype=flow.float32,
                initializer=flow.zeros_initializer(),
                name="x_var",
            )
            x_var = of_input_1 + v
        flow.watch_diff(x_var, assert_prediction_grad)
        of_swish_out = flow.nn.swish(x_var, beta)
        with flow.scope.placement(device_type, "0:0"):
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [1e-3]), momentum=0
            ).minimize(of_swish_out)
        return of_swish_out
    of_out_swish = oneflow_swish(input_1)
    assert np.allclose(of_out_swish, np_out_swish)
def _gen_arg_dict(shape, beta, device_type, machine_ids, device_counts):
# Generate a dict to pass parameter to test case
arg_dict = OrderedDict()
arg_dict["input_shape"] = [shape]
arg_dict["beta"] = [beta]
arg_dict["device_type"] = [device_type]
arg_dict["machine_ids"] = [machine_ids]
arg_dict["device_counts"] = [device_counts]
return arg_dict
@flow.unittest.skip_unless_1n1d()
class Testswish1n1d(flow.unittest.TestCase):
    """Single-device swish checks against the numpy reference."""

    def test_swish_cpu(test_case):
        args = _gen_arg_dict(
            shape=(4, 6), beta=1, device_type="cpu", machine_ids="0:0", device_counts=1
        )
        for case in GenArgList(args):
            _compare_swish_with_np(*case)

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_swish_gpu(test_case):
        args = _gen_arg_dict(
            shape=(3, 16, 32),
            beta=10,
            device_type="gpu",
            machine_ids="0:0",
            device_counts=1,
        )
        for case in GenArgList(args):
            _compare_swish_with_np(*case)
@flow.unittest.skip_unless_1n2d()
class Teststack1n2d(flow.unittest.TestCase):
    # NOTE(review): the class name says "stack" but it exercises swish --
    # likely copy-pasted from a stack test. Renaming would change how the
    # test is addressed (e.g. from the unittest CLI), so it is only flagged.
    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_swish_gpu_1n2d(test_case):
        # Two devices on one node ("0:0-1") to cover the multi-device path.
        arg_dict = _gen_arg_dict(
            shape=(3, 8, 8, 4),
            beta=2,
            device_type="gpu",
            machine_ids="0:0-1",
            device_counts=2,
        )
        for arg in GenArgList(arg_dict):
            _compare_swish_with_np(*arg)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.watch_diff",
"oneflow.compatible.single_client.unittest.skip_unless_1n2d",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.config... | [((3274, 3306), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (3304, 3306), True, 'from oneflow.compatible import single_client as flow\n'), ((3989, 4021), 'oneflow.compatible.single_client.unittest.skip_unless_1n2d', 'flow.unittest.skip_unless_1n2d', ([], {}), '()\n', (4019, 4021), True, 'from oneflow.compatible import single_client as flow\n'), ((1044, 1072), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1070, 1072), True, 'from oneflow.compatible import single_client as flow\n'), ((1231, 1252), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1250, 1252), True, 'from oneflow.compatible import single_client as flow\n'), ((1972, 2035), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1992, 2035), True, 'from oneflow.compatible import single_client as flow\n'), ((2850, 2889), 'numpy.allclose', 'np.allclose', (['of_out_swish', 'np_out_swish'], {}), '(of_out_swish, np_out_swish)\n', (2861, 2889), True, 'import numpy as np\n'), ((3033, 3046), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3044, 3046), False, 'from collections import OrderedDict\n'), ((4491, 4506), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4504, 4506), False, 'import unittest\n'), ((1110, 1151), 'oneflow.compatible.single_client.config.cpu_device_num', 'flow.config.cpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1136, 1151), True, 'from oneflow.compatible import single_client as flow\n'), ((1170, 1211), 'oneflow.compatible.single_client.config.gpu_device_num', 'flow.config.gpu_device_num', (['device_counts'], {}), '(device_counts)\n', (1196, 1211), True, 'from oneflow.compatible import single_client as 
flow\n'), ((1293, 1339), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', 'machine_ids'], {}), '(device_type, machine_ids)\n', (1313, 1339), True, 'from oneflow.compatible import single_client as flow\n'), ((1938, 1965), 'numpy.allclose', 'np.allclose', (['blob', '_np_grad'], {}), '(blob, _np_grad)\n', (1949, 1965), True, 'import numpy as np\n'), ((2461, 2507), 'oneflow.compatible.single_client.watch_diff', 'flow.watch_diff', (['x_var', 'assert_prediction_grad'], {}), '(x_var, assert_prediction_grad)\n', (2476, 2507), True, 'from oneflow.compatible import single_client as flow\n'), ((2532, 2558), 'oneflow.compatible.single_client.nn.swish', 'flow.nn.swish', (['x_var', 'beta'], {}), '(x_var, beta)\n', (2545, 2558), True, 'from oneflow.compatible import single_client as flow\n'), ((3538, 3558), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3548, 3558), False, 'from test_util import GenArgList\n'), ((3923, 3943), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (3933, 3943), False, 'from test_util import GenArgList\n'), ((3623, 3657), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (3632, 3657), False, 'import os\n'), ((4395, 4415), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4405, 4415), False, 'from test_util import GenArgList\n'), ((4088, 4122), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4097, 4122), False, 'import os\n'), ((943, 977), 'numpy.random.random', 'np.random.random', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (959, 977), True, 'import numpy as np\n'), ((2094, 2135), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'tp.Numpy.Placeholder', ([], {'shape': 'input_1.shape'}), '(shape=input_1.shape)\n', (2114, 2135), True, 'from oneflow.compatible.single_client import typing as tp\n'), ((2169, 2209), 
'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2189, 2209), True, 'from oneflow.compatible import single_client as flow\n'), ((2573, 2613), 'oneflow.compatible.single_client.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (2593, 2613), True, 'from oneflow.compatible import single_client as flow\n'), ((1440, 1462), 'numpy.exp', 'np.exp', (['(-sigmoid_input)'], {}), '(-sigmoid_input)\n', (1446, 1462), True, 'import numpy as np\n'), ((1691, 1713), 'numpy.exp', 'np.exp', (['(-sigmoid_input)'], {}), '(-sigmoid_input)\n', (1697, 1713), True, 'import numpy as np\n'), ((2347, 2371), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2369, 2371), True, 'from oneflow.compatible import single_client as flow\n'), ((2663, 2717), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (2704, 2717), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
import oneflow as flow
# Shared FunctionConfig reused by every job builder below; each builder
# mutates the XLA/TensorRT flags on it just before defining its job.
config = flow.function_config()
class TestBroadcastOp(unittest.TestCase):
    """Compares a plain OneFlow job against its XLA-jitted twin.

    The base class is a disabled harness (``run_test = False``); subclasses
    flip ``run_test`` to True and supply ``make_job`` / ``make_xla_job``
    builders for one specific broadcast operation.
    """

    run_test = False

    # Broadcastable shape pairs exercised by both input tests below.
    _SHAPE_PAIRS = (
        ((1, 10), (1, 1)),
        ((2, 10, 2), (2, 1, 2)),
        ((2, 5, 2, 2), (1, 5, 2, 2)),
    )

    def _test_body(self, x, y, dtype=np.float32):
        # No-op unless a subclass enabled the harness.
        if not self.run_test:
            return
        plain_job = self.make_job(x.shape, y.shape, dtype=flow.float32)
        xla_job = self.make_xla_job(x.shape, y.shape, dtype=flow.float32)
        plain_out = plain_job(x, y).get()
        xla_out = xla_job(x, y).get()
        print("without xla: ", plain_out)
        print("with xla", xla_out)
        # Both jobs must agree within a loose float tolerance.
        self.assertTrue(
            np.allclose(plain_out.numpy(), xla_out.numpy(), rtol=1e-03, atol=1e-05)
        )
        flow.clear_default_session()

    def _test_ones_body(self, x_shape, y_shape, dtype=np.float32):
        self._test_body(
            np.ones(x_shape, dtype=dtype), np.ones(y_shape, dtype=dtype), dtype=dtype
        )

    def _test_random_body(self, x_shape, y_shape, dtype=np.float32):
        lhs = np.random.random(x_shape).astype(dtype)
        rhs = np.random.random(y_shape).astype(dtype)
        self._test_body(lhs, rhs, dtype=dtype)

    def test_ones_input(self):
        for x_shape, y_shape in self._SHAPE_PAIRS:
            self._test_ones_body(x_shape, y_shape)

    def test_random_input(self):
        for x_shape, y_shape in self._SHAPE_PAIRS:
            self._test_random_body(x_shape, y_shape)
class TestBroadcastAddOp(TestBroadcastOp):
    """Broadcast add: the XLA-jitted job must match the plain job."""
    run_test = True
    def make_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the reference job with XLA JIT and TensorRT both disabled."""
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def broadcast_add_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.add(x, y)
        return broadcast_add_job
    def make_xla_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the same add job, but compiled through XLA JIT."""
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_broadcast_add_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.add(x, y)
        return xla_broadcast_add_job
class TestBroadcastMulOp(TestBroadcastOp):
    """Broadcast multiply: the XLA-jitted job must match the plain job."""
    run_test = True
    def make_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the reference job with XLA JIT and TensorRT both disabled."""
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def broadcast_mul_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.multiply(x, y)
        return broadcast_mul_job
    def make_xla_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the same multiply job, but compiled through XLA JIT."""
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_broadcast_mul_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.multiply(x, y)
        return xla_broadcast_mul_job
class TestBroadcastDivOp(TestBroadcastOp):
    """Broadcast divide: the XLA-jitted job must match the plain job."""
    run_test = True
    def make_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the reference job with XLA JIT and TensorRT both disabled."""
        config.use_xla_jit(False)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def broadcast_div_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.divide(x, y)
        return broadcast_div_job
    def make_xla_job(self, x_shape, y_shape, dtype=flow.float32):
        """Build the same divide job, but compiled through XLA JIT."""
        config.use_xla_jit(True)
        config.use_tensorrt(False)
        @flow.global_function(config)
        def xla_broadcast_div_job(
            x=flow.FixedTensorDef(x_shape, dtype=dtype),
            y=flow.FixedTensorDef(y_shape, dtype=dtype),
        ):
            return flow.math.divide(x, y)
        return xla_broadcast_div_job
if __name__ == "__main__":
unittest.main()
| [
"oneflow.global_function",
"oneflow.math.divide",
"oneflow.FixedTensorDef",
"oneflow.function_config",
"oneflow.clear_default_session",
"oneflow.math.add",
"oneflow.math.multiply"
] | [((659, 681), 'oneflow.function_config', 'flow.function_config', ([], {}), '()\n', (679, 681), True, 'import oneflow as flow\n'), ((4690, 4705), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4703, 4705), False, 'import unittest\n'), ((1190, 1218), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1216, 1218), True, 'import oneflow as flow\n'), ((1299, 1328), 'numpy.ones', 'np.ones', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (1306, 1328), True, 'import numpy as np\n'), ((1341, 1370), 'numpy.ones', 'np.ones', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (1348, 1370), True, 'import numpy as np\n'), ((2220, 2248), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (2240, 2248), True, 'import oneflow as flow\n'), ((2623, 2651), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (2643, 2651), True, 'import oneflow as flow\n'), ((3096, 3124), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (3116, 3124), True, 'import oneflow as flow\n'), ((3504, 3532), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (3524, 3532), True, 'import oneflow as flow\n'), ((3982, 4010), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (4002, 4010), True, 'import oneflow as flow\n'), ((4388, 4416), 'oneflow.global_function', 'flow.global_function', (['config'], {}), '(config)\n', (4408, 4416), True, 'import oneflow as flow\n'), ((2294, 2335), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2313, 2335), True, 'import oneflow as flow\n'), ((2351, 2392), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (2370, 2392), True, 'import oneflow as flow\n'), ((2424, 2443), 'oneflow.math.add', 'flow.math.add', (['x', 'y'], {}), '(x, 
y)\n', (2437, 2443), True, 'import oneflow as flow\n'), ((2701, 2742), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (2720, 2742), True, 'import oneflow as flow\n'), ((2758, 2799), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (2777, 2799), True, 'import oneflow as flow\n'), ((2831, 2850), 'oneflow.math.add', 'flow.math.add', (['x', 'y'], {}), '(x, y)\n', (2844, 2850), True, 'import oneflow as flow\n'), ((3170, 3211), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (3189, 3211), True, 'import oneflow as flow\n'), ((3227, 3268), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (3246, 3268), True, 'import oneflow as flow\n'), ((3300, 3324), 'oneflow.math.multiply', 'flow.math.multiply', (['x', 'y'], {}), '(x, y)\n', (3318, 3324), True, 'import oneflow as flow\n'), ((3582, 3623), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (3601, 3623), True, 'import oneflow as flow\n'), ((3639, 3680), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (3658, 3680), True, 'import oneflow as flow\n'), ((3712, 3736), 'oneflow.math.multiply', 'flow.math.multiply', (['x', 'y'], {}), '(x, y)\n', (3730, 3736), True, 'import oneflow as flow\n'), ((4056, 4097), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (4075, 4097), True, 'import oneflow as flow\n'), ((4113, 4154), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (4132, 4154), True, 'import oneflow as flow\n'), ((4186, 4208), 'oneflow.math.divide', 'flow.math.divide', (['x', 'y'], {}), '(x, y)\n', (4202, 4208), True, 'import 
oneflow as flow\n'), ((4466, 4507), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['x_shape'], {'dtype': 'dtype'}), '(x_shape, dtype=dtype)\n', (4485, 4507), True, 'import oneflow as flow\n'), ((4523, 4564), 'oneflow.FixedTensorDef', 'flow.FixedTensorDef', (['y_shape'], {'dtype': 'dtype'}), '(y_shape, dtype=dtype)\n', (4542, 4564), True, 'import oneflow as flow\n'), ((4596, 4618), 'oneflow.math.divide', 'flow.math.divide', (['x', 'y'], {}), '(x, y)\n', (4612, 4618), True, 'import oneflow as flow\n'), ((1496, 1521), 'numpy.random.random', 'np.random.random', (['x_shape'], {}), '(x_shape)\n', (1512, 1521), True, 'import numpy as np\n'), ((1548, 1573), 'numpy.random.random', 'np.random.random', (['y_shape'], {}), '(y_shape)\n', (1564, 1573), True, 'import numpy as np\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import OrderedDict
from functools import partial
from typing import Dict
import oneflow._oneflow_internal
import oneflow.framework.c_api_util as c_api_util
import oneflow.framework.graph_build_util as graph_build_util
import oneflow.framework.session_context as session_ctx
from oneflow.framework.tensor import Tensor
from oneflow.framework.function_util import FunctionConfig
from oneflow.framework.multi_client_session import MultiClientSession
from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple
from oneflow.nn.graph_block import Block, BlockType
from oneflow.nn.graph_optimizer import OptimizerConfig, VariableConfig
from oneflow.nn.module import Module
from oneflow.nn.optimizer.optimizer import Optimizer
from oneflow.nn.util import add_indent
class Graph(object):
    """Base class for a lazily-built static graph in multi-client mode.

    Subclasses describe the computation by overriding :meth:`build`.  The
    first ``__call__`` traces ``build`` inside a lazy graph-build context,
    registers inputs/outputs/variables with the C++ ``CNNGraph`` object and
    compiles the runtime; subsequent calls just launch the compiled graph.
    """

    # Maps subclass name -> number of instances created so far; used by
    # _generate_name to give every graph instance a unique job name.
    _child_init_cnt = dict()
    def __init__(self):
        self.config = GraphConfig()
        self._generate_name()
        self.config.proto.set_job_name(self._name)
        # C++-side graph object; owns the compiled job and its runtime.
        self._c_nn_graph = oneflow._oneflow_internal.nn.graph.CNNGraph(self._name)
        self._blocks = OrderedDict()  # block name -> Block wrapping a child Module
        self._optimizers_conf = OrderedDict()  # optimizer name -> OptimizerConfig
        self._variables_conf = OrderedDict()  # parameter tensor -> VariableConfig
        self._is_compiled = False
        # Lazy job proto captured at the end of _compile; for debugging only.
        self._job_proto = None
        self._args_repr = []  # "(INPUT:...)" strings, filled during _compile
        self._outs_repr = []  # "(OUTPUT:...)" strings, filled during _compile
        self._debug = False
    @property
    def name(self):
        # Unique graph/job name, e.g. "MyGraph_0" (see _generate_name).
        return self._name
    @property
    def training(self):
        # Mirrors the config; True once the job proto has a train conf.
        return self.config.training
    @property
    def _graph_proto(self):
        # Job proto of the traced graph; None until _compile has run.
        return self._job_proto
    def debug(self, mode: bool = True) -> None:
        """Enable/disable debug printing on this graph and all child blocks."""
        self._debug = mode
        for name, block in self._blocks.items():
            assert block.type == BlockType.MODULE
            block.debug(mode)
    def build(self, *args):
        """Describe the computation; subclasses must override this."""
        raise NotImplementedError()
    def add_optimizer(
        self,
        name: str,
        optimizer: Optimizer = None,
        lr_scheduler=None,
        grad_clipping_conf=None,
        weight_decay_conf=None,
    ):
        """Register an optimizer (plus optional scheduler/clipping/decay) under ``name``.

        Registering at least one optimizer switches the graph to train mode
        during compilation (see _generate_optimizer_and_variable_configs).
        """
        assert name is not None, "name cannot be None"
        assert type(name) is str, "name must be an instance of str"
        assert optimizer is not None, "optimizer cannot be None"
        assert isinstance(
            optimizer, Optimizer
        ), "optimizer must be an instance of Optimizer"
        self._optimizers_conf[name] = OptimizerConfig(
            name, optimizer, lr_scheduler, grad_clipping_conf, weight_decay_conf
        )
    def _generate_name(self):
        # Name is "<SubclassName>_<per-subclass instance counter>".
        child_name = self.__class__.__name__
        if Graph._child_init_cnt.get(child_name) is None:
            Graph._child_init_cnt[child_name] = 0
        self._name = child_name + "_" + str(Graph._child_init_cnt[child_name])
        Graph._child_init_cnt[child_name] += 1
    def _state(self):
        # Yield every parameter, then every buffer, of each child block.
        for _, b in self._blocks.items():
            pa_gen = b.parameters(recurse=True)
            for pa in pa_gen:
                yield pa
            bu_gen = b.buffers(recurse=True)
            for bu in bu_gen:
                yield bu
    def _generate_optimizer_and_variable_configs(self):
        # Any registered optimizer flips the job into train mode.
        if len(self._optimizers_conf) > 0:
            self.config._train(True)
        # One VariableConfig per parameter (buffers get none), keyed by tensor.
        for state_block in self._state():
            if state_block.type == BlockType.PARAMETER:
                self._variables_conf[state_block.origin] = VariableConfig(
                    state_block.name_prefix + state_block.name
                )
        for name, opt_config in self._optimizers_conf.items():
            self.config._generate_optimizer_and_variable_configs(
                opt_config, self._variables_conf
            )
    def _compile(self, *args):
        """Trace build(*args) lazily, then compile and init the graph runtime.

        Returns the eager output(s) produced by the traced build: None for no
        outputs, a single Tensor for one, or a tuple for several.
        May only run once per graph instance.
        """
        assert not self._is_compiled, (
            "nn.Graph " + self._name + " has already been compiled."
        )
        if self._debug:
            print(self._shallow_repr() + " start graph construting.")
        self._generate_optimizer_and_variable_configs()
        session = session_ctx.GetDefaultSession()
        assert type(session) is MultiClientSession
        session.TryInit()
        with graph_build_util.graph_build_context(self.config.proto, session):
            # Deal with input: build one lazy graph input op per positional arg.
            lazy_args = []
            lazy_arg_op_names = []
            for idx, arg in enumerate(args):
                op_name = "_" + self.name + "-input_" + str(idx)
                lazy_args.append(graph_build_util.build_graph_input_arg(op_name, arg))
                lazy_arg_op_names.append(op_name)
                in_str = "(INPUT:" + op_name + ":" + arg._meta_repr() + ")"
                self._args_repr.append(in_str)
                if self._debug:
                    print(in_str)
            # Deal with parameter and buffer: defer state-op creation via a
            # lazy builder so it happens inside the traced build.
            state_op_names = []
            state_tensors = []
            for state_block in self._state():
                op_name = state_block.name_prefix + state_block.name
                state_tensor = state_block.origin
                state_op_names.append(op_name)
                state_tensors.append(state_tensor)
                if state_block.type == BlockType.PARAMETER:
                    state_config = self._variables_conf[state_block.origin]
                else:
                    state_config = None
                state_block.set_lazy_origin_builder(
                    partial(
                        graph_build_util.build_graph_state,
                        op_name,
                        state_tensor,
                        state_config,
                    )
                )
            self._variables = convert_to_tensor_tuple(state_tensors)
            # Deal with module in self.build(*args): trace the user graph.
            outputs = self.build(*lazy_args)
            # Deal with outputs: normalize to a tuple of Tensors.
            if not (type(outputs) is tuple or type(outputs) is list):
                if outputs is None:
                    outputs = ()
                else:
                    assert type(outputs) is Tensor
                    outputs = (outputs,)
            eager_outputs = []
            eager_output_op_names = []
            for idx, out in enumerate(outputs):
                op_name = "_" + self.name + "-output_" + str(idx)
                eager_outputs.append(graph_build_util.build_graph_output(op_name, out))
                eager_output_op_names.append(op_name)
                out_str = "(OUTPUT:" + op_name + ":" + out._meta_repr() + ")"
                self._outs_repr.append(out_str)
                if self._debug:
                    print(out_str)
            # Unwrap: no outputs -> None, one -> the Tensor, many -> tuple.
            if len(eager_outputs) == 0:
                eager_outputs = None
            elif len(eager_outputs) == 1:
                eager_outputs = eager_outputs[0]
            else:
                eager_outputs = tuple(eager_outputs)
            self._outputs = convert_to_tensor_tuple(eager_outputs)
            self._eager_outputs = eager_outputs
            # Register input/output/variable op names to _c_nn_graph.
            self._c_nn_graph.register_input_op_names(lazy_arg_op_names)
            self._c_nn_graph.register_output_op_names(eager_output_op_names)
            self._c_nn_graph.register_variable_op_names_and_tensors(
                state_op_names, self._variables
            )
            # Save job proto for debug (exposed via _graph_proto).
            self._job_proto = c_api_util.GetCurrentJob()
        # Complie and init Runtime
        # NOTE(review): "complie" is the spelling of the C API method itself.
        self._c_nn_graph.complie_and_init_runtime()
        self._is_compiled = True
        if self._debug:
            print(self._shallow_repr() + " end graph construting.")
        return eager_outputs
    def _launch(self, *args):
        """Run the compiled graph on ``args``; returns the outputs captured at compile time."""
        # oneflow._oneflow_internal.eager.multi_client.Sync() NOTE(chengcheng): Need Sync?
        oneflow._oneflow_internal.nn.graph.RunLazyNNGraph(
            convert_to_tensor_tuple(args),
            self._outputs,
            self._variables,
            self._c_nn_graph,
        )
        return self._eager_outputs
    def __call__(self, *args):
        # First call compiles (and traces) the graph; later calls only launch.
        if not self._is_compiled:
            self._compile(*args)
        return self._launch(*args)
    def _add_block(self, name: str, module: Module = None) -> None:
        r"""Adds a module to the current graph as a block.

        The block can be accessed as an attribute using the given name.

        Args:
            name (str): name of the child block; must be a non-empty string
                without dots and must not clash with an existing attribute.
            module (Module): child module to be added to the graph.

        Raises:
            TypeError: if ``module`` is not a Module or ``name`` not a str.
            KeyError: if ``name`` is invalid or already taken.
        """
        if not isinstance(module, Module) and module is not None:
            raise TypeError("{} is not a Module subclass".format(type(module)))
        elif not isinstance(name, str):
            raise TypeError("module name should be a string. Got {}".format(type(name)))
        elif hasattr(self, name) and name not in self._blocks:
            raise KeyError("attribute '{}' already exists".format(name))
        elif "." in name:
            raise KeyError('module name can\'t contain ".", got: {}'.format(name))
        elif name == "":
            raise KeyError('module name can\'t be empty string ""')
        self._blocks[name] = Block("", name, module)
    def __setattr__(self, name: str, value=None):
        # Assigning a Module wraps it in a Block; Optimizers must go through
        # add_optimizer(); everything else is a plain attribute.
        if isinstance(value, Module):
            self._add_block(name, value)
        elif isinstance(value, Optimizer):
            raise AttributeError(
                "'{}' object are not allowed to set Optimizer attribute named '{}', "
                "please use add_optimizer(...) instead.".format(
                    type(self).__name__, name
                )
            )
        else:
            object.__setattr__(self, name, value)
    def __getattr__(self, name: str):
        # Resolve block names first, then plain instance attributes.
        if "_blocks" in self.__dict__:
            if name in self._blocks:
                return self._blocks[name]
        if name in self.__dict__:
            return self.__dict__[name]
        raise AttributeError(
            "'{}' object has no attribute '{}'".format(type(self).__name__, name)
        )
    def __repr__(self):
        # "(GRAPH:name:Class): (" followed by indented inputs, blocks, outputs.
        child_lines = []
        if len(self._args_repr) > 0:
            for in_str in self._args_repr:
                input_str = add_indent(in_str, 2)
                child_lines.append(input_str)
        if len(self._blocks) > 0:
            for n, m in self._blocks.items():
                mod_str = repr(m)
                mod_str = add_indent(mod_str, 2)
                child_lines.append(mod_str)
        if len(self._outs_repr) > 0:
            for out_str in self._outs_repr:
                output_str = add_indent(out_str, 2)
                child_lines.append(output_str)
        main_str = self._shallow_repr() + ": ("
        if len(child_lines) > 0:
            main_str += "\n " + "\n ".join(child_lines) + "\n"
        main_str += ")"
        return main_str
    def _shallow_repr(self):
        # One-line identity tag used by __repr__ and debug prints.
        shallow_repr = "(GRAPH:" + self._name + ":" + self.__class__.__name__ + ")"
        return shallow_repr
class GraphConfig(FunctionConfig):
    """Job-level configuration for a nn.Graph, backed by a job config proto."""

    def __init__(self):
        super().__init__()
        # A freshly created graph starts out in inference (predict) mode.
        self._train(False)

    @property
    def proto(self):
        """The underlying job configuration protobuf message."""
        return self.function_desc.job_config_proto

    @property
    def training(self):
        """True in train mode, False in predict mode.

        Raises:
            NotImplementedError: if neither mode has been set on the proto.
        """
        job_conf = self.proto
        if job_conf.has_train_conf():
            return True
        if job_conf.has_predict_conf():
            return False
        raise NotImplementedError

    def _train(self, mode: bool = True):
        """Flip the job proto between train and predict configuration."""
        if not mode:
            self.proto.mutable_predict_conf()
            return
        # mutable_train_conf() creates the train conf on first access.
        self.proto.mutable_train_conf().set_loss_scale_factor(1.0)

    def _generate_optimizer_and_variable_configs(
        self,
        optimizer_config: OptimizerConfig = None,
        variables_conf: OrderedDict = None,
    ):
        """Write one optimizer's settings into the train conf proto."""
        train_conf = self.proto.mutable_train_conf()
        optimizer_config.generate_optimizer_and_variable_configs(
            train_conf, variables_conf
        )
from oneflow.nn.graph import Graph as Graph
from oneflow.nn.graph_block import Block, BlockConfig
from oneflow.nn.graph_optimizer import OptimizerConfig
| [
"oneflow.framework.session_context.GetDefaultSession",
"oneflow.nn.graph_optimizer.OptimizerConfig",
"oneflow.nn.graph_block.Block",
"oneflow.nn.graph.Graph._child_init_cnt.get",
"oneflow.framework.graph_build_util.graph_build_context",
"oneflow.framework.c_api_util.GetCurrentJob",
"oneflow.nn.graph_opt... | [((1679, 1692), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1690, 1692), False, 'from collections import OrderedDict\n'), ((1725, 1738), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1736, 1738), False, 'from collections import OrderedDict\n'), ((1770, 1783), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1781, 1783), False, 'from collections import OrderedDict\n'), ((2950, 3039), 'oneflow.nn.graph_optimizer.OptimizerConfig', 'OptimizerConfig', (['name', 'optimizer', 'lr_scheduler', 'grad_clipping_conf', 'weight_decay_conf'], {}), '(name, optimizer, lr_scheduler, grad_clipping_conf,\n weight_decay_conf)\n', (2965, 3039), False, 'from oneflow.nn.graph_optimizer import OptimizerConfig\n'), ((4540, 4571), 'oneflow.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (4569, 4571), True, 'import oneflow.framework.session_context as session_ctx\n'), ((9690, 9713), 'oneflow.nn.graph_block.Block', 'Block', (['""""""', 'name', 'module'], {}), "('', name, module)\n", (9695, 9713), False, 'from oneflow.nn.graph_block import Block, BlockConfig\n'), ((3145, 3182), 'oneflow.nn.graph.Graph._child_init_cnt.get', 'Graph._child_init_cnt.get', (['child_name'], {}), '(child_name)\n', (3170, 3182), True, 'from oneflow.nn.graph import Graph as Graph\n'), ((4662, 4726), 'oneflow.framework.graph_build_util.graph_build_context', 'graph_build_util.graph_build_context', (['self.config.proto', 'session'], {}), '(self.config.proto, session)\n', (4698, 4726), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((6147, 6185), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', (['state_tensors'], {}), '(state_tensors)\n', (6170, 6185), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((7359, 7397), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', 
(['eager_outputs'], {}), '(eager_outputs)\n', (7382, 7397), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((7857, 7883), 'oneflow.framework.c_api_util.GetCurrentJob', 'c_api_util.GetCurrentJob', ([], {}), '()\n', (7881, 7883), True, 'import oneflow.framework.c_api_util as c_api_util\n'), ((8319, 8348), 'oneflow.framework.tensor_tuple_util.convert_to_tensor_tuple', 'convert_to_tensor_tuple', (['args'], {}), '(args)\n', (8342, 8348), False, 'from oneflow.framework.tensor_tuple_util import convert_to_tensor_tuple\n'), ((3930, 3988), 'oneflow.nn.graph_optimizer.VariableConfig', 'VariableConfig', (['(state_block.name_prefix + state_block.name)'], {}), '(state_block.name_prefix + state_block.name)\n', (3944, 3988), False, 'from oneflow.nn.graph_optimizer import OptimizerConfig, VariableConfig\n'), ((10724, 10745), 'oneflow.nn.util.add_indent', 'add_indent', (['in_str', '(2)'], {}), '(in_str, 2)\n', (10734, 10745), False, 'from oneflow.nn.util import add_indent\n'), ((10933, 10955), 'oneflow.nn.util.add_indent', 'add_indent', (['mod_str', '(2)'], {}), '(mod_str, 2)\n', (10943, 10955), False, 'from oneflow.nn.util import add_indent\n'), ((11111, 11133), 'oneflow.nn.util.add_indent', 'add_indent', (['out_str', '(2)'], {}), '(out_str, 2)\n', (11121, 11133), False, 'from oneflow.nn.util import add_indent\n'), ((4963, 5015), 'oneflow.framework.graph_build_util.build_graph_input_arg', 'graph_build_util.build_graph_input_arg', (['op_name', 'arg'], {}), '(op_name, arg)\n', (5001, 5015), True, 'import oneflow.framework.graph_build_util as graph_build_util\n'), ((5899, 5984), 'functools.partial', 'partial', (['graph_build_util.build_graph_state', 'op_name', 'state_tensor', 'state_config'], {}), '(graph_build_util.build_graph_state, op_name, state_tensor, state_config\n )\n', (5906, 5984), False, 'from functools import partial\n'), ((6791, 6840), 'oneflow.framework.graph_build_util.build_graph_output', 'graph_build_util.build_graph_output', 
(['op_name', 'out'], {}), '(op_name, out)\n', (6826, 6840), True, 'import oneflow.framework.graph_build_util as graph_build_util\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import os
import numpy as np
import oneflow
import oneflow.experimental as flow
import oneflow.python.framework.graph_build_util as graph_build_util
class SubModule(flow.nn.Module):
    """A 1-to-1 channel 5x5 convolution followed by a ReLU activation."""

    def __init__(self):
        super().__init__()
        self.conv1 = flow.nn.Conv2d(1, 1, 5)
        self.relu = flow.nn.ReLU()

    def forward(self, x):
        # Convolve, then activate, as a single chained expression.
        return self.relu(self.conv1(x))
class CustomModule(flow.nn.Module):
    """SubModule feature extractor, flatten, then a linear head plus a buffer."""

    def __init__(self):
        super().__init__()
        self.layer = SubModule()
        self.fc1 = flow.nn.Linear(36, 4)
        self.register_buffer(
            "dummy_buff", flow.Tensor(1, 4),
        )

    def forward(self, x):
        features = oneflow.F.flatten(self.layer(x), 1)
        # Add the registered (non-parameter) buffer to the linear output.
        return self.fc1(features) + self.dummy_buff
@flow.unittest.skip_unless_1n1d()
class TestGraph(flow.unittest.TestCase):
    def test_add_nested_module(test_case):
        """Modules assigned to a Graph become Blocks exposing the wrapped state."""
        x = flow.Tensor(1, 1, 10, 10)
        flow.nn.init.uniform_(x, a=-1.0, b=1.0)
        # Module init and call (eager reference result).
        m = CustomModule()
        y = m(x)
        class CustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = m
            def build(self, x):
                return self.m(x)
        # Graph init
        g = CustomGraph()
        # check _c_nn_graph init: Python and C++ objects share the name
        test_case.assertEqual(g.name, g._c_nn_graph.name)
        # g.m is Block (module assignment is wrapped by Graph.__setattr__)
        test_case.assertTrue(isinstance(g.m, flow.nn.graph.Block))
        test_case.assertEqual(g.m.type, "MODULE")
        # g.m.name is "m"
        test_case.assertEqual(g.m.name, "m")
        # g.m.dummy_buff is Block
        test_case.assertTrue(isinstance(g.m.dummy_buff, flow.nn.graph.Block))
        test_case.assertEqual(g.m.dummy_buff.type, "BUFFER")
        # conv1 is Block
        test_case.assertTrue(isinstance(g.m.layer.conv1, flow.nn.graph.Block))
        # conv1.name is "conv1"
        test_case.assertEqual(g.m.layer.conv1.name, "conv1")
        # conv1.name_prefix is "m.layer."
        test_case.assertEqual(g.m.layer.conv1.name_prefix, "m.layer.")
        # conv1.weight is Block
        test_case.assertTrue(isinstance(g.m.layer.conv1.weight, flow.nn.graph.Block))
        test_case.assertEqual(g.m.layer.conv1.weight.type, "PARAMETER")
        # conv1.weight is Tensor while forward executes; Graph.build(...) needs
        # the raw Tensor rather than the Block wrapper
        g.m.layer.conv1._is_executing_forward = True
        test_case.assertTrue(isinstance(g.m.layer.conv1.weight, flow.Tensor))
        g.m.layer.conv1._is_executing_forward = False
        # conv1.kernel_size is original data in original module
        test_case.assertEqual(g.m.layer.conv1.kernel_size, (5, 5))
        # Graph build
        z = g.build(x)
        # g got the same result as the eager module call
        test_case.assertTrue(np.array_equal(y.numpy(), z.numpy()))
    def test_graph_config(test_case):
        """Graph.config can be set in __init__ and mutated after construction."""
        class CustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.m = CustomModule()
                self.config.enable_auto_mixed_precision(True)
            def build(self, x):
                x = self.m(x)
                return x
        g = CustomGraph()
        # check default training is False (no optimizer registered)
        test_case.assertEqual(g.config.training, False)
        # set graph config after construction
        g.config.enable_fuse_add_to_output(True)
        g.config.enable_fuse_add_to_output(False)
        # _state() yields parameters then buffers of every child block
        for s in g._state():
            print("g state: ", repr(s))
        # print repr of nn.Graph
        print(repr(g))
    def test_graph_name(test_case):
        """Graph names are "<Subclass>_<counter>", counted per subclass name."""
        class ACustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
            def build(self, x):
                return x
        class BCustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
            def build(self, x):
                return x
        class CBCustomGraph(BCustomGraph):
            def __init__(self):
                super().__init__()
        def create_graph(cnt):
            # Each subclass keeps its own counter, so all three are at `cnt`.
            a = ACustomGraph()
            test_case.assertEqual(a.name, "ACustomGraph_" + str(cnt))
            b = BCustomGraph()
            test_case.assertEqual(b.name, "BCustomGraph_" + str(cnt))
            cb = CBCustomGraph()
            test_case.assertEqual(cb.name, "CBCustomGraph_" + str(cnt))
        # Clearing the counter dict restarts numbering from 0.
        flow.nn.Graph._child_init_cnt.clear()
        for i in range(0, 3):
            create_graph(i)
        flow.nn.Graph._child_init_cnt.clear()
        for i in range(0, 3):
            create_graph(i)
    def test_graph_build_ctx(test_case):
        """lazy_mode nests correctly and _compile runs build() under it."""
        # check lazy_mode off by default, restored on context exit
        # NOTE(review): "gard" is the spelling of the API itself.
        test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), False)
        with graph_build_util.lazy_mode.gard(True):
            test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), True)
            with graph_build_util.lazy_mode.gard(False):
                test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), False)
            test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), True)
        test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), False)
        class CustomGraph(flow.nn.Graph):
            def __init__(self):
                super().__init__()
                self.config.enable_auto_mixed_precision(True)
            def build(self):
                # check lazy mode in nn.Graph._compile
                test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), True)
                # check session type
                import oneflow.python.framework.session_context as session_ctx
                from oneflow.python.framework.multi_client_session import (
                    MultiClientSession,
                )
                session = session_ctx.GetDefaultSession()
                test_case.assertEqual(type(session), MultiClientSession)
                # check scope belongs to the current session
                import oneflow.python.framework.scope_util as scope_util
                scope = oneflow.current_scope()
                scope_proto = graph_build_util.scope_to_proto(scope)
                test_case.assertEqual(session.id, scope_proto.session_id)
                # check job_build_and_infer_ctx uses this graph's name
                test_case.assertEqual(
                    oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName(),
                    self.name,
                )
        test_case.assertTrue(oneflow._oneflow_internal.IsMultiClient())
        g = CustomGraph()
        test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), False)
        # _compile traces build() (running the asserts above) and exits lazy mode
        g._compile()
        print("graph proto", g._graph_proto)
        test_case.assertEqual(graph_build_util.lazy_mode.is_enabled(), False)
def test_block_scope(test_case):
    """Each module block must expose its configured scope attributes."""

    class SubModule0(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.conv1 = flow.nn.Conv2d(1, 1, 5)

        def forward(self):
            scope = oneflow.current_scope()
            proto = graph_build_util.scope_to_proto(scope)
            # activation checkpointing was switched on for this block
            test_case.assertEqual(
                proto.attr_name2attr_value["checkpointing"].at_bool, True
            )
            # pipeline stage was configured to 0
            test_case.assertEqual(
                proto.attr_name2attr_value["pipeline_stage_id_hint"].at_int64, 0
            )
            # weight is not consumed inside this forward, so attribute
            # access yields the wrapping Block rather than a Tensor
            test_case.assertEqual(type(self.conv1.weight), flow.nn.graph.Block)

    class SubModule1(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.fc1 = flow.nn.Linear(36, 4)
            self.register_buffer(
                "dummy_buff", flow.Tensor(1, 4),
            )

        def forward(self):
            scope = oneflow.current_scope()
            proto = graph_build_util.scope_to_proto(scope)
            # this block's scope chains off the previous block's scope
            test_case.assertEqual(
                proto.parent_scope_symbol_id, self.prev_scope.symbol_id
            )
            # checkpointing was never configured here
            ck = proto.attr_name2attr_value["checkpointing"]
            test_case.assertEqual(ck.WhichOneof("value"), None)
            # pipeline stage was configured to 1
            test_case.assertEqual(
                proto.attr_name2attr_value["pipeline_stage_id_hint"].at_int64, 1
            )
            # the scope op-name prefixes spell out this block's full name
            test_case.assertEqual(
                self.name_prefix + self.name,
                ".".join(proto.scope_op_name_prefixes),
            )
            # buffers get their own child scope of the current one
            _ = self.dummy_buff
            dummy_buff_scope_proto = graph_build_util.scope_to_proto(
                self._buffers["dummy_buff"].scope
            )
            test_case.assertEqual(
                dummy_buff_scope_proto.parent_scope_symbol_id, scope.symbol_id
            )

    class CustomModule1(flow.nn.Module):
        def __init__(self):
            super().__init__()
            self.layer0 = SubModule0()
            self.layer1 = SubModule1()

        def forward(self):
            x = self.layer0()
            y = self.layer1()

    m = CustomModule1()

    class CustomGraph1(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = m
            # per-block scope configuration exercised by the asserts above
            self.m.layer0.config.stage_id = 0
            self.m.layer0.config.activation_checkpointing = True
            self.m.layer1.config.stage_id = 1

        def build(self):
            return self.m()

    g = CustomGraph1()
    x = flow.Tensor(1, 1, 10, 10)
    flow.nn.init.uniform_(x, a=-1.0, b=1.0)
    g._compile()


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.unittest.skip_unless_1n1d",
"oneflow.experimental.Tensor",
"oneflow.experimental.nn.Graph._child_init_cnt.clear",
"oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName",
"oneflow.current_scope",
"oneflow.experimental.nn.ReLU",
"oneflow.experimental.nn.Conv2d",
"oneflow... | [((1417, 1449), 'oneflow.experimental.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (1447, 1449), True, 'import oneflow.experimental as flow\n'), ((10868, 10883), 'unittest.main', 'unittest.main', ([], {}), '()\n', (10881, 10883), False, 'import unittest\n'), ((864, 887), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(5)'], {}), '(1, 1, 5)\n', (878, 887), True, 'import oneflow.experimental as flow\n'), ((908, 922), 'oneflow.experimental.nn.ReLU', 'flow.nn.ReLU', ([], {}), '()\n', (920, 922), True, 'import oneflow.experimental as flow\n'), ((1159, 1180), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(36)', '(4)'], {}), '(36, 4)\n', (1173, 1180), True, 'import oneflow.experimental as flow\n'), ((1331, 1354), 'oneflow.F.flatten', 'oneflow.F.flatten', (['x', '(1)'], {}), '(x, 1)\n', (1348, 1354), False, 'import oneflow\n'), ((1546, 1571), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)', '(1)', '(10)', '(10)'], {}), '(1, 1, 10, 10)\n', (1557, 1571), True, 'import oneflow.experimental as flow\n'), ((1580, 1619), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x'], {'a': '(-1.0)', 'b': '(1.0)'}), '(x, a=-1.0, b=1.0)\n', (1601, 1619), True, 'import oneflow.experimental as flow\n'), ((5015, 5052), 'oneflow.experimental.nn.Graph._child_init_cnt.clear', 'flow.nn.Graph._child_init_cnt.clear', ([], {}), '()\n', (5050, 5052), True, 'import oneflow.experimental as flow\n'), ((5119, 5156), 'oneflow.experimental.nn.Graph._child_init_cnt.clear', 'flow.nn.Graph._child_init_cnt.clear', ([], {}), '()\n', (5154, 5156), True, 'import oneflow.experimental as flow\n'), ((10740, 10765), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)', '(1)', '(10)', '(10)'], {}), '(1, 1, 10, 10)\n', (10751, 10765), True, 'import oneflow.experimental as flow\n'), ((10774, 10813), 'oneflow.experimental.nn.init.uniform_', 'flow.nn.init.uniform_', (['x'], {'a': '(-1.0)', 'b': '(1.0)'}), '(x, a=-1.0, 
b=1.0)\n', (10795, 10813), True, 'import oneflow.experimental as flow\n'), ((1237, 1254), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)', '(4)'], {}), '(1, 4)\n', (1248, 1254), True, 'import oneflow.experimental as flow\n'), ((5314, 5353), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5351, 5353), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5375, 5412), 'oneflow.python.framework.graph_build_util.lazy_mode.gard', 'graph_build_util.lazy_mode.gard', (['(True)'], {}), '(True)\n', (5406, 5412), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5749, 5788), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5786, 5788), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((7072, 7113), 'oneflow._oneflow_internal.IsMultiClient', 'oneflow._oneflow_internal.IsMultiClient', ([], {}), '()\n', (7111, 7113), False, 'import oneflow\n'), ((7171, 7210), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (7208, 7210), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((7315, 7354), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (7352, 7354), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5448, 5487), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5485, 5487), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5512, 5550), 'oneflow.python.framework.graph_build_util.lazy_mode.gard', 'graph_build_util.lazy_mode.gard', (['(False)'], {}), '(False)\n', (5543, 5550), True, 'import 
oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5672, 5711), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5709, 5711), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((6417, 6448), 'oneflow.python.framework.session_context.GetDefaultSession', 'session_ctx.GetDefaultSession', ([], {}), '()\n', (6446, 6448), True, 'import oneflow.python.framework.session_context as session_ctx\n'), ((6651, 6674), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (6672, 6674), False, 'import oneflow\n'), ((6705, 6743), 'oneflow.python.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (6736, 6743), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((7539, 7562), 'oneflow.experimental.nn.Conv2d', 'flow.nn.Conv2d', (['(1)', '(1)', '(5)'], {}), '(1, 1, 5)\n', (7553, 7562), True, 'import oneflow.experimental as flow\n'), ((7619, 7642), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (7640, 7642), False, 'import oneflow\n'), ((7673, 7711), 'oneflow.python.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (7704, 7711), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((8456, 8477), 'oneflow.experimental.nn.Linear', 'flow.nn.Linear', (['(36)', '(4)'], {}), '(36, 4)\n', (8470, 8477), True, 'import oneflow.experimental as flow\n'), ((8643, 8666), 'oneflow.current_scope', 'oneflow.current_scope', ([], {}), '()\n', (8664, 8666), False, 'import oneflow\n'), ((8697, 8735), 'oneflow.python.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (['scope'], {}), '(scope)\n', (8728, 8735), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((9727, 9793), 
'oneflow.python.framework.graph_build_util.scope_to_proto', 'graph_build_util.scope_to_proto', (["self._buffers['dummy_buff'].scope"], {}), "(self._buffers['dummy_buff'].scope)\n", (9758, 9793), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((5590, 5629), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (5627, 5629), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((6092, 6131), 'oneflow.python.framework.graph_build_util.lazy_mode.is_enabled', 'graph_build_util.lazy_mode.is_enabled', ([], {}), '()\n', (6129, 6131), True, 'import oneflow.python.framework.graph_build_util as graph_build_util\n'), ((6926, 6991), 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', 'oneflow._oneflow_internal.JobBuildAndInferCtx_GetCurrentJobName', ([], {}), '()\n', (6989, 6991), False, 'import oneflow\n'), ((8550, 8567), 'oneflow.experimental.Tensor', 'flow.Tensor', (['(1)', '(4)'], {}), '(1, 4)\n', (8561, 8567), True, 'import oneflow.experimental as flow\n')] |
# Minimal eager-mode smoke test: add two one-element tensors and print.
import oneflow as flow

flow.enable_eager_execution()

lhs = flow.tensor([0])
rhs = flow.tensor([1])
result = lhs + rhs
print(result)
| [
"oneflow.enable_eager_execution",
"oneflow.tensor"
] | [((24, 53), 'oneflow.enable_eager_execution', 'flow.enable_eager_execution', ([], {}), '()\n', (51, 53), True, 'import oneflow as flow\n'), ((59, 75), 'oneflow.tensor', 'flow.tensor', (['[0]'], {}), '([0])\n', (70, 75), True, 'import oneflow as flow\n'), ((80, 96), 'oneflow.tensor', 'flow.tensor', (['[1]'], {}), '([1])\n', (91, 96), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from typing import Optional
import numpy as np
import oneflow as flow
from oneflow.framework.tensor import register_tensor_op
from oneflow.nn.module import Module
def nonzero_op(input, as_tuple=False):
    """Return the indices of the non-zero elements of ``input``.

    Args:
        input: the tensor to search.
        as_tuple: when False (default) return one int64 tensor of shape
            (n, ndim); when True return a tuple of ``ndim`` 1-D index
            tensors, one per dimension.

    Returns:
        A single index tensor, or a tuple of per-dimension index tensors.
    """
    # a 0-d tensor has no dimension to index; promote it so the tuple
    # form can still return one (possibly empty) index tensor
    if as_tuple and not input.ndim:
        input = input.unsqueeze(0)
    res, size = flow._C.argwhere(input, dtype=flow.int64)
    # argwhere returns a padded buffer plus the number of valid rows;
    # slice off the padding
    num_valid = int(size.numpy())
    res = flow.slice(res, slice_tup_list=[[0, num_valid, 1]])
    if not as_tuple:
        return res
    # FIX: the original transposed `res` once per dimension inside the
    # comprehension; transpose a single time and index the result instead
    transposed = flow._C.transpose(res, [1, 0])
    return tuple(transposed[d] for d in range(res.shape[1]))


if __name__ == "__main__":
    import doctest

    doctest.testmod(raise_on_error=True)
| [
"oneflow._C.argwhere",
"oneflow._C.transpose",
"oneflow.slice"
] | [((885, 926), 'oneflow._C.argwhere', 'flow._C.argwhere', (['input'], {'dtype': 'flow.int64'}), '(input, dtype=flow.int64)\n', (901, 926), True, 'import oneflow as flow\n'), ((986, 1032), 'oneflow.slice', 'flow.slice', (['res'], {'slice_tup_list': 'slice_tup_list'}), '(res, slice_tup_list=slice_tup_list)\n', (996, 1032), True, 'import oneflow as flow\n'), ((1219, 1255), 'doctest.testmod', 'doctest.testmod', ([], {'raise_on_error': '(True)'}), '(raise_on_error=True)\n', (1234, 1255), False, 'import doctest\n'), ((1072, 1102), 'oneflow._C.transpose', 'flow._C.transpose', (['res', '[1, 0]'], {}), '(res, [1, 0])\n', (1089, 1102), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow.experimental as flow
from test_util import GenArgList
def _scatter_add_numpy(src, dim, index, outshape):
output = np.zeros(outshape)
for srcidx in range(0, src.size):
outcoord = np.unravel_index(srcidx, src.shape)
outcoord = [*outcoord]
outcoord[dim] = index[np.unravel_index(srcidx, index.shape)]
output_offset = np.ravel_multi_index(outcoord, outshape)
output[np.unravel_index(output_offset, outshape)] += src[
np.unravel_index(srcidx, src.shape)
]
return output
def _test_gather(test_case, device):
    """flow.gather along dim 0 must match numpy's take_along_axis."""
    src = np.array([[1, 2], [3, 4]])
    idx = np.array([[0, 0], [1, 0]])
    expected = np.take_along_axis(src, idx, 0)
    dev = flow.device(device)
    actual = flow.gather(
        flow.Tensor(src, device=dev),
        flow.Tensor(idx, dtype=flow.int, device=dev),
        dim=0,
    )
    test_case.assertTrue(np.array_equal(actual.numpy(), expected))
def _test_gather_tensor_function(test_case, device):
    """Tensor.gather (method form) must match numpy along dim 1."""
    src = np.array([[1, 2], [3, 4]])
    idx = np.array([[0, 0], [1, 0]])
    expected = np.take_along_axis(src, idx, 1)
    dev = flow.device(device)
    t_src = flow.Tensor(src, device=dev)
    t_idx = flow.Tensor(idx, dtype=flow.int, device=dev)
    actual = t_src.gather(t_idx, dim=1)
    test_case.assertTrue(np.array_equal(actual.numpy(), expected))
def _test_gather_random_array(test_case, device):
    """Random-input gather must agree with numpy along dims 1, 2 and 3."""
    shape = (3, 4, 3, 5)
    src = np.random.randn(*shape)
    idx = np.random.choice(np.arange(3), size=180, replace=True).reshape(shape)
    dev = flow.device(device)
    # same check for every gatherable dimension (0 is covered elsewhere)
    for dim in (1, 2, 3):
        expected = np.take_along_axis(src, idx, dim)
        actual = flow.gather(
            flow.Tensor(src, device=dev),
            flow.Tensor(idx, dtype=flow.int, device=dev),
            dim=dim,
        )
        test_case.assertTrue(np.allclose(actual.numpy(), expected))
def _test_gather_backward(test_case, device):
    """Backward of gather must scatter-add ones back onto the input."""
    src = np.array([[1, 2], [3, 4]])
    idx = np.array([[0, 0], [1, 0]])
    expected_out = np.take_along_axis(src, idx, 0)
    expected_grad = _scatter_add_numpy(np.ones_like(expected_out), 0, idx, src.shape)
    dev = flow.device(device)
    of_input = flow.Tensor(src, requires_grad=True, device=dev)
    output = flow.gather(
        of_input, flow.Tensor(idx, dtype=flow.int, device=dev), dim=0
    )
    output.sum().backward()
    test_case.assertTrue(np.array_equal(output.numpy(), expected_out))
    test_case.assertTrue(np.array_equal(of_input.grad.numpy(), expected_grad))
@unittest.skipIf(
    not flow.unittest.env.eager_execution_enabled(),
    ".numpy() doesn't work in lazy mode",
)
class TestGather(flow.unittest.TestCase):
    """Run every gather check on every device."""

    def test_gather(test_case):
        arg_dict = OrderedDict()
        arg_dict["test_fun"] = [
            _test_gather,
            _test_gather_tensor_function,
            _test_gather_random_array,
            _test_gather_backward,
        ]
        arg_dict["device"] = ["cpu", "cuda"]
        # GenArgList yields the cartesian product of the dict's values
        for test_fun, device in GenArgList(arg_dict):
            test_fun(test_case, device)


if __name__ == "__main__":
    unittest.main()
| [
"oneflow.experimental.device",
"oneflow.experimental.unittest.env.eager_execution_enabled"
] | [((798, 816), 'numpy.zeros', 'np.zeros', (['outshape'], {}), '(outshape)\n', (806, 816), True, 'import numpy as np\n'), ((1268, 1294), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1276, 1294), True, 'import numpy as np\n'), ((1307, 1333), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (1315, 1333), True, 'import numpy as np\n'), ((1347, 1382), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', '(0)'], {}), '(input, index, 0)\n', (1365, 1382), True, 'import numpy as np\n'), ((1690, 1716), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (1698, 1716), True, 'import numpy as np\n'), ((1729, 1755), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (1737, 1755), True, 'import numpy as np\n'), ((1769, 1804), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', '(1)'], {}), '(input, index, 1)\n', (1787, 1804), True, 'import numpy as np\n'), ((2108, 2135), 'numpy.random.randn', 'np.random.randn', (['(3)', '(4)', '(3)', '(5)'], {}), '(3, 4, 3, 5)\n', (2123, 2135), True, 'import numpy as np\n'), ((2238, 2273), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', '(1)'], {}), '(input, index, 1)\n', (2256, 2273), True, 'import numpy as np\n'), ((2526, 2561), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', '(2)'], {}), '(input, index, 2)\n', (2544, 2561), True, 'import numpy as np\n'), ((2817, 2852), 'numpy.take_along_axis', 'np.take_along_axis', (['input', 'index', '(3)'], {}), '(input, index, 3)\n', (2835, 2852), True, 'import numpy as np\n'), ((3153, 3179), 'numpy.array', 'np.array', (['[[1, 2], [3, 4]]'], {}), '([[1, 2], [3, 4]])\n', (3161, 3179), True, 'import numpy as np\n'), ((3192, 3218), 'numpy.array', 'np.array', (['[[0, 0], [1, 0]]'], {}), '([[0, 0], [1, 0]])\n', (3200, 3218), True, 'import numpy as np\n'), ((3232, 3267), 'numpy.take_along_axis', 'np.take_along_axis', 
(['input', 'index', '(0)'], {}), '(input, index, 0)\n', (3250, 3267), True, 'import numpy as np\n'), ((4306, 4321), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4319, 4321), False, 'import unittest\n'), ((874, 909), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'src.shape'], {}), '(srcidx, src.shape)\n', (890, 909), True, 'import numpy as np\n'), ((1034, 1074), 'numpy.ravel_multi_index', 'np.ravel_multi_index', (['outcoord', 'outshape'], {}), '(outcoord, outshape)\n', (1054, 1074), True, 'import numpy as np\n'), ((3301, 3321), 'numpy.ones_like', 'np.ones_like', (['np_out'], {}), '(np_out)\n', (3313, 3321), True, 'import numpy as np\n'), ((3948, 3961), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (3959, 3961), False, 'from collections import OrderedDict\n'), ((4211, 4231), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4221, 4231), False, 'from test_util import GenArgList\n'), ((3766, 3809), 'oneflow.experimental.unittest.env.eager_execution_enabled', 'flow.unittest.env.eager_execution_enabled', ([], {}), '()\n', (3807, 3809), True, 'import oneflow.experimental as flow\n'), ((971, 1008), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'index.shape'], {}), '(srcidx, index.shape)\n', (987, 1008), True, 'import numpy as np\n'), ((1090, 1131), 'numpy.unravel_index', 'np.unravel_index', (['output_offset', 'outshape'], {}), '(output_offset, outshape)\n', (1106, 1131), True, 'import numpy as np\n'), ((1153, 1188), 'numpy.unravel_index', 'np.unravel_index', (['srcidx', 'src.shape'], {}), '(srcidx, src.shape)\n', (1169, 1188), True, 'import numpy as np\n'), ((1843, 1862), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1854, 1862), True, 'import oneflow.experimental as flow\n'), ((1918, 1937), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1929, 1937), True, 'import oneflow.experimental as flow\n'), ((3408, 3427), 'oneflow.experimental.device', 
'flow.device', (['device'], {}), '(device)\n', (3419, 3427), True, 'import oneflow.experimental as flow\n'), ((1443, 1462), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1454, 1462), True, 'import oneflow.experimental as flow\n'), ((1515, 1534), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (1526, 1534), True, 'import oneflow.experimental as flow\n'), ((2165, 2177), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (2174, 2177), True, 'import numpy as np\n'), ((2334, 2353), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2345, 2353), True, 'import oneflow.experimental as flow\n'), ((2406, 2425), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2417, 2425), True, 'import oneflow.experimental as flow\n'), ((2623, 2642), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2634, 2642), True, 'import oneflow.experimental as flow\n'), ((2695, 2714), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2706, 2714), True, 'import oneflow.experimental as flow\n'), ((2914, 2933), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2925, 2933), True, 'import oneflow.experimental as flow\n'), ((2986, 3005), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (2997, 3005), True, 'import oneflow.experimental as flow\n'), ((3515, 3534), 'oneflow.experimental.device', 'flow.device', (['device'], {}), '(device)\n', (3526, 3534), True, 'import oneflow.experimental as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import oneflow as flow
from oneflow.python.nn.module import Module
from oneflow.python.oneflow_export import oneflow_export, experimental_api
from oneflow.python.framework.tensor import register_tensor_op
@oneflow_export("nn.Flatten")
@experimental_api
class Flatten(Module):
    """Flatten a contiguous range of dimensions into a single one.
    For use with: nn.Sequential.

    Args:
        start_dim: first dim to flatten (default = 1).
        end_dim: last dim to flatten (default = -1).

    For example:

    .. code-block:: python

        import oneflow as flow
        input = flow.Tensor(32, 1, 5, 5)
        m = flow.nn.Flatten()
        output = m(input)
        output.size()
        # out flow.Size([32, 25])
    """

    def __init__(self, start_dim: int = 1, end_dim: int = -1) -> None:
        super().__init__()
        # build the user op once; forward only has to invoke it
        builder = flow.builtin_op("flatten").Input("in").Output("out")
        builder = builder.Attr("start_dim", start_dim).Attr("end_dim", end_dim)
        self.op_ = builder.Build()

    def forward(self, input):
        # the built op returns a tuple of outputs; "out" is the only one
        return self.op_(input)[0]
@oneflow_export("flatten")
@register_tensor_op("flatten")
@experimental_api
def _flow_flatten(input, start_dim: int = 0, end_dim: int = -1):
    """Flatten a contiguous range of dims of ``input`` into one dimension.

    Args:
        start_dim: first dim to flatten (default = 0).
        end_dim: last dim to flatten (default = -1).

    For example:

    .. code-block:: python

        import oneflow as flow
        input = flow.Tensor(32, 1, 5, 5)
        output = input.flatten(start_dim=1)
        # output = flow.flatten(input, start_dim=1)
        output.size()
        # out flow.Size([32, 25])
    """
    # delegate to the module form so both APIs share one implementation
    flatten_module = Flatten(start_dim=start_dim, end_dim=end_dim)
    return flatten_module(input)
| [
"oneflow.python.oneflow_export.oneflow_export",
"oneflow.python.framework.tensor.register_tensor_op",
"oneflow.builtin_op"
] | [((798, 826), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.Flatten"""'], {}), "('nn.Flatten')\n", (812, 826), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1711, 1736), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""flatten"""'], {}), "('flatten')\n", (1725, 1736), False, 'from oneflow.python.oneflow_export import oneflow_export, experimental_api\n'), ((1738, 1767), 'oneflow.python.framework.tensor.register_tensor_op', 'register_tensor_op', (['"""flatten"""'], {}), "('flatten')\n", (1756, 1767), False, 'from oneflow.python.framework.tensor import register_tensor_op\n'), ((1453, 1479), 'oneflow.builtin_op', 'flow.builtin_op', (['"""flatten"""'], {}), "('flatten')\n", (1468, 1479), True, 'import oneflow as flow\n')] |
import os
import time
import argparse
import numpy as np
import glob
import imageio
import matplotlib
matplotlib.use("agg")
import matplotlib.pyplot as plt
import oneflow as flow
from utils import (
make_dirs,
load_mnist,
download_mnist,
to_numpy,
to_tensor,
save_to_gif,
save_images,
)
from models import (
Generator,
Discriminator,
GeneratorTrainGraph,
DiscriminatorTrainGraph,
GeneratorEvalGraph,
)
def _parse_args():
parser = argparse.ArgumentParser(description="oneflow DCGAN")
parser.add_argument("--path", type=str, default="./dcgan", required=False)
parser.add_argument("-e", "--epoch_num", type=int, default=100, required=False)
parser.add_argument(
"-lr", "--learning_rate", type=float, default=1e-4, required=False
)
parser.add_argument(
"--load",
type=str,
default="",
required=False,
help="the path to continue training the model",
)
parser.add_argument(
"--data_dir",
type=str,
default="./data/mnist",
required=False,
help="the path to dataset",
)
parser.add_argument("--batch_size", type=int, default=256, required=False)
parser.add_argument("--label_smooth", type=float, default=0.15, required=False)
parser.add_argument(
"--save",
type=bool,
default=True,
required=False,
help="whether to save train_images, train_checkpoint and train_loss",
)
parser.add_argument(
"--no_cuda", action="store_true", default=False, help="disables CUDA training"
)
return parser.parse_args()
# DCGAN trainer: owns the generator/discriminator pair, the alternating
# optimization loop, and periodic evaluation / checkpointing.
class DCGAN(flow.nn.Module):
# Cache hyper-parameters from the parsed CLI namespace and create the
# on-disk layout (checkpoint dir plus train/val image dirs).
def __init__(self, args):
super().__init__()
self.lr = args.learning_rate
self.z_dim = 100
self.eval_interval = 100
self.eval_size = 16
self.data_dir = args.data_dir
self.device = "cpu" if args.no_cuda else "cuda"
# evaluate generator based on fixed noise during training
self.fixed_z = to_tensor(
np.random.normal(0, 1, size=(self.eval_size, self.z_dim)), False
).to(self.device)
self.label_smooth = args.label_smooth
self.G_loss = []
self.D_loss = []
self.path = args.path
self.batch_size = args.batch_size
self.checkpoint_path = os.path.join(self.path, "checkpoint")
self.images_path = os.path.join(self.path, "images")
self.train_images_path = os.path.join(self.images_path, "train_images")
self.val_images_path = os.path.join(self.images_path, "val_images")
make_dirs(self.checkpoint_path, self.train_images_path, self.val_images_path)
# Full training loop: `epochs` passes over MNIST, alternating one
# discriminator step and one generator step per batch; when `save` is
# True, persist checkpoints, sample-image GIFs and the loss curves.
def train(self, epochs=1, save=True):
# init dataset
x, _ = load_mnist(self.data_dir)
batch_num = len(x) // self.batch_size
# real (1) / fake (0) target labels, one per batch element
label1 = to_tensor(np.ones(self.batch_size), False, dtype=flow.float32).to(
self.device
)
label0 = flow.Tensor((np.zeros(self.batch_size)), dtype=flow.float32).to(
self.device
)
# one-sided label smoothing: real targets become 1 - label_smooth
if self.label_smooth != 0:
label1_smooth = (label1 - self.label_smooth).to(self.device)
# init training include optimizer, model, loss
self.generator = Generator(self.z_dim).to(self.device)
self.discriminator = Discriminator().to(self.device)
# NOTE(review): reads the module-global `args` (not a parameter) to
# resume from a checkpoint -- consider threading it through explicitly.
if args.load != "":
self.generator.load_state_dict(flow.load(args.load))
self.discriminator.load_state_dict(flow.load(args.load))
self.optimizerG = flow.optim.SGD(self.generator.parameters(), lr=self.lr)
self.optimizerD = flow.optim.SGD(self.discriminator.parameters(), lr=self.lr)
self.of_cross_entropy = flow.nn.BCELoss().to(self.device)
for epoch_idx in range(epochs):
self.generator.train()
self.discriminator.train()
start = time.time()
for batch_idx in range(batch_num):
images = to_tensor(
x[
batch_idx * self.batch_size : (batch_idx + 1) * self.batch_size
].astype(np.float32)
).to(self.device)
# one-side label smooth
if self.label_smooth != 0:
(
d_loss,
d_loss_fake,
d_loss_real,
D_x,
D_gz1,
) = self.train_discriminator(images, label1_smooth, label0)
else:
(
d_loss,
d_loss_fake,
d_loss_real,
D_x,
D_gz1,
) = self.train_discriminator(images, label1, label0)
g_loss, g_out, D_gz2 = self.train_generator(label1)
# record the loss curves every 100 batches
if (batch_idx + 1) % 100 == 0:
self.G_loss.append(g_loss)
self.D_loss.append(d_loss)
# periodic progress report plus a grid of generated samples
if (batch_idx + 1) % self.eval_interval == 0:
print(
"{}th epoch, {}th batch, d_fakeloss:{:>8.10f}, d_realloss:{:>8.10f}, d_loss:{:>8.10f}, g_loss:{:>8.10f}, D_x:{:>8.10f}, D_Gz:{:>8.10f} / {:>8.10f}".format(
epoch_idx + 1,
batch_idx + 1,
d_loss_fake,
d_loss_real,
d_loss,
g_loss,
D_x,
D_gz1,
D_gz2,
)
)
# save images based on .train()
save_images(
g_out,
self.eval_size,
os.path.join(
self.train_images_path, "fakeimage_{:02d}.png".format(epoch_idx)
),
)
# save images based on .eval()
self._eval_generator_and_save_images(epoch_idx + 1)
print(
"Time for epoch {} is {} sec.".format(
epoch_idx + 1, time.time() - start
)
)
if save:
flow.save(
self.generator.state_dict(),
os.path.join(self.checkpoint_path, "g_{}".format(epoch_idx)),
)
flow.save(
self.discriminator.state_dict(),
os.path.join(self.checkpoint_path, "d_{}".format(epoch_idx)),
)
save_to_gif(self.train_images_path)
save_to_gif(self.val_images_path)
np.save(
os.path.join(self.path, "g_loss_{}.npy".format(epochs)), self.G_loss
)
np.save(
os.path.join(self.path, "d_loss_{}.npy".format(epochs)), self.D_loss
)
# One discriminator step: score real images against freshly generated
# fakes with BCE, backprop, and apply SGD.  Returns numpy copies of
# (d_loss, d_loss_fake, d_loss_real, D(x), D(G(z))).
def train_discriminator(self, images, label1, label0):
z = self.generate_noise()
g_out = self.generator(z)
# run real and fake images through D as one concatenated batch
cat = flow.cat((images, g_out), dim=0)
result = self.discriminator(cat)
d_logits = result[: images.shape[0]]
g_logits = result[images.shape[0] :]
d_loss_real = self.of_cross_entropy(d_logits, label1)
d_loss_fake = self.of_cross_entropy(g_logits, label0)
d_loss = d_loss_fake + d_loss_real
d_loss.backward()
self.optimizerD.step()
self.optimizerD.zero_grad()
return (
to_numpy(d_loss),
to_numpy(d_loss_fake),
to_numpy(d_loss_real),
to_numpy(d_logits),
to_numpy(g_logits),
)
# One generator step: push generated images towards the "real" label.
# Returns numpy copies of (g_loss, generated images, D(G(z))).
def train_generator(self, label1):
z = self.generate_noise()
g_out = self.generator(z)
g_logits = self.discriminator(g_out)
g_loss = self.of_cross_entropy(g_logits, label1)
g_loss.backward()
self.optimizerG.step()
self.optimizerG.zero_grad()
return (to_numpy(g_loss), to_numpy(g_out, False), to_numpy(g_logits))
# Sample a fresh batch of standard-normal latent vectors on self.device.
def generate_noise(self):
return to_tensor(
np.random.normal(0, 1, size=(self.batch_size, self.z_dim)), False
).to(self.device)
# Generate images from the fixed evaluation noise and write one grid per
# epoch into the validation image directory.
def _eval_generator_and_save_images(self, epoch_idx):
results = to_numpy(self._eval_generator(), False)
save_images(
results,
self.eval_size,
os.path.join(self.val_images_path, "image_{:02d}.png".format(epoch_idx)),
)
# Run the generator in eval mode on the fixed noise, without autograd.
def _eval_generator(self):
self.generator.eval()
with flow.no_grad():
g_out = self.generator(self.fixed_z)
return g_out
def main(args):
    """Build a DCGAN from parsed CLI args and run training."""
    trainer = DCGAN(args)
    trainer.train(args.epoch_num, args.save)
if __name__ == "__main__":
    # Script entry point: parse CLI arguments and launch training.
    args = _parse_args()
    main(args)
| [
"oneflow.cat",
"oneflow.load",
"oneflow.nn.BCELoss",
"oneflow.no_grad"
] | [((103, 124), 'matplotlib.use', 'matplotlib.use', (['"""agg"""'], {}), "('agg')\n", (117, 124), False, 'import matplotlib\n'), ((486, 538), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""oneflow DCGAN"""'}), "(description='oneflow DCGAN')\n", (509, 538), False, 'import argparse\n'), ((2352, 2389), 'os.path.join', 'os.path.join', (['self.path', '"""checkpoint"""'], {}), "(self.path, 'checkpoint')\n", (2364, 2389), False, 'import os\n'), ((2417, 2450), 'os.path.join', 'os.path.join', (['self.path', '"""images"""'], {}), "(self.path, 'images')\n", (2429, 2450), False, 'import os\n'), ((2484, 2530), 'os.path.join', 'os.path.join', (['self.images_path', '"""train_images"""'], {}), "(self.images_path, 'train_images')\n", (2496, 2530), False, 'import os\n'), ((2562, 2606), 'os.path.join', 'os.path.join', (['self.images_path', '"""val_images"""'], {}), "(self.images_path, 'val_images')\n", (2574, 2606), False, 'import os\n'), ((2615, 2692), 'utils.make_dirs', 'make_dirs', (['self.checkpoint_path', 'self.train_images_path', 'self.val_images_path'], {}), '(self.checkpoint_path, self.train_images_path, self.val_images_path)\n', (2624, 2692), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((2774, 2799), 'utils.load_mnist', 'load_mnist', (['self.data_dir'], {}), '(self.data_dir)\n', (2784, 2799), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7050, 7082), 'oneflow.cat', 'flow.cat', (['(images, g_out)'], {'dim': '(0)'}), '((images, g_out), dim=0)\n', (7058, 7082), True, 'import oneflow as flow\n'), ((3901, 3912), 'time.time', 'time.time', ([], {}), '()\n', (3910, 3912), False, 'import time\n'), ((6585, 6620), 'utils.save_to_gif', 'save_to_gif', (['self.train_images_path'], {}), '(self.train_images_path)\n', (6596, 6620), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, 
save_to_gif, save_images\n'), ((6633, 6666), 'utils.save_to_gif', 'save_to_gif', (['self.val_images_path'], {}), '(self.val_images_path)\n', (6644, 6666), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7509, 7525), 'utils.to_numpy', 'to_numpy', (['d_loss'], {}), '(d_loss)\n', (7517, 7525), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7539, 7560), 'utils.to_numpy', 'to_numpy', (['d_loss_fake'], {}), '(d_loss_fake)\n', (7547, 7560), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7574, 7595), 'utils.to_numpy', 'to_numpy', (['d_loss_real'], {}), '(d_loss_real)\n', (7582, 7595), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7609, 7627), 'utils.to_numpy', 'to_numpy', (['d_logits'], {}), '(d_logits)\n', (7617, 7627), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7641, 7659), 'utils.to_numpy', 'to_numpy', (['g_logits'], {}), '(g_logits)\n', (7649, 7659), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((7991, 8007), 'utils.to_numpy', 'to_numpy', (['g_loss'], {}), '(g_loss)\n', (7999, 8007), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((8009, 8031), 'utils.to_numpy', 'to_numpy', (['g_out', '(False)'], {}), '(g_out, False)\n', (8017, 8031), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((8033, 8051), 'utils.to_numpy', 'to_numpy', (['g_logits'], {}), '(g_logits)\n', (8041, 8051), False, 'from utils import make_dirs, load_mnist, download_mnist, to_numpy, to_tensor, save_to_gif, save_images\n'), ((8572, 8586), 
'oneflow.no_grad', 'flow.no_grad', ([], {}), '()\n', (8584, 8586), True, 'import oneflow as flow\n'), ((3269, 3290), 'models.Generator', 'Generator', (['self.z_dim'], {}), '(self.z_dim)\n', (3278, 3290), False, 'from models import Generator, Discriminator, GeneratorTrainGraph, DiscriminatorTrainGraph, GeneratorEvalGraph\n'), ((3336, 3351), 'models.Discriminator', 'Discriminator', ([], {}), '()\n', (3349, 3351), False, 'from models import Generator, Discriminator, GeneratorTrainGraph, DiscriminatorTrainGraph, GeneratorEvalGraph\n'), ((3439, 3459), 'oneflow.load', 'flow.load', (['args.load'], {}), '(args.load)\n', (3448, 3459), True, 'import oneflow as flow\n'), ((3508, 3528), 'oneflow.load', 'flow.load', (['args.load'], {}), '(args.load)\n', (3517, 3528), True, 'import oneflow as flow\n'), ((3732, 3749), 'oneflow.nn.BCELoss', 'flow.nn.BCELoss', ([], {}), '()\n', (3747, 3749), True, 'import oneflow as flow\n'), ((2060, 2117), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(self.eval_size, self.z_dim)'}), '(0, 1, size=(self.eval_size, self.z_dim))\n', (2076, 2117), True, 'import numpy as np\n'), ((2873, 2897), 'numpy.ones', 'np.ones', (['self.batch_size'], {}), '(self.batch_size)\n', (2880, 2897), True, 'import numpy as np\n'), ((2994, 3019), 'numpy.zeros', 'np.zeros', (['self.batch_size'], {}), '(self.batch_size)\n', (3002, 3019), True, 'import numpy as np\n'), ((8122, 8180), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)'], {'size': '(self.batch_size, self.z_dim)'}), '(0, 1, size=(self.batch_size, self.z_dim))\n', (8138, 8180), True, 'import numpy as np\n'), ((6178, 6189), 'time.time', 'time.time', ([], {}), '()\n', (6187, 6189), False, 'import time\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import unittest
from collections import OrderedDict
import numpy as np
from test_util import (
GenArgDict,
test_global_storage,
type_name_to_flow_type,
type_name_to_np_type,
)
import oneflow.compatible.single_client.unittest
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
def _masked_fill_np_fw_bw(x, mask, y_diff, type_name, value=0):
brocadcast_shape = np.broadcast(x, mask).shape
brocadcasted_x = np.broadcast_to(x, brocadcast_shape).astype(type_name)
brocadcasted_mask = np.broadcast_to(mask, brocadcast_shape)
masked_x = np.ma.array(brocadcasted_x, mask=brocadcasted_mask, fill_value=value)
y = masked_x.filled()
zero_like = np.zeros_like(y_diff)
filted_y_diff = np.where(brocadcasted_mask, zero_like, y_diff)
extended_axes_num = len(y_diff.shape) - len(x.shape)
extended_axes = tuple(range(extended_axes_num))
mid_diff = np.add.reduce(filted_y_diff, axis=extended_axes)
diff_axes = list()
for i in range(len(x.shape)):
if x.shape[i] != y_diff.shape[i + extended_axes_num]:
assert x.shape[i] == 1 and y_diff.shape[i + extended_axes_num] != 1
diff_axes.append(i)
if len(diff_axes) != 0:
x_diff = np.add.reduce(mid_diff, axis=tuple(diff_axes), keepdims=True)
else:
x_diff = mid_diff
return (y, x_diff)
def _test_masked_fill_fw_bw(test_case, device, x_shape, mask_shape, type_name, value=0):
    """Run masked_fill forward + backward in a oneflow train graph and check it
    against the NumPy reference `_masked_fill_np_fw_bw` within a dtype-dependent
    tolerance."""
    flow.clear_default_session()
    func_config = flow.FunctionConfig()
    # float16 graphs keep float32 at the I/O boundary and cast inside the job.
    if type_name == "float16":
        flow_type = flow.float
        np_type = np.float32
    else:
        flow_type = type_name_to_flow_type[type_name]
        np_type = type_name_to_np_type[type_name]
    func_config.default_data_type(flow_type)
    @flow.global_function(type="train", function_config=func_config)
    def test_masked_fill_fw_bw_job(
        x: oft.Numpy.Placeholder(x_shape, dtype=flow_type),
        mask: oft.Numpy.Placeholder(mask_shape, dtype=flow_type),
    ):
        with flow.scope.placement(device, "0:0"):
            # A zero variable added to x makes x a trainable path so the
            # backward pass (x_diff) is actually computed.
            y = flow.get_variable(
                name="vx",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            x += flow.cast(y, flow_type)
            # masked_fill expects an int8 mask.
            mask = flow.cast(mask, dtype=flow.int8)
            if type_name == "float16":
                out = flow.cast(
                    flow.masked_fill(flow.cast(x, flow.float16), mask, value),
                    flow.float,
                )
            else:
                out = flow.masked_fill(x, mask, value)
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [0.0001]), momentum=0
            ).minimize(out)
        # Capture forward values and gradients for the host-side comparison.
        flow.watch(x, test_global_storage.Setter("x"))
        flow.watch_diff(x, test_global_storage.Setter("x_diff"))
        flow.watch(out, test_global_storage.Setter("out"))
        flow.watch_diff(out, test_global_storage.Setter("out_diff"))
        return out
    x = np.random.randint(low=0, high=100, size=x_shape)
    mask = np.random.randint(low=0, high=2, size=mask_shape)
    test_masked_fill_fw_bw_job(x.astype(np_type), mask.astype(np_type)).get()
    out_diff = test_global_storage.Get("out_diff")
    (np_out, np_x_diff) = _masked_fill_np_fw_bw(x, mask, out_diff, np_type, value)
    # fp16 loses precision, so it gets a looser tolerance.
    if type_name == "float16":
        tolerance = 0.001
    else:
        tolerance = 1e-05
    test_case.assertTrue(
        np.allclose(
            np_out, test_global_storage.Get("out"), rtol=tolerance, atol=tolerance
        )
    )
    test_case.assertTrue(
        np.allclose(
            np_x_diff, test_global_storage.Get("x_diff"), rtol=tolerance, atol=tolerance
        )
    )
@flow.unittest.skip_unless_1n1d()
class TestMaskedFill(flow.unittest.TestCase):
    """Parameter sweep over dtypes, devices, broadcast shapes, and fill values."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_masked_fill_fw_bw(test_case):
        arg_dict = OrderedDict(
            [
                (
                    "type_name",
                    ["float32", "float16", "double", "int8", "int32", "int64"],
                ),
                ("device", ["gpu", "cpu"]),
                ("x_shape", [(2, 2, 4), (2, 1, 4), (2, 2, 3, 2, 4)]),
                ("mask_shape", [(2, 1, 2, 4)]),
                ("value", [2.5, -5.5]),
            ]
        )
        for arg in GenArgDict(arg_dict):
            # float16 kernels are not available on CPU.
            if arg["type_name"] == "float16" and arg["device"] == "cpu":
                continue
            _test_masked_fill_fw_bw(test_case, **arg)
if __name__ == "__main__":
    # Run the test suite when invoked directly.
    unittest.main()
| [
"oneflow.compatible.single_client.scope.placement",
"oneflow.compatible.single_client.zeros_initializer",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_client.cast",
"oneflow.compatible.single_client.optimizer.PiecewiseCon... | [((4417, 4449), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4447, 4449), True, 'from oneflow.compatible import single_client as flow\n'), ((1173, 1212), 'numpy.broadcast_to', 'np.broadcast_to', (['mask', 'brocadcast_shape'], {}), '(mask, brocadcast_shape)\n', (1188, 1212), True, 'import numpy as np\n'), ((1228, 1297), 'numpy.ma.array', 'np.ma.array', (['brocadcasted_x'], {'mask': 'brocadcasted_mask', 'fill_value': 'value'}), '(brocadcasted_x, mask=brocadcasted_mask, fill_value=value)\n', (1239, 1297), True, 'import numpy as np\n'), ((1340, 1361), 'numpy.zeros_like', 'np.zeros_like', (['y_diff'], {}), '(y_diff)\n', (1353, 1361), True, 'import numpy as np\n'), ((1382, 1428), 'numpy.where', 'np.where', (['brocadcasted_mask', 'zero_like', 'y_diff'], {}), '(brocadcasted_mask, zero_like, y_diff)\n', (1390, 1428), True, 'import numpy as np\n'), ((1553, 1601), 'numpy.add.reduce', 'np.add.reduce', (['filted_y_diff'], {'axis': 'extended_axes'}), '(filted_y_diff, axis=extended_axes)\n', (1566, 1601), True, 'import numpy as np\n'), ((2094, 2122), 'oneflow.compatible.single_client.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (2120, 2122), True, 'from oneflow.compatible import single_client as flow\n'), ((2141, 2162), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (2160, 2162), True, 'from oneflow.compatible import single_client as flow\n'), ((2419, 2482), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2439, 2482), True, 'from oneflow.compatible import single_client as flow\n'), ((3701, 3749), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(100)', 'size': 'x_shape'}), '(low=0, high=100, size=x_shape)\n', 
(3718, 3749), True, 'import numpy as np\n'), ((3761, 3810), 'numpy.random.randint', 'np.random.randint', ([], {'low': '(0)', 'high': '(2)', 'size': 'mask_shape'}), '(low=0, high=2, size=mask_shape)\n', (3778, 3810), True, 'import numpy as np\n'), ((3904, 3939), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""out_diff"""'], {}), "('out_diff')\n", (3927, 3939), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((5254, 5269), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5267, 5269), False, 'import unittest\n'), ((1045, 1066), 'numpy.broadcast', 'np.broadcast', (['x', 'mask'], {}), '(x, mask)\n', (1057, 1066), True, 'import numpy as np\n'), ((4638, 4651), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4649, 4651), False, 'from collections import OrderedDict\n'), ((5047, 5067), 'test_util.GenArgDict', 'GenArgDict', (['arg_dict'], {}), '(arg_dict)\n', (5057, 5067), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((4517, 4551), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (4526, 4551), False, 'import os\n'), ((1094, 1130), 'numpy.broadcast_to', 'np.broadcast_to', (['x', 'brocadcast_shape'], {}), '(x, brocadcast_shape)\n', (1109, 1130), True, 'import numpy as np\n'), ((2530, 2577), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['x_shape'], {'dtype': 'flow_type'}), '(x_shape, dtype=flow_type)\n', (2551, 2577), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2593, 2643), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['mask_shape'], {'dtype': 'flow_type'}), '(mask_shape, dtype=flow_type)\n', (2614, 2643), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2665, 2700), 'oneflow.compatible.single_client.scope.placement', 
'flow.scope.placement', (['device', '"""0:0"""'], {}), "(device, '0:0')\n", (2685, 2700), True, 'from oneflow.compatible import single_client as flow\n'), ((2911, 2934), 'oneflow.compatible.single_client.cast', 'flow.cast', (['y', 'flow_type'], {}), '(y, flow_type)\n', (2920, 2934), True, 'from oneflow.compatible import single_client as flow\n'), ((2954, 2986), 'oneflow.compatible.single_client.cast', 'flow.cast', (['mask'], {'dtype': 'flow.int8'}), '(mask, dtype=flow.int8)\n', (2963, 2986), True, 'from oneflow.compatible import single_client as flow\n'), ((4183, 4213), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""out"""'], {}), "('out')\n", (4206, 4213), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((4332, 4365), 'test_util.test_global_storage.Get', 'test_global_storage.Get', (['"""x_diff"""'], {}), "('x_diff')\n", (4355, 4365), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3228, 3260), 'oneflow.compatible.single_client.masked_fill', 'flow.masked_fill', (['x', 'mask', 'value'], {}), '(x, mask, value)\n', (3244, 3260), True, 'from oneflow.compatible import single_client as flow\n'), ((3431, 3462), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""x"""'], {}), "('x')\n", (3457, 3462), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3495, 3531), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""x_diff"""'], {}), "('x_diff')\n", (3521, 3531), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((3561, 3594), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""out"""'], {}), "('out')\n", (3587, 3594), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, 
type_name_to_np_type\n'), ((3629, 3667), 'test_util.test_global_storage.Setter', 'test_global_storage.Setter', (['"""out_diff"""'], {}), "('out_diff')\n", (3655, 3667), False, 'from test_util import GenArgDict, test_global_storage, type_name_to_flow_type, type_name_to_np_type\n'), ((2854, 2878), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (2876, 2878), True, 'from oneflow.compatible import single_client as flow\n'), ((3096, 3122), 'oneflow.compatible.single_client.cast', 'flow.cast', (['x', 'flow.float16'], {}), '(x, flow.float16)\n', (3105, 3122), True, 'from oneflow.compatible import single_client as flow\n'), ((3309, 3364), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.0001]'], {}), '([], [0.0001])\n', (3350, 3364), True, 'from oneflow.compatible import single_client as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import collections
import os
import sys
import random
from typing import Union, Optional, Sequence, Tuple, List
import oneflow as flow
import oneflow.core.operator.op_conf_pb2 as op_conf_util
import oneflow.core.register.logical_blob_id_pb2 as logical_blob_id_util
import oneflow.python.framework.interpret_util as interpret_util
import oneflow.python.framework.id_util as id_util
import oneflow.python.framework.module as module_util
import oneflow.python.framework.remote_blob as remote_blob_util
import oneflow.python.framework.distribute as distribute_util
from oneflow.python.oneflow_export import oneflow_export, stable_api
import oneflow._oneflow_internal
IntPair = Tuple[int, int]
def calc_same_padding(input_size, filter_size, dilation_rate, stride):
    """Total padding needed along one dimension for SAME-style convolution.

    The output extent is ceil(input_size / stride); the returned value is the
    head + tail padding that makes the dilated filter fit that output.
    """
    effective_filter = (filter_size - 1) * dilation_rate + 1
    out_size = (input_size + stride - 1) // stride
    needed = (out_size - 1) * stride + effective_filter - input_size
    return max(0, int(needed))
def get_dhw_offset(channel_pos):
    """Index of the first spatial (D/H/W) axis for the given channel layout."""
    # NCHW-style layouts put channels right after batch, so spatial dims start at 2.
    return 2 if channel_pos == "channels_first" else 1
def check_conv_cudnn_padding_support(
    input_size, pad, filter_size, dilation_rate, stride, is_dynamic
):
    """Whether cuDNN's symmetric padding can reproduce the [head, tail] pair.

    cuDNN pads both sides equally. Symmetric pads always work; an asymmetric
    pair is only accepted when head = tail + 1 *and* padding head on both
    sides yields the same output extent as the requested pads. Dynamic shapes
    with asymmetric pads are rejected.
    """
    assert len(pad) == 2
    head, tail = pad
    if head == tail:
        return True
    if is_dynamic or head < tail or head - tail > 1:
        return False
    effective_filter = (filter_size - 1) * dilation_rate + 1
    size_symmetric = (input_size + 2 * head - effective_filter + stride) // stride
    size_requested = (input_size + head + tail - effective_filter + stride) // stride
    return size_symmetric == size_requested
def check_ndim_conv_cudnn_padding_support(
    inputs_shape,
    ndim_pads_list,
    kernel_sizes,
    dilations,
    strides,
    dhw_offset,
    is_dynamic,
):
    """True when every spatial dimension's padding is representable by cuDNN.

    Delegates each spatial axis to `check_conv_cudnn_padding_support`;
    short-circuits on the first unsupported dimension.
    """
    return all(
        check_conv_cudnn_padding_support(
            inputs_shape[dhw_offset + i],
            pad,
            kernel_sizes[i],
            dilations[i],
            strides[i],
            is_dynamic,
        )
        for i, pad in enumerate(ndim_pads_list)
    )
def get_ndim_pads_list(padding, dhw_offset, ndims):
    """Extract per-spatial-dim [head, tail] pads from a full per-axis spec.

    ``padding`` has one entry per input axis (int or 2-element list/tuple);
    only the ``ndims`` entries starting at ``dhw_offset`` are returned.
    Batch and channel axes must be unpadded.

    Raises:
        ValueError: if an entry is neither an int nor a list/tuple.
    """
    pads_list = []
    for i, raw in enumerate(padding):
        if isinstance(raw, int):
            pad = [raw, raw]
        elif isinstance(raw, (list, tuple)):
            assert len(raw) == 2
            pad = [raw[0], raw[1]]
        else:
            raise ValueError("padding must be list tuple or int")
        if dhw_offset <= i < dhw_offset + ndims:
            pads_list.append(pad)
        else:
            # Non-spatial axes (batch/channel) must not carry padding.
            assert pad == [0, 0]
    return pads_list
def calc_ndim_same_padding(
    input_shape, padding, kernel_sizes, dilations, strides, dhw_offset
):
    """Split SAME_LOWER/SAME_UPPER total padding into per-dim [head, tail] pairs.

    Computes the total SAME padding per spatial dimension, halves it, and
    assigns the odd extra pixel to the head (SAME_LOWER) or tail (SAME_UPPER).

    Raises:
        NotImplementedError: for any other padding mode.
    """
    ndims = len(kernel_sizes)
    totals = [
        calc_same_padding(
            input_shape[dhw_offset + i], kernel_sizes[i], dilations[i], strides[i],
        )
        for i in range(ndims)
    ]
    small = [t // 2 for t in totals]
    large = [totals[i] - small[i] for i in range(ndims)]
    mode = padding.upper()
    if mode == "SAME_LOWER":
        return [[large[i], small[i]] for i in range(ndims)]
    if mode == "SAME_UPPER":
        return [[small[i], large[i]] for i in range(ndims)]
    raise NotImplementedError
def calc_conv_padding(inputs, padding, data_format, kernel_sizes, dilations, strides):
    """Normalize a conv padding spec into (possibly pre-padded) inputs plus
    per-spatial-dim [head, tail] pads usable by the conv op.

    When the resulting pads are not representable by cuDNN, the padding is
    applied up front with an explicit pad op and zero pads are returned.
    """
    ndims = len(inputs.shape) - 2
    assert len(kernel_sizes) == ndims
    assert len(dilations) == ndims
    assert len(strides) == ndims
    is_dynamic = inputs.is_dynamic
    channel_pos = "channels_first" if data_format.startswith("NC") else "channels_last"
    dhw_offset = get_dhw_offset(channel_pos)
    ndim_pads_list = []
    if isinstance(padding, str):
        # "SAME" is an alias for "SAME_LOWER".
        padding = "SAME_LOWER" if padding.upper() == "SAME" else padding
        assert padding.upper() in ["VALID", "SAME_LOWER", "SAME_UPPER"]
        if padding.upper() == "VALID":
            return_pads_list = [[0, 0]] * ndims
            return inputs, return_pads_list
        else:
            if is_dynamic:
                # Dynamic shapes: let the dedicated same_padding op pad at
                # runtime, and report zero static pads to the conv op.
                return_pads_list = [[0, 0]] * ndims
                inputs = flow.same_padding(
                    inputs,
                    padding.lower(),
                    data_format=data_format,
                    kernel_size=kernel_sizes,
                    strides=strides,
                    dilation_rate=dilations,
                )
                return inputs, return_pads_list
            else:
                ndim_pads_list = calc_ndim_same_padding(
                    inputs.shape, padding, kernel_sizes, dilations, strides, dhw_offset
                )
                assert len(ndim_pads_list) == ndims
    elif isinstance(padding, (list, tuple)):
        # Explicit per-axis padding, one entry per input axis.
        assert len(padding) == ndims + 2
        ndim_pads_list = get_ndim_pads_list(padding, dhw_offset, ndims)
        assert len(ndim_pads_list) == ndims
    else:
        raise ValueError("padding must be str or a list.")
    cudnn_padding_support = check_ndim_conv_cudnn_padding_support(
        inputs.shape,
        ndim_pads_list,
        kernel_sizes,
        dilations,
        strides,
        dhw_offset,
        is_dynamic,
    )
    if cudnn_padding_support:
        return inputs, ndim_pads_list
    else:
        # cuDNN cannot express these pads: pad explicitly before the conv and
        # tell the conv op there is no remaining padding.
        pad_op_list = [[0, 0]] * (ndims + 2)
        for i in range(ndims):
            pad_op_list[dhw_offset + i] = ndim_pads_list[i]
        inputs = flow.pad(inputs, paddings=pad_op_list)
        return_pads_list = [[0, 0]] * ndims
        return inputs, return_pads_list
class ConvUtil(object):
    """Helpers for splitting blobs, used by grouped convolution on CPU."""

    @classmethod
    def split(cls, x, axis, split_num):
        """Split blob ``x`` into ``split_num`` equal slices along ``axis``."""
        chunk = x.shape[axis] // split_num
        size = [-1] * len(x.shape)
        size[axis] = chunk
        pieces = []
        for i in range(split_num):
            begin = [0] * len(x.shape)
            begin[axis] = i * chunk
            pieces.append(flow.slice(x, begin, size))
        return pieces
def conv_op(
    conv_type,
    inputs,
    filters,
    bias,
    padding_before,
    channel_pos,
    kernel_size_list,
    strides,
    dilations,
    groups,
    name,
):
    """Build and run a conv user op (`conv_type` is e.g. "conv1d"/"conv2d").

    `padding_before` holds the head pads per spatial dim; `bias` is optional
    and added as an extra input when present. Returns the output blob.
    """
    op_builder = (
        flow.user_op_builder(name if name is not None else id_util.UniqueStr("Conv_"))
        .Op(conv_type)
        .Input("in", [inputs])
        .Input("weight", [filters])
        .Output("out")
        .Attr("filters", filters.shape[0])
        .Attr("padding_before", padding_before)
        .Attr("data_format", channel_pos)
        .Attr("kernel_size", kernel_size_list)
        .Attr("strides", strides)
        .Attr("dilation_rate", dilations)
        .Attr("groups", groups)
    )
    if bias is not None:
        op_builder = op_builder.Input("bias", [bias])
    return op_builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("nn.conv1d")
def conv1d(
input: oneflow._oneflow_internal.BlobDesc,
filters: oneflow._oneflow_internal.BlobDesc,
strides: Union[int, Tuple[int]],
padding: Union[str, Tuple[IntPair, IntPair, IntPair]],
data_format: str = "NCW",
dilations: Optional[Union[int, Tuple[int]]] = None,
groups: int = 1,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""1D convolution layer.
Args:
input (oneflow._oneflow_internal.BlobDesc): A 3D input `Blob`. [batch_num, channel, width]
filters (oneflow._oneflow_internal.BlobDesc): A `Blob` with the same type as `input` and has the shape [out_channels, in_channels//groups, filter_width] for `NCW`, or [out_channels, filter_width, in_channels//groups] for `NWC`
strides (Union[int, Tuple[int]]): An int or list of `ints` that has length `1`. The stride of the sliding window for each dimension of `input`.
padding (Union[str, Tuple[IntPair, IntPair, IntPair]]): padding: `string` `"SAME"` or `"SAME_LOWER"` or `"SAME_UPPER"` or `"VALID" or Tuple[IntPair, IntPair, IntPair]` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension.
data_format (str, optional): `"NWC" or "NCW"`. Defaults to `"NCW"`.
dilations (Optional[Union[int, Tuple[int]]], optional): An int or list of `ints` that has length `1`. The dilation factor for each dimension of `input`. Defaults to None.
groups (int, optional): int value greater than 0. Defaults to 1.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: strides must be an int or a list.
ValueError: padding must be "SAME" or "SAME_LOWER" or "SAME_UPPER" or "VALID" or Tuple[IntPair, IntPair, IntPair, IntPair].
ValueError: data_format must be "NWC" or "NCW".
ValueError: dilations must be an int or a list.
ValueError: invalid data_format.
ValueError: data_format NWC not support groups > 1
ValueError: invalid data_format.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as `input` and the same outer batch shape.
Note:
This api is more flexible, if you're new to OneFlow, it's more recommend to use `oneflow.layers.conv1d`
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
def conv1d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size)
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.conv1d(input, weight, strides, padding, name=name)
@flow.global_function()
def conv1d_Job(x: tp.Numpy.Placeholder((1, 64, 32))
) -> tp.Numpy:
conv = conv1d(x,
filters=32,
kernel_size=3,
strides=1,
padding='SAME',
name="Convlayer")
return conv
x = np.random.randn(1, 64, 32).astype(np.float32)
out = conv1d_Job(x)
# out.shape (1, 32, 32)
"""
assert len(input.shape) == 3
assert len(filters.shape) == 3
if isinstance(strides, (list, tuple)):
assert len(strides) == 1, ValueError(
"strides length must be 1 when passed as a list."
)
elif isinstance(strides, int):
strides = [strides]
else:
raise ValueError("strides must be an int or a list.")
if data_format.upper() != "NCW" and data_format.upper() != "NWC":
raise ValueError('data_format must be "NCW" or "NWC".')
channel_pos = "channels_first" if data_format == "NCW" else "channels_last"
if dilations is None:
dilations = [1]
else:
if isinstance(dilations, (list, tuple)):
assert len(dilations) == 1, ValueError(
"dilations length must be 1 when passed as a list."
)
elif isinstance(dilations, int):
dilations = [dilations]
else:
raise ValueError("dilations must be an int or a list.")
if channel_pos == "channels_first":
kernel_size_list = filters.shape[2:3]
in_channel_axis = 1
filter_out_axis = 0
filter_in_axis = 1
elif channel_pos == "channels_last":
kernel_size_list = filters.shape[-2:-1]
in_channel_axis = 2
filter_out_axis = 0
filter_in_axis = 2
if groups > 1:
raise ValueError("data_format NWC not support groups > 1")
else:
raise ValueError("invalid data_format")
assert isinstance(kernel_size_list, tuple)
assert isinstance(groups, int)
assert groups > 0
assert groups <= filters.shape[filter_out_axis]
assert filters.shape[filter_out_axis] % groups == 0
assert groups <= input.shape[in_channel_axis]
assert input.shape[in_channel_axis] % groups == 0
assert filters.shape[filter_in_axis] == input.shape[in_channel_axis] // groups
inputs, pads_list = calc_conv_padding(
input, padding, data_format.upper(), kernel_size_list, dilations, strides,
)
assert len(pads_list) == len(inputs.shape) - 2
padding_before = [pad[0] for pad in pads_list]
if (
groups > 1
and flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu"
):
in_split_list = ConvUtil.split(inputs, axis=in_channel_axis, split_num=groups)
filter_split_list = ConvUtil.split(
filters, axis=filter_out_axis, split_num=groups
)
out_list = []
name = name if name is not None else id_util.UniqueStr("Conv1d_")
for i in range(len(in_split_list)):
out_list.append(
conv_op(
"conv1d",
in_split_list[i],
filter_split_list[i],
None,
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups=1,
name=name + str(i),
)
)
return flow.concat(out_list, axis=in_channel_axis)
else:
return conv_op(
"conv1d",
inputs,
filters,
None,
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups,
name,
)
@oneflow_export("nn.conv2d")
def conv2d(
input: oneflow._oneflow_internal.BlobDesc,
filters: oneflow._oneflow_internal.BlobDesc,
strides: Union[int, IntPair],
padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]],
bias: Optional[oneflow._oneflow_internal.BlobDesc] = None,
data_format: str = "NCHW",
dilations: Optional[Union[int, IntPair]] = None,
groups: int = 1,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""2D convolution layer.
Args:
input (oneflow._oneflow_internal.BlobDesc): A 4D input `Blob`. [batch_num, channel, height, width]
filters (oneflow._oneflow_internal.BlobDesc): A `Blob` with the same type as `input` and has the shape `[out_channels, in_channels//groups, filter_height, filter_width] for NCHW, or [out_channels, filter_height, filter_width, in_channels//groups] for NHWC`
strides (Union[int, IntPair]): An int or list of `ints` that has length `2`. The stride of the sliding window for each dimension of `input`.
padding (Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]]): padding: `string` `"SAME"` or `"SAME_LOWER"` or `"SAME_UPPER"` or `"VALID" or Tuple[IntPair, IntPair, IntPair, IntPair]` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension.
data_format (str, optional): `"NHWC"` or `"NCHW"`. Defaults to `"NCHW"`.
dilations (Optional[Union[int, IntPair]], optional): An int or list of `ints` that has length `2`. The dilation factor for each dimension of `input`. Defaults to None.
groups (int, optional): int value greater than 0. Defaults to 1.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: strides must be an int or a list.
ValueError: padding must be `"SAME"` or `"SAME_LOWER" or `"SAME_UPPER"` or `"VALID"` or Tuple[IntPair, IntPair, IntPair, IntPair].
ValueError: data_format must be `"NHWC"` or `"NCHW"`.
ValueError: dilations must be an int or a list.
ValueError: invalid data_format.
ValueError: data_format NHWC not support groups > 1
ValueError: invalid data_format.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as `input` and the same outer batch shape.
Note:
This api is more flexible, if you're new to OneFlow, it's more recommend to use `oneflow.layers.conv2d`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
def conv2d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size[0],
kernel_size[1])
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.conv2d(input, weight, strides, padding, name=name)
@flow.global_function()
def conv2d_Job(x: tp.Numpy.Placeholder((1, 64, 32, 32))
) -> tp.Numpy:
conv = conv2d(x,
filters=128,
kernel_size=[3, 3],
strides=2,
padding='SAME',
name="Convlayer")
return conv
x = np.random.randn(1, 64, 32, 32).astype(np.float32)
out = conv2d_Job(x)
# out.shape (1, 128, 16, 16)
"""
assert len(input.shape) == 4
assert len(filters.shape) == 4
if bias is not None:
assert len(bias.shape) == 1
if isinstance(strides, (list, tuple)):
assert len(strides) == 2, ValueError(
"strides length must be 2 when passed as a list."
)
elif isinstance(strides, int):
strides = [strides, strides]
else:
raise ValueError("strides must be an int or a list.")
if data_format.upper() != "NCHW" and data_format.upper() != "NHWC":
raise ValueError('data_format must be "NHWC" or "NCHW".')
channel_pos = "channels_first" if data_format == "NCHW" else "channels_last"
if dilations is None:
dilations = [1, 1]
else:
if isinstance(dilations, (list, tuple)):
assert len(dilations) == 2, ValueError(
"dilations length must be 2 when passed as a list."
)
elif isinstance(dilations, int):
dilations = [dilations, dilations]
else:
raise ValueError("dilations must be an int or a list.")
assert isinstance(groups, int)
assert groups > 0
if data_format.upper() == "NCHW":
kernel_size_list = filters.shape[2:4]
in_channel_axis = 1
filter_out_axis = 0
filter_in_axis = 1
elif data_format.upper() == "NHWC":
kernel_size_list = filters.shape[-3:-1]
in_channel_axis = 3
filter_out_axis = 0
filter_in_axis = 3
if (
groups > 1
and flow.current_scope().device_parallel_desc_symbol.device_tag == "gpu"
):
raise ValueError("gpu data_format NHWC not support groups > 1")
else:
raise ValueError('data_format must be "NHWC" or "NCHW".')
assert isinstance(kernel_size_list, tuple)
inputs, pads_list = calc_conv_padding(
input, padding, data_format.upper(), kernel_size_list, dilations, strides,
)
assert len(pads_list) == len(inputs.shape) - 2
padding_before = [pad[0] for pad in pads_list]
assert groups <= filters.shape[filter_out_axis]
assert filters.shape[filter_out_axis] % groups == 0
assert groups <= inputs.shape[in_channel_axis]
assert inputs.shape[in_channel_axis] % groups == 0
assert filters.shape[filter_in_axis] == inputs.shape[in_channel_axis] // groups
if bias is not None:
assert bias.shape[filter_out_axis] == filters.shape[filter_out_axis]
if (
groups > 1
and flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu"
):
in_split_list = ConvUtil.split(inputs, axis=in_channel_axis, split_num=groups)
filter_split_list = ConvUtil.split(
filters, axis=filter_out_axis, split_num=groups
)
bias_spilt_list = (
ConvUtil.split(bias, axis=filter_out_axis, split_num=groups)
if bias is not None
else [None for _ in range(groups)]
)
out_list = []
name = name if name is not None else id_util.UniqueStr("Conv2d_")
for i in range(len(in_split_list)):
out_list.append(
conv_op(
"conv2d",
in_split_list[i],
filter_split_list[i],
bias_spilt_list[i],
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups=1,
name=name + str(i),
)
)
return flow.concat(out_list, axis=in_channel_axis)
else:
return conv_op(
"conv2d",
inputs,
filters,
bias,
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups,
name,
)
@oneflow_export("nn.conv3d")
def conv3d(
input: oneflow._oneflow_internal.BlobDesc,
filters: oneflow._oneflow_internal.BlobDesc,
strides: Union[int, Sequence[int]],
padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair]],
data_format: str = "NCDHW",
dilations: Optional[Union[int, Sequence[int]]] = None,
groups: int = 1,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""3D convolution layer.
Args:
input (oneflow._oneflow_internal.BlobDesc): A 5D input `Blob`. [batch_num, channel, depth, height, width]
filters (oneflow._oneflow_internal.BlobDesc): A `Blob` with the same type as `input` and has the shape `[out_channels, in_channels//groups, filter_depth, filter_height, filter_width] for NCDHW, or [out_channels, filter_depth, filter_height, filter_width, in_channels//groups] for NDHWC`
strides (Union[int, Sequence[int]]): An `int` or `list of ints` that has length `3`. The stride of the sliding window for each dimension of `input`.
padding (Union[str, Tuple[IntPair, IntPair, IntPair, IntPair, IntPair]]): padding: `string` `"SAME"` or `"SAME_LOWER"` or `"SAME_UPPER"` or `"VALID"` or Tuple[IntPair, IntPair, IntPair, IntPair, IntPair]` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension.
data_format (str, optional): `"NDHWC" or "NCDHW"`. Defaults to `"NCDHW"`.
dilations (Optional[Union[int, Sequence[int]]], optional): An int or list of `ints` that has length `3`. The dilation factor for each dimension of `input`. Defaults to None.
groups (int, optional): int value greater than 0. Defaults to 1.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: strides must be an int or a list.
ValueError: padding must be "SAME" or "SAME_LOWER" or "SAME_UPPER" or "VALID" or Tuple[IntPair, IntPair, IntPair, IntPair, IntPair].
ValueError: data_format must be "NDHWC" or "NCDHW".
ValueError: dilations must be an int or a list.
ValueError: invalid data_format.
ValueError: data_format NDHWC not support groups > 1
ValueError: invalid data_format.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as `input` and the same outer batch shape.
Note:
This api is more flexible, if you're new to OneFlow, it's more recommend to use `oneflow.layers.conv3d`
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
def conv3d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size[0],
kernel_size[1],
kernel_size[2])
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.conv3d(input, weight, strides, padding, name=name)
@flow.global_function()
def conv3d_Job(x: tp.Numpy.Placeholder((1, 64, 10, 16, 16))
) -> tp.Numpy:
conv = conv3d(x,
filters=128,
kernel_size=[3, 3, 3],
strides=1,
padding='SAME',
name="Convlayer")
return conv
x = np.random.randn(1, 64, 10, 16, 16).astype(np.float32)
out = conv3d_Job(x)
# out.shape (1, 128, 10, 16, 16)
"""
need_transpose = 0
if data_format.upper() == "NDHWC": # NDHWC is not supported before cudnn 8.0
need_transpose = 1
data_format = "NCDHW"
if need_transpose:
input = flow.transpose(input, perm=[0, 4, 1, 2, 3])
filters = flow.transpose(filters, perm=[0, 4, 1, 2, 3])
# padding for `NDHWC` is [0, 0, 1, 1, 1] to `NCDHW` format [0, 1, 1, 1, 0]
if isinstance(padding, (list, tuple)):
padding = list(padding)
padding[1], padding[4] = padding[4], padding[1]
assert len(input.shape) == 5
assert len(filters.shape) == 5
if isinstance(strides, (list, tuple)):
assert len(strides) == 3, ValueError(
"strides length must be 3 when passed as a list."
)
elif isinstance(strides, int):
strides = [strides, strides, strides]
else:
raise ValueError("strides must be an int or a list.")
if data_format.upper() != "NCDHW" and data_format.upper() != "NDHWC":
raise ValueError('data_format must be "NDHWC" or "NCDHW".')
channel_pos = "channels_first" if data_format == "NCDHW" else "channels_last"
if dilations is None:
dilations = [1, 1, 1]
else:
if isinstance(dilations, (list, tuple)):
assert len(dilations) == 3, ValueError(
"dilations length must be 3 when passed as a list."
)
elif isinstance(dilations, int):
dilations = [dilations, dilations, dilations]
else:
raise ValueError("dilations must be an int or a list.")
if channel_pos == "channels_first":
kernel_size_list = filters.shape[2:5]
in_channel_axis = 1
filter_out_axis = 0
filter_in_axis = 1
elif channel_pos == "channels_last":
kernel_size_list = filters.shape[-4:-1]
in_channel_axis = 4
filter_out_axis = 0
filter_in_axis = 4
if groups > 1:
raise ValueError("data_format NDHWC not support groups > 1")
else:
raise ValueError("invalid data_format")
assert isinstance(kernel_size_list, tuple)
assert isinstance(groups, int)
assert groups > 0
assert groups <= filters.shape[filter_out_axis]
assert filters.shape[filter_out_axis] % groups == 0
assert groups <= input.shape[in_channel_axis]
assert input.shape[in_channel_axis] % groups == 0
assert filters.shape[filter_in_axis] == input.shape[1] // groups
inputs, pads_list = calc_conv_padding(
input, padding, data_format.upper(), kernel_size_list, dilations, strides,
)
assert len(pads_list) == len(inputs.shape) - 2
padding_before = [pad[0] for pad in pads_list]
if (
groups > 1
and flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu"
):
in_split_list = ConvUtil.split(inputs, axis=in_channel_axis, split_num=groups)
filter_split_list = ConvUtil.split(
filters, axis=filter_out_axis, split_num=groups
)
out_list = []
name = name if name is not None else id_util.UniqueStr("Conv3d_")
for i in range(len(in_split_list)):
out_list.append(
conv_op(
"conv3d",
in_split_list[i],
filter_split_list[i],
None,
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups=1,
name=name + str(i),
)
)
output = flow.concat(out_list, axis=in_channel_axis)
else:
output = conv_op(
"conv3d",
inputs,
filters,
None,
padding_before,
channel_pos,
kernel_size_list,
strides,
dilations,
groups,
name,
)
if need_transpose:
output = flow.transpose(output, perm=[0, 2, 3, 4, 1])
return output
@oneflow_export("nn.moments")
def moments(
x: oneflow._oneflow_internal.BlobDesc,
axes: List[int],
keepdims: Optional[bool] = False,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""This operator computes the mean and variance value of input Blob.
Args:
x (oneflow._oneflow_internal.BlobDesc): A Blob
axes (List): Array of ints. Axes along which to compute the mean and variance
keepdims (bool, optional): Whether to keep the same dimensanality as the input x. Defaults to False.
name (str, optional): The operator's name. Defaults to None.
Returns:
remote_blob: Two Blobs, mean and variance.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
from typing import Tuple
@flow.global_function()
def moments_Job(x: tp.Numpy.Placeholder((5,))
) -> Tuple[tp.Numpy, tp.Numpy]:
return flow.nn.moments(x, axes=[0])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
mean, variance = moments_Job(x)
# mean: [3.]
# variance: [2.]
"""
assert isinstance(axes, list)
if name is None:
name = id_util.UniqueStr("Moments_")
with flow.scope.namespace(name):
return (
flow.math.reduce_mean(x, axis=axes, keepdims=keepdims),
flow.math.reduce_variance(x, axis=axes, keepdims=keepdims),
)
@oneflow_export("nn.GroupNorm")
@stable_api
def group_normalization(
x: oneflow._oneflow_internal.BlobDesc,
num_groups: int = 32,
eps: float = 1e-05,
affine: bool = True,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Applies Group Normalization over a ND(N>=3) input.
Args:
x (oneflow._oneflow_internal.BlobDesc): input tensor with shape (N,C,∗), where C means the number of channels.
eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
affine (bool): A boolean value that when set to True, this module has learnable affine parameters,
initialized the same way as done for batch normalization. Default: True.
name (Optional[str], optional): Name of this op.
Returns:
oneflow._oneflow_internal.BlobDesc: The normalized input tensor.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def group_norm_Job(x: tp.Numpy.Placeholder((4, 4, 32, 32))
) -> tp.Numpy:
group_norm = flow.nn.GroupNorm(
x,
num_group=2,
eps=1e-5,
affine=True,
)
return group_norm
x = np.random.random(size=(4, 4, 32, 32)).astype(np.float32)
out = group_norm_Job(x)
"""
assert len(x.shape) >= 3
assert (
x.shape[1] % num_groups == 0
), "The channel should be divisible by num_groups."
if name is None:
name = id_util.UniqueStr("GroupNorm_")
channel = x.shape[1]
assert channel % num_groups == 0
group_size = channel // num_groups
orig_shape = x.shape
reshape_to_1d = flow.reshape(x, shape=[orig_shape[0], num_groups, -1])
(mean, variance) = flow.nn.moments(reshape_to_1d, [2], keepdims=True)
normalized = (reshape_to_1d - mean) / flow.math.sqrt(variance + eps)
normalized = flow.reshape(normalized, shape=[orig_shape[0], channel, -1])
if affine == True:
gamma = flow.get_variable(
name + "_gamma",
shape=(1, channel, 1),
dtype=x.dtype,
initializer=flow.ones_initializer(),
trainable=True,
)
beta = flow.get_variable(
name + "_beta",
shape=(1, channel, 1),
dtype=x.dtype,
initializer=flow.zeros_initializer(),
trainable=True,
)
normalized = gamma * normalized + beta
reshape_back = flow.reshape_like(normalized, like=x)
return reshape_back
@oneflow_export("nn.InstanceNorm1d")
@stable_api
def instance_normalization1d(
x: oneflow._oneflow_internal.BlobDesc,
eps: float = 1e-05,
affine: bool = True,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Applies Instance Normalization over a 3D input.
Args:
x (oneflow._oneflow_internal.BlobDesc): 3D input tensor with NCL data layout.
eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
affine (bool): A boolean value that when set to True, this module has learnable affine parameters,
initialized the same way as done for batch normalization. Default: True.
name (Optional[str], optional): Name of this op.
Returns:
oneflow._oneflow_internal.BlobDesc: The normalized input tensor.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def instance_norm_Job(x: tp.Numpy.Placeholder((4, 2, 32))
) -> tp.Numpy:
instance_norm = flow.nn.InstanceNorm1d(
x,
eps=1e-5,
affine=True,
)
return instance_norm
x = np.random.random(size=(4, 2, 32)).astype(np.float32)
out = instance_norm_Job(x)
"""
assert len(x.shape) == 3
if name is None:
name = id_util.UniqueStr("InstanceNorm1D_")
channel = x.shape[1]
(mean, variance) = flow.nn.moments(x, [2], keepdims=True)
normalized = (x - mean) / flow.math.sqrt(variance + eps)
if affine == True:
gamma = flow.get_variable(
name + "_gamma",
shape=(1, channel, 1),
dtype=x.dtype,
initializer=flow.ones_initializer(),
trainable=True,
)
beta = flow.get_variable(
name + "_beta",
shape=(1, channel, 1),
dtype=x.dtype,
initializer=flow.zeros_initializer(),
trainable=True,
)
return gamma * normalized + beta
else:
return normalized
@oneflow_export("nn.InstanceNorm2d")
@stable_api
def instance_normalization2d(
x: oneflow._oneflow_internal.BlobDesc,
eps: float = 1e-05,
affine: bool = True,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Applies Instance Normalization over a 4D input.
Args:
x (oneflow._oneflow_internal.BlobDesc): 4D input tensor with NCHW data layout.
eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
affine (bool): A boolean value that when set to True, this module has learnable affine parameters,
initialized the same way as done for batch normalization. Default: True.
name (Optional[str], optional): Name of this op.
Returns:
oneflow._oneflow_internal.BlobDesc: The normalized input tensor.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def instance_norm_Job(x: tp.Numpy.Placeholder((4, 2, 32, 32))
) -> tp.Numpy:
instance_norm = flow.nn.InstanceNorm2d(
x,
eps=1e-5,
affine=True,
)
return instance_norm
x = np.random.random(size=(4, 2, 32, 32)).astype(np.float32)
out = instance_norm_Job(x)
"""
assert len(x.shape) == 4
if name is None:
name = id_util.UniqueStr("InstanceNorm2D_")
reshape_to_1d = flow.reshape(x, shape=[x.shape[0], x.shape[1], -1])
normalized_1d_out = flow.nn.InstanceNorm1d(
reshape_to_1d, eps=eps, affine=affine, name=name
)
reshape_back_to_2d = flow.reshape(normalized_1d_out, shape=list(x.shape))
return reshape_back_to_2d
@oneflow_export("nn.InstanceNorm3d")
@stable_api
def instance_normalization3d(
x: oneflow._oneflow_internal.BlobDesc,
eps: float = 1e-05,
affine: bool = True,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Applies Instance Normalization over a 5D input.
Args:
x (oneflow._oneflow_internal.BlobDesc): 5D input tensor with NCDHW data layout.
eps (float): A value added to the denominator for numerical stability. Default: 1e-5.
affine (bool): A boolean value that when set to True, this module has learnable affine parameters,
initialized the same way as done for batch normalization. Default: True.
name (Optional[str], optional): Name of this op.
Returns:
oneflow._oneflow_internal.BlobDesc: The normalized input tensor.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def instance_norm_Job(x: tp.Numpy.Placeholder((4, 2, 32, 32, 32))
) -> tp.Numpy:
instance_norm = flow.nn.InstanceNorm2d(
x,
eps=1e-5,
affine=True,
)
return instance_norm
x = np.random.random(size=(4, 2, 32, 32, 32)).astype(np.float32)
out = instance_norm_Job(x)
"""
assert len(x.shape) == 5
if name is None:
name = id_util.UniqueStr("InstanceNorm3D_")
reshape_to_1d = flow.reshape(x, shape=[x.shape[0], x.shape[1], -1])
normalized_1d_out = flow.nn.InstanceNorm1d(
reshape_to_1d, eps=eps, affine=affine, name=name
)
reshape_back_to_3d = flow.reshape(normalized_1d_out, shape=list(x.shape))
return reshape_back_to_3d
@oneflow_export("nn.batch_normalization")
def batch_normalization(
x: oneflow._oneflow_internal.BlobDesc,
mean: oneflow._oneflow_internal.BlobDesc,
variance: oneflow._oneflow_internal.BlobDesc,
offset: Optional[oneflow._oneflow_internal.BlobDesc] = None,
scale: Optional[oneflow._oneflow_internal.BlobDesc] = None,
variance_epsilon: Optional[float] = 1e-5,
axis: int = 1,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This op does not fully align with tf.nn.batch_normalization.
The `mean`, `variable`, `offset` and `scale` are always 1D. Users need to specify `axis` to 1 for NCHW data format.
Args:
x (oneflow._oneflow_internal.BlobDesc): Input `Blob` of arbitrary dimensionality.
mean (oneflow._oneflow_internal.BlobDesc): A 1D mean `Blob`.
variance (oneflow._oneflow_internal.BlobDesc): A 1D variance `Blob`.
offset (Optional[oneflow._oneflow_internal.BlobDesc]): An 1D offset `Blob`, often denoted in equations, or None. If present, will be added to the normalized `Blob`.
scale (Optional[oneflow._oneflow_internal.BlobDesc]): A 1D scale `Blob`, often denoted in equations, or None. If present, the scale is applied to the normalized `Blob`.
variance_epsilon (float): A small float number to avoid dividing by 0.
axis (int, optional): 1 for '`NCHW'` data format. Defaults to 1.
name (Optional[str], optional): This operator's name.
Returns:
oneflow._oneflow_internal.BlobDesc: the normalized, scaled, offset `Blob`.
Note:
This api is more flexible, if you're new to OneFlow, it's more recommend to use `oneflow.layers.batch_normalization`
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def batch_norm_Job(x: tp.Numpy.Placeholder((1, 5))
) -> tp.Numpy:
bn_mean, bn_variance = flow.nn.moments(x, axes=[1])
batch_norm = flow.nn.batch_normalization(
x,
mean=bn_mean,
variance=bn_variance,
axis=0
)
return batch_norm
x = np.array([[1, 2, 3, 4, 5]]).astype(np.float32)
out = batch_norm_Job(x)
# out [[-1.41421 -0.707105 0. 0.707105 1.41421 ]]
"""
assert axis >= -len(x.shape) and axis < len(x.shape)
if axis < 0:
axis += len(x.shape)
if name is None:
name = id_util.UniqueStr("BatchNorm_")
params_shape = [x.shape[axis]]
if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
if len(mean.shape) == 1:
nd_params_shape = [1] * len(x.shape)
nd_params_shape[axis] = params_shape[0]
mean = flow.reshape(mean, nd_params_shape)
variance = flow.reshape(variance, nd_params_shape)
if scale:
scale = flow.reshape(scale, nd_params_shape)
if offset:
offset = flow.reshape(offset, nd_params_shape)
elif len(mean.shape) == len(x.shape):
pass
else:
raise ValueError(
"shape of mean and variance should be 1D or has number of axes and x's"
)
variance += variance_epsilon
std_inv = flow.math.rsqrt(variance)
normalized = (x - mean) * std_inv
affined = normalized
if scale:
affined *= scale
if offset:
affined += offset
return affined
elif flow.current_scope().device_parallel_desc_symbol.device_tag == "gpu":
params_dtype = flow.float32 if x.dtype == flow.float16 else x.dtype
if scale is None:
scale = flow.constant(
1, dtype=params_dtype, shape=params_shape, name="gamma"
)
if offset is None:
offset = flow.constant(
0, dtype=params_dtype, shape=params_shape, name="beta"
)
builder = (
flow.user_op_builder(name)
.Op("normalization")
.Input("x", [x])
.Input("moving_mean", [mean])
.Input("moving_variance", [variance])
.Input("gamma", [scale])
.Input("beta", [offset])
.Output("y")
.Attr("axis", axis)
.Attr("epsilon", variance_epsilon)
.Attr("training", False)
# momentum is not used
.Attr("momentum", 0.0)
)
return builder.Build().InferAndTryRun().RemoteBlobList()[0]
else:
raise NotImplementedError
@oneflow_export("nn.layer_norm")
def layer_norm(
inputs: oneflow._oneflow_internal.BlobDesc,
gamma: Optional[oneflow._oneflow_internal.BlobDesc] = None,
beta: Optional[oneflow._oneflow_internal.BlobDesc] = None,
begin_norm_axis: int = 1,
begin_params_axis: int = -1,
epsilon: float = 1e-5,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Layer Normalization.
Args:
inputs (oneflow._oneflow_internal.BlobDesc): Input `Blob`.
gamma (Optional[oneflow._oneflow_internal.BlobDesc]).
beta (Optional[oneflow._oneflow_internal.BlobDesc]).
begin_norm_axis (int, optional): An integer specifies which axis to normalize at first. Defaults to 1.
begin_params_axis (int, optional): An integer specifies which axis params at . Defaults to -1.
epsilon (float, optional): A small float is added to avoid division by zero. Defaults to 1e-5.
name (Optional[str], optional): This operator's name. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A normalized `Blob` with same shape of input.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def layer_norm_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
) -> tp.Numpy:
layer_norm = flow.nn.layer_norm(
x,
name="LayerNorm1"
)
return layer_norm
x = np.random.randn(1, 64, 128, 128).astype(np.float32)
out = layer_norm_Job(x)
# out.shape (1, 64, 128, 128)
"""
param_shape = inputs.shape[begin_params_axis:]
if name is None:
name = id_util.UniqueStr("LayerNorm_")
if flow.current_scope().device_parallel_desc_symbol.device_tag == "cpu":
if begin_norm_axis < 0:
begin_norm_axis = begin_norm_axis + len(inputs.shape)
reduce_axis = []
for dim in range(len(inputs.shape)):
if dim >= begin_norm_axis:
reduce_axis.append(dim)
mean, variance = flow.nn.moments(inputs, reduce_axis, keepdims=True)
axis = begin_norm_axis
normalized = flow.nn.batch_normalization(
x=inputs,
mean=mean,
variance=variance,
variance_epsilon=epsilon,
axis=axis,
name=name,
)
nd_params_shape = [1] * (len(inputs.shape) - len(param_shape)) + list(
param_shape
)
affined = normalized
if gamma:
gamma = flow.reshape(gamma, nd_params_shape)
affined *= gamma
if beta:
beta = flow.reshape(beta, nd_params_shape)
affined += beta
return affined
elif flow.current_scope().device_parallel_desc_symbol.device_tag == "gpu":
op_builder = (
flow.user_op_builder(name)
.Op("layer_norm")
.Input("x", [inputs])
.Output("y")
.Output("mean")
.Output("inv_variance")
)
scale = False
center = False
if beta is not None:
center = True
op_builder.Input("beta", [beta])
if gamma is not None:
scale = True
op_builder.Input("gamma", [gamma])
op_builder.Output("normalized")
op_builder.Attr("center", center)
op_builder.Attr("scale", scale)
op_builder.Attr("begin_norm_axis", begin_norm_axis)
op_builder.Attr("begin_params_axis", begin_params_axis)
op_builder.Attr("epsilon", epsilon)
y = op_builder.Build().InferAndTryRun().RemoteBlobList()[0]
return y
else:
raise NotImplementedError
@oneflow_export("nn.compat_conv2d")
def tf_conv2d(
input: oneflow._oneflow_internal.BlobDesc,
filters: oneflow._oneflow_internal.BlobDesc,
strides: Union[int, Sequence[int]],
padding: str,
data_format: str = "NCHW",
dilations: Optional[Union[int, Sequence[int]]] = None,
groups: int = 1,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Computes a 2-D convolution given `input` and 4-D `filters` `Blob`.
Args:
input (oneflow._oneflow_internal.BlobDesc): A `Blob` of rank at least 4.
filters (oneflow._oneflow_internal.BlobDesc): A `Blob` with the same type as `input` and has the shape `[out_channels, in_channels//groups, filter_height, filter_width] for NCHW, or [out_channels, filter_height, filter_width, in_channels//groups] for NHWC`
strides (Union[int, Sequence[int]]): An int or list of `ints` that has length `1`, or `2`. The stride of the sliding window for each dimension of `input`.
padding (str): `"SAME"` or `"VALID"` indicating the type of padding algorithm to use, or a list indicating the explicit paddings at the start and end of each dimension.
data_format (str, optional): `"NHWC"` or `"NCHW"`. Defaults to `"NCHW"`.
dilations (Optional[Union[int, Sequence[int]]], optional): The dilation factor for each dimension of`input`. Defaults to None.
groups (int, optional): int value greater than 0. Defaults to 1.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: strides must be an int or a list.
ValueError: data_format must be "NHWC" or "NCHW".
ValueError: dilations length must be 2 when passed as a list.
ValueError: dilations must be an int or a list.
ValueError: data_format NHWC not support groups > 1.
ValueError: invalid data_format.
ValueError: padding must be "SAME" or "VALID".
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as `input` and the same outer batch shape.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
def conv2d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size[0],
kernel_size[1])
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.compat_conv2d(input, weight, strides, padding, name=name)
@flow.global_function()
def conv2d_Job(x: tp.Numpy.Placeholder((1, 64, 32, 32))
) -> tp.Numpy:
conv = conv2d(x,
filters=128,
kernel_size=[3, 3],
strides=2,
padding='SAME',
name="Convlayer")
return conv
x = np.random.randn(1, 64, 32, 32).astype(np.float32)
out = conv2d_Job(x)
# out.shape (1, 128, 16, 16)
"""
if padding.upper() == "SAME":
padding = "SAME_UPPER"
return flow.nn.conv2d(
input, filters, strides, padding, None, data_format, dilations, groups, name
)
@oneflow_export("nn.bias_add")
def bias_add(
value: oneflow._oneflow_internal.BlobDesc,
bias: oneflow._oneflow_internal.BlobDesc,
data_format: Optional[str] = None,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This operator adds a bias to Blob.
Args:
value (oneflow._oneflow_internal.BlobDesc): A `Blob`.
bias (oneflow._oneflow_internal.BlobDesc): A 1-D `Blob` with size matching the channel dimension of value. And has the same type as value unless value is a quantized type.
data_format (Optional[str], optional): A string. '`N...C'` or '`NC...'`. Defaults to None.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: ValueError if data format is unrecognized, if value has less than two dimensions with '`N..C'`/None data_format or value has less than three dimensions with '`NC..'` data_format, if bias is a vector, or if the size of bias does not match the size of the channel dimension of value.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as value.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def bias_add_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
) -> tp.Numpy:
bias_initializer = flow.truncated_normal(0.1)
bias_regularizer = flow.regularizers.l2(0.0005)
bias = flow.get_variable(
"Add_bias",
shape=(64,),
initializer=bias_initializer,
regularizer=bias_regularizer,
)
bias_out = flow.nn.bias_add(x, bias)
return bias_out
x = np.random.randn(1, 64, 128, 128).astype(np.float32)
out = bias_add_Job(x)
# out.shape (1, 64, 128, 128)
"""
# TODO: name unused, fix it
if name is None:
name = id_util.UniqueStr("BiasAdd_")
if data_format is None:
bias_add_axis = 1
else:
if data_format.startswith("NC"):
bias_add_axis = 1
elif data_format.startswith("N") and data_format.endswith("C"):
bias_add_axis = len(value.shape) - 1
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
return (
flow.user_op_builder(name)
.Op("bias_add")
.Input("a", [value])
.Input("b", [bias])
.Output("out")
.Attr("axis", bias_add_axis)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.fused_bias_add_gelu")
def fused_bias_add_gelu(
value: oneflow._oneflow_internal.BlobDesc,
bias: oneflow._oneflow_internal.BlobDesc,
data_format: Optional[str] = None,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""This operator fuse flow.nn.bias_add and flow.math.gelu operator.
Args:
value (oneflow._oneflow_internal.BlobDesc): A `Blob`.
bias (oneflow._oneflow_internal.BlobDesc): A 1-D `Blob` with size matching the channel dimension of value. And has the same type as value unless value is a quantized type.
data_format (Optional[str], optional): A string. '`N...C'` or '`NC...'`. Defaults to None.
name (Optional[str], optional): This operator's name. Defaults to None.
Raises:
ValueError: ValueError if data format is unrecognized, if value has less than two dimensions with '`N..C'`/None data_format or value has less than three dimensions with '`NC..'` data_format, if bias is a vector, or if the size of bias does not match the size of the channel dimension of value.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as value.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def fused_bias_add_gelu_Job(x: tp.Numpy.Placeholder((1, 64, 128, 128))
) -> tp.Numpy:
bias_initializer = flow.truncated_normal(0.1)
bias_regularizer = flow.regularizers.l2(0.0005)
bias = flow.get_variable(
"Add_bias",
shape=(64,),
initializer=bias_initializer,
regularizer=bias_regularizer,
)
out = flow.nn.fused_bias_add_gelu(x, bias)
return out
x = np.random.randn(1, 64, 128, 128).astype(np.float32)
out = fused_bias_add_gelu_Job(x)
# out.shape (1, 64, 128, 128)
"""
if name is None:
name = id_util.UniqueStr("FusedBiasAddGelu_")
if data_format is None:
bias_add_axis = 1
else:
if data_format.startswith("NC"):
bias_add_axis = 1
elif data_format.startswith("N") and data_format.endswith("C"):
bias_add_axis = len(value.shape) - 1
else:
raise ValueError("data_format must be of the form `N...C` or `NC...`")
return (
flow.user_op_builder(name)
.Op("fused_bias_add_gelu")
.Input("a", [value])
.Input("b", [bias])
.Output("out")
.Attr("axis", bias_add_axis)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.fused_bias_add_dropout")
def fused_bias_add_dropout(
    value: oneflow._oneflow_internal.BlobDesc,
    bias: oneflow._oneflow_internal.BlobDesc,
    data_format: Optional[str] = None,
    rate: float = 0.0,
    noise_shape: Optional[oneflow._oneflow_internal.BlobDesc] = None,
    seed: Optional[int] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Fused equivalent of ``flow.nn.bias_add`` followed by ``flow.nn.dropout``.

    Args:
        value (oneflow._oneflow_internal.BlobDesc): The input `Blob`.
        bias (oneflow._oneflow_internal.BlobDesc): A 1-D `Blob` whose size matches
            the channel dimension of ``value``, with the same dtype as ``value``.
        data_format (Optional[str], optional): '`N...C'` or '`NC...'`. Defaults
            to None, which is treated as channels-first.
        rate (float): Probability that each element is dropped;
            must satisfy ``0.0 <= rate < 1.0``.
        noise_shape (Optional[oneflow._oneflow_internal.BlobDesc], optional): A 1-D
            `Blob` representing the shape of the randomly generated keep/drop
            flags. Defaults to None.
        seed (Optional[int], optional): Seed for the random mask. Defaults to None.
        name (Optional[str], optional): This operator's name. Defaults to None.

    Raises:
        ValueError: If ``data_format`` is not of the form `N...C` or `NC...`.

    Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as ``value``.
    """
    assert rate is not None and 0.0 <= rate < 1.0
    # Outside of training (or with rate 0) dropout is the identity, so the
    # fused op degenerates to a plain bias_add.
    if not flow.current_global_function_desc().IsTrainable() or rate == 0.0:
        return flow.nn.bias_add(value, bias, data_format, name)
    if name is None:
        name = id_util.UniqueStr("BiasAddDropout_")
    mask = flow.nn.random_mask_like(
        value, rate, seed, noise_shape, "%s-dropout_random_mask_like" % name
    )
    # Resolve the channel axis from the layout string.
    if data_format is None or data_format.startswith("NC"):
        bias_add_axis = 1
    elif data_format.startswith("N") and data_format.endswith("C"):
        bias_add_axis = len(value.shape) - 1
    else:
        raise ValueError("data_format must be of the form `N...C` or `NC...`")
    builder = (
        flow.user_op_builder(name)
        .Op("fused_bias_add_mask_scale")
        .Input("a", [value])
        .Input("b", [bias])
        .Input("mask", [mask])
        .Output("out")
        .Attr("axis", bias_add_axis)
        # Inverted-dropout scaling so expectation is preserved at train time.
        .Attr("scale", float(1.0 / (1.0 - rate)))
    )
    return builder.Build().InferAndTryRun().RemoteBlobList()[0]
@oneflow_export("nn.max_pool1d")
def max_pool1d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, Sequence[int]],
    strides: Union[int, Sequence[int]],
    padding: Union[str, Sequence[Sequence[int]]],
    data_format: str = "NWC",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs 1d max pooling on the input (currently unavailable).

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 3-D `Blob` in the layout
            given by ``data_format``.
        ksize (Union[int, Sequence[int]]): Window size; an int or a list of
            length 1 or 3.
        strides (Union[int, Sequence[int]]): Sliding-window stride; an int or a
            list of length 1 or 3.
        padding (str): '`VALID'` or '`SAME'`.
        data_format (str, optional): '`NWC'` or '`NCW'`. Defaults to '`NWC'`.
        name (Optional[str], optional): This operator's name. Defaults to None.

    Raises:
        NotImplementedError: Always, until the cuDNN pooling_1d bugs are fixed.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The max pooled `Blob` (once implemented).
    """
    # TODO: fix cuDNN bugs in pooling_1d
    raise NotImplementedError
@oneflow_export("nn.avg_pool1d")
def avg_pool1d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, Sequence[int]],
    strides: Union[int, Sequence[int]],
    padding: Union[str, Sequence[Sequence[int]]],
    data_format: str = "NCW",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs 1d average pooling on the input (currently unavailable).

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 3-D `Blob` in the layout
            given by ``data_format``.
        ksize (Union[int, Sequence[int]]): Window size; an int or a list of
            length 1 or 3.
        strides (Union[int, Sequence[int]]): Sliding-window stride; an int or a
            list of length 1 or 3.
        padding (str): '`VALID'` or '`SAME'`.
        data_format (str, optional): '`NWC'` or '`NCW'`. Defaults to '`NCW'`.
        name (Optional[str], optional): This operator's name. Defaults to None.

    Raises:
        NotImplementedError: Always, until the cuDNN pooling_1d bugs are fixed.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The average pooled `Blob` (once implemented).
    """
    # TODO: fix cuDNN bugs in pooling_1d
    raise NotImplementedError
def calc_pool_padding(padding, dhw_offset, ndims):
    """Normalize a pooling ``padding`` argument into ``(padding_type, pads)``.

    A string padding ("SAME" is canonicalized to "SAME_LOWER") yields a
    lower-cased type name and an all-zero per-dim pad list; an explicit
    list/tuple yields type "customized" with pads from ``get_ndim_pads_list``.

    Raises:
        ValueError: If ``padding`` is neither a str nor a list/tuple.
    """
    if isinstance(padding, str):
        if padding.upper() == "SAME":
            padding = "SAME_LOWER"
        assert padding.upper() in ["VALID", "SAME_LOWER", "SAME_UPPER"]
        return padding.lower(), [[0, 0]] * ndims
    if isinstance(padding, (list, tuple)):
        return "customized", get_ndim_pads_list(padding, dhw_offset, ndims)
    raise ValueError("padding must be str or a list.")
@oneflow_export("nn.MaxPool1d")
@stable_api
def MaxPool1d(
    input: oneflow._oneflow_internal.BlobDesc,
    kernel_size: Union[int, IntPair],
    stride: Union[int, IntPair],
    padding: Union[str, IntPair],
    dilation: Union[int, IntPair] = 1,
    return_indices: bool = False,
    ceil_mode: bool = False,
    data_format: str = "NCHW",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 1d-max pooling on the input `Blob`.

    Different from nn.max_pool1d, nn.MaxPool1d supports more params,
    e.g. dilation, return_indices.

    Args:
        input (remote_blob_util.BlobDesc): A 3-D `Blob` of the format specified
            by data_format.
        kernel_size (Union[int, IntPair]): The size of the pooling window.
        stride (Union[int, IntPair]): The stride of the sliding window.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or a non-negative int.
        dilation (Union[int, IntPair]): Stride of elements within the window.
        return_indices (bool): If True, also return the argmax indices.
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        data_format (str, optional): Defaults to "NCHW"; for now only "NCHW"
            is supported.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        remote_blob_util.BlobDesc: The max pooled output `Blob`, or a
        ``(y, indice)`` pair when ``return_indices`` is True.
    """
    assert data_format in ["NCHW"]
    channel_pos = "channels_last" if data_format == "NHWC" else "channels_first"
    kernel_size = _GetSequence(kernel_size, 2, "kernel_size")
    dilation = _GetSequence(dilation, 2, "dilation")
    stride = _GetSequence(stride, 2, "stride")
    # BUGFIX: the old check `padding >= 0 or padding in [...]` evaluated
    # `padding >= 0` first, which raises TypeError in Python 3 whenever
    # padding is one of the documented strings ("SAME"/"VALID"); test the
    # type before comparing.
    assert (isinstance(padding, int) and padding >= 0) or padding in ["SAME", "VALID"]
    if isinstance(padding, int):
        # Expand the scalar pad to a full 4-d pad description (the 1d op is
        # implemented on top of the 2d kernel, see expand_dims below).
        if data_format == "NCHW":
            padding = (0, 0, padding, padding)
        elif data_format == "NHWC":
            padding = (0, padding, padding, 0)
        else:
            raise ValueError('data_format must be "NHWC" or "NCHW".')
    padding_type, pads_list = calc_pool_padding(padding, get_dhw_offset(channel_pos), 2)
    padding_before = [pad[0] for pad in pads_list]
    padding_after = [pad[1] for pad in pads_list]
    # Reuse maxpool_2d by inserting a unit height dimension at axis 2.
    expand_input = flow.expand_dims(input=input, axis=2)
    assert len(pads_list) == len(expand_input.shape) - 2
    y, indice = (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("MaxPool1d_")
        )
        .Op("maxpool_2d")
        .Input("x", [expand_input])
        .Output("y")
        .Output("indice")
        .Attr("data_format", channel_pos)
        .Attr("stride", stride)
        .Attr("kernel_size", kernel_size)
        .Attr("padding", padding_type)
        .Attr("padding_before", padding_before)
        .Attr("padding_after", padding_after)
        .Attr("dilation", dilation)
        .Attr("return_indices", return_indices)
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    # Drop the fake height dimension again.
    y = flow.squeeze(y, axis=(2,))
    indice = flow.squeeze(indice, axis=(2,))
    if return_indices:
        return y, indice
    else:
        return y
@oneflow_export("nn.MaxPool2d")
@stable_api
def MaxPool2d(
    input: oneflow._oneflow_internal.BlobDesc,
    kernel_size: Union[int, IntPair],
    stride: Union[int, IntPair],
    padding: Union[str, int, Tuple[int, int]],
    dilation: Union[int, IntPair] = 1,
    return_indices: bool = False,
    ceil_mode: bool = False,
    data_format: str = "NCHW",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 2d-max pooling on the input `Blob`.

    Different from nn.max_pool2d, nn.MaxPool2d supports more params,
    e.g. dilation, return_indices.

    Args:
        input (remote_blob_util.BlobDesc): A 4-D `Blob` of the format specified
            by data_format.
        kernel_size (Union[int, IntPair]): The size of the pooling window.
        stride (Union[int, IntPair]): The stride of the sliding window.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or an int or a ``Tuple[int, int]`` of (height, width) pads.
        dilation (Union[int, IntPair]): Stride of elements within the window.
        return_indices (bool): If True, also return the argmax indices.
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        data_format (str, optional): Defaults to "NCHW"; for now only "NCHW"
            is supported.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        remote_blob_util.BlobDesc: The max pooled output `Blob`, or a
        ``(y, indice)`` pair when ``return_indices`` is True.
    """
    assert data_format in ["NCHW"]
    channel_pos = "channels_last" if data_format == "NHWC" else "channels_first"
    kernel_size = _GetSequence(kernel_size, 2, "kernel_size")
    dilation = _GetSequence(dilation, 2, "dilation")
    stride = _GetSequence(stride, 2, "stride")
    assert isinstance(padding, int) or len(padding) == 2 or padding in ["SAME", "VALID"]
    if isinstance(padding, int):
        padding = [padding, padding]
    if len(padding) == 2:
        # Expand the (h, w) pad pair to a full 4-d pad description.
        if data_format == "NCHW":
            padding = (0, 0, padding[0], padding[1])
        elif data_format == "NHWC":
            padding = (0, padding[0], padding[1], 0)
        else:
            raise ValueError('data_format must be "NHWC" or "NCHW".')
    padding_type, pad_pairs = calc_pool_padding(
        padding, get_dhw_offset(channel_pos), 2
    )
    assert len(pad_pairs) == len(input.shape) - 2
    op_name = name if name is not None else id_util.UniqueStr("MaxPool2d_")
    y, indice = (
        flow.user_op_builder(op_name)
        .Op("maxpool_2d")
        .Input("x", [input])
        .Output("y")
        .Output("indice")
        .Attr("data_format", channel_pos)
        .Attr("stride", stride)
        .Attr("kernel_size", kernel_size)
        .Attr("padding", padding_type)
        .Attr("padding_before", [pair[0] for pair in pad_pairs])
        .Attr("padding_after", [pair[1] for pair in pad_pairs])
        .Attr("dilation", dilation)
        .Attr("return_indices", return_indices)
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    return (y, indice) if return_indices else y
@oneflow_export("nn.MaxPool3d")
@stable_api
def MaxPool3d(
    input: oneflow._oneflow_internal.BlobDesc,
    kernel_size: Union[int, IntPair],
    stride: Union[int, IntPair],
    padding: Union[str, int, Tuple[int, int, int]],
    dilation: Union[int, IntPair] = 1,
    return_indices: bool = False,
    ceil_mode: bool = False,
    data_format: str = "NCDHW",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 3d-max pooling on the input `Blob`.

    Different from nn.max_pool3d, nn.MaxPool3d supports more params,
    e.g. dilation, return_indices.

    Args:
        input (remote_blob_util.BlobDesc): A 5-D `Blob` of the format specified
            by data_format.
        kernel_size (Union[int, IntPair]): The size of the pooling window.
        stride (Union[int, IntPair]): The stride of the sliding window.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or an int or a ``Tuple[int, int, int]`` of (d, h, w) pads.
        dilation (Union[int, IntPair]): Stride of elements within the window.
        return_indices (bool): If True, also return the argmax indices.
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        data_format (str, optional): Defaults to "NCDHW"; for now only "NCDHW"
            is supported.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        remote_blob_util.BlobDesc: The max pooled output `Blob`, or a
        ``(y, indice)`` pair when ``return_indices`` is True.
    """
    assert data_format in ["NCDHW"]
    channel_pos = "channels_first" if data_format == "NCDHW" else "channels_last"
    kernel_size = _GetSequence(kernel_size, 3, "kernel_size")
    dilation = _GetSequence(dilation, 3, "dilation")
    stride = _GetSequence(stride, 3, "stride")
    assert (
        isinstance(padding, int)
        or isinstance(padding, Tuple)
        or padding in ["SAME", "VALID"]
    )
    if isinstance(padding, int):
        padding = (padding, padding, padding)
    if len(padding) == 3:
        # Expand the (d, h, w) pad triple to a full 5-d pad description.
        if data_format == "NCDHW":
            padding = (0, 0, padding[0], padding[1], padding[2])
        elif data_format == "NDHWC":
            padding = (0, padding[0], padding[1], padding[2], 0)
        else:
            # FIX: the old message named the nonexistent layout "NHWDC".
            raise ValueError('data_format must be "NDHWC" or "NCDHW".')
    padding_type, pads_list = calc_pool_padding(padding, get_dhw_offset(channel_pos), 3)
    padding_before = [pad[0] for pad in pads_list]
    padding_after = [pad[1] for pad in pads_list]
    assert len(pads_list) == len(input.shape) - 2
    y, indice = (
        flow.user_op_builder(
            name if name is not None else id_util.UniqueStr("MaxPool3d_")
        )
        .Op("maxpool_3d")
        .Input("x", [input])
        .Output("y")
        .Output("indice")
        .Attr("data_format", channel_pos)
        .Attr("stride", stride)
        .Attr("kernel_size", kernel_size)
        .Attr("padding", padding_type)
        .Attr("padding_before", padding_before)
        .Attr("padding_after", padding_after)
        .Attr("dilation", dilation)
        .Attr("return_indices", return_indices)
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()
    )
    if return_indices:
        return y, indice
    else:
        return y
@oneflow_export("nn.max_pool2d")
def max_pool2d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, IntPair],
    strides: Union[int, IntPair],
    padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]],
    data_format: str = "NCHW",
    ceil_mode: bool = False,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 2d-max pooling on the input `Blob`.

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 4-D `Blob` of the format
            specified by data_format.
        ksize (Union[int, IntPair]): The size of the pooling window.
        strides (Union[int, IntPair]): The stride of the sliding window.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or an explicit ``Tuple[IntPair, IntPair, IntPair, IntPair]``.
        data_format (str, optional): '`NHWC'`, '`NCHW'` or '`NCHW_VECT_C'`.
            Defaults to "NCHW".
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The max pooled output `Blob` in the
        same layout as ``input``.
    """
    op_name = name if name is not None else id_util.UniqueStr("MaxPool2D_")
    assert data_format in ["NHWC", "NCHW", "NCHW_VECT_C"]
    channel_pos = "channels_last" if data_format == "NHWC" else "channels_first"
    pool_size = _GetSequence(ksize, 2, "ksize")
    strides = _GetSequence(strides, 2, "strides")
    padding_type, pad_pairs = calc_pool_padding(
        padding, get_dhw_offset(channel_pos), 2
    )
    assert len(pad_pairs) == len(input.shape) - 2
    return (
        flow.user_op_builder(op_name)
        .Op("max_pool_2d")
        .Input("x", [input])
        .Output("y")
        .Attr("data_format", channel_pos)
        .Attr("pool_size", pool_size)
        .Attr("strides", strides)
        .Attr("padding", padding_type)
        .Attr("padding_before", [pair[0] for pair in pad_pairs])
        .Attr("padding_after", [pair[1] for pair in pad_pairs])
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("nn.avg_pool2d")
def avg_pool2d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, IntPair],
    strides: Union[int, IntPair],
    padding: Union[str, Tuple[IntPair, IntPair, IntPair, IntPair]],
    data_format: str = "NCHW",
    ceil_mode: bool = False,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 2d-average pooling on the input.

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 4-D `Blob` of shape
            [batch, height, width, channels].
        ksize (Union[int, IntPair]): The size of the pooling window.
        strides (Union[int, IntPair]): The stride of the sliding window.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or an explicit ``Tuple[IntPair, IntPair, IntPair, IntPair]``.
        data_format (str, optional): '`NHWC'` or '`NCHW'`. Defaults to "NCHW".
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The average pooled output `Blob`
        with the same type as ``input``.
    """
    op_name = name if name is not None else id_util.UniqueStr("AvgPool2D_")
    assert data_format in ["NHWC", "NCHW", "NCHW_VECT_C"]
    channel_pos = "channels_last" if data_format == "NHWC" else "channels_first"
    pool_size = _GetSequence(ksize, 2, "ksize")
    strides = _GetSequence(strides, 2, "strides")
    padding_type, pad_pairs = calc_pool_padding(
        padding, get_dhw_offset(channel_pos), 2
    )
    assert len(pad_pairs) == len(input.shape) - 2
    return (
        flow.user_op_builder(op_name)
        .Op("avg_pool_2d")
        .Input("x", [input])
        .Output("y")
        .Attr("data_format", channel_pos)
        .Attr("pool_size", pool_size)
        .Attr("strides", strides)
        .Attr("padding", padding_type)
        .Attr("padding_before", [pair[0] for pair in pad_pairs])
        .Attr("padding_after", [pair[1] for pair in pad_pairs])
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("nn.max_pool3d")
def max_pool3d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, Sequence[int]],
    strides: Union[int, Sequence[int]],
    padding: Union[str, Sequence[Sequence[int]]],
    data_format: str = "NCDHW",
    ceil_mode: bool = False,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 3d-max pooling on the input.

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 5-D `Blob` of the format
            specified by data_format.
        ksize (Union[int, Sequence[int]]): Window size; an int or a list of
            length 1, 3 or 5.
        strides (Union[int, Sequence[int]]): Sliding-window stride; an int or a
            list of length 1, 3 or 5.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or '`SAME_UPPER'`
            or '`Sequence[Sequence[int]]'`.
        data_format (str, optional): "NDHWC" or "NCDHW". Defaults to "NCDHW".
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        name (Optional[str], optional): This operator's name(optional).

    Returns:
        oneflow._oneflow_internal.BlobDesc: The max pooled output `Blob` in the
        same layout as ``input``.
    """
    op_name = name if name is not None else id_util.UniqueStr("MaxPool3D_")
    assert data_format in ["NDHWC", "NCDHW"]
    channel_pos = "channels_last" if data_format == "NDHWC" else "channels_first"
    pool_size = _GetSequence(ksize, 3, "ksize")
    strides = _GetSequence(strides, 3, "strides")
    padding_type, pad_pairs = calc_pool_padding(
        padding, get_dhw_offset(channel_pos), 3
    )
    assert len(pad_pairs) == len(input.shape) - 2
    return (
        flow.user_op_builder(op_name)
        .Op("max_pool_3d")
        .Input("x", [input])
        .Output("y")
        .Attr("data_format", channel_pos)
        .Attr("pool_size", pool_size)
        .Attr("strides", strides)
        .Attr("padding", padding_type)
        .Attr("padding_before", [pair[0] for pair in pad_pairs])
        .Attr("padding_after", [pair[1] for pair in pad_pairs])
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("nn.avg_pool3d")
def avg_pool3d(
    input: oneflow._oneflow_internal.BlobDesc,
    ksize: Union[int, Sequence[int]],
    strides: Union[int, Sequence[int]],
    padding: Union[str, Sequence[Sequence[int]]],
    data_format: str = "NCDHW",
    ceil_mode: bool = False,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Performs the 3d-average pooling on the input.

    Args:
        input (oneflow._oneflow_internal.BlobDesc): A 5-D `Blob` of shape
            [batch, height, width, channels].
        ksize (Union[int, Sequence[int]]): Window size; an int or a list of
            length 1, 3 or 5.
        strides (Union[int, Sequence[int]]): Sliding-window stride; an int or a
            list of length 1, 3 or 5.
        padding (str): '`VALID'` or '`SAME'` or '`SAME_LOWER'` or
            '`SAME_UPPER or Sequence[Sequence[int]]'`.
        data_format (str, optional): '`NDHWC'` or '`NCDHW'`. Defaults to "NCDHW".
        ceil_mode (bool): When True, use ceil instead of floor to compute the
            output shape.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: The average pooled output `Blob`
        with the same type as ``input``.
    """
    op_name = name if name is not None else id_util.UniqueStr("AvgPool3D_")
    assert data_format in ["NDHWC", "NCDHW"]
    channel_pos = "channels_last" if data_format == "NDHWC" else "channels_first"
    pool_size = _GetSequence(ksize, 3, "ksize")
    strides = _GetSequence(strides, 3, "strides")
    padding_type, pad_pairs = calc_pool_padding(
        padding, get_dhw_offset(channel_pos), 3
    )
    assert len(pad_pairs) == len(input.shape) - 2
    return (
        flow.user_op_builder(op_name)
        .Op("avg_pool_3d")
        .Input("x", [input])
        .Output("y")
        .Attr("data_format", channel_pos)
        .Attr("pool_size", pool_size)
        .Attr("strides", strides)
        .Attr("padding", padding_type)
        .Attr("padding_before", [pair[0] for pair in pad_pairs])
        .Attr("padding_after", [pair[1] for pair in pad_pairs])
        .Attr("ceil_mode", ceil_mode)
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
def _softmax_need_transpose(x, axis):
assert type(axis) is int
dim_num = len(x.shape)
assert dim_num >= 2
if axis < 0:
axis += dim_num
assert axis >= 0
assert axis < dim_num
need_transpose = False
permute = list(range(dim_num))
if axis != dim_num - 1:
need_transpose = True
permute[axis] = permute[-1]
permute[-1] = axis
return need_transpose, permute
@oneflow_export("nn.softmax")
def softmax(
    logits: oneflow._oneflow_internal.BlobDesc,
    axis: Optional[int] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Computes softmax activations.

    For each element, we apply:

    .. math::
        S_i = \frac{e^i}{\sum_1^j e^j }

    Args:
        logits (oneflow._oneflow_internal.BlobDesc): A non-empty `Blob`.
        axis (Optional[int], optional): The dimension softmax is performed on.
            Defaults to None, meaning the last dimension.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type and
        shape as ``logits``.

    Raises:
        InvalidArgumentError: if logits is empty or axis is beyond the last
            dimension of logits.
    """
    axis = -1 if axis is None else axis
    # The kernel reduces over the last dim; transpose the target axis there.
    need_transpose, permute = _softmax_need_transpose(logits, axis)
    if need_transpose:
        logits = flow.transpose(logits, perm=permute)
    op_name = name if name is not None else id_util.UniqueStr("Softmax_")
    out = (
        flow.user_op_builder(op_name)
        .Op("softmax")
        .Input("in", [logits])
        .Output("out")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
    # The swap permutation is its own inverse, so reuse it to restore layout.
    return flow.transpose(out, perm=permute) if need_transpose else out
@oneflow_export("nn.logsoftmax")
def logsoftmax(
    logits: oneflow._oneflow_internal.BlobDesc,
    axis: Optional[int] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Computes logsoftmax activations.

    For each element, we apply:

    .. math::
        LogSoftmax(x_i) = Log(\frac{e^i}{\sum_1^j e^j })

    Args:
        logits (oneflow._oneflow_internal.BlobDesc): A non-empty `Blob`.
        axis (Optional[int], optional): The dimension logsoftmax is performed
            on. Defaults to None, meaning the last dimension.
        name (Optional[str], optional): This operator's name(optional).
            Defaults to None.

    Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type and
        shape as ``logits``.

    Raises:
        InvalidArgumentError: if logits is empty or axis is beyond the last
            dimension of logits.
    """
    axis = -1 if axis is None else axis
    if name is None:
        name = id_util.UniqueStr("logsoftmax")
    # Composed as log(softmax(x)); both sub-ops share the base name.
    softmax_out = flow.nn.softmax(logits, axis, name=name + "_softmax")
    return flow.math.log(softmax_out, name=name + "_log")
@oneflow_export("nn.softmax_grad")
def softmax_grad(
    y: oneflow._oneflow_internal.BlobDesc,
    dy: oneflow._oneflow_internal.BlobDesc,
    axis: Optional[int] = None,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Computes gradient of softmax activations.

    Args:
        y (oneflow._oneflow_internal.BlobDesc): A `Blob` representing the
            softmax of x.
        dy (oneflow._oneflow_internal.BlobDesc): gradient of y.
        axis (Optional[int], optional): The dimension softmax would be
            performed on. Defaults to None, meaning the last dimension.
        name (Optional[str], optional): This operator's name(optional).

    Returns:
        oneflow._oneflow_internal.BlobDesc: A `Blob` representing the gradient
        of x.
    """
    if axis is None:
        axis = -1
    # The kernel reduces over the last dim; transpose the target axis there.
    need_transpose, permute = _softmax_need_transpose(y, axis)
    if need_transpose:
        y = flow.transpose(y, perm=permute)
        dy = flow.transpose(dy, perm=permute)
    dx = (
        flow.user_op_builder(
            # FIX: auto-generated names used the "Softmax_" prefix copied from
            # softmax(), mislabeling grad ops; use a grad-specific prefix.
            name if name is not None else id_util.UniqueStr("SoftmaxGrad_")
        )
        .Op("softmax_grad")
        .Input("y", [y])
        .Input("dy", [dy])
        .Output("dx")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
    if need_transpose:
        dx = flow.transpose(dx, perm=permute)
    return dx
@oneflow_export("nn.sparse_cross_entropy")
def sparse_cross_entropy(
labels: oneflow._oneflow_internal.BlobDesc,
prediction: oneflow._oneflow_internal.BlobDesc,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Computes sparse cross entropy
Args:
labels (oneflow._oneflow_internal.BlobDesc): A `Blob` of shape [d_0, d_1, ..., d_{r-1}] (where r is rank of labels and result). Each entry in labels must be an index in [0, num_classes).
prediction (oneflow._oneflow_internal.BlobDesc): A `Blob` with the rank that is equal to the rank of the labels plus one.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` of the same shape as labels.
Note:
The labels data type should be `oneflow.int32`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def sparse_cross_entropy_Job(input: tp.Numpy.Placeholder((5, 2), dtype=flow.float32),
labels: tp.Numpy.Placeholder((5,), dtype=flow.int32)
) -> tp.Numpy:
loss = flow.nn.sparse_cross_entropy(labels=labels,
prediction=input)
return loss
x = np.array([[0.3, 0.7],
[0.4, 0.6],
[0.5, 0.5],
[0.1, 0.9],
[0.2, 0.8]]).astype(np.float32)
labels = np.array([0, 1, 1, 0, 1]).astype(np.int32)
loss = sparse_cross_entropy_Job(x, labels)
# out [1.2039728 0.5108256 0.6931472 2.3025851 0.22314353]
"""
assert labels is not None
assert prediction is not None
if len(labels.shape) == len(prediction.shape):
assert labels.shape[-1] == 1
labels = flow.squeeze(labels, axis=[-1])
else:
assert len(labels.shape) == len(prediction.shape) - 1
if prediction.distribute is oneflow._oneflow_internal.distribute.split(
len(prediction.shape) - 1
):
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("SparseCrossEntropyMs_")
)
.Op("sparse_cross_entropy_ms")
.Input("prediction", [prediction])
.Input("label", [labels])
.Output("out")
.Attr("depth", int(prediction.shape[-1]))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
else:
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("SparseCrossEntropy_")
)
.Op("sparse_cross_entropy")
.Input("prediction", [prediction])
.Input("label", [labels])
.Output("out")
.Attr("depth", int(prediction.shape[-1]))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.softmax_cross_entropy_with_logits")
def softmax_cross_entropy_with_logits(
labels: oneflow._oneflow_internal.BlobDesc,
logits: oneflow._oneflow_internal.BlobDesc,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Computes softmax cross entropy between logits and labels.
Args:
labels (oneflow._oneflow_internal.BlobDesc): Each vector along the class dimension should hold a valid probability distribution.
logits (oneflow._oneflow_internal.BlobDesc): Per-label activations, typically a linear output. logits has same shape and dtype as labels.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` that contains the softmax cross entropy loss. Its type is the same as logits and its shape is the same as labels except that it does not have the last dimension of labels.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def softmax_cross_entropy_Job(input: tp.Numpy.Placeholder((3, 3), dtype=flow.float32),
labels: tp.Numpy.Placeholder((3, 3), dtype=flow.float32)
) -> tp.Numpy:
loss = flow.nn.softmax_cross_entropy_with_logits(labels=labels,
logits=input)
return loss
x = np.array([[4, 1, 2],
[3, 2, 3],
[1, 5, 10]]).astype(np.float32)
labels = np.array([[0.9, 0.05, 0.05],
[0.3, 0.4, 0.3],
[0.8, 0.1, 0.1]]).astype(np.float32)
loss = softmax_cross_entropy_Job(x, labels)
# out [0.73441553 1.1240788 1.4488925 ]
"""
assert labels is not None
assert logits is not None
assert labels.shape == logits.shape
assert labels.dtype == logits.dtype
prob, out = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("SoftmaxCrossEntropy_")
)
.Op("softmax_cross_entropy")
.Input("prediction", [logits])
.Input("label", [labels])
.Output("prob")
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
return out
@oneflow_export("nn.sparse_softmax_cross_entropy_with_logits")
def sparse_softmax_cross_entropy_with_logits(
labels: oneflow._oneflow_internal.BlobDesc,
logits: oneflow._oneflow_internal.BlobDesc,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Computes sparse softmax cross entropy between logits and labels.
Args:
labels (oneflow._oneflow_internal.BlobDesc): `Blob` of shape [d_0, d_1, ..., d_{r-1}] (where r is rank of labels and result). Each entry in labels must be an index in [0, num_classes).
logits (oneflow._oneflow_internal.BlobDesc): Unscaled log probabilities of shape [d_0, d_1, ..., d_{r-1},num_classes].
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Raises:
ValueError: If logits are scalars (need to have rank >= 1) or if the rank of the labels is not equal to the rank of the logits minus one.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` of the same shape as labels and of the same type as logits with the softmax cross entropy loss.
Note:
The labels data type should be `oneflow.int32`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def sparse_softmax_cross_entropy_Job(input: tp.Numpy.Placeholder((3, 3), dtype=flow.float32),
labels: tp.Numpy.Placeholder((3, ), dtype=flow.int32)
) -> tp.Numpy:
loss = flow.nn.sparse_softmax_cross_entropy_with_logits(labels=labels,
logits=input)
return loss
x = np.array([[4, 1, 2],
[3, 2, 3],
[1, 5, 10]]).astype(np.float32)
labels = np.array([0, 1, 2]).astype(np.int32)
loss = sparse_softmax_cross_entropy_Job(x, labels)
# out [0.65784633 1.2842525 0.5557927 ]
"""
assert labels is not None
assert logits is not None
if len(labels.shape) == len(logits.shape):
assert labels.shape[-1] == 1
labels = flow.squeeze(labels, axis=[-1])
else:
assert len(labels.shape) == len(logits.shape) - 1
if logits.distribute is oneflow._oneflow_internal.distribute.split(
len(logits.shape) - 1
):
prob, out = (
flow.user_op_builder(
name
if name is not None
else id_util.UniqueStr("SparseSoftmaxCrossEntropyMs_")
)
.Op("sparse_softmax_cross_entropy_ms")
.Input("prediction", [logits])
.Input("label", [labels])
.Output("prob")
.Output("out")
.Attr("depth", int(logits.shape[-1]))
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
else:
prob, out = (
flow.user_op_builder(
name
if name is not None
else id_util.UniqueStr("SparseSoftmaxCrossEntropy_")
)
.Op("sparse_softmax_cross_entropy")
.Input("prediction", [logits])
.Input("label", [labels])
.Output("prob")
.Output("out")
.Attr("depth", int(logits.shape[-1]))
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
return out
@oneflow_export("nn.distributed_sparse_softmax_cross_entropy_with_logits")
def distributed_sparse_softmax_cross_entropy_with_logits(
labels: oneflow._oneflow_internal.BlobDesc,
logits: oneflow._oneflow_internal.BlobDesc,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
assert labels is not None
assert logits is not None
if len(labels.shape) == len(logits.shape):
assert labels.shape[-1] == 1
labels = flow.squeeze(labels, axis=[-1])
else:
assert len(labels.shape) == len(logits.shape) - 1
prob, out = (
flow.user_op_builder(
name
if name is not None
else id_util.UniqueStr("DistributedSparseSoftmaxCrossEntropy_")
)
.Op("sparse_softmax_cross_entropy_ms")
.Input("prediction", [logits])
.Input("label", [labels])
.Output("prob")
.Output("out")
.Attr("depth", int(logits.shape[-1]))
.Build()
.InferAndTryRun()
.RemoteBlobList()
)
return out
@oneflow_export("nn.sigmoid_cross_entropy_with_logits")
def sigmoid_cross_entropy_with_logits(
labels: oneflow._oneflow_internal.BlobDesc,
logits: oneflow._oneflow_internal.BlobDesc,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Computes sigmoid cross entropy given logits.
Args:
labels (oneflow._oneflow_internal.BlobDesc): A `Blob` of the same type and shape as logits.
logits (oneflow._oneflow_internal.BlobDesc): A `Blob` of type float.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` of the same shape as logits with the componentwise logistic losses.
Raises:
ValueError: If logits and labels do not have the same shape.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def sigmoid_cross_entropy_Job(input: tp.Numpy.Placeholder((3, 2), dtype=flow.float32),
labels: tp.Numpy.Placeholder((3, 2), dtype=flow.float32)
) -> tp.Numpy:
loss = flow.nn.sigmoid_cross_entropy_with_logits(labels=labels,
logits=input)
return loss
x = np.array([[4, 1],
[3, 2],
[1, 5]]).astype(np.float32)
labels = np.array([[0.7, 0.3],
[0.4, 0.6],
[0.2, 0.8]]).astype(np.float32)
loss = sigmoid_cross_entropy_Job(x, labels)
# out [[0.612735 0.90472794]
# [0.89778364 0.6990613 ]
# [0.97783387 0.51372755]]
"""
assert labels is not None
assert logits is not None
op = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("SigmoidCrossEntropy_")
)
.Op("sigmoid_cross_entropy")
.Input("prediction", [logits])
.Input("label", [labels])
.Output("loss")
.Build()
)
return op.InferAndTryRun().RemoteBlobList()[0]
def _GetSequence(value, n, name):
"""Formats value from input"""
if value is None:
value = [1]
elif not isinstance(value, collections.Sized):
value = [value]
current_n = len(value)
if current_n == 1:
return list(value * n)
elif current_n == n:
return list(value)
else:
raise ValueError(
"{} should be of length 1 or {} but was {}".format(name, n, current_n)
)
@oneflow_export("nn.random_mask_like")
def random_mask_like(
like: oneflow._oneflow_internal.BlobDesc,
rate: float,
seed: Optional[int] = None,
noise_shape: Optional[Sequence] = None,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Random mask `Blob` with same shape as '`like'`.
Args:
like (oneflow._oneflow_internal.BlobDesc): A `Blob`.
rate (float): A float value for the probability that each element is dropped.
seed (Optional[int], optional): Optional, int value. Defaults to None.
noise_shape (Optional[Sequence], optional): Optional, A 1-D `Blob`, representing the shape for randomly generated keep/drop flags. Defaults to None.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A random mask `Blob` of the same shape of `like`.
Raises:
ValueError: If rate is not in [0, 1). Rate=1 is not allowed.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def random_mask_like_Job(like: tp.Numpy.Placeholder((5, 5), dtype=flow.float32)
) -> tp.Numpy:
return flow.nn.random_mask_like(like=like,
rate=0.5)
like = np.ones(shape=(5, 5)).astype(np.float32)
random_mask = random_mask_like_Job(like)
# out [[0 0 0 0 0]
# [1 1 1 0 0]
# [1 0 1 1 0]
# [0 0 0 0 1]
# [1 0 1 1 1]]
"""
assert rate is not None and rate >= 0.0 and rate < 1.0
if noise_shape is not None:
assert 0, "noise_shape will be supported later."
assert isinstance(noise_shape, (list, tuple))
if seed is not None:
assert name is not None
if name is None:
mask_op = (
flow.user_op_builder(id_util.UniqueStr("RandomMaskLike_"))
.Op("random_mask_like")
.Input("like", [like])
.Output("out")
.Attr("rate", float(rate))
)
if seed is not None:
mask_op.Attr("seed", seed)
else:
mask_op.Attr("seed", random.randint(-sys.maxsize, sys.maxsize))
return mask_op.Build().InferAndTryRun().RemoteBlobList()[0]
else:
module = flow.find_or_create_module(
name, lambda: RandomMaskLike(rate=rate, seed=seed, name=name,),
)
return module(like)
class RandomMaskLike(module_util.Module):
    """Reusable module wrapping the "random_mask_like" user op.

    Building and type-checking the op configuration once at construction lets
    a named mask op be reused across calls instead of rebuilt every time.
    """

    def __init__(
        self, rate: float, seed: Optional[int] = None, name: str = None,
    ):
        module_util.Module.__init__(self, name)
        # A missing seed is replaced by a random one so the kernel config is fixed.
        if seed is None:
            seed = random.randint(-sys.maxsize, sys.maxsize)
        # Pre-build and validate the op configuration once; `rate` is the drop
        # probability attribute of the underlying user op.
        self.op_module_builder = (
            flow.user_op_module_builder("random_mask_like")
            .InputSize("like", 1)
            .Output("out")
            .Attr("rate", float(rate))
            .Attr("seed", seed)
            .CheckAndComplete()
        )
        self.op_module_builder.user_op_module.InitOpKernel()

    def forward(self, like: oneflow._oneflow_internal.BlobDesc):
        """Apply the cached mask op to `like` and return the mask blob."""
        if self.call_seq_no == 0:
            # First invocation reuses the module's own name; later ones get
            # a fresh unique name so op names never collide.
            name = self.module_name
        else:
            name = id_util.UniqueStr("RandomMaskLike_")
        return (
            self.op_module_builder.OpName(name)
            .Input("like", [like])
            .Build()
            .InferAndTryRun()
            .RemoteBlobList()[0]
        )
@oneflow_export("nn.dropout")
def dropout(
x: oneflow._oneflow_internal.BlobDesc,
rate: float,
noise_shape: Optional[oneflow._oneflow_internal.BlobDesc] = None,
seed: Optional[int] = None,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""For preventing overfitting, randomly set elements to zero.
Args:
x (oneflow._oneflow_internal.BlobDesc): A floating point `Blob`.
rate (float): A scalar `Blob` with the same type as x. The probability that each element is dropped.
noise_shape (Optional[oneflow._oneflow_internal.BlobDesc], optional): optional: A 1-D `Blob`, representing the shape for randomly generated keep/drop flags. Defaults to None.Defaults to None.
seed (Optional[int], optional): Optional int value. Defaults to None.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` of the same shape of x.
Raises:
ValueError: If rate is not in [0, 1) or if x is not a floating point `Blob`. Rate=1 is not allowed.
For example:
.. code-block:: python
import oneflow as flow
def lenet(data, train=False):
initializer = flow.truncated_normal(0.1)
conv1 = flow.layers.conv2d(
data,
32,
5,
padding="SAME",
activation=flow.nn.relu,
name="conv1",
kernel_initializer=initializer,
)
pool1 = flow.nn.max_pool2d(
conv1, ksize=2, strides=2, padding="SAME", name="pool1", data_format="NCHW"
)
conv2 = flow.layers.conv2d(
pool1,
64,
5,
padding="SAME",
activation=flow.nn.relu,
name="conv2",
kernel_initializer=initializer,
)
pool2 = flow.nn.max_pool2d(
conv2, ksize=2, strides=2, padding="SAME", name="pool2", data_format="NCHW"
)
reshape = flow.reshape(pool2, [pool2.shape[0], -1])
hidden = flow.layers.dense(
reshape,
512,
activation=flow.nn.relu,
kernel_initializer=initializer,
name="dense1",
)
if train:
hidden = flow.nn.dropout(hidden, rate=0.5, name="dropout")
return flow.layers.dense(hidden, 10, kernel_initializer=initializer, name="dense2")
"""
assert rate is not None and rate >= 0.0 and rate < 1.0
if not flow.current_global_function_desc().IsTrainable() or rate == 0.0:
return x
if seed is not None:
assert name is not None
if name is None:
name = id_util.UniqueStr("Dropout_")
mask = random_mask_like(
x, rate, seed, noise_shape, "%s-dropout_random_mask_like" % name
)
return (
flow.user_op_builder(name)
.Op("dropout")
.Input("in", [x])
.Input("mask", [mask])
.Output("out")
.Attr("scale", float(1.0 / (1.0 - rate)))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.conv2d_transpose")
def deconv2d(
value: Optional[oneflow._oneflow_internal.BlobDesc] = None,
filter: Optional[oneflow._oneflow_internal.BlobDesc] = None,
output_shape: Tuple[int, int, int, int] = None,
strides: Optional[Union[int, Sequence[int]]] = None,
padding: str = "VALID",
data_format: str = "NCHW",
name: Optional[str] = None,
input: Optional[oneflow._oneflow_internal.BlobDesc] = None,
filters: Optional[oneflow._oneflow_internal.BlobDesc] = None,
dilations: Optional[Union[int, Sequence[int]]] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""2d transposed convolution.
Args:
value (Optional[oneflow._oneflow_internal.BlobDesc], optional): 4-d `Blob`. Defaults to None.
filter (Optional[oneflow._oneflow_internal.BlobDesc], optional): Filter of transposed convolution, usually a variable. Defaults to None.
output_shape (Tuple[int, int, int, int]): A 1-D `Blob` representing the output shape of the deconvolution op. Defaults to None.
strides (Optional[Union[int, Sequence[int]]], optional): `int` or `int list`. Defaults to None.
padding (str, optional): `'VALID'` or `'SAME'`. Defaults to "VALID".
data_format (str, optional): `'NHWC'` or `'NCHW'`. Defaults to "NCHW".
name (Optional[str], optional): This operator's name(optional). Defaults to None.
input (Optional[oneflow._oneflow_internal.BlobDesc], optional): Alias for value. Defaults to None.
filters (Optional[oneflow._oneflow_internal.BlobDesc], optional): Alias for filter. Defaults to None.
dilations (Optional[Union[int, Sequence[int]]], optional): The dilation factor for each dimension of input. Defaults to None.
Raises:
ValueError: shapes of `filter` and `input` must match.
ValueError: dilations must be an int or a list.
ValueError: data_format must be "NHWC" or "NCHW".
ValueError: padding must be "SAME" or "VALID".
Returns:
oneflow._oneflow_internal.BlobDesc: A `Blob` with the same type as `value`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
def deconv2d(input, filters, kernel_size, strides, padding, name):
input_shape = input.shape
weight_initializer = flow.truncated_normal(0.1)
weight_regularizer = flow.regularizers.l2(0.0005)
weight_shape = (filters,
input_shape[1],
kernel_size[0],
kernel_size[1])
weight = flow.get_variable(
name + "-weight",
shape=weight_shape,
initializer=weight_initializer,
regularizer=weight_regularizer,
)
return flow.nn.conv2d_transpose(value=input,
output_shape=(1, 32, 64, 64),
filter=weight,
strides=strides,
padding=padding,
name=name)
@flow.global_function()
def deconv2d_Job(x: tp.Numpy.Placeholder((1, 32, 32, 32),)
) -> tp.Numpy:
deconv = deconv2d(x,
filters=32,
kernel_size=[3, 3],
strides=2,
padding='SAME',
name="Convlayer")
return deconv
x = np.random.randn(1, 32, 32, 32).astype(np.float32)
out = deconv2d_Job(x)
# out.shape (1, 32, 64, 64)
"""
assert (value is not None) ^ (
input is not None
), "only one of `input` and `value` could be not None"
assert (filter is not None) ^ (
filters is not None
), "only one of `filter` and `filters` could be not None"
filters = filters or filter
input = input or value
NDims = 2
assert len(input.shape) == 2 + NDims
assert len(filters.shape) == 2 + NDims
assert len(output_shape) == 2 + NDims
assert output_shape[0] == input.shape[0]
# dilations
if dilations is None:
dilations = [1, 1]
else:
if isinstance(dilations, (list, tuple)):
assert len(dilations) == 2, ValueError(
"dilations length must be 2 when passed as a list."
)
elif isinstance(dilations, int):
dilations = [dilations, dilations]
else:
raise ValueError("dilations must be an int or a list.")
# data format
if data_format.upper() == "NCHW":
input_shape = input.shape[2:]
kernel_size = filters.shape[2:4]
channels = filters.shape[1]
assert output_shape[1] == channels
output_shape = output_shape[2:4]
elif data_format.upper() == "NHWC":
input_shape = input.shape[1:3]
kernel_size = filters.shape[-3:-1]
channels = filters.shape[3]
assert output_shape[3] == channels
output_shape = output_shape[1:3]
assert dilations == [1, 1], ValueError(
"dialtions must be 1 when data format is NHWC "
)
else:
raise ValueError('data_format must be "NHWC" or "NCHW".')
channel_pos = "channels_first" if data_format.startswith("NC") else "channels_last"
# strides
if isinstance(strides, (list, tuple)):
assert len(strides) == NDims, ValueError(
"strides length must be 2 when passed as a list."
)
elif isinstance(strides, int):
strides = [strides, strides]
else:
raise ValueError("strides must be an int or a list.")
# output_padding and padding_needed
output_padding = [0] * NDims
padding_needed = [0] * NDims
if padding.upper() == "VALID":
for i in range(NDims):
effective_filter_size = (kernel_size[i] - 1) * dilations[i] + 1
assert (output_shape[i] + strides[i] - effective_filter_size) // strides[
i
] == input_shape[i]
tmp_output_shape = (input_shape[i] - 1) * strides[i] + effective_filter_size
output_padding[i] = output_shape[i] - tmp_output_shape
elif padding.upper() == "SAME":
padding_left = [0] * NDims
padding_right = [0] * NDims
for i in range(NDims):
assert (output_shape[i] + strides[i] - 1) // strides[i] == input_shape[i]
effective_filter_size = (kernel_size[i] - 1) * dilations[i] + 1
padding_needed[i] = max(
0,
(input_shape[i] - 1) * strides[i]
+ effective_filter_size
- output_shape[i],
)
tmp_output_shape = (
(input_shape[i] - 1) * strides[i]
+ effective_filter_size
- padding_needed[i]
)
output_padding[i] = output_shape[i] - tmp_output_shape
padding_left[i] = padding_needed[i] // 2
padding_right[i] = padding_needed[i] - padding_needed[i] // 2
else:
raise ValueError('padding must be "SAME" or "VALID".')
# add pad op if needs odd padding
if padding.upper() == "SAME" and padding_left != padding_right:
assert data_format.upper() == "NCHW"
padding_before = [0] * NDims
input = (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("Deconv2d_")
)
.Op("deconv2d")
.Input("in", [input])
.Input("weight", [filters])
.Output("out")
.Attr("filters", channels)
.Attr("padding_before", padding_before)
.Attr("data_format", channel_pos)
.Attr("kernel_size", kernel_size)
.Attr("strides", strides)
.Attr("dilation_rate", dilations)
.Attr("output_padding", output_padding)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
return flow.pad_grad(
input,
[
(0, 0),
(0, 0),
(padding_left[0], padding_right[0]),
(padding_left[1], padding_right[1]),
],
name=name + "_pad_grad" if name is not None else None,
)
assert len(padding_needed) == len(input.shape) - 2
padding_before = []
for pad in padding_needed:
assert pad % 2 == 0
padding_before.append(pad // 2)
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("Deconv2d_")
)
.Op("deconv2d")
.Input("in", [input])
.Input("weight", [filters])
.Output("out")
.Attr("filters", channels)
.Attr("padding_before", padding_before)
.Attr("data_format", channel_pos)
.Attr("kernel_size", kernel_size)
.Attr("strides", strides)
.Attr("dilation_rate", dilations)
.Attr("output_padding", output_padding)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.torch_conv2d_transpose")
def deconv2d_torch(
value=None,
filter=None,
output_padding=None,
strides=None,
padding_needed=None,
data_format="NCHW",
name=None,
input=None,
filters=None,
dilations=None,
):
assert (value is not None) ^ (
input is not None
), "only one of `input` and `value` could be not None"
assert (filter is not None) ^ (
filters is not None
), "only one of `filter` and `filters` could be not None"
filters = filters or filter
input = input or value
NDims = 2
assert len(input.shape) == 2 + NDims
assert len(filters.shape) == 2 + NDims
# dilations
if dilations is None:
dilations = [1, 1]
else:
if isinstance(dilations, (list, tuple)):
assert len(dilations) == 2, ValueError(
"dilations length must be 2 when passed as a list."
)
elif isinstance(dilations, int):
dilations = [dilations, dilations]
else:
raise ValueError("dilations must be an int or a list.")
# data format
if data_format.upper() == "NCHW":
input_shape = input.shape[2:]
kernel_size = filters.shape[2:4]
channels = filters.shape[1]
elif data_format.upper() == "NHWC":
input_shape = input.shape[1:3]
kernel_size = filters.shape[-3:-1]
channels = filters.shape[3]
assert dilations == [1, 1], ValueError(
"dialtions must be 1 when data format is NHWC "
)
else:
raise ValueError('data_format must be "NHWC" or "NCHW".')
channel_pos = "channels_first" if data_format.startswith("NC") else "channels_last"
# strides
if isinstance(strides, (list, tuple)):
assert len(strides) == NDims, ValueError(
"strides length must be 2 when passed as a list."
)
elif isinstance(strides, int):
strides = [strides, strides]
else:
raise ValueError("strides must be an int or a list.")
# output_padding and padding_needed
assert len(padding_needed) == len(input.shape) - 2
padding_before = []
for pad in padding_needed:
assert pad % 2 == 0
padding_before.append(pad // 2)
if output_padding is None:
output_padding = (0, 0)
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("Deconv2d_")
)
.Op("deconv2d")
.Input("in", [input])
.Input("weight", [filters])
.Output("out")
.Attr("filters", channels)
.Attr("padding_before", padding_before)
.Attr("data_format", channel_pos)
.Attr("kernel_size", kernel_size)
.Attr("strides", strides)
.Attr("dilation_rate", dilations)
.Attr("output_padding", output_padding)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.leaky_relu")
def leaky_relu(
x: oneflow._oneflow_internal.BlobDesc,
alpha: float = 0.2,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""Leaky ReLU activation.
.. math::
out = max(x, alpha*x)
Args:
x (oneflow._oneflow_internal.BlobDesc): A `Blob` representing preactivation values.
alpha (float, optional): Slope of the activation function at x < 0 with float type. Default value is 0.2.
name (Optional[str], optional): This operator's name(optional). Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The activation `Blob`.
For example:
.. code-block:: python
import oneflow as flow
import numpy as np
import oneflow.typing as tp
@flow.global_function()
def leaky_relu_Job(x: tp.Numpy.Placeholder((5, ),)
) -> tp.Numpy:
leaky_relu = flow.nn.leaky_relu(x, alpha=0.2)
return leaky_relu
x = np.array([-10, -5, 0, 5, 10]).astype(np.float32)
out = leaky_relu_Job(x)
# out [-2. -1. 0. 5. 10.]
"""
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("LeakyRelu_")
)
.Op("leaky_relu")
.Input("x", [x])
.Output("y")
.Attr("alpha", float(alpha))
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.elu")
def elu(
x: oneflow._oneflow_internal.BlobDesc,
alpha: float = 1.0,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""The ELU activation.
The formula is:
.. math::
\text{ELU}(x) = \begin{cases}
x & \text{ if } x \gt 0 \\
\alpha*(exp(x)-1) & \text{ if } x \le 0 \\
\end{cases}
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def elu_job(x: tp.Numpy.Placeholder(shape=(3, )))->tp.Numpy:
return flow.nn.elu(x, alpha=1.0)
x = np.array([-3.5, 1, 3.5]).astype(np.float32)
out = elu_job(x)
# output [-0.9698026 1. 3.5 ]
Args:
x (oneflow._oneflow_internal.BlobDesc): The input Tensor.
alpha (float, optional): The `alpha` value for the ELU formula. Defaults to 1.0.
name (Optional[str], optional): The name for the operator. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The activated Tensor.
"""
alpha = float(alpha)
if name is None:
name = id_util.UniqueStr("Elu_")
return (
flow.user_op_builder(name)
.Op("elu")
.Input("in", [x])
.Output("out")
.Attr("alpha", alpha)
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.hardsigmoid")
def hard_sigmoid(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
r"""The Hardsigmoid activation.
The formula is:
.. math::
\text{Hardsigmoid}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
1 & \text{ if } x \ge +3 \\
\frac{x}{6} + \frac{1}{2} & \text{ otherwise } \\
\end{cases}
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def hardsigmoid_job(x: tp.Numpy.Placeholder(shape=(3, )))->tp.Numpy:
out = flow.nn.hardsigmoid(x)
return out
x = np.array([-3.1, 0, 3.3]).astype(np.float32)
out = hardsigmoid_job(x)
# output [0. 0.5 1. ]
Args:
x (oneflow._oneflow_internal.BlobDesc): The input Tensor.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The activated Tensor.
"""
return (
flow.user_op_builder(
name if name is not None else id_util.UniqueStr("HardSigmoid_")
)
.Op("hardsigmoid")
.Input("in", [x])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.mish")
def mish(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
"""The Mish activation function.
The equation is:
.. math::
out = x*tanh(ln(1+e^x))
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def mish_job(x: tp.Numpy.Placeholder(shape=(5, )))->tp.Numpy:
return flow.nn.mish(x)
x = np.array([-0.5, 0, 0.5, 1.0, 1.5]).astype(np.float32)
out = mish_job(x)
Args:
x (oneflow._oneflow_internal.BlobDesc): The input Blob.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob.
"""
if name is None:
name = id_util.UniqueStr("Mish_")
return x * flow.math.tanh(
flow.math.softplus(x, name=name + "softplus"), name=name + "tanh"
)
@oneflow_export("nn.swish")
def swish(
x: oneflow._oneflow_internal.BlobDesc,
beta: float = 1.0,
name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
r"""The Swish activation function.
The equation is:
.. math::
out = x * sigmoid(\beta*x)
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def swish_job(x: tp.Numpy.Placeholder(shape=(5, )))->tp.Numpy:
return flow.nn.swish(x)
x = np.array([-0.5, 0, 0.5, 1, 1.5]).astype(np.float32)
out = swish_job(x)
# output [-0.18877034 0. 0.31122968 0.7310586 1.2263618 ]
Args:
x (oneflow._oneflow_internal.BlobDesc): The input Blob.
beta (float, optional): The smooth factor. Defaults to 1.0.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The result Blob.
"""
if name is None:
name = id_util.UniqueStr("Swish_")
return x * flow.math.sigmoid(beta * x, name=name + "_sigmoid")
@oneflow_export("nn.hardswish")
def hardswish(
x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
r"""The Hardswish activation.
The formula is:
.. math::
\text{Hardswish}(x) = \begin{cases}
0 & \text{ if } x \le -3 \\
x & \text{ if } x \ge +3 \\
x*(x+3)/6 & \text{ otherwise } \\
\end{cases}
For example:
.. code-block:: python
import oneflow as flow
import oneflow.typing as tp
import numpy as np
@flow.global_function()
def hardswish_job(x: tp.Numpy.Placeholder(shape=(3, )))->tp.Numpy:
return flow.nn.hardswish(x)
x = np.array([-3.5, 1, 3.5]).astype(np.float32)
out = hardswish_job(x)
# output [0. 0.6666667 3.5 ]
Args:
x (oneflow._oneflow_internal.BlobDesc): The input Tensor.
name (Optional[str], optional): The name for the operation. Defaults to None.
Returns:
oneflow._oneflow_internal.BlobDesc: The activated Tensor.
"""
if name is None:
name = id_util.UniqueStr("HardSwish_")
return (
flow.user_op_builder(name)
.Op("hardswish")
.Input("in", [x])
.Output("out")
.Build()
.InferAndTryRun()
.RemoteBlobList()[0]
)
@oneflow_export("nn.hardtanh")
def hardtanh(
    x: oneflow._oneflow_internal.BlobDesc,
    min_val: float = -1.0,
    max_val: float = 1.0,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""The Hardtanh activation.
    The equation is:
    .. math::
        \text{HardTanh}(x) = \begin{cases}
            max\_val & \text{ if } x > max\_val \\
            min\_val & \text{ if } x < min\_val \\
            x & \text{ otherwise } \\
        \end{cases}
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def hardtanh_job(x: tp.Numpy.Placeholder(shape=(2, 3)))->tp.Numpy:
            return flow.nn.hardtanh(x, min_val=-1.25, max_val=1.2)
        x = np.array([[-1.5, -1.1, 0.6],
                    [1.2, 1.3, 1.5]]).astype(np.float32)
        out = hardtanh_job(x)
        # output [[-1.25 -1.1   0.6 ]
        #         [ 1.2   1.2   1.2 ]]
    Args:
        x (oneflow._oneflow_internal.BlobDesc): The input Tensor.
        min_val (float, optional): The minimum value of the linear region range. Defaults to -1.
        max_val (float, optional): The maximum value of the linear region range. Defaults to 1.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The activated tensor.
    """
    if name is None:
        name = id_util.UniqueStr("Hardtanh_")
    # Coerce to float so integer arguments (e.g. min_val=-1) satisfy the
    # float-typed "min_val"/"max_val" op attributes.
    min_val = float(min_val)
    max_val = float(max_val)
    assert min_val < max_val, "max_val should be larger than min_val"
    return (
        flow.user_op_builder(name)
        .Op("hardtanh")
        .Input("in", [x])
        .Attr("min_val", min_val)
        .Attr("max_val", max_val)
        .Output("out")
        .Build()
        .InferAndTryRun()
        .RemoteBlobList()[0]
    )
@oneflow_export("nn.relu6")
def relu6(
    x: oneflow._oneflow_internal.BlobDesc, name: Optional[str] = None
) -> oneflow._oneflow_internal.BlobDesc:
    r"""Relu6 activation, it clips the value around (0, 6).
    The equation is:
    .. math::
        \text{Relu6}(x) = \begin{cases}
            6 & \text{ if } x > 6 \\
            0 & \text{ if } x < 0 \\
            x & \text{ otherwise } \\
        \end{cases}
    For example:
    .. code-block::
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def relu6_job(x: tp.Numpy.Placeholder(shape=(2, 3)))->tp.Numpy:
            return flow.nn.relu6(x)
        x = np.array([[-1, -0.5, 0.0],
                    [0.5, 6.0, 7]]).astype(np.float32)
        out = relu6_job(x)
        # output [[0.  0.  0. ]
        #         [0.5 6.  6. ]]
    Args:
        x (oneflow._oneflow_internal.BlobDesc): The input Tensor.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The activated Tensor.
    """
    op_name = id_util.UniqueStr("Relu6_") if name is None else name
    # Relu6 is just Hardtanh with the linear region pinned to [0, 6].
    return flow.nn.hardtanh(x, min_val=0.0, max_val=6.0, name=op_name)
@oneflow_export("nn.L1Loss")
@stable_api
def l1_loss(
    input: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator computes the L1 Loss between each element in `input` and `target`.
    The equation is:
    if reduction = "none":
    .. math::
        output = |Target - Input|
    if reduction = "mean":
    .. math::
        output = \frac{1}{n}\sum_{i=1}^n|Target_i - Input_i|
    if reduction = "sum":
    .. math::
        output = \sum_{i=1}^n|Target_i - Input_i|
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Blob.
        target (oneflow._oneflow_internal.BlobDesc): The target value.
        reduction (str): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    For example:
    Example 1:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def l1_job(x: tp.Numpy.Placeholder(shape=(3, 3)),
                y: tp.Numpy.Placeholder(shape=(3, 3))) -> tp.Numpy:
            out = flow.nn.L1Loss(x, y, reduction="mean", name="l1")
            return out
        input = np.array([[1, 1, 1], [2, 2, 2], [7, 7, 7]]).astype(np.float32)
        target = np.array([[4, 4, 4], [4, 4, 4], [4, 4, 4]]).astype(np.float32)
        out = l1_job(input, target)
        # output [2.6666667]
    Example 2:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def l1_job(x: tp.Numpy.Placeholder(shape=(3, 3)),
                y: tp.Numpy.Placeholder(shape=(3, 3))) -> tp.Numpy:
            out = flow.nn.L1Loss(x, y, reduction="sum", name="l1")
            return out
        input = np.array([[1, 1, 1], [2, 2, 2], [7, 7, 7]]).astype(np.float32)
        target = np.array([[4, 4, 4], [4, 4, 4], [4, 4, 4]]).astype(np.float32)
        out = l1_job(input, target)
        # output [24.]
    """
    assert (
        input.shape == target.shape
    ), "The Input shape must be the same as Target shape"
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("L1Loss")
    # Element-wise absolute difference; reductions are applied on top of it.
    diff = flow.math.subtract(target, input, name=name + "_sub")
    abs_diff = flow.math.abs(diff, name=name + "_abs")
    if reduction == "mean":
        return flow.math.reduce_mean(abs_diff, name=name + "_reduce_mean")
    if reduction == "sum":
        return flow.math.reduce_sum(abs_diff, name=name + "_reduce_sum")
    # reduction == "none": return the per-element loss untouched.
    return abs_diff
@oneflow_export("nn.BCELoss")
@stable_api
def bce_loss(
    input: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    weight: remote_blob_util = None,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator computes the binary cross entropy loss.
    The equation is:
    if reduction = "none":
    .. math::
        out = -(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    if reduction = "mean":
    .. math::
        out = -\frac{1}{n}\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    if reduction = "sum":
    .. math::
        out = -\sum_{i=1}^n(Target_i*log(Input_i) + (1-Target_i)*log(1-Input_i))
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def bce_loss_job(input: tp.Numpy.Placeholder(shape=(2, 3)),
                        target: tp.Numpy.Placeholder(shape=(2, 3)),
                        weight: tp.Numpy.Placeholder(shape=(2, 3)))->tp.Numpy:
            sigmoid_input = flow.math.sigmoid(input)
            return flow.nn.BCELoss(sigmoid_input, target, weight, reduction='mean')
        np_input = np.array([[1.2, 0.2, -0.3],
                            [0.7, 0.6, -2]]).astype(np.float32)
        np_target = np.array([[0, 1, 0],
                            [1, 0, 1]]).astype(np.float32)
        np_weight = np.array([[2, 2, 2],
                            [2, 2, 2]]).astype(np.float32)
        out = bce_loss_job(np_input, np_target, np_weight)
        # output [2.0611262]
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Blob.
        target (oneflow._oneflow_internal.BlobDesc): The target value.
        weight (remote_blob_util, optional): The manual rescaling weight to the loss. Default to None, whose corresponding weight value is 1.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Attention:
        The input value must be in the range of (0, 1). Or the loss function may return `nan` value.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    # TODO: Check the input and target value range is in (0, 1)
    assert (
        input.shape == target.shape
    ), "The Input shape must be the same as Target shape"
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("BCELoss")
    # Standard BCE: -(t*log(p) + (1-t)*log(1-p)), computed element-wise.
    _cross_entropy_loss = flow.math.negative(
        target * flow.math.log(input) + (1 - target) * flow.math.log(1 - input)
    )
    if weight is not None:
        assert (
            weight.shape == input.shape
        ), "The weight shape must be the same as Input shape"
        _weighted_loss = weight * _cross_entropy_loss
    else:
        _weighted_loss = _cross_entropy_loss
    if reduction == "mean":
        return flow.math.reduce_mean(_weighted_loss, name=name + "_reduce_mean")
    elif reduction == "sum":
        return flow.math.reduce_sum(_weighted_loss, name=name + "_reduce_sum")
    else:
        # Do no reduction
        return _weighted_loss
@oneflow_export("nn.BCEWithLogitsLoss")
@stable_api
def bce_with_logits_loss(
    input: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    weight: remote_blob_util = None,
    pos_weight: remote_blob_util = None,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator combines the `Sigmoid` and `BCELoss` together. For numerical stability,
    we apply some math tricks instead of using `Sigmoid` layer with `BCELoss`.
    The equation is:
    if reduction = "none":
    .. math::
        out = -weight*[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]
    if reduction = "mean":
    .. math::
        out = -\frac{weight}{n}\sum_{i=1}^n[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]
    if reduction = "sum":
    .. math::
        out = -weight*\sum_{i=1}^n[Pos\_weight*y*log\sigma({x}) + (1-y)*log(1-\sigma(x))]
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def bce_with_logits_loss_job(input: tp.Numpy.Placeholder(shape=(2, 3)),
                                    target: tp.Numpy.Placeholder(shape=(2, 3)),
                                    weight: tp.Numpy.Placeholder(shape=(2, 3)),
                                    pos_weight: tp.Numpy.Placeholder(shape=(3, )))->tp.Numpy:
            return flow.nn.BCEWithLogitsLoss(input, target, weight, pos_weight, reduction='mean')
        np_input = np.array([[1.2, 0.2, -0.3],
                            [0.7, 0.6, -2]]).astype(np.float32)
        np_target = np.array([[0, 1, 0],
                            [1, 0, 1]]).astype(np.float32)
        np_weight = np.array([[2, 2, 2],
                            [2, 2, 2]]).astype(np.float32)
        np_pos_weight = np.array([1.2, 1.3, 1.4]).astype(np.float32)
        out = bce_with_logits_loss_job(np_input, np_target, np_weight, np_pos_weight)
        # output [2.4314096]
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Tensor.
        target (oneflow._oneflow_internal.BlobDesc): The target Tensor.
        weight (remote_blob_util, optional): The manual rescaling weight to the loss. Defaults to None.
        pos_weight (remote_blob_util, optional): The manual rescaling weight to the positive examples. Defaults to None.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    assert (
        input.shape == target.shape
    ), "The Input shape must be the same as Target shape"
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("BCEWithLogitsLoss")
    # log-sum-exp style trick: log(1 + e^{-x}) = max(-x, 0) + log(e^{-max(-x,0)} + e^{-x-max(-x,0)})
    _neg_input = flow.math.negative(input)
    _max_val = flow.clip(_neg_input, min_value=0)
    _neg_max_val = flow.math.negative(_max_val)
    # BUGFIX: test identity against None instead of truthiness — consistent with
    # the `weight is not None` check below, and avoids relying on an ambiguous
    # __bool__ of a Blob object.
    if pos_weight is not None:
        assert pos_weight.shape[0] == input.shape[-1], (
            "The length of `pos_weight` must be equal to the number of classes. "
            "Found the length of pos_weight {} vs classes {}".format(
                pos_weight.shape[0], input.shape[-1]
            )
        )
        _log_weight = ((pos_weight - 1) * target) + 1
        _loss = (1 - target) * input + _log_weight * (
            flow.math.log(
                flow.math.exp(_neg_max_val) + flow.math.exp(_neg_input - _max_val)
            )
            + _max_val
        )
    else:
        _loss = (1 - target) * input + _max_val
        _loss += flow.math.log(
            flow.math.exp(_neg_max_val) + flow.math.exp(_neg_input - _max_val)
        )
    if weight is not None:
        assert (
            weight.shape == input.shape
        ), "The weight shape must be the same as Input shape"
        _weighted_loss = weight * _loss
    else:
        _weighted_loss = _loss
    if reduction == "mean":
        return flow.math.reduce_mean(_weighted_loss, name=name + "_reduce_mean")
    elif reduction == "sum":
        return flow.math.reduce_sum(_weighted_loss, name=name + "_reduce_sum")
    else:
        # Do no reduction
        return _weighted_loss
@oneflow_export("nn.MSELoss")
@stable_api
def mse_loss(
    input: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator computes the mean squared error between each element in `input` and `target`.
    The equation is:
    if reduction = "none":
    .. math::
        out = (Target_i - Input_i)^2
    if reduction = "mean":
    .. math::
        out = \frac{1}{n}\sum_{i=1}^n(Target_i - Input_i)^2
    if reduction = "sum":
    .. math::
        out = \sum_{i=1}^n(Target_i - Input_i)^2
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Blob.
        target (oneflow._oneflow_internal.BlobDesc): The target value.
        reduction (str): The reduce type, it can be the one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    For example:
    Example 1:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def mseloss_job(input: tp.Numpy.Placeholder(shape=(3, 3)),
                        target: tp.Numpy.Placeholder(shape=(3, 3)))->tp.Numpy:
            out = flow.nn.MSELoss(input, target, reduction="mean")
            return out
        input = np.array([[1, 1, 1], [2, 2, 2], [7, 7, 7]]).astype(np.float32)
        target = np.array([[4, 4, 4], [4, 4, 4], [4, 4, 4]]).astype(np.float32)
        out = mseloss_job(input, target)
        # output [7.3333335]
    Example 2:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def mseloss_job(input: tp.Numpy.Placeholder(shape=(3, 3)),
                        target: tp.Numpy.Placeholder(shape=(3, 3)))->tp.Numpy:
            out = flow.nn.MSELoss(input, target, reduction="sum")
            return out
        input = np.array([[1, 1, 1], [2, 2, 2], [7, 7, 7]]).astype(np.float32)
        target = np.array([[4, 4, 4], [4, 4, 4], [4, 4, 4]]).astype(np.float32)
        out = mseloss_job(input, target)
        # output [66.]
    """
    assert (
        input.shape == target.shape
    ), "The Input shape must be the same as Target shape"
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("MSELoss")
    # Per-element squared error; the reduction mode decides what happens next.
    squared_diff = flow.math.squared_difference(
        target, input, name=name + "_mean_squared"
    )
    if reduction == "mean":
        return flow.math.reduce_mean(squared_diff, name=name + "_reduce_mean")
    if reduction == "sum":
        return flow.math.reduce_sum(squared_diff, name=name + "_reduce_sum")
    # reduction == "none": hand back the element-wise loss.
    return squared_diff
@oneflow_export("nn.MarginRankingLoss")
@stable_api
def margin_ranking_loss(
    input1: oneflow._oneflow_internal.BlobDesc,
    input2: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    margin: float = 0.0,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator computes the Margin Ranking loss.
    The equation is:
    if reduction = "none":
    .. math::
        out = \max\ (0, -y*(x_1-x_2)+margin)
    if reduction = "mean":
    .. math::
        out = \frac{1}{n}\sum_{i=1}^n\max\ (0, -y*(x_1-x_2)+margin)
    if reduction = "sum":
    .. math::
        out = \sum_{i=1}^n\max\ (0, -y*(x_1-x_2)+margin)
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def margin_ranking_loss_job(input1: tp.Numpy.Placeholder(shape=(3, 3)),
                                    input2: tp.Numpy.Placeholder(shape=(3, 3)),
                                    target: tp.Numpy.Placeholder(shape=(3, 3)))->tp.Numpy:
            out = flow.nn.MarginRankingLoss(input1, input2, target, margin=1.0)
            return out
        np_input1 = np.array([[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9]]).astype(np.float32)
        np_input2 = np.array([[2, 2, 2],
                            [2, 2, 2],
                            [2, 2, 2]]).astype(np.float32)
        np_target = np.array([[3, 3, 3],
                            [3, 3, 3],
                            [3, 3, 3]]).astype(np.float32)
        out = margin_ranking_loss_job(np_input1, np_input2, np_target)
        # output [0.5555556]
    Args:
        input1 (oneflow._oneflow_internal.BlobDesc): The ranking score of input1 Blob.
        input2 (oneflow._oneflow_internal.BlobDesc): The ranking score of input2 Blob.
        target (oneflow._oneflow_internal.BlobDesc): The target Blob.
        margin (float): The margin value. Defaults to 0.0.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    assert (
        input1.shape == input2.shape
    ), "The shape of `input1`, `input2` must be the same. "
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("MarginRankingLoss")
    # Hinge term: max(0, margin - y * (x1 - x2)), assembled step by step.
    score_diff = flow.math.subtract(input1, input2)
    scaled = flow.math.multiply(target, flow.math.negative(score_diff))
    shifted = flow.math.add(margin, scaled)
    hinge = flow.clip(shifted, min_value=0.0)
    if reduction == "mean":
        return flow.math.reduce_mean(hinge, name=name + "_reduce_mean")
    if reduction == "sum":
        return flow.math.reduce_sum(hinge, name=name + "_reduce_sum")
    # reduction == "none"
    return hinge
@oneflow_export("nn.TripletMarginLoss")
@stable_api
def triplet_margin_loss(
    anchor: oneflow._oneflow_internal.BlobDesc,
    positive: oneflow._oneflow_internal.BlobDesc,
    negative: oneflow._oneflow_internal.BlobDesc,
    margin: float = 1.0,
    p: float = 2.0,
    eps: float = 1e-6,
    swap: bool = False,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    r"""This operator computes the Triplet Margin Loss.
    The equation is:
    if reduction = "none":
    .. math::
        output = \max\{\left\lVert a_i - p_i \right\rVert_p - \left\lVert a_i - n_i \right\rVert_p + {\rm margin}, 0\}
    if reduction = "mean":
    .. math::
        output = \frac{1}{n}\sum_{i=1}^n\max\{\left\lVert a_i - p_i \right\rVert_p - \left\lVert a_i - n_i \right\rVert_p + {\rm margin}, 0\}
    if reduction = "sum":
    .. math::
        output = \sum_{i=1}^n\max\{\left\lVert a_i - p_i \right\rVert_p - \left\lVert a_i - n_i \right\rVert_p + {\rm margin}, 0\}
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def triplet_loss_job(anchor: tp.Numpy.Placeholder(shape=(3, 3)),
                            pos: tp.Numpy.Placeholder(shape=(3, 3)),
                            neg: tp.Numpy.Placeholder(shape=(3, 3)))->tp.Numpy:
            out = flow.nn.TripletMarginLoss(anchor, pos, neg, margin=1.0, p=2.0)
            return out
        np_anchor = np.array([[1, 2, 3],
                            [4, 5, 6],
                            [7, 8, 9]]).astype(np.float32)
        np_pos = np.array([[2, 2, 2],
                        [2, 2, 2],
                        [2, 2, 2]]).astype(np.float32)
        np_neg = np.array([[3, 3, 3],
                        [3, 3, 3],
                        [3, 3, 3]]).astype(np.float32)
        out = triplet_loss_job(np_anchor, np_pos, np_neg)
        # output [1.8449262]
    Args:
        anchor (oneflow._oneflow_internal.BlobDesc): The anchor Blob.
        positive (oneflow._oneflow_internal.BlobDesc): The positive sample Blob.
        negative (oneflow._oneflow_internal.BlobDesc): The negative sample Blob.
        margin (float, optional): The margin value. Defaults to 1.0.
        p (float, optional): The norm degree for computing distance. Defaults to 2.0.
        eps (float, optional): A small value use in norm computation. Defaults to 1e-6.
        swap (bool, optional): Whether to swap the distance.
        For more details you can check the Paper `Learning shallow convolutional feature descriptors with triplet losses`. Defaults to False.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    # BUGFIX: the original message claimed "we only support `swap=True`" while
    # the assertion enforces the opposite; the message now matches the check.
    assert (
        not swap
    ), "For now we only support `swap=False`, OneFlow still have backward error in minimum"
    if name is None:
        name = id_util.UniqueStr("TripletMarginLoss")
    def _p_norm(x, p=2.0, name="p_norm"):
        r"""Compute the p-norm
        The equation is:
        .. math::
            out = \sqrt[P]{\sum_{i=0}^{n}(abs(x)^P)}
        Args:
            x ([type]): The input Blob.
            p ([type], optional): The norm degree. Defaults to 2..
        """
        # In order to avoid the `nan` case.
        _abs_val = flow.math.abs(x, name=name + "_abs")
        if p == 2.0:
            # Use Square to compute the l2-norm
            _norm = flow.math.square(_abs_val, name=name + "_square")
            _norm = flow.math.reduce_sum(_norm, axis=1, name=name + "_sum")
            _norm_val = flow.math.sqrt(_norm, name=name + "_sqrt")
        else:
            # General p-norm: (sum(|x|^p))^(1/p), with p broadcast as a constant.
            _p_constant = flow.constant_like(
                like=_abs_val, value=p, dtype=flow.float32, name=name + "_p_constant"
            )
            _norm = flow.math.pow(_abs_val, _p_constant, name=name + "_pow1")
            _norm = flow.math.reduce_sum(_norm, axis=1, name=name + "_sum")
            _p_reciprocal_constant = flow.constant_like(
                like=_norm,
                value=1.0 / p,
                dtype=flow.float32,
                name=name + "_p_reciprocal_constant",
            )
            _norm_val = flow.math.pow(
                _norm, _p_reciprocal_constant, name=name + "_norm_val"
            )
        return _norm_val
    # Compute the distance
    _distance_1 = _p_norm(anchor - positive + eps, p=p, name=name + "_distance_1")
    _distance_2 = _p_norm(anchor - negative + eps, p=p, name=name + "_distance_2")
    if swap:
        _distance_swap = _p_norm(positive - negative + eps, p=p)
        _distance_swap = flow.math.reduce_sum(_distance_swap, axis=1)
        # TODO(zhengzekang): minimum still not support backward
        _distance_2 = flow.math.minimum(_distance_2, _distance_swap)
    _triplet_loss = flow.clip(margin + _distance_1 - _distance_2, min_value=0.0)
    if reduction == "mean":
        return flow.math.reduce_mean(_triplet_loss, name=name + "_reduce_mean")
    elif reduction == "sum":
        return flow.math.reduce_sum(_triplet_loss, name=name + "_reduce_sum")
    else:
        return _triplet_loss
@oneflow_export("nn.PixelShuffle")
@stable_api
def pixel_shuffle(
    input: oneflow._oneflow_internal.BlobDesc,
    upscale_factor: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator do the pixel shuffle, the shape of input(B, C*r*r, H, W) is arranged to
    (B, C, H*r, W*r). It can be used to do the sub-pixel convolution.
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def PixelShuffleJob(input: tp.Numpy.Placeholder(shape=(3, 4, 2, 2), dtype=flow.float32))->tp.Numpy:
            out = flow.nn.PixelShuffle(input, upscale_factor=2)
            return out
        input = np.random.uniform(size=(3, 4, 2, 2)).astype(np.float32)
        out = PixelShuffleJob(input)
        # out.shape (3, 1, 4, 4)
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Blob.
        upscale_factor (int): The upscale factor.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    # PixelShuffle is PixelShufflev2 with identical height and width factors.
    factor = upscale_factor
    return flow.nn.PixelShufflev2(input, factor, factor, name=name)
@oneflow_export("nn.PixelShufflev2")
def pixel_shufflev2(
    input: oneflow._oneflow_internal.BlobDesc,
    h_upscale_factor: int,
    w_upscale_factor: int,
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator is similar to `oneflow.nn.PixelShuffle`. The difference is that in
    `oneflow.nn.PixelShuffle`, the upscale factor of height and width is the same. But in
    `oneflow.nn.PixelShufflev2`, you can set different upscale factor for height and width.
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input Blob.
        h_upscale_factor (int): The upscale factor of height.
        w_upscale_factor (int): The upscale factor of width.
        name (Optional[str], optional): The name for the operation. Defaults to None.
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def PixelShufflev2Job(input: tp.Numpy.Placeholder(shape=(3, 16, 2, 4), dtype=flow.float32))->tp.Numpy:
            out = flow.nn.PixelShufflev2(input, h_upscale_factor=2, w_upscale_factor=4)
            return out
        input = np.random.uniform(size=(3, 16, 2, 4)).astype(np.float32)
        out = PixelShufflev2Job(input)
        # out.shape (3, 2, 4, 16)
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result Blob.
    """
    assert (
        h_upscale_factor > 0 and w_upscale_factor > 0
    ), "The scale factor of height and width must larger than zero"
    assert len(input.shape) == 4, "Only Accept 4D Blob"
    _batch, _channel, _height, _width = input.shape
    assert (
        _channel % (h_upscale_factor * w_upscale_factor) == 0
    ), "The channels of input tensor must be divisible by (h_upscale_factor * w_upscale_factor)"
    if name is None:
        name = id_util.UniqueStr("PixelShufflev2")
    # Split the channel dim into (C', h*w), then interleave the sub-pixel
    # positions into the spatial dims via transpose, and collapse back.
    _new_c = int(_channel / (h_upscale_factor * w_upscale_factor))
    out = flow.reshape(
        input,
        [_batch, _new_c, h_upscale_factor * w_upscale_factor, _height, _width],
        name=name + "_reshape1",
    )
    out = flow.reshape(
        out,
        [_batch, _new_c, h_upscale_factor, w_upscale_factor, _height, _width],
        name=name + "_reshape2",
    )
    out = flow.transpose(out, [0, 1, 4, 2, 5, 3], name=name + "_transpose")
    out = flow.reshape(
        out,
        [_batch, _new_c, _height * h_upscale_factor, _width * w_upscale_factor],
        name=name + "_reshape3",
    )
    return out
@oneflow_export("nn.KLDivLoss")
@stable_api
def kldivloss(
    input: oneflow._oneflow_internal.BlobDesc,
    target: oneflow._oneflow_internal.BlobDesc,
    log_target: bool = False,
    reduction: str = "mean",
    name: Optional[str] = None,
) -> oneflow._oneflow_internal.BlobDesc:
    """This operator computes the Kullback-Leibler divergence loss.
    The equation is:
    If :math:`log\_target = True`:
    .. math::
        loss = e^{target}*(target-input)
    If :math:`log\_target = False`:
    .. math::
        loss = target*(log(target)-input)
    Attention:
        In `log_target = False` case, the element in loss will set to be `0` when the element in target is less than `0`
    For example:
    .. code-block:: python
        import oneflow as flow
        import oneflow.typing as tp
        import numpy as np
        @flow.global_function()
        def of_kldivloss(input: tp.Numpy.Placeholder(shape=(3, 3)),
                        target: tp.Numpy.Placeholder(shape=(3, 3))) -> tp.Numpy:
            return flow.nn.KLDivLoss(input, target, log_target=False, reduction='none')
        input = np.array([[0.1, 0.2, 0.7],
                [0.8, 0.9, 0.5],
                [0.5, 0.15, 0.35]]).astype(np.float32)
        target = np.array([[0.3, 0.1, 0.6],
                [-0.3, 0.4, 0.4],
                [0.35, 0.25, 0.4]]).astype(np.float32)
        out = of_kldivloss(input, target)
        # output [[-0.39119187 -0.25025854 -0.7264954 ]
        #         [ 0.         -0.72651625 -0.56651634]
        #         [-0.54243773 -0.3840736  -0.5065163 ]]
    Args:
        input (oneflow._oneflow_internal.BlobDesc): The input tensor.
        target (oneflow._oneflow_internal.BlobDesc): The target tensor.
        log_target (bool, optional): Whether the `target` is passed in the log space. Defaults to False.
        reduction (str, optional): The reduce type, it can be one of "none", "mean", "sum". Defaults to "mean".
        name (Optional[str], optional): The name for the operation. Defaults to None.
    Returns:
        oneflow._oneflow_internal.BlobDesc: The result tensor.
    """
    assert reduction in [
        "none",
        "mean",
        "sum",
    ], "{} is not a valid value for reduction, The reduction must be the one of `none`, `mean`, `sum`. ".format(
        reduction
    )
    if name is None:
        name = id_util.UniqueStr("KLDivLoss_")
    if log_target:
        # target is already log-probabilities: loss = exp(target) * (target - input).
        _kl_div_loss = flow.math.exp(target, name=name + "exp") * (target - input)
    else:
        # target is plain probabilities: loss = target * (log(target) - input).
        _kl_div_out_loss = target * (flow.math.log(target, name=name + "log") - input)
        _zeros = flow.zeros_like(
            _kl_div_out_loss, dtype=_kl_div_out_loss.dtype, name=name + "zeros"
        )
        # when target < 0, we set to `0`, when target > 0, we set to `1`.
        # rint(target + 0.5) rounds the shifted value to the nearest integer,
        # yielding 0 for small non-positive targets and a non-zero mask otherwise.
        # NOTE(review): for target <= -1 the rounded value is a non-zero negative,
        # which this cast treats as truthy — confirm targets are expected in [0, 1].
        _condition = flow.cast(
            flow.math.rint(target + 0.5, name=name + "rint"),
            dtype=flow.int8,
            name=name + "cast2int",
        )
        # To avoid the `nan` value in log operation
        # We set those positions which `target` is less than zero as `0`
        _kl_div_loss = flow.where(
            _condition, _kl_div_out_loss, _zeros, name=name + "where"
        )
    if reduction == "mean":
        return flow.math.reduce_mean(_kl_div_loss, name=name + "_reduce_mean")
    elif reduction == "sum":
        return flow.math.reduce_sum(_kl_div_loss, name=name + "_reduce_sum")
    else:
        # reduction == "none": per-element loss.
        return _kl_div_loss
| [
"oneflow.nn.hardtanh",
"oneflow.current_global_function_desc",
"oneflow.math.negative",
"oneflow.ones_initializer",
"oneflow.math.log",
"oneflow.zeros_like",
"oneflow.nn.moments",
"oneflow.math.abs",
"oneflow.math.minimum",
"oneflow.expand_dims",
"oneflow.transpose",
"oneflow.nn.conv2d",
"on... | [((7847, 7874), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.conv1d"""'], {}), "('nn.conv1d')\n", (7861, 7874), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((14878, 14905), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.conv2d"""'], {}), "('nn.conv2d')\n", (14892, 14905), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((22640, 22667), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.conv3d"""'], {}), "('nn.conv3d')\n", (22654, 22667), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((30653, 30681), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.moments"""'], {}), "('nn.moments')\n", (30667, 30681), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((32150, 32180), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.GroupNorm"""'], {}), "('nn.GroupNorm')\n", (32164, 32180), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((34816, 34851), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.InstanceNorm1d"""'], {}), "('nn.InstanceNorm1d')\n", (34830, 34851), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((36979, 37014), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.InstanceNorm2d"""'], {}), "('nn.InstanceNorm2d')\n", (36993, 37014), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((38770, 38805), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.InstanceNorm3d"""'], {}), "('nn.InstanceNorm3d')\n", (38784, 38805), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((40570, 40610), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.batch_normalization"""'], 
{}), "('nn.batch_normalization')\n", (40584, 40610), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((45245, 45276), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.layer_norm"""'], {}), "('nn.layer_norm')\n", (45259, 45276), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((49041, 49075), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.compat_conv2d"""'], {}), "('nn.compat_conv2d')\n", (49055, 49075), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((52656, 52685), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.bias_add"""'], {}), "('nn.bias_add')\n", (52670, 52685), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((55341, 55381), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.fused_bias_add_gelu"""'], {}), "('nn.fused_bias_add_gelu')\n", (55355, 55381), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((58089, 58132), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.fused_bias_add_dropout"""'], {}), "('nn.fused_bias_add_dropout')\n", (58103, 58132), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((61774, 61805), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.max_pool1d"""'], {}), "('nn.max_pool1d')\n", (61788, 61805), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((63137, 63168), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.avg_pool1d"""'], {}), "('nn.avg_pool1d')\n", (63151, 63168), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((65037, 65067), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.MaxPool1d"""'], {}), "('nn.MaxPool1d')\n", (65051, 65067), False, 'from 
oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((70500, 70530), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.MaxPool2d"""'], {}), "('nn.MaxPool2d')\n", (70514, 70530), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((75420, 75450), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.MaxPool3d"""'], {}), "('nn.MaxPool3d')\n", (75434, 75450), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((80761, 80792), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.max_pool2d"""'], {}), "('nn.max_pool2d')\n", (80775, 80792), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((83727, 83758), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.avg_pool2d"""'], {}), "('nn.avg_pool2d')\n", (83741, 83758), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((86647, 86678), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.max_pool3d"""'], {}), "('nn.max_pool3d')\n", (86661, 86678), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((89552, 89583), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.avg_pool3d"""'], {}), "('nn.avg_pool3d')\n", (89566, 89583), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((92903, 92931), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.softmax"""'], {}), "('nn.softmax')\n", (92917, 92931), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((94785, 94816), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.logsoftmax"""'], {}), "('nn.logsoftmax')\n", (94799, 94816), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((96379, 96412), 
'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.softmax_grad"""'], {}), "('nn.softmax_grad')\n", (96393, 96412), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((97730, 97771), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.sparse_cross_entropy"""'], {}), "('nn.sparse_cross_entropy')\n", (97744, 97771), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((100823, 100877), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.softmax_cross_entropy_with_logits"""'], {}), "('nn.softmax_cross_entropy_with_logits')\n", (100837, 100877), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((103247, 103308), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.sparse_softmax_cross_entropy_with_logits"""'], {}), "('nn.sparse_softmax_cross_entropy_with_logits')\n", (103261, 103308), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((106735, 106808), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.distributed_sparse_softmax_cross_entropy_with_logits"""'], {}), "('nn.distributed_sparse_softmax_cross_entropy_with_logits')\n", (106749, 106808), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((107787, 107841), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.sigmoid_cross_entropy_with_logits"""'], {}), "('nn.sigmoid_cross_entropy_with_logits')\n", (107801, 107841), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((110438, 110475), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.random_mask_like"""'], {}), "('nn.random_mask_like')\n", (110452, 110475), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((114013, 114041), 'oneflow.python.oneflow_export.oneflow_export', 
'oneflow_export', (['"""nn.dropout"""'], {}), "('nn.dropout')\n", (114027, 114041), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((117283, 117320), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.conv2d_transpose"""'], {}), "('nn.conv2d_transpose')\n", (117297, 117320), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((126521, 126564), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.torch_conv2d_transpose"""'], {}), "('nn.torch_conv2d_transpose')\n", (126535, 126564), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((129453, 129484), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.leaky_relu"""'], {}), "('nn.leaky_relu')\n", (129467, 129484), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((130910, 130934), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.elu"""'], {}), "('nn.elu')\n", (130924, 130934), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((132378, 132410), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.hardsigmoid"""'], {}), "('nn.hardsigmoid')\n", (132392, 132410), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((133799, 133824), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.mish"""'], {}), "('nn.mish')\n", (133813, 133824), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((134850, 134876), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.swish"""'], {}), "('nn.swish')\n", (134864, 134876), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((136042, 136072), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.hardswish"""'], {}), "('nn.hardswish')\n", (136056, 136072), 
False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((137411, 137440), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.hardtanh"""'], {}), "('nn.hardtanh')\n", (137425, 137440), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((139335, 139361), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.relu6"""'], {}), "('nn.relu6')\n", (139349, 139361), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((140607, 140634), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.L1Loss"""'], {}), "('nn.L1Loss')\n", (140621, 140634), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((143686, 143714), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.BCELoss"""'], {}), "('nn.BCELoss')\n", (143700, 143714), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((147138, 147176), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.BCEWithLogitsLoss"""'], {}), "('nn.BCEWithLogitsLoss')\n", (147152, 147176), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((151661, 151689), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.MSELoss"""'], {}), "('nn.MSELoss')\n", (151675, 151689), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((154869, 154907), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.MarginRankingLoss"""'], {}), "('nn.MarginRankingLoss')\n", (154883, 154907), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((158173, 158211), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.TripletMarginLoss"""'], {}), "('nn.TripletMarginLoss')\n", (158187, 158211), False, 'from oneflow.python.oneflow_export import oneflow_export, 
stable_api\n'), ((163753, 163786), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.PixelShuffle"""'], {}), "('nn.PixelShuffle')\n", (163767, 163786), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((165026, 165061), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.PixelShufflev2"""'], {}), "('nn.PixelShufflev2')\n", (165040, 165061), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((167565, 167595), 'oneflow.python.oneflow_export.oneflow_export', 'oneflow_export', (['"""nn.KLDivLoss"""'], {}), "('nn.KLDivLoss')\n", (167579, 167595), False, 'from oneflow.python.oneflow_export import oneflow_export, stable_api\n'), ((33956, 34010), 'oneflow.reshape', 'flow.reshape', (['x'], {'shape': '[orig_shape[0], num_groups, -1]'}), '(x, shape=[orig_shape[0], num_groups, -1])\n', (33968, 34010), True, 'import oneflow as flow\n'), ((34034, 34084), 'oneflow.nn.moments', 'flow.nn.moments', (['reshape_to_1d', '[2]'], {'keepdims': '(True)'}), '(reshape_to_1d, [2], keepdims=True)\n', (34049, 34084), True, 'import oneflow as flow\n'), ((34175, 34235), 'oneflow.reshape', 'flow.reshape', (['normalized'], {'shape': '[orig_shape[0], channel, -1]'}), '(normalized, shape=[orig_shape[0], channel, -1])\n', (34187, 34235), True, 'import oneflow as flow\n'), ((34750, 34787), 'oneflow.reshape_like', 'flow.reshape_like', (['normalized'], {'like': 'x'}), '(normalized, like=x)\n', (34767, 34787), True, 'import oneflow as flow\n'), ((36351, 36389), 'oneflow.nn.moments', 'flow.nn.moments', (['x', '[2]'], {'keepdims': '(True)'}), '(x, [2], keepdims=True)\n', (36366, 36389), True, 'import oneflow as flow\n'), ((38495, 38546), 'oneflow.reshape', 'flow.reshape', (['x'], {'shape': '[x.shape[0], x.shape[1], -1]'}), '(x, shape=[x.shape[0], x.shape[1], -1])\n', (38507, 38546), True, 'import oneflow as flow\n'), ((38571, 38643), 'oneflow.nn.InstanceNorm1d', 'flow.nn.InstanceNorm1d', 
(['reshape_to_1d'], {'eps': 'eps', 'affine': 'affine', 'name': 'name'}), '(reshape_to_1d, eps=eps, affine=affine, name=name)\n', (38593, 38643), True, 'import oneflow as flow\n'), ((40295, 40346), 'oneflow.reshape', 'flow.reshape', (['x'], {'shape': '[x.shape[0], x.shape[1], -1]'}), '(x, shape=[x.shape[0], x.shape[1], -1])\n', (40307, 40346), True, 'import oneflow as flow\n'), ((40371, 40443), 'oneflow.nn.InstanceNorm1d', 'flow.nn.InstanceNorm1d', (['reshape_to_1d'], {'eps': 'eps', 'affine': 'affine', 'name': 'name'}), '(reshape_to_1d, eps=eps, affine=affine, name=name)\n', (40393, 40443), True, 'import oneflow as flow\n'), ((52546, 52642), 'oneflow.nn.conv2d', 'flow.nn.conv2d', (['input', 'filters', 'strides', 'padding', 'None', 'data_format', 'dilations', 'groups', 'name'], {}), '(input, filters, strides, padding, None, data_format,\n dilations, groups, name)\n', (52560, 52642), True, 'import oneflow as flow\n'), ((60942, 61041), 'oneflow.nn.random_mask_like', 'flow.nn.random_mask_like', (['value', 'rate', 'seed', 'noise_shape', "('%s-dropout_random_mask_like' % name)"], {}), "(value, rate, seed, noise_shape, \n '%s-dropout_random_mask_like' % name)\n", (60966, 61041), True, 'import oneflow as flow\n'), ((69552, 69589), 'oneflow.expand_dims', 'flow.expand_dims', ([], {'input': 'input', 'axis': '(2)'}), '(input=input, axis=2)\n', (69568, 69589), True, 'import oneflow as flow\n'), ((70342, 70368), 'oneflow.squeeze', 'flow.squeeze', (['y'], {'axis': '(2,)'}), '(y, axis=(2,))\n', (70354, 70368), True, 'import oneflow as flow\n'), ((70382, 70413), 'oneflow.squeeze', 'flow.squeeze', (['indice'], {'axis': '(2,)'}), '(indice, axis=(2,))\n', (70394, 70413), True, 'import oneflow as flow\n'), ((140547, 140603), 'oneflow.nn.hardtanh', 'flow.nn.hardtanh', (['x'], {'min_val': '(0.0)', 'max_val': '(6.0)', 'name': 'name'}), '(x, min_val=0.0, max_val=6.0, name=name)\n', (140563, 140603), True, 'import oneflow as flow\n'), ((150271, 150296), 'oneflow.math.negative', 
'flow.math.negative', (['input'], {}), '(input)\n', (150289, 150296), True, 'import oneflow as flow\n'), ((150312, 150346), 'oneflow.clip', 'flow.clip', (['_neg_input'], {'min_value': '(0)'}), '(_neg_input, min_value=0)\n', (150321, 150346), True, 'import oneflow as flow\n'), ((150366, 150394), 'oneflow.math.negative', 'flow.math.negative', (['_max_val'], {}), '(_max_val)\n', (150384, 150394), True, 'import oneflow as flow\n'), ((154446, 154518), 'oneflow.math.squared_difference', 'flow.math.squared_difference', (['target', 'input'], {'name': "(name + '_mean_squared')"}), "(target, input, name=name + '_mean_squared')\n", (154474, 154518), True, 'import oneflow as flow\n'), ((157730, 157770), 'oneflow.math.multiply', 'flow.math.multiply', (['target', '_margin_loss'], {}), '(target, _margin_loss)\n', (157748, 157770), True, 'import oneflow as flow\n'), ((157790, 157825), 'oneflow.math.add', 'flow.math.add', (['margin', '_margin_loss'], {}), '(margin, _margin_loss)\n', (157803, 157825), True, 'import oneflow as flow\n'), ((157854, 157892), 'oneflow.clip', 'flow.clip', (['_margin_loss'], {'min_value': '(0.0)'}), '(_margin_loss, min_value=0.0)\n', (157863, 157892), True, 'import oneflow as flow\n'), ((163434, 163494), 'oneflow.clip', 'flow.clip', (['(margin + _distance_1 - _distance_2)'], {'min_value': '(0.0)'}), '(margin + _distance_1 - _distance_2, min_value=0.0)\n', (163443, 163494), True, 'import oneflow as flow\n'), ((164950, 165022), 'oneflow.nn.PixelShufflev2', 'flow.nn.PixelShufflev2', (['input', 'upscale_factor', 'upscale_factor'], {'name': 'name'}), '(input, upscale_factor, upscale_factor, name=name)\n', (164972, 165022), True, 'import oneflow as flow\n'), ((167010, 167130), 'oneflow.reshape', 'flow.reshape', (['input', '[_batch, _new_c, h_upscale_factor * w_upscale_factor, _height, _width]'], {'name': "(name + '_reshape1')"}), "(input, [_batch, _new_c, h_upscale_factor * w_upscale_factor,\n _height, _width], name=name + '_reshape1')\n", (167022, 167130), 
True, 'import oneflow as flow\n'), ((167168, 167285), 'oneflow.reshape', 'flow.reshape', (['out', '[_batch, _new_c, h_upscale_factor, w_upscale_factor, _height, _width]'], {'name': "(name + '_reshape2')"}), "(out, [_batch, _new_c, h_upscale_factor, w_upscale_factor,\n _height, _width], name=name + '_reshape2')\n", (167180, 167285), True, 'import oneflow as flow\n'), ((167323, 167388), 'oneflow.transpose', 'flow.transpose', (['out', '[0, 1, 4, 2, 5, 3]'], {'name': "(name + '_transpose')"}), "(out, [0, 1, 4, 2, 5, 3], name=name + '_transpose')\n", (167337, 167388), True, 'import oneflow as flow\n'), ((167399, 167518), 'oneflow.reshape', 'flow.reshape', (['out', '[_batch, _new_c, _height * h_upscale_factor, _width * w_upscale_factor]'], {'name': "(name + '_reshape3')"}), "(out, [_batch, _new_c, _height * h_upscale_factor, _width *\n w_upscale_factor], name=name + '_reshape3')\n", (167411, 167518), True, 'import oneflow as flow\n'), ((6404, 6442), 'oneflow.pad', 'flow.pad', (['inputs'], {'paddings': 'pad_op_list'}), '(inputs, paddings=pad_op_list)\n', (6412, 6442), True, 'import oneflow as flow\n'), ((14541, 14584), 'oneflow.concat', 'flow.concat', (['out_list'], {'axis': 'in_channel_axis'}), '(out_list, axis=in_channel_axis)\n', (14552, 14584), True, 'import oneflow as flow\n'), ((22303, 22346), 'oneflow.concat', 'flow.concat', (['out_list'], {'axis': 'in_channel_axis'}), '(out_list, axis=in_channel_axis)\n', (22314, 22346), True, 'import oneflow as flow\n'), ((26784, 26827), 'oneflow.transpose', 'flow.transpose', (['input'], {'perm': '[0, 4, 1, 2, 3]'}), '(input, perm=[0, 4, 1, 2, 3])\n', (26798, 26827), True, 'import oneflow as flow\n'), ((26846, 26891), 'oneflow.transpose', 'flow.transpose', (['filters'], {'perm': '[0, 4, 1, 2, 3]'}), '(filters, perm=[0, 4, 1, 2, 3])\n', (26860, 26891), True, 'import oneflow as flow\n'), ((30210, 30253), 'oneflow.concat', 'flow.concat', (['out_list'], {'axis': 'in_channel_axis'}), '(out_list, axis=in_channel_axis)\n', (30221, 
30253), True, 'import oneflow as flow\n'), ((30587, 30631), 'oneflow.transpose', 'flow.transpose', (['output'], {'perm': '[0, 2, 3, 4, 1]'}), '(output, perm=[0, 2, 3, 4, 1])\n', (30601, 30631), True, 'import oneflow as flow\n'), ((31913, 31942), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Moments_"""'], {}), "('Moments_')\n", (31930, 31942), True, 'import oneflow.python.framework.id_util as id_util\n'), ((31952, 31978), 'oneflow.scope.namespace', 'flow.scope.namespace', (['name'], {}), '(name)\n', (31972, 31978), True, 'import oneflow as flow\n'), ((33777, 33808), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""GroupNorm_"""'], {}), "('GroupNorm_')\n", (33794, 33808), True, 'import oneflow.python.framework.id_util as id_util\n'), ((34127, 34157), 'oneflow.math.sqrt', 'flow.math.sqrt', (['(variance + eps)'], {}), '(variance + eps)\n', (34141, 34157), True, 'import oneflow as flow\n'), ((36265, 36301), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""InstanceNorm1D_"""'], {}), "('InstanceNorm1D_')\n", (36282, 36301), True, 'import oneflow.python.framework.id_util as id_util\n'), ((36420, 36450), 'oneflow.math.sqrt', 'flow.math.sqrt', (['(variance + eps)'], {}), '(variance + eps)\n', (36434, 36450), True, 'import oneflow as flow\n'), ((38437, 38473), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""InstanceNorm2D_"""'], {}), "('InstanceNorm2D_')\n", (38454, 38473), True, 'import oneflow.python.framework.id_util as id_util\n'), ((40237, 40273), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""InstanceNorm3D_"""'], {}), "('InstanceNorm3D_')\n", (40254, 40273), True, 'import oneflow.python.framework.id_util as id_util\n'), ((43125, 43156), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BatchNorm_"""'], {}), "('BatchNorm_')\n", (43142, 43156), True, 'import oneflow.python.framework.id_util as id_util\n'), ((43956, 43981), 
'oneflow.math.rsqrt', 'flow.math.rsqrt', (['variance'], {}), '(variance)\n', (43971, 43981), True, 'import oneflow as flow\n'), ((47007, 47038), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""LayerNorm_"""'], {}), "('LayerNorm_')\n", (47024, 47038), True, 'import oneflow.python.framework.id_util as id_util\n'), ((47390, 47441), 'oneflow.nn.moments', 'flow.nn.moments', (['inputs', 'reduce_axis'], {'keepdims': '(True)'}), '(inputs, reduce_axis, keepdims=True)\n', (47405, 47441), True, 'import oneflow as flow\n'), ((47495, 47614), 'oneflow.nn.batch_normalization', 'flow.nn.batch_normalization', ([], {'x': 'inputs', 'mean': 'mean', 'variance': 'variance', 'variance_epsilon': 'epsilon', 'axis': 'axis', 'name': 'name'}), '(x=inputs, mean=mean, variance=variance,\n variance_epsilon=epsilon, axis=axis, name=name)\n', (47522, 47614), True, 'import oneflow as flow\n'), ((54686, 54715), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BiasAdd_"""'], {}), "('BiasAdd_')\n", (54703, 54715), True, 'import oneflow.python.framework.id_util as id_util\n'), ((57414, 57452), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""FusedBiasAddGelu_"""'], {}), "('FusedBiasAddGelu_')\n", (57431, 57452), True, 'import oneflow.python.framework.id_util as id_util\n'), ((60808, 60856), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['value', 'bias', 'data_format', 'name'], {}), '(value, bias, data_format, name)\n', (60824, 60856), True, 'import oneflow as flow\n'), ((60894, 60930), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BiasAddDropout_"""'], {}), "('BiasAddDropout_')\n", (60911, 60930), True, 'import oneflow.python.framework.id_util as id_util\n'), ((94378, 94414), 'oneflow.transpose', 'flow.transpose', (['logits'], {'perm': 'permute'}), '(logits, perm=permute)\n', (94392, 94414), True, 'import oneflow as flow\n'), ((94733, 94766), 'oneflow.transpose', 'flow.transpose', (['out'], {'perm': 
'permute'}), '(out, perm=permute)\n', (94747, 94766), True, 'import oneflow as flow\n'), ((96230, 96261), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""logsoftmax"""'], {}), "('logsoftmax')\n", (96247, 96261), True, 'import oneflow.python.framework.id_util as id_util\n'), ((96296, 96349), 'oneflow.nn.softmax', 'flow.nn.softmax', (['logits', 'axis'], {'name': "(name + '_softmax')"}), "(logits, axis, name=name + '_softmax')\n", (96311, 96349), True, 'import oneflow as flow\n'), ((97261, 97292), 'oneflow.transpose', 'flow.transpose', (['y'], {'perm': 'permute'}), '(y, perm=permute)\n', (97275, 97292), True, 'import oneflow as flow\n'), ((97306, 97338), 'oneflow.transpose', 'flow.transpose', (['dy'], {'perm': 'permute'}), '(dy, perm=permute)\n', (97320, 97338), True, 'import oneflow as flow\n'), ((97680, 97712), 'oneflow.transpose', 'flow.transpose', (['dx'], {'perm': 'permute'}), '(dx, perm=permute)\n', (97694, 97712), True, 'import oneflow as flow\n'), ((99679, 99710), 'oneflow.squeeze', 'flow.squeeze', (['labels'], {'axis': '[-1]'}), '(labels, axis=[-1])\n', (99691, 99710), True, 'import oneflow as flow\n'), ((105450, 105481), 'oneflow.squeeze', 'flow.squeeze', (['labels'], {'axis': '[-1]'}), '(labels, axis=[-1])\n', (105462, 105481), True, 'import oneflow as flow\n'), ((107197, 107228), 'oneflow.squeeze', 'flow.squeeze', (['labels'], {'axis': '[-1]'}), '(labels, axis=[-1])\n', (107209, 107228), True, 'import oneflow as flow\n'), ((113153, 113192), 'oneflow.python.framework.module.Module.__init__', 'module_util.Module.__init__', (['self', 'name'], {}), '(self, name)\n', (113180, 113192), True, 'import oneflow.python.framework.module as module_util\n'), ((116863, 116892), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Dropout_"""'], {}), "('Dropout_')\n", (116880, 116892), True, 'import oneflow.python.framework.id_util as id_util\n'), ((125438, 125614), 'oneflow.pad_grad', 'flow.pad_grad', (['input', '[(0, 0), (0, 
0), (padding_left[0], padding_right[0]), (padding_left[1],\n padding_right[1])]'], {'name': "(name + '_pad_grad' if name is not None else None)"}), "(input, [(0, 0), (0, 0), (padding_left[0], padding_right[0]),\n (padding_left[1], padding_right[1])], name=name + '_pad_grad' if name\n is not None else None)\n", (125451, 125614), True, 'import oneflow as flow\n'), ((132125, 132150), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Elu_"""'], {}), "('Elu_')\n", (132142, 132150), True, 'import oneflow.python.framework.id_util as id_util\n'), ((134708, 134734), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Mish_"""'], {}), "('Mish_')\n", (134725, 134734), True, 'import oneflow.python.framework.id_util as id_util\n'), ((135943, 135970), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Swish_"""'], {}), "('Swish_')\n", (135960, 135970), True, 'import oneflow.python.framework.id_util as id_util\n'), ((135987, 136038), 'oneflow.math.sigmoid', 'flow.math.sigmoid', (['(beta * x)'], {'name': "(name + '_sigmoid')"}), "(beta * x, name=name + '_sigmoid')\n", (136004, 136038), True, 'import oneflow as flow\n'), ((137176, 137207), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""HardSwish_"""'], {}), "('HardSwish_')\n", (137193, 137207), True, 'import oneflow.python.framework.id_util as id_util\n'), ((138903, 138933), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Hardtanh_"""'], {}), "('Hardtanh_')\n", (138920, 138933), True, 'import oneflow.python.framework.id_util as id_util\n'), ((140508, 140535), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Relu6_"""'], {}), "('Relu6_')\n", (140525, 140535), True, 'import oneflow.python.framework.id_util as id_util\n'), ((143270, 143297), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""L1Loss"""'], {}), "('L1Loss')\n", (143287, 143297), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((143337, 143390), 'oneflow.math.subtract', 'flow.math.subtract', (['target', 'input'], {'name': "(name + '_sub')"}), "(target, input, name=name + '_sub')\n", (143355, 143390), True, 'import oneflow as flow\n'), ((143461, 143520), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['l1_value'], {'name': "(name + '_reduce_mean')"}), "(l1_value, name=name + '_reduce_mean')\n", (143482, 143520), True, 'import oneflow as flow\n'), ((146433, 146461), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BCELoss"""'], {}), "('BCELoss')\n", (146450, 146461), True, 'import oneflow.python.framework.id_util as id_util\n'), ((146895, 146960), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['_weighted_loss'], {'name': "(name + '_reduce_mean')"}), "(_weighted_loss, name=name + '_reduce_mean')\n", (146916, 146960), True, 'import oneflow as flow\n'), ((150214, 150252), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""BCEWithLogitsLoss"""'], {}), "('BCEWithLogitsLoss')\n", (150231, 150252), True, 'import oneflow.python.framework.id_util as id_util\n'), ((151418, 151483), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['_weighted_loss'], {'name': "(name + '_reduce_mean')"}), "(_weighted_loss, name=name + '_reduce_mean')\n", (151439, 151483), True, 'import oneflow as flow\n'), ((154386, 154414), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MSELoss"""'], {}), "('MSELoss')\n", (154403, 154414), True, 'import oneflow.python.framework.id_util as id_util\n'), ((154577, 154651), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['mean_squared_difference'], {'name': "(name + '_reduce_mean')"}), "(mean_squared_difference, name=name + '_reduce_mean')\n", (154598, 154651), True, 'import oneflow as flow\n'), ((157597, 157635), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MarginRankingLoss"""'], {}), "('MarginRankingLoss')\n", 
(157614, 157635), True, 'import oneflow.python.framework.id_util as id_util\n'), ((157675, 157709), 'oneflow.math.subtract', 'flow.math.subtract', (['input1', 'input2'], {}), '(input1, input2)\n', (157693, 157709), True, 'import oneflow as flow\n'), ((161522, 161560), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""TripletMarginLoss"""'], {}), "('TripletMarginLoss')\n", (161539, 161560), True, 'import oneflow.python.framework.id_util as id_util\n'), ((161932, 161968), 'oneflow.math.abs', 'flow.math.abs', (['x'], {'name': "(name + '_abs')"}), "(x, name=name + '_abs')\n", (161945, 161968), True, 'import oneflow as flow\n'), ((163235, 163279), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_distance_swap'], {'axis': '(1)'}), '(_distance_swap, axis=1)\n', (163255, 163279), True, 'import oneflow as flow\n'), ((163366, 163412), 'oneflow.math.minimum', 'flow.math.minimum', (['_distance_2', '_distance_swap'], {}), '(_distance_2, _distance_swap)\n', (163383, 163412), True, 'import oneflow as flow\n'), ((163539, 163603), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['_triplet_loss'], {'name': "(name + '_reduce_mean')"}), "(_triplet_loss, name=name + '_reduce_mean')\n", (163560, 163603), True, 'import oneflow as flow\n'), ((166895, 166930), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""PixelShufflev2"""'], {}), "('PixelShufflev2')\n", (166912, 166930), True, 'import oneflow.python.framework.id_util as id_util\n'), ((169966, 169997), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""KLDivLoss_"""'], {}), "('KLDivLoss_')\n", (169983, 169997), True, 'import oneflow.python.framework.id_util as id_util\n'), ((170215, 170303), 'oneflow.zeros_like', 'flow.zeros_like', (['_kl_div_out_loss'], {'dtype': '_kl_div_out_loss.dtype', 'name': "(name + 'zeros')"}), "(_kl_div_out_loss, dtype=_kl_div_out_loss.dtype, name=name +\n 'zeros')\n", (170230, 170303), True, 'import oneflow as flow\n'), ((170713, 
170782), 'oneflow.where', 'flow.where', (['_condition', '_kl_div_out_loss', '_zeros'], {'name': "(name + 'where')"}), "(_condition, _kl_div_out_loss, _zeros, name=name + 'where')\n", (170723, 170782), True, 'import oneflow as flow\n'), ((170849, 170912), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['_kl_div_loss'], {'name': "(name + '_reduce_mean')"}), "(_kl_div_loss, name=name + '_reduce_mean')\n", (170870, 170912), True, 'import oneflow as flow\n'), ((6903, 6941), 'oneflow.slice', 'flow.slice', (['x', 'slice_begin', 'slice_size'], {}), '(x, slice_begin, slice_size)\n', (6913, 6941), True, 'import oneflow as flow\n'), ((13994, 14022), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Conv1d_"""'], {}), "('Conv1d_')\n", (14011, 14022), True, 'import oneflow.python.framework.id_util as id_util\n'), ((21742, 21770), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Conv2d_"""'], {}), "('Conv2d_')\n", (21759, 21770), True, 'import oneflow.python.framework.id_util as id_util\n'), ((29661, 29689), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Conv3d_"""'], {}), "('Conv3d_')\n", (29678, 29689), True, 'import oneflow.python.framework.id_util as id_util\n'), ((32009, 32063), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['x'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(x, axis=axes, keepdims=keepdims)\n', (32030, 32063), True, 'import oneflow as flow\n'), ((32077, 32135), 'oneflow.math.reduce_variance', 'flow.math.reduce_variance', (['x'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(x, axis=axes, keepdims=keepdims)\n', (32102, 32135), True, 'import oneflow as flow\n'), ((43424, 43459), 'oneflow.reshape', 'flow.reshape', (['mean', 'nd_params_shape'], {}), '(mean, nd_params_shape)\n', (43436, 43459), True, 'import oneflow as flow\n'), ((43483, 43522), 'oneflow.reshape', 'flow.reshape', (['variance', 'nd_params_shape'], {}), '(variance, nd_params_shape)\n', (43495, 43522), True, 
'import oneflow as flow\n'), ((47874, 47910), 'oneflow.reshape', 'flow.reshape', (['gamma', 'nd_params_shape'], {}), '(gamma, nd_params_shape)\n', (47886, 47910), True, 'import oneflow as flow\n'), ((47976, 48011), 'oneflow.reshape', 'flow.reshape', (['beta', 'nd_params_shape'], {}), '(beta, nd_params_shape)\n', (47988, 48011), True, 'import oneflow as flow\n'), ((113237, 113278), 'random.randint', 'random.randint', (['(-sys.maxsize)', 'sys.maxsize'], {}), '(-sys.maxsize, sys.maxsize)\n', (113251, 113278), False, 'import random\n'), ((113779, 113815), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""RandomMaskLike_"""'], {}), "('RandomMaskLike_')\n", (113796, 113815), True, 'import oneflow.python.framework.id_util as id_util\n'), ((134775, 134820), 'oneflow.math.softplus', 'flow.math.softplus', (['x'], {'name': "(name + 'softplus')"}), "(x, name=name + 'softplus')\n", (134793, 134820), True, 'import oneflow as flow\n'), ((143565, 143622), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['l1_value'], {'name': "(name + '_reduce_sum')"}), "(l1_value, name=name + '_reduce_sum')\n", (143585, 143622), True, 'import oneflow as flow\n'), ((147005, 147068), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_weighted_loss'], {'name': "(name + '_reduce_sum')"}), "(_weighted_loss, name=name + '_reduce_sum')\n", (147025, 147068), True, 'import oneflow as flow\n'), ((151528, 151591), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_weighted_loss'], {'name': "(name + '_reduce_sum')"}), "(_weighted_loss, name=name + '_reduce_sum')\n", (151548, 151591), True, 'import oneflow as flow\n'), ((154718, 154790), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['mean_squared_difference'], {'name': "(name + '_reduce_sum')"}), "(mean_squared_difference, name=name + '_reduce_sum')\n", (154738, 154790), True, 'import oneflow as flow\n'), ((158003, 158074), 'oneflow.math.reduce_mean', 'flow.math.reduce_mean', (['_clipped_margin_loss'], {'name': "(name 
+ '_reduce_mean')"}), "(_clipped_margin_loss, name=name + '_reduce_mean')\n", (158024, 158074), True, 'import oneflow as flow\n'), ((158100, 158169), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_clipped_margin_loss'], {'name': "(name + '_reduce_sum')"}), "(_clipped_margin_loss, name=name + '_reduce_sum')\n", (158120, 158169), True, 'import oneflow as flow\n'), ((162059, 162108), 'oneflow.math.square', 'flow.math.square', (['_abs_val'], {'name': "(name + '_square')"}), "(_abs_val, name=name + '_square')\n", (162075, 162108), True, 'import oneflow as flow\n'), ((162129, 162184), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_norm'], {'axis': '(1)', 'name': "(name + '_sum')"}), "(_norm, axis=1, name=name + '_sum')\n", (162149, 162184), True, 'import oneflow as flow\n'), ((162209, 162251), 'oneflow.math.sqrt', 'flow.math.sqrt', (['_norm'], {'name': "(name + '_sqrt')"}), "(_norm, name=name + '_sqrt')\n", (162223, 162251), True, 'import oneflow as flow\n'), ((162292, 162385), 'oneflow.constant_like', 'flow.constant_like', ([], {'like': '_abs_val', 'value': 'p', 'dtype': 'flow.float32', 'name': "(name + '_p_constant')"}), "(like=_abs_val, value=p, dtype=flow.float32, name=name +\n '_p_constant')\n", (162310, 162385), True, 'import oneflow as flow\n'), ((162432, 162489), 'oneflow.math.pow', 'flow.math.pow', (['_abs_val', '_p_constant'], {'name': "(name + '_pow1')"}), "(_abs_val, _p_constant, name=name + '_pow1')\n", (162445, 162489), True, 'import oneflow as flow\n'), ((162510, 162565), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_norm'], {'axis': '(1)', 'name': "(name + '_sum')"}), "(_norm, axis=1, name=name + '_sum')\n", (162530, 162565), True, 'import oneflow as flow\n'), ((162603, 162710), 'oneflow.constant_like', 'flow.constant_like', ([], {'like': '_norm', 'value': '(1.0 / p)', 'dtype': 'flow.float32', 'name': "(name + '_p_reciprocal_constant')"}), "(like=_norm, value=1.0 / p, dtype=flow.float32, name=name +\n '_p_reciprocal_constant')\n", 
(162621, 162710), True, 'import oneflow as flow\n'), ((162810, 162879), 'oneflow.math.pow', 'flow.math.pow', (['_norm', '_p_reciprocal_constant'], {'name': "(name + '_norm_val')"}), "(_norm, _p_reciprocal_constant, name=name + '_norm_val')\n", (162823, 162879), True, 'import oneflow as flow\n'), ((163648, 163710), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_triplet_loss'], {'name': "(name + '_reduce_sum')"}), "(_triplet_loss, name=name + '_reduce_sum')\n", (163668, 163710), True, 'import oneflow as flow\n'), ((170041, 170081), 'oneflow.math.exp', 'flow.math.exp', (['target'], {'name': "(name + 'exp')"}), "(target, name=name + 'exp')\n", (170054, 170081), True, 'import oneflow as flow\n'), ((170440, 170488), 'oneflow.math.rint', 'flow.math.rint', (['(target + 0.5)'], {'name': "(name + 'rint')"}), "(target + 0.5, name=name + 'rint')\n", (170454, 170488), True, 'import oneflow as flow\n'), ((170957, 171018), 'oneflow.math.reduce_sum', 'flow.math.reduce_sum', (['_kl_div_loss'], {'name': "(name + '_reduce_sum')"}), "(_kl_div_loss, name=name + '_reduce_sum')\n", (170977, 171018), True, 'import oneflow as flow\n'), ((34409, 34432), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (34430, 34432), True, 'import oneflow as flow\n'), ((34620, 34644), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (34642, 34644), True, 'import oneflow as flow\n'), ((36624, 36647), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (36645, 36647), True, 'import oneflow as flow\n'), ((36835, 36859), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (36857, 36859), True, 'import oneflow as flow\n'), ((43201, 43221), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (43219, 43221), True, 'import oneflow as flow\n'), ((43569, 43605), 'oneflow.reshape', 'flow.reshape', (['scale', 'nd_params_shape'], {}), '(scale, nd_params_shape)\n', (43581, 43605), True, 'import oneflow as flow\n'), 
((43654, 43691), 'oneflow.reshape', 'flow.reshape', (['offset', 'nd_params_shape'], {}), '(offset, nd_params_shape)\n', (43666, 43691), True, 'import oneflow as flow\n'), ((44373, 44443), 'oneflow.constant', 'flow.constant', (['(1)'], {'dtype': 'params_dtype', 'shape': 'params_shape', 'name': '"""gamma"""'}), "(1, dtype=params_dtype, shape=params_shape, name='gamma')\n", (44386, 44443), True, 'import oneflow as flow\n'), ((44522, 44591), 'oneflow.constant', 'flow.constant', (['(0)'], {'dtype': 'params_dtype', 'shape': 'params_shape', 'name': '"""beta"""'}), "(0, dtype=params_dtype, shape=params_shape, name='beta')\n", (44535, 44591), True, 'import oneflow as flow\n'), ((47047, 47067), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (47065, 47067), True, 'import oneflow as flow\n'), ((112723, 112764), 'random.randint', 'random.randint', (['(-sys.maxsize)', 'sys.maxsize'], {}), '(-sys.maxsize, sys.maxsize)\n', (112737, 112764), False, 'import random\n'), ((146526, 146546), 'oneflow.math.log', 'flow.math.log', (['input'], {}), '(input)\n', (146539, 146546), True, 'import oneflow as flow\n'), ((146564, 146588), 'oneflow.math.log', 'flow.math.log', (['(1 - input)'], {}), '(1 - input)\n', (146577, 146588), True, 'import oneflow as flow\n'), ((151069, 151096), 'oneflow.math.exp', 'flow.math.exp', (['_neg_max_val'], {}), '(_neg_max_val)\n', (151082, 151096), True, 'import oneflow as flow\n'), ((151099, 151135), 'oneflow.math.exp', 'flow.math.exp', (['(_neg_input - _max_val)'], {}), '(_neg_input - _max_val)\n', (151112, 151135), True, 'import oneflow as flow\n'), ((170148, 170188), 'oneflow.math.log', 'flow.math.log', (['target'], {'name': "(name + 'log')"}), "(target, name=name + 'log')\n", (170161, 170188), True, 'import oneflow as flow\n'), ((13650, 13670), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (13668, 13670), True, 'import oneflow as flow\n'), ((21208, 21228), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', 
(21226, 21228), True, 'import oneflow as flow\n'), ((29317, 29337), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (29335, 29337), True, 'import oneflow as flow\n'), ((44181, 44201), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (44199, 44201), True, 'import oneflow as flow\n'), ((48072, 48092), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (48090, 48092), True, 'import oneflow as flow\n'), ((60727, 60762), 'oneflow.current_global_function_desc', 'flow.current_global_function_desc', ([], {}), '()\n', (60760, 60762), True, 'import oneflow as flow\n'), ((116687, 116722), 'oneflow.current_global_function_desc', 'flow.current_global_function_desc', ([], {}), '()\n', (116720, 116722), True, 'import oneflow as flow\n'), ((20253, 20273), 'oneflow.current_scope', 'flow.current_scope', ([], {}), '()\n', (20271, 20273), True, 'import oneflow as flow\n'), ((150853, 150880), 'oneflow.math.exp', 'flow.math.exp', (['_neg_max_val'], {}), '(_neg_max_val)\n', (150866, 150880), True, 'import oneflow as flow\n'), ((150883, 150919), 'oneflow.math.exp', 'flow.math.exp', (['(_neg_input - _max_val)'], {}), '(_neg_input - _max_val)\n', (150896, 150919), True, 'import oneflow as flow\n'), ((82792, 82823), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaxPool2D_"""'], {}), "('MaxPool2D_')\n", (82809, 82823), True, 'import oneflow.python.framework.id_util as id_util\n'), ((85712, 85743), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""AvgPool2D_"""'], {}), "('AvgPool2D_')\n", (85729, 85743), True, 'import oneflow.python.framework.id_util as id_util\n'), ((88629, 88660), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaxPool3D_"""'], {}), "('MaxPool3D_')\n", (88646, 88660), True, 'import oneflow.python.framework.id_util as id_util\n'), ((91553, 91584), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""AvgPool3D_"""'], {}), 
"('AvgPool3D_')\n", (91570, 91584), True, 'import oneflow.python.framework.id_util as id_util\n'), ((112423, 112459), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""RandomMaskLike_"""'], {}), "('RandomMaskLike_')\n", (112440, 112459), True, 'import oneflow.python.framework.id_util as id_util\n'), ((113327, 113374), 'oneflow.user_op_module_builder', 'flow.user_op_module_builder', (['"""random_mask_like"""'], {}), "('random_mask_like')\n", (113354, 113374), True, 'import oneflow as flow\n'), ((48177, 48203), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (48197, 48203), True, 'import oneflow as flow\n'), ((109724, 109765), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SigmoidCrossEntropy_"""'], {}), "('SigmoidCrossEntropy_')\n", (109741, 109765), True, 'import oneflow.python.framework.id_util as id_util\n'), ((137229, 137255), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (137249, 137255), True, 'import oneflow as flow\n'), ((94500, 94529), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Softmax_"""'], {}), "('Softmax_')\n", (94517, 94529), True, 'import oneflow.python.framework.id_util as id_util\n'), ((132172, 132198), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (132192, 132198), True, 'import oneflow as flow\n'), ((133598, 133631), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""HardSigmoid_"""'], {}), "('HardSigmoid_')\n", (133615, 133631), True, 'import oneflow.python.framework.id_util as id_util\n'), ((55092, 55118), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (55112, 55118), True, 'import oneflow as flow\n'), ((57829, 57855), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (57849, 57855), True, 'import oneflow as flow\n'), ((97423, 97452), 'oneflow.python.framework.id_util.UniqueStr', 
'id_util.UniqueStr', (['"""Softmax_"""'], {}), "('Softmax_')\n", (97440, 97452), True, 'import oneflow.python.framework.id_util as id_util\n'), ((117022, 117048), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (117042, 117048), True, 'import oneflow as flow\n'), ((130678, 130709), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""LeakyRelu_"""'], {}), "('LeakyRelu_')\n", (130695, 130709), True, 'import oneflow.python.framework.id_util as id_util\n'), ((139086, 139112), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (139106, 139112), True, 'import oneflow as flow\n'), ((102945, 102986), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SoftmaxCrossEntropy_"""'], {}), "('SoftmaxCrossEntropy_')\n", (102962, 102986), True, 'import oneflow.python.framework.id_util as id_util\n'), ((99998, 100040), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SparseCrossEntropyMs_"""'], {}), "('SparseCrossEntropyMs_')\n", (100015, 100040), True, 'import oneflow.python.framework.id_util as id_util\n'), ((100465, 100505), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SparseCrossEntropy_"""'], {}), "('SparseCrossEntropy_')\n", (100482, 100505), True, 'import oneflow.python.framework.id_util as id_util\n'), ((107412, 107470), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""DistributedSparseSoftmaxCrossEntropy_"""'], {}), "('DistributedSparseSoftmaxCrossEntropy_')\n", (107429, 107470), True, 'import oneflow.python.framework.id_util as id_util\n'), ((61427, 61453), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (61447, 61453), True, 'import oneflow as flow\n'), ((105794, 105843), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SparseSoftmaxCrossEntropyMs_"""'], {}), "('SparseSoftmaxCrossEntropyMs_')\n", (105811, 105843), True, 'import 
oneflow.python.framework.id_util as id_util\n'), ((106330, 106377), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""SparseSoftmaxCrossEntropy_"""'], {}), "('SparseSoftmaxCrossEntropy_')\n", (106347, 106377), True, 'import oneflow.python.framework.id_util as id_util\n'), ((7263, 7289), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Conv_"""'], {}), "('Conv_')\n", (7280, 7289), True, 'import oneflow.python.framework.id_util as id_util\n'), ((44654, 44680), 'oneflow.user_op_builder', 'flow.user_op_builder', (['name'], {}), '(name)\n', (44674, 44680), True, 'import oneflow as flow\n'), ((125995, 126025), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Deconv2d_"""'], {}), "('Deconv2d_')\n", (126012, 126025), True, 'import oneflow.python.framework.id_util as id_util\n'), ((128927, 128957), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Deconv2d_"""'], {}), "('Deconv2d_')\n", (128944, 128957), True, 'import oneflow.python.framework.id_util as id_util\n'), ((124836, 124866), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""Deconv2d_"""'], {}), "('Deconv2d_')\n", (124853, 124866), True, 'import oneflow.python.framework.id_util as id_util\n'), ((69737, 69768), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaxPool1d_"""'], {}), "('MaxPool1d_')\n", (69754, 69768), True, 'import oneflow.python.framework.id_util as id_util\n'), ((74744, 74775), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaxPool2d_"""'], {}), "('MaxPool2d_')\n", (74761, 74775), True, 'import oneflow.python.framework.id_util as id_util\n'), ((80085, 80116), 'oneflow.python.framework.id_util.UniqueStr', 'id_util.UniqueStr', (['"""MaxPool3d_"""'], {}), "('MaxPool3d_')\n", (80102, 80116), True, 'import oneflow.python.framework.id_util as id_util\n')] |
from typing import Dict, List
import jsonlines
import random
import oneflow as flow
from oneflow.utils.data import Dataset
def load_data(name: str, path: str) -> List:
    """Read a corpus file and return it as a list of examples.

    Supported formats:
      * "snli-unsup": jsonlines; returns the 'origin' sentence per line.
      * "snli-sup":   jsonlines; returns (origin, entailment, contradiction) triplets.
      * "lqcmc":      tab-separated text; returns the first column per line.
      * "sts":        '||'-separated text; returns (sent_a, sent_b, score) per line.
    """
    def _snli_unsup(p):
        with jsonlines.open(p, 'r') as reader:
            return [row.get('origin') for row in reader]

    def _snli_sup(p):
        with jsonlines.open(p, 'r') as reader:
            return [(row['origin'], row['entailment'], row['contradiction']) for row in reader]

    def _lqcmc(p):
        with open(p, 'r', encoding='utf8') as reader:
            return [row.strip().split('\t')[0] for row in reader]

    def _sts(p):
        with open(p, 'r', encoding='utf8') as reader:
            pairs = []
            for row in reader:
                cols = row.split("||")
                pairs.append((cols[1], cols[2], cols[3]))
            return pairs

    assert name in ["snli-sup", "snli-unsup", "lqcmc", "sts"]
    loaders = {
        "snli-sup": _snli_sup,
        "snli-unsup": _snli_unsup,
        "sts": _sts,
        "lqcmc": _lqcmc,
    }
    return loaders[name](path)
class TrainDataset(Dataset):
    """SimCSE training dataset.

    Depending on ``task``, ``__getitem__`` yields either:
      * "unsup": the same sentence encoded twice (positive pair created by
        the encoder's own dropout), or
      * "sup":   an (anchor, entailment, contradiction) triplet.

    Args:
        name: corpus format of ``path`` ("snli-sup", "snli-unsup", "lqcmc", "sts").
        path: corpus file to load.
        tokenizer: HuggingFace-style tokenizer (must provide tokenize /
            convert_tokens_to_ids and pad/cls/sep special tokens).
        max_len: fixed sequence length after truncation and padding.
        task: "sup" or "unsup".
        name2, path2: optional second corpus whose first column is appended.
    """

    def __init__(self, name, path, tokenizer, max_len, task=None, name2='sts', path2=None):
        self.task = task
        self.data = load_data(name, path)
        if path2 is not None:
            data2 = load_data(name2, path2)
            data2 = [i[0] for i in data2]
            self.data = self.data + data2
        if 'snli' in name:
            random.shuffle(self.data)
        self.tokenizer = tokenizer
        self.max_len = max_len
        self.pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)
        self.cls_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.cls_token)
        self.sep_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.sep_token)

    def __len__(self):
        return len(self.data)

    def pad_text(self, ids):
        """Pad ``ids`` to ``max_len``; return (padded_ids, attention_mask)."""
        attention_mask = [1] * len(ids)
        ids = ids + [self.pad_id] * (self.max_len - len(ids))
        # BUG FIX: padding positions in the attention mask must be 0, not
        # pad_id.  The original padded the mask with pad_id, which is only
        # correct when pad_id happens to be 0 (true for BERT vocabularies,
        # but not guaranteed for every tokenizer).
        attention_mask = attention_mask + [0] * (self.max_len - len(attention_mask))
        return ids, attention_mask

    def text2id(self, text):
        """Tokenize, truncate to max_len-2, wrap with [CLS]/[SEP], then pad."""
        tokens = self.tokenizer.tokenize(text)
        ids = self.tokenizer.convert_tokens_to_ids(tokens)
        ids = ids[: self.max_len - 2]
        ids = [self.cls_id] + ids + [self.sep_id]
        ids, attention_mask = self.pad_text(ids)
        return ids, attention_mask

    def supervise_task(self, index):
        # Triplet: (anchor, positive/entailment, hard-negative/contradiction).
        ids0, mask0 = self.text2id(self.data[index][0])
        ids1, mask1 = self.text2id(self.data[index][1])
        ids2, mask2 = self.text2id(self.data[index][2])
        return {
            "input_ids" : flow.tensor([ids0, ids1, ids2], dtype=flow.long),
            "attention_mask" : flow.tensor([mask0, mask1, mask2], dtype=flow.long)
        }

    def unsupervise_task(self, index):
        # Duplicate the sentence: the two encoder passes differ only via dropout.
        ids, mask = self.text2id(self.data[index])
        return {
            "input_ids" : flow.tensor([ids, ids], dtype=flow.long),
            "attention_mask" : flow.tensor([mask, mask], dtype=flow.long)
        }

    def __getitem__(self, index):
        if self.task == "sup":
            return self.supervise_task(index)
        elif self.task == "unsup":
            return self.unsupervise_task(index)
        # BUG FIX: fail loudly instead of silently returning None for a typo'd task.
        raise ValueError(f"unknown task: {self.task!r} (expected 'sup' or 'unsup')")
class TestDataset(TrainDataset):
    """Evaluation dataset: each row is (sentence_a, sentence_b, similarity label).

    Reuses TrainDataset's loading/tokenization; only item construction differs.
    """

    def __getitem__(self, index):
        # Third column holds the gold similarity score; cast to int for the metric.
        # assumes the score column is an integral string — TODO confirm for STS data
        label = int(self.data[index][2])
        ids0, mask0 = self.text2id(self.data[index][0])
        ids1, mask1 = self.text2id(self.data[index][1])
        return {
            "input_ids" : flow.tensor([ids0, ids1], dtype=flow.long),
            "attention_mask" : flow.tensor([mask0, mask1], dtype=flow.long),
            "labels" : label
} | [
"oneflow.tensor"
] | [((220, 245), 'jsonlines.open', 'jsonlines.open', (['path', '"""r"""'], {}), "(path, 'r')\n", (234, 245), False, 'import jsonlines\n'), ((362, 387), 'jsonlines.open', 'jsonlines.open', (['path', '"""r"""'], {}), "(path, 'r')\n", (376, 387), False, 'import jsonlines\n'), ((1530, 1555), 'random.shuffle', 'random.shuffle', (['self.data'], {}), '(self.data)\n', (1544, 1555), False, 'import random\n'), ((2758, 2806), 'oneflow.tensor', 'flow.tensor', (['[ids0, ids1, ids2]'], {'dtype': 'flow.long'}), '([ids0, ids1, ids2], dtype=flow.long)\n', (2769, 2806), True, 'import oneflow as flow\n'), ((2839, 2890), 'oneflow.tensor', 'flow.tensor', (['[mask0, mask1, mask2]'], {'dtype': 'flow.long'}), '([mask0, mask1, mask2], dtype=flow.long)\n', (2850, 2890), True, 'import oneflow as flow\n'), ((3039, 3079), 'oneflow.tensor', 'flow.tensor', (['[ids, ids]'], {'dtype': 'flow.long'}), '([ids, ids], dtype=flow.long)\n', (3050, 3079), True, 'import oneflow as flow\n'), ((3112, 3154), 'oneflow.tensor', 'flow.tensor', (['[mask, mask]'], {'dtype': 'flow.long'}), '([mask, mask], dtype=flow.long)\n', (3123, 3154), True, 'import oneflow as flow\n'), ((3625, 3667), 'oneflow.tensor', 'flow.tensor', (['[ids0, ids1]'], {'dtype': 'flow.long'}), '([ids0, ids1], dtype=flow.long)\n', (3636, 3667), True, 'import oneflow as flow\n'), ((3700, 3744), 'oneflow.tensor', 'flow.tensor', (['[mask0, mask1]'], {'dtype': 'flow.long'}), '([mask0, mask1], dtype=flow.long)\n', (3711, 3744), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
import numpy as np
from oneflow.compatible import single_client as flow
from oneflow.compatible.single_client import typing as oft
import os
# Shared lazy-graph job configuration: every job in this file is compiled
# with float32 as the default data type.
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
@flow.unittest.skip_unless_1n1d()
class TestProfilerNvtxRange(flow.unittest.TestCase):
    """Smoke test for flow.profiler.nvtx_start/nvtx_end around op sequences."""

    @unittest.skipIf(os.getenv("ONEFLOW_TEST_CPU_ONLY"), "only test cpu cases")
    def test_profiler_nvtx_range(test_case):
        @flow.global_function(type="train", function_config=func_config)
        def nvtx_range_job(x: oft.Numpy.Placeholder((4, 4, 1024, 1024))):
            # A trainable variable so the job has a parameter for SGD to update.
            x += flow.get_variable(
                name="v1",
                shape=(1,),
                dtype=flow.float,
                initializer=flow.zeros_initializer(),
            )
            x = flow.math.relu(x)
            # First NVTX range: five chained softmax ops labelled "softmax".
            x = flow.profiler.nvtx_start(x, mark_prefix="softmax")
            for _ in range(5):
                x = flow.nn.softmax(x)
            x = flow.profiler.nvtx_end(x, mark_prefix="softmax")
            x = flow.math.relu(x)
            # Second NVTX range: six chained gelu ops labelled "gelu".
            x = flow.profiler.nvtx_start(x, mark_prefix="gelu")
            for _ in range(6):
                x = flow.math.gelu(x)
            x = flow.profiler.nvtx_end(x, mark_prefix="gelu")
            flow.optimizer.SGD(
                flow.optimizer.PiecewiseConstantScheduler([], [0]), momentum=0
            ).minimize(x)
            return flow.identity(x)

        data = np.random.rand(4, 4, 1024, 1024).astype(np.float32)
        # Run a few iterations so the profiler sees repeated ranges.
        for _ in range(3):
            nvtx_range_job(data).get()
if __name__ == "__main__":
    # Allow running this test file directly: `python <file>`.
    unittest.main()
| [
"oneflow.compatible.single_client.nn.softmax",
"oneflow.compatible.single_client.FunctionConfig",
"oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler",
"oneflow.compatible.single_client.profiler.nvtx_end",
"oneflow.compatible.single_client.global_function",
"oneflow.compatible.single_cl... | [((762, 783), 'oneflow.compatible.single_client.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (781, 783), True, 'from oneflow.compatible import single_client as flow\n'), ((829, 861), 'oneflow.compatible.single_client.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (859, 861), True, 'from oneflow.compatible import single_client as flow\n'), ((2433, 2448), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2446, 2448), False, 'import unittest\n'), ((1049, 1112), 'oneflow.compatible.single_client.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (1069, 1112), True, 'from oneflow.compatible import single_client as flow\n'), ((936, 970), 'os.getenv', 'os.getenv', (['"""ONEFLOW_TEST_CPU_ONLY"""'], {}), "('ONEFLOW_TEST_CPU_ONLY')\n", (945, 970), False, 'import os\n'), ((1396, 1413), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (1410, 1413), True, 'from oneflow.compatible import single_client as flow\n'), ((1430, 1480), 'oneflow.compatible.single_client.profiler.nvtx_start', 'flow.profiler.nvtx_start', (['x'], {'mark_prefix': '"""softmax"""'}), "(x, mark_prefix='softmax')\n", (1454, 1480), True, 'from oneflow.compatible import single_client as flow\n'), ((1497, 1515), 'oneflow.compatible.single_client.nn.softmax', 'flow.nn.softmax', (['x'], {}), '(x)\n', (1512, 1515), True, 'from oneflow.compatible import single_client as flow\n'), ((1532, 1550), 'oneflow.compatible.single_client.nn.softmax', 'flow.nn.softmax', (['x'], {}), '(x)\n', (1547, 1550), True, 'from oneflow.compatible import single_client as flow\n'), ((1567, 1585), 'oneflow.compatible.single_client.nn.softmax', 'flow.nn.softmax', (['x'], {}), '(x)\n', (1582, 1585), True, 'from oneflow.compatible import single_client as flow\n'), ((1602, 1620), 
'oneflow.compatible.single_client.nn.softmax', 'flow.nn.softmax', (['x'], {}), '(x)\n', (1617, 1620), True, 'from oneflow.compatible import single_client as flow\n'), ((1637, 1655), 'oneflow.compatible.single_client.nn.softmax', 'flow.nn.softmax', (['x'], {}), '(x)\n', (1652, 1655), True, 'from oneflow.compatible import single_client as flow\n'), ((1672, 1720), 'oneflow.compatible.single_client.profiler.nvtx_end', 'flow.profiler.nvtx_end', (['x'], {'mark_prefix': '"""softmax"""'}), "(x, mark_prefix='softmax')\n", (1694, 1720), True, 'from oneflow.compatible import single_client as flow\n'), ((1737, 1754), 'oneflow.compatible.single_client.math.relu', 'flow.math.relu', (['x'], {}), '(x)\n', (1751, 1754), True, 'from oneflow.compatible import single_client as flow\n'), ((1771, 1818), 'oneflow.compatible.single_client.profiler.nvtx_start', 'flow.profiler.nvtx_start', (['x'], {'mark_prefix': '"""gelu"""'}), "(x, mark_prefix='gelu')\n", (1795, 1818), True, 'from oneflow.compatible import single_client as flow\n'), ((1835, 1852), 'oneflow.compatible.single_client.math.gelu', 'flow.math.gelu', (['x'], {}), '(x)\n', (1849, 1852), True, 'from oneflow.compatible import single_client as flow\n'), ((1869, 1886), 'oneflow.compatible.single_client.math.gelu', 'flow.math.gelu', (['x'], {}), '(x)\n', (1883, 1886), True, 'from oneflow.compatible import single_client as flow\n'), ((1903, 1920), 'oneflow.compatible.single_client.math.gelu', 'flow.math.gelu', (['x'], {}), '(x)\n', (1917, 1920), True, 'from oneflow.compatible import single_client as flow\n'), ((1937, 1954), 'oneflow.compatible.single_client.math.gelu', 'flow.math.gelu', (['x'], {}), '(x)\n', (1951, 1954), True, 'from oneflow.compatible import single_client as flow\n'), ((1971, 1988), 'oneflow.compatible.single_client.math.gelu', 'flow.math.gelu', (['x'], {}), '(x)\n', (1985, 1988), True, 'from oneflow.compatible import single_client as flow\n'), ((2005, 2022), 'oneflow.compatible.single_client.math.gelu', 
'flow.math.gelu', (['x'], {}), '(x)\n', (2019, 2022), True, 'from oneflow.compatible import single_client as flow\n'), ((2039, 2084), 'oneflow.compatible.single_client.profiler.nvtx_end', 'flow.profiler.nvtx_end', (['x'], {'mark_prefix': '"""gelu"""'}), "(x, mark_prefix='gelu')\n", (2061, 2084), True, 'from oneflow.compatible import single_client as flow\n'), ((2241, 2257), 'oneflow.compatible.single_client.identity', 'flow.identity', (['x'], {}), '(x)\n', (2254, 2257), True, 'from oneflow.compatible import single_client as flow\n'), ((1143, 1184), 'oneflow.compatible.single_client.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['(4, 4, 1024, 1024)'], {}), '((4, 4, 1024, 1024))\n', (1164, 1184), True, 'from oneflow.compatible.single_client import typing as oft\n'), ((2275, 2307), 'numpy.random.rand', 'np.random.rand', (['(4)', '(4)', '(1024)', '(1024)'], {}), '(4, 4, 1024, 1024)\n', (2289, 2307), True, 'import numpy as np\n'), ((1340, 1364), 'oneflow.compatible.single_client.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (1362, 1364), True, 'from oneflow.compatible import single_client as flow\n'), ((2133, 2183), 'oneflow.compatible.single_client.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0]'], {}), '([], [0])\n', (2174, 2183), True, 'from oneflow.compatible import single_client as flow\n')] |
import oneflow as flow
from oneflow import nn
def get_loss(name):
    """Return the margin-loss head selected by ``name``.

    Args:
        name: "cosface" or "arcface".

    Returns:
        A freshly constructed CosFace or ArcFace module.

    Raises:
        ValueError: if ``name`` is not a known loss.
    """
    if name == "cosface":
        return CosFace()
    elif name == "arcface":
        return ArcFace()
    else:
        # Name the offending value so misconfiguration is easy to diagnose
        # (the original raised a bare ValueError with no message).
        raise ValueError(f"unknown loss name: {name!r} (expected 'cosface' or 'arcface')")
class CrossEntropyLoss_sbp(nn.Module):
    """Sparse softmax cross-entropy reduced to a scalar mean.

    Thin module wrapper so the loss composes with SBP/consistent training.
    """

    def __init__(self):
        super(CrossEntropyLoss_sbp, self).__init__()

    def forward(self, logits, label):
        per_sample = flow._C.sparse_softmax_cross_entropy(logits, label)
        return flow.mean(per_sample)
def get_loss(name):
if name == "cosface":
return CosFace()
elif name == "arcface":
return ArcFace()
else:
raise ValueError()
class CosFace(nn.Module):
    """Additive cosine-margin head: subtract margin m from each sample's
    target-class cosine, then scale all logits by s.

    Rows whose label is -1 are dropped from the output (no local class).
    """

    def __init__(self, s=64.0, m=0.40):
        super(CosFace, self).__init__()
        self.s = s  # logit scale
        self.m = m  # additive cosine margin

    def forward(self, cosine, label):
        # Keep only rows that carry a valid (non -1) label.
        valid = flow.where(label != -1)[0]
        # One-hot margin matrix: m at each row's target column, 0 elsewhere.
        margin = flow.zeros(valid.size()[0], cosine.size()[1], device=cosine.device)
        margin = flow.scatter(margin, 1, label[valid, None], self.m)
        return (cosine[valid] - margin) * self.s
class ArcFace(nn.Module):
    """Additive angular-margin head (ArcFace): adds margin m to the *angle*
    of each valid sample's target logit, then rescales all logits by s.

    NOTE(review): unlike CosFace above, forward() mutates ``cosine`` IN PLACE
    (acos_/cos_/mul_) and returns the full, modified logit matrix rather than
    only the valid rows — confirm callers expect this asymmetry.
    """

    def __init__(self, s=64.0, m=0.5):
        super(ArcFace, self).__init__()
        self.s = s  # logit scale
        self.m = m  # angular margin (radians)

    def forward(self, cosine: flow.Tensor, label):
        # Rows labelled -1 carry no class here and receive no margin.
        index = flow.where(label != -1)[0]
        # One-hot margin: m at each valid row's target column, 0 elsewhere.
        m_hot = flow.zeros(index.size()[0], cosine.size()[
            1], device=cosine.device)
        m_hot.scatter_(1, label[index, None], self.m)
        # Work in angle space: theta = arccos(cos); the order of the in-place
        # ops below is critical (acos before adding the margin, cos after).
        cosine.acos_()
        cosine[index] += m_hot
        cosine.cos_().mul_(self.s)
        return cosine
| [
"oneflow._C.sparse_softmax_cross_entropy",
"oneflow.where",
"oneflow.scatter",
"oneflow.mean"
] | [((381, 432), 'oneflow._C.sparse_softmax_cross_entropy', 'flow._C.sparse_softmax_cross_entropy', (['logits', 'label'], {}), '(logits, label)\n', (417, 432), True, 'import oneflow as flow\n'), ((461, 476), 'oneflow.mean', 'flow.mean', (['loss'], {}), '(loss)\n', (470, 476), True, 'import oneflow as flow\n'), ((1017, 1067), 'oneflow.scatter', 'flow.scatter', (['m_hot', '(1)', 'label[index, None]', 'self.m'], {}), '(m_hot, 1, label[index, None], self.m)\n', (1029, 1067), True, 'import oneflow as flow\n'), ((861, 884), 'oneflow.where', 'flow.where', (['(label != -1)'], {}), '(label != -1)\n', (871, 884), True, 'import oneflow as flow\n'), ((1370, 1393), 'oneflow.where', 'flow.where', (['(label != -1)'], {}), '(label != -1)\n', (1380, 1393), True, 'import oneflow as flow\n')] |
import oneflow as flow
from .recurrent import rnn
def _FullyConnected(input_blob, weight_blob, bias_blob):
    """Affine transform: ``input @ weight``, plus bias when one is given.

    Args:
        input_blob: input tensor/blob, shape [..., in_features].
        weight_blob: weight matrix, shape [in_features, out_features].
        bias_blob: bias vector or None to skip the bias add (callers pass
            None for the recurrent kernels).
    """
    output_blob = flow.matmul(input_blob, weight_blob)
    # Explicit None check: relying on the truthiness of a framework
    # blob/tensor is ambiguous (many tensor types raise or broadcast in
    # __bool__); only the presence of a bias should be tested here.
    if bias_blob is not None:
        output_blob = flow.nn.bias_add(output_blob, bias_blob)
    return output_blob
class LSTMCell:
    """One LSTM time step (input/forget/cell/output gates) for the rnn() driver.

    Variables are created in _build() on each __call__, under the namespace
    'layer<layer_index>/<direction>/<gate>-{kernel,recurrent-kernel,bias}'.
    Presumably flow.get_variable returns the existing variable when the name
    already exists, so repeated builds reuse the same weights — this matches
    the original implementation, which also rebuilt on every call.
    """

    # Human-readable gate name (used in variable names) -> short attribute
    # suffix used by __call__ (kernel_blob_i, bias_blob_f, ...).
    _GATES = (('input', 'i'), ('forget', 'f'), ('cell', 'c'), ('output', 'o'))

    def __init__(self, units,
                 activation=flow.math.tanh,
                 recurrent_activation=flow.math.sigmoid,
                 use_bias=True,
                 kernel_initializer=flow.glorot_uniform_initializer(),
                 recurrent_initializer=flow.glorot_normal_initializer(),  # should be orthogonal_initializer
                 bias_initializer=flow.zeros_initializer(),
                 unit_forget_bias=True,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 bias_regularizer=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 dtype=flow.float32,
                 trainable=True,
                 **kwargs):
        self.units = units
        self.activation = activation
        self.recurrent_activation = recurrent_activation
        self.use_bias = use_bias
        self.kernel_initializer = kernel_initializer
        self.recurrent_initializer = recurrent_initializer
        self.bias_initializer = bias_initializer
        self.unit_forget_bias = unit_forget_bias
        self.kernel_regularizer = kernel_regularizer
        self.recurrent_regularizer = recurrent_regularizer
        self.bias_regularizer = bias_regularizer
        self.dropout = min(1., max(0., dropout))
        self.dtype = dtype
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.trainable = trainable
        self.layer_index = kwargs.get('layer_index', '')
        # BUG FIX: the original tested "'layer_index' in kwargs" on this line,
        # so a caller passing only direction=... had it silently ignored, and
        # a caller passing only layer_index=... crashed with a KeyError.
        self.direction = kwargs.get('direction', 'forward')

    def _gate_variables(self, gate, input_size):
        """Create and return (kernel, recurrent_kernel, bias) for one gate.

        With unit_forget_bias the forget-gate bias starts at 1 and all other
        biases at 0 (as in the original); otherwise bias_initializer is used.
        Returns None for the bias when use_bias is False.
        """
        if self.unit_forget_bias:
            bias_init = (flow.ones_initializer() if gate == 'forget'
                         else flow.zeros_initializer())
        else:
            bias_init = self.bias_initializer
        kernel = flow.get_variable(
            name=gate + '-kernel',
            shape=[input_size, self.units],
            dtype=self.dtype,
            trainable=self.trainable,
            regularizer=self.kernel_regularizer,
            initializer=self.kernel_initializer,
        )
        recurrent_kernel = flow.get_variable(
            name=gate + '-recurrent-kernel',
            shape=[self.units, self.units],
            dtype=self.dtype,
            trainable=self.trainable,
            regularizer=self.recurrent_regularizer,
            initializer=self.recurrent_initializer,
        )
        bias = flow.get_variable(
            name=gate + '-bias',
            shape=[self.units],
            dtype=self.dtype,
            trainable=self.trainable,
            regularizer=self.bias_regularizer,
            initializer=bias_init,
        ) if self.use_bias else None
        return kernel, recurrent_kernel, bias

    def _build(self, inputs):
        """Create all four gates' variables under the layer/direction scopes.

        The original spelled out the four gates by hand (~120 duplicated
        lines); this loop creates byte-identical variable names in the same
        order (input, forget, cell, output).
        """
        input_size = inputs.shape[-1]
        with flow.scope.namespace('layer' + str(self.layer_index)):
            with flow.scope.namespace(self.direction):
                for gate, suffix in self._GATES:
                    kernel, recurrent_kernel, bias = self._gate_variables(gate, input_size)
                    setattr(self, 'kernel_blob_' + suffix, kernel)
                    setattr(self, 'recurrent_kernel_blob_' + suffix, recurrent_kernel)
                    setattr(self, 'bias_blob_' + suffix, bias)

    def __call__(self, inputs, states):
        """Apply one LSTM step: returns (output, (new_hidden, new_cell))."""
        self._build(inputs)
        hx = states[0]
        cx = states[1]

        # Each gate gets its own independently sampled dropout mask, exactly
        # as in the original (four separate dropout ops per stream).
        if 0 < self.dropout < 1.:
            inputs_i, inputs_f, inputs_c, inputs_o = (
                flow.nn.dropout(inputs, rate=self.dropout) for _ in range(4))
        else:
            inputs_i = inputs_f = inputs_c = inputs_o = inputs
        if 0 < self.recurrent_dropout < 1.:
            hx_i, hx_f, hx_c, hx_o = (
                flow.nn.dropout(hx, rate=self.recurrent_dropout) for _ in range(4))
        else:
            hx_i = hx_f = hx_c = hx_o = hx

        # Input-side contributions carry the bias; recurrent side does not.
        x_i = _FullyConnected(inputs_i, self.kernel_blob_i, self.bias_blob_i)  # input gate
        x_f = _FullyConnected(inputs_f, self.kernel_blob_f, self.bias_blob_f)  # forget gate
        x_c = _FullyConnected(inputs_c, self.kernel_blob_c, self.bias_blob_c)  # cell candidate
        x_o = _FullyConnected(inputs_o, self.kernel_blob_o, self.bias_blob_o)  # output gate
        h_i = _FullyConnected(hx_i, self.recurrent_kernel_blob_i, None)
        h_f = _FullyConnected(hx_f, self.recurrent_kernel_blob_f, None)
        h_c = _FullyConnected(hx_c, self.recurrent_kernel_blob_c, None)
        h_o = _FullyConnected(hx_o, self.recurrent_kernel_blob_o, None)

        i_gate = self.recurrent_activation(x_i + h_i)
        f_gate = self.recurrent_activation(x_f + h_f)
        cell_gate = self.activation(x_c + h_c)
        o_gate = self.recurrent_activation(x_o + h_o)

        # Standard LSTM state update.
        cy = f_gate * cx + i_gate * cell_gate
        hy = o_gate * self.activation(cy)
        return hy, (hy, cy)
def lstm(inputs,
        units,
        activation=flow.math.tanh,
        recurrent_activation=flow.math.sigmoid,
        use_bias=True,
        kernel_initializer=flow.glorot_uniform_initializer(),
        recurrent_initializer=flow.glorot_normal_initializer(),  # should be orthogonal_initializer
        bias_initializer=flow.zeros_initializer(),
        unit_forget_bias=True,
        kernel_regularizer=None,
        recurrent_regularizer=None,
        bias_regularizer=None,
        dropout=0.,
        recurrent_dropout=0.,
        return_sequences=False,
        initial_state=None,
        **kwargs):
    """Run a single-direction LSTM over ``inputs`` via the generic rnn() driver."""
    cell = LSTMCell(units,
                    activation=activation,
                    recurrent_activation=recurrent_activation,
                    use_bias=use_bias,
                    kernel_initializer=kernel_initializer,
                    recurrent_initializer=recurrent_initializer,
                    bias_initializer=bias_initializer,
                    unit_forget_bias=unit_forget_bias,
                    kernel_regularizer=kernel_regularizer,
                    recurrent_regularizer=recurrent_regularizer,
                    bias_regularizer=bias_regularizer,
                    dropout=dropout,
                    recurrent_dropout=recurrent_dropout)
    return rnn(inputs, cell,
               return_sequences=return_sequences,
               initial_state=initial_state,
               kwargs=kwargs)
def bilstm(
    inputs,
    units,
    activation=flow.math.tanh,
    recurrent_activation=flow.math.sigmoid,
    use_bias=True,
    kernel_initializer=flow.glorot_uniform_initializer(),
    recurrent_initializer=flow.glorot_normal_initializer(),  # should be orthogonal_initializer
    bias_initializer=flow.zeros_initializer(),
    unit_forget_bias=True,
    kernel_regularizer=None,
    recurrent_regularizer=None,
    bias_regularizer=None,
    dropout=0.0,
    recurrent_dropout=0.0,
    return_sequences=False,
    initial_state=None,
    **kwargs
):
    """Apply two LSTM passes to ``inputs`` and sum their outputs.

    Both passes share identical hyper-parameters; the second pass's output is
    reversed along the time axis (axis=1) before the element-wise sum.

    NOTE(review): the second pass consumes ``inputs`` in the ORIGINAL order —
    only its output is reversed. A conventional bidirectional LSTM reverses
    the input sequence before the backward pass; confirm whether ``rnn``
    handles direction internally.
    """
    cell_config = dict(
        activation=activation,
        recurrent_activation=recurrent_activation,
        use_bias=use_bias,
        kernel_initializer=kernel_initializer,
        recurrent_initializer=recurrent_initializer,
        bias_initializer=bias_initializer,
        unit_forget_bias=unit_forget_bias,
        kernel_regularizer=kernel_regularizer,
        recurrent_regularizer=recurrent_regularizer,
        bias_regularizer=bias_regularizer,
        dropout=dropout,
        recurrent_dropout=recurrent_dropout,
    )
    # NOTE(review): ``kwargs`` is forwarded as one keyword literally named
    # ``kwargs`` (not expanded) — preserved from the original call sites.
    forward = rnn(
        inputs,
        LSTMCell(units, **cell_config),
        return_sequences=return_sequences,
        initial_state=initial_state,
        kwargs=kwargs,
    )
    backward = rnn(
        inputs,
        LSTMCell(units, **cell_config),
        return_sequences=return_sequences,
        initial_state=initial_state,
        kwargs=kwargs,
    )
    backward = flow.reverse(backward, axis=1)
    return forward + backward
| [
"oneflow.glorot_uniform_initializer",
"oneflow.scope.namespace",
"oneflow.matmul",
"oneflow.ones_initializer",
"oneflow.nn.dropout",
"oneflow.glorot_normal_initializer",
"oneflow.zeros_initializer",
"oneflow.reverse",
"oneflow.get_variable",
"oneflow.nn.bias_add"
] | [((128, 164), 'oneflow.matmul', 'flow.matmul', (['input_blob', 'weight_blob'], {}), '(input_blob, weight_blob)\n', (139, 164), True, 'import oneflow as flow\n'), ((9226, 9259), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {}), '()\n', (9257, 9259), True, 'import oneflow as flow\n'), ((9292, 9324), 'oneflow.glorot_normal_initializer', 'flow.glorot_normal_initializer', ([], {}), '()\n', (9322, 9324), True, 'import oneflow as flow\n'), ((9388, 9412), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (9410, 9412), True, 'import oneflow as flow\n'), ((10742, 10775), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {}), '()\n', (10773, 10775), True, 'import oneflow as flow\n'), ((10810, 10842), 'oneflow.glorot_normal_initializer', 'flow.glorot_normal_initializer', ([], {}), '()\n', (10840, 10842), True, 'import oneflow as flow\n'), ((10908, 10932), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (10930, 10932), True, 'import oneflow as flow\n'), ((13108, 13138), 'oneflow.reverse', 'flow.reverse', (['backward'], {'axis': '(1)'}), '(backward, axis=1)\n', (13120, 13138), True, 'import oneflow as flow\n'), ((205, 245), 'oneflow.nn.bias_add', 'flow.nn.bias_add', (['output_blob', 'bias_blob'], {}), '(output_blob, bias_blob)\n', (221, 245), True, 'import oneflow as flow\n'), ((491, 524), 'oneflow.glorot_uniform_initializer', 'flow.glorot_uniform_initializer', ([], {}), '()\n', (522, 524), True, 'import oneflow as flow\n'), ((565, 597), 'oneflow.glorot_normal_initializer', 'flow.glorot_normal_initializer', ([], {}), '()\n', (595, 597), True, 'import oneflow as flow\n'), ((669, 693), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (691, 693), True, 'import oneflow as flow\n'), ((7152, 7194), 'oneflow.nn.dropout', 'flow.nn.dropout', (['inputs'], {'rate': 'self.dropout'}), '(inputs, rate=self.dropout)\n', (7167, 7194), True, 'import oneflow as 
flow\n'), ((7218, 7260), 'oneflow.nn.dropout', 'flow.nn.dropout', (['inputs'], {'rate': 'self.dropout'}), '(inputs, rate=self.dropout)\n', (7233, 7260), True, 'import oneflow as flow\n'), ((7284, 7326), 'oneflow.nn.dropout', 'flow.nn.dropout', (['inputs'], {'rate': 'self.dropout'}), '(inputs, rate=self.dropout)\n', (7299, 7326), True, 'import oneflow as flow\n'), ((7350, 7392), 'oneflow.nn.dropout', 'flow.nn.dropout', (['inputs'], {'rate': 'self.dropout'}), '(inputs, rate=self.dropout)\n', (7365, 7392), True, 'import oneflow as flow\n'), ((7599, 7647), 'oneflow.nn.dropout', 'flow.nn.dropout', (['hx'], {'rate': 'self.recurrent_dropout'}), '(hx, rate=self.recurrent_dropout)\n', (7614, 7647), True, 'import oneflow as flow\n'), ((7667, 7715), 'oneflow.nn.dropout', 'flow.nn.dropout', (['hx'], {'rate': 'self.recurrent_dropout'}), '(hx, rate=self.recurrent_dropout)\n', (7682, 7715), True, 'import oneflow as flow\n'), ((7735, 7783), 'oneflow.nn.dropout', 'flow.nn.dropout', (['hx'], {'rate': 'self.recurrent_dropout'}), '(hx, rate=self.recurrent_dropout)\n', (7750, 7783), True, 'import oneflow as flow\n'), ((7803, 7851), 'oneflow.nn.dropout', 'flow.nn.dropout', (['hx'], {'rate': 'self.recurrent_dropout'}), '(hx, rate=self.recurrent_dropout)\n', (7818, 7851), True, 'import oneflow as flow\n'), ((2151, 2187), 'oneflow.scope.namespace', 'flow.scope.namespace', (['self.direction'], {}), '(self.direction)\n', (2171, 2187), True, 'import oneflow as flow\n'), ((2226, 2412), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('input' + '-kernel')", 'shape': '[input_size, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.kernel_regularizer', 'initializer': 'self.kernel_initializer'}), "(name='input' + '-kernel', shape=[input_size, units],\n dtype=dtype, trainable=trainable, regularizer=self.kernel_regularizer,\n initializer=self.kernel_initializer)\n", (2243, 2412), True, 'import oneflow as flow\n'), ((2607, 2805), 'oneflow.get_variable', 
'flow.get_variable', ([], {'name': "('input' + '-recurrent-kernel')", 'shape': '[units, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.recurrent_regularizer', 'initializer': 'self.recurrent_initializer'}), "(name='input' + '-recurrent-kernel', shape=[units, units],\n dtype=dtype, trainable=trainable, regularizer=self.\n recurrent_regularizer, initializer=self.recurrent_initializer)\n", (2624, 2805), True, 'import oneflow as flow\n'), ((3422, 3609), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('forget' + '-kernel')", 'shape': '[input_size, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.kernel_regularizer', 'initializer': 'self.kernel_initializer'}), "(name='forget' + '-kernel', shape=[input_size, units],\n dtype=dtype, trainable=trainable, regularizer=self.kernel_regularizer,\n initializer=self.kernel_initializer)\n", (3439, 3609), True, 'import oneflow as flow\n'), ((3804, 4003), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('forget' + '-recurrent-kernel')", 'shape': '[units, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.recurrent_regularizer', 'initializer': 'self.recurrent_initializer'}), "(name='forget' + '-recurrent-kernel', shape=[units, units],\n dtype=dtype, trainable=trainable, regularizer=self.\n recurrent_regularizer, initializer=self.recurrent_initializer)\n", (3821, 4003), True, 'import oneflow as flow\n'), ((4620, 4806), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('cell' + '-kernel')", 'shape': '[input_size, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.kernel_regularizer', 'initializer': 'self.kernel_initializer'}), "(name='cell' + '-kernel', shape=[input_size, units], dtype\n =dtype, trainable=trainable, regularizer=self.kernel_regularizer,\n initializer=self.kernel_initializer)\n", (4637, 4806), True, 'import oneflow as flow\n'), ((5000, 5197), 'oneflow.get_variable', 'flow.get_variable', ([], 
{'name': "('cell' + '-recurrent-kernel')", 'shape': '[units, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.recurrent_regularizer', 'initializer': 'self.recurrent_initializer'}), "(name='cell' + '-recurrent-kernel', shape=[units, units],\n dtype=dtype, trainable=trainable, regularizer=self.\n recurrent_regularizer, initializer=self.recurrent_initializer)\n", (5017, 5197), True, 'import oneflow as flow\n'), ((5813, 6000), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('output' + '-kernel')", 'shape': '[input_size, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.kernel_regularizer', 'initializer': 'self.kernel_initializer'}), "(name='output' + '-kernel', shape=[input_size, units],\n dtype=dtype, trainable=trainable, regularizer=self.kernel_regularizer,\n initializer=self.kernel_initializer)\n", (5830, 6000), True, 'import oneflow as flow\n'), ((6195, 6394), 'oneflow.get_variable', 'flow.get_variable', ([], {'name': "('output' + '-recurrent-kernel')", 'shape': '[units, units]', 'dtype': 'dtype', 'trainable': 'trainable', 'regularizer': 'self.recurrent_regularizer', 'initializer': 'self.recurrent_initializer'}), "(name='output' + '-recurrent-kernel', shape=[units, units],\n dtype=dtype, trainable=trainable, regularizer=self.\n recurrent_regularizer, initializer=self.recurrent_initializer)\n", (6212, 6394), True, 'import oneflow as flow\n'), ((3246, 3270), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (3268, 3270), True, 'import oneflow as flow\n'), ((4445, 4468), 'oneflow.ones_initializer', 'flow.ones_initializer', ([], {}), '()\n', (4466, 4468), True, 'import oneflow as flow\n'), ((5637, 5661), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (5659, 5661), True, 'import oneflow as flow\n'), ((6836, 6860), 'oneflow.zeros_initializer', 'flow.zeros_initializer', ([], {}), '()\n', (6858, 6860), True, 'import oneflow as flow\n')] |
"""
Copyright 2020 The OneFlow Authors. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from collections import OrderedDict
import numpy as np
import oneflow as flow
import tensorflow as tf
from tensorflow.python.ops import gen_math_ops
from test_util import GenArgList
import oneflow.typing as oft
# Enable on-demand GPU memory growth for TensorFlow so it does not grab all
# device memory up front (presumably to coexist with OneFlow in the same
# process during the comparison tests below — TODO confirm).
gpus = tf.config.experimental.list_physical_devices("GPU")
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
def _random_inputs(params_shape, indices_shape):
params = np.random.rand(*params_shape).astype(np.float32)
indices = np.random.randint(
low=0,
high=params_shape[len(indices_shape) - 1],
size=indices_shape,
dtype=np.int32,
)
return params, indices
def _make_gather_fn(
    params, indices, axis, batch_dims, device_type, mirrored, compare_fn
):
    """Build a OneFlow lazy-mode train function computing a batched gather.

    The returned function adds its input blob to a zero-initialized trainable
    variable shaped like ``params``, gathers with ``axis``/``batch_dims``,
    runs one SGD step, and reports the variable's gradient to ``compare_fn``
    via ``flow.watch_diff``.
    """
    # Each call starts from a fresh session so repeated test cases don't
    # accumulate graph state.
    flow.clear_default_session()
    flow.config.enable_debug_mode(True)
    func_config = flow.FunctionConfig()
    func_config.default_data_type(flow.float)
    if mirrored:
        func_config.default_logical_view(flow.scope.mirrored_view())
    else:
        func_config.default_logical_view(flow.scope.consistent_view())
    def do_gather(x_blob, i_blob):
        with flow.scope.placement(device_type, "0:0"):
            # Route the computation through a variable so a gradient exists.
            x = flow.get_variable(
                "params",
                shape=params.shape,
                dtype=flow.float32,
                initializer=flow.constant_initializer(0),
            )
            # Variable starts at zero, so x numerically equals the input blob.
            x = x + x_blob
            y = flow.gather(x, i_blob, axis=axis, batch_dims=batch_dims)
            lr_scheduler = flow.optimizer.PiecewiseConstantScheduler([], [1e-3])
            flow.optimizer.SGD(lr_scheduler, momentum=0).minimize(y)
            # Deliver d(loss)/d(x) to the caller-supplied checker.
            flow.watch_diff(x, compare_fn)
        return y
    if mirrored:
        # Mirrored view: inputs/outputs are per-device lists (ListNumpy).
        @flow.global_function(type="train", function_config=func_config)
        def gather_fn(
            params_def: oft.ListNumpy.Placeholder(params.shape, dtype=flow.float32),
            indices_def: oft.ListNumpy.Placeholder(indices.shape, dtype=flow.int32),
        ):
            return do_gather(params_def, indices_def)
    else:
        # Consistent view: plain numpy placeholders.
        @flow.global_function(type="train", function_config=func_config)
        def gather_fn(
            params_def: oft.Numpy.Placeholder(params.shape, dtype=flow.float32),
            indices_def: oft.Numpy.Placeholder(indices.shape, dtype=flow.int32),
        ):
            return do_gather(params_def, indices_def)
    return gather_fn
def _compare_gather_with_tf(
    test_case,
    device_type,
    params_shape,
    indices_shape,
    axis,
    batch_dims,
    mirrored=False,
):
    """Check OneFlow's gather forward and backward against TensorFlow.

    Computes the TF reference (output + gradient w.r.t. params), builds the
    equivalent OneFlow train function, and asserts that the forward outputs
    match exactly and the gradients match within 1e-5.
    """
    params, indices = _random_inputs(params_shape, indices_shape)
    i = tf.constant(indices.astype(np.int32))
    with tf.GradientTape() as t:
        x = tf.Variable(params.astype(np.float32))
        # BUG FIX: the reference previously passed ``batch_dims=axis``; it must
        # mirror the OneFlow call's ``batch_dims=batch_dims``. Latent so far
        # because every shipped case uses axis == batch_dims.
        y = tf.gather(x, i, axis=axis, batch_dims=batch_dims)
    dy = t.gradient(y, x)
    if mirrored:
        # Mirrored blobs come back as per-device lists.
        def compare_dy(params_grad):
            test_case.assertTrue(
                np.allclose(dy, params_grad.numpy_list()[0], atol=1e-5, rtol=1e-5)
            )

    else:

        def compare_dy(params_grad):
            test_case.assertTrue(
                np.allclose(dy, params_grad.numpy(), atol=1e-5, rtol=1e-5)
            )

    gather_fn = _make_gather_fn(
        params, indices, axis, batch_dims, device_type, mirrored, compare_dy
    )
    if mirrored:
        of_y = gather_fn([params], [indices]).get().numpy_list()[0]
    else:
        of_y = gather_fn(params, indices).get().numpy()
    # Forward outputs must agree exactly (gather only moves values around).
    test_case.assertTrue(np.array_equal(y.numpy(), of_y))
@flow.unittest.skip_unless_1n1d()
class TestBatchGather(flow.unittest.TestCase):
    """Compare flow.gather with batch_dims against tf.gather over arg grids."""

    def _check(test_case, **grid):
        # Keyword-argument order is preserved (Python 3.7+), so the
        # OrderedDict hands GenArgList the columns in the same positional
        # order that _compare_gather_with_tf expects.
        arg_dict = OrderedDict(grid)
        for arg in GenArgList(arg_dict):
            _compare_gather_with_tf(test_case, *arg)

    def test_batch_gather(test_case):
        test_case._check(
            device_type=["gpu", "cpu"],
            params_shape=[(2, 8, 4)],
            indices_shape=[(2, 1)],
            axis=[1],
            batch_dims=[1],
        )

    def test_batch_gather_case_1(test_case):
        test_case._check(
            device_type=["gpu"],
            params_shape=[(20, 10, 200)],
            indices_shape=[(20, 10)],
            axis=[1],
            batch_dims=[1],
        )

    def test_batch_gather_case_2(test_case):
        test_case._check(
            device_type=["cpu", "gpu"],
            params_shape=[(20, 80, 30, 5)],
            indices_shape=[(20, 40)],
            axis=[1],
            batch_dims=[1],
            mirrored=[True],
        )

    def test_batch_gather_case_3(test_case):
        test_case._check(
            device_type=["cpu", "gpu"],
            params_shape=[(20, 80, 30, 5)],
            indices_shape=[(20, 80, 20)],
            axis=[2],
            batch_dims=[2],
            mirrored=[True],
        )
# Allow running this test module directly, outside a test-runner harness.
if __name__ == "__main__":
    unittest.main()
| [
"oneflow.typing.Numpy.Placeholder",
"oneflow.scope.consistent_view",
"oneflow.gather",
"oneflow.constant_initializer",
"oneflow.clear_default_session",
"oneflow.unittest.skip_unless_1n1d",
"oneflow.typing.ListNumpy.Placeholder",
"oneflow.scope.mirrored_view",
"oneflow.watch_diff",
"oneflow.optimiz... | [((826, 877), 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (870, 877), True, 'import tensorflow as tf\n'), ((4097, 4129), 'oneflow.unittest.skip_unless_1n1d', 'flow.unittest.skip_unless_1n1d', ([], {}), '()\n', (4127, 4129), True, 'import oneflow as flow\n'), ((899, 950), 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), '(gpu, True)\n', (939, 950), True, 'import tensorflow as tf\n'), ((1351, 1379), 'oneflow.clear_default_session', 'flow.clear_default_session', ([], {}), '()\n', (1377, 1379), True, 'import oneflow as flow\n'), ((1384, 1419), 'oneflow.config.enable_debug_mode', 'flow.config.enable_debug_mode', (['(True)'], {}), '(True)\n', (1413, 1419), True, 'import oneflow as flow\n'), ((1438, 1459), 'oneflow.FunctionConfig', 'flow.FunctionConfig', ([], {}), '()\n', (1457, 1459), True, 'import oneflow as flow\n'), ((5825, 5840), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5838, 5840), False, 'import unittest\n'), ((2227, 2257), 'oneflow.watch_diff', 'flow.watch_diff', (['x', 'compare_fn'], {}), '(x, compare_fn)\n', (2242, 2257), True, 'import oneflow as flow\n'), ((2303, 2366), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2323, 2366), True, 'import oneflow as flow\n'), ((2646, 2709), 'oneflow.global_function', 'flow.global_function', ([], {'type': '"""train"""', 'function_config': 'func_config'}), "(type='train', function_config=func_config)\n", (2666, 2709), True, 'import oneflow as flow\n'), ((3252, 3269), 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), '()\n', (3267, 3269), True, 'import tensorflow as tf\n'), ((3339, 3382), 'tensorflow.gather', 'tf.gather', (['x', 'i'], {'axis': 'axis', 'batch_dims': 'axis'}), '(x, i, axis=axis, 
batch_dims=axis)\n', (3348, 3382), True, 'import tensorflow as tf\n'), ((4234, 4247), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4245, 4247), False, 'from collections import OrderedDict\n'), ((4476, 4496), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4486, 4496), False, 'from test_util import GenArgList\n'), ((4616, 4629), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (4627, 4629), False, 'from collections import OrderedDict\n'), ((4857, 4877), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (4867, 4877), False, 'from test_util import GenArgList\n'), ((4997, 5010), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5008, 5010), False, 'from collections import OrderedDict\n'), ((5285, 5305), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5295, 5305), False, 'from test_util import GenArgList\n'), ((5425, 5438), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5436, 5438), False, 'from collections import OrderedDict\n'), ((5717, 5737), 'test_util.GenArgList', 'GenArgList', (['arg_dict'], {}), '(arg_dict)\n', (5727, 5737), False, 'from test_util import GenArgList\n'), ((1015, 1044), 'numpy.random.rand', 'np.random.rand', (['*params_shape'], {}), '(*params_shape)\n', (1029, 1044), True, 'import numpy as np\n'), ((1564, 1590), 'oneflow.scope.mirrored_view', 'flow.scope.mirrored_view', ([], {}), '()\n', (1588, 1590), True, 'import oneflow as flow\n'), ((1643, 1671), 'oneflow.scope.consistent_view', 'flow.scope.consistent_view', ([], {}), '()\n', (1669, 1671), True, 'import oneflow as flow\n'), ((1722, 1762), 'oneflow.scope.placement', 'flow.scope.placement', (['device_type', '"""0:0"""'], {}), "(device_type, '0:0')\n", (1742, 1762), True, 'import oneflow as flow\n'), ((2012, 2068), 'oneflow.gather', 'flow.gather', (['x', 'i_blob'], {'axis': 'axis', 'batch_dims': 'batch_dims'}), '(x, i_blob, axis=axis, batch_dims=batch_dims)\n', (2023, 
2068), True, 'import oneflow as flow\n'), ((2096, 2150), 'oneflow.optimizer.PiecewiseConstantScheduler', 'flow.optimizer.PiecewiseConstantScheduler', (['[]', '[0.001]'], {}), '([], [0.001])\n', (2137, 2150), True, 'import oneflow as flow\n'), ((2414, 2473), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['params.shape'], {'dtype': 'flow.float32'}), '(params.shape, dtype=flow.float32)\n', (2439, 2473), True, 'import oneflow.typing as oft\n'), ((2500, 2558), 'oneflow.typing.ListNumpy.Placeholder', 'oft.ListNumpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (2525, 2558), True, 'import oneflow.typing as oft\n'), ((2757, 2812), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['params.shape'], {'dtype': 'flow.float32'}), '(params.shape, dtype=flow.float32)\n', (2778, 2812), True, 'import oneflow.typing as oft\n'), ((2839, 2893), 'oneflow.typing.Numpy.Placeholder', 'oft.Numpy.Placeholder', (['indices.shape'], {'dtype': 'flow.int32'}), '(indices.shape, dtype=flow.int32)\n', (2860, 2893), True, 'import oneflow.typing as oft\n'), ((1925, 1953), 'oneflow.constant_initializer', 'flow.constant_initializer', (['(0)'], {}), '(0)\n', (1950, 1953), True, 'import oneflow as flow\n'), ((2162, 2206), 'oneflow.optimizer.SGD', 'flow.optimizer.SGD', (['lr_scheduler'], {'momentum': '(0)'}), '(lr_scheduler, momentum=0)\n', (2180, 2206), True, 'import oneflow as flow\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.